repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
googleapis/googleapis-gen | google/devtools/build/v1/devtools-build-v1-py/noxfile.py | 1 | 3581 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys
import nox # type: ignore
# Directory containing this noxfile; used to locate sibling files.
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()

# Constraints file holding the lower-bound pins verified by lower-bound-checker.
LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"

# Ask setup.py for the distribution name.  ``setup.py --name`` prints the name
# followed by a newline, and check_output() captures that newline too; strip it
# so PACKAGE_NAME can be passed verbatim as a --package-name argument below.
PACKAGE_NAME = subprocess.check_output(
    [sys.executable, "setup.py", "--name"], encoding="utf-8"
).strip()
# Sessions run by default when `nox` is invoked without -s/--session.
# NOTE(review): nox normally reads the default list from
# `nox.options.sessions`; confirm whether plain `nox.sessions` is actually
# honoured by the nox version in use.
nox.sessions = [
    "unit",
    "cover",
    "mypy",
    # Fix: the comma after this entry was missing, so Python's implicit
    # string-literal concatenation silently produced a single bogus entry
    # "check_lower_boundsdocs" and dropped both real sessions.
    "check_lower_bounds",
    # exclude update_lower_bounds from default
    "docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
    """Run the unit test suite on every supported interpreter."""
    # Test-only dependencies first, then the package itself (editable).
    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
    session.install('-e', '.')
    # Positional args narrow the run to a subset of tests/unit.
    target = os.path.join('tests', 'unit', ''.join(session.posargs))
    session.run(
        'py.test',
        '--quiet',
        '--cov=google/devtools/build_v1/',
        '--cov-config=.coveragerc',
        '--cov-report=term',
        '--cov-report=html',
        target,
    )
@nox.session(python='3.7')
def cover(session):
    """Aggregate coverage from the unit-test runs and enforce 100%.

    Only unit-test runs contribute (system tests are excluded); the
    collected coverage data is erased afterwards.
    """
    session.install("coverage", "pytest-cov")
    for args in (("report", "--show-missing", "--fail-under=100"),
                 ("erase",)):
        session.run("coverage", *args)
@nox.session(python=['3.6', '3.7'])
def mypy(session):
    """Type-check the installed package with mypy."""
    session.install('mypy', 'types-pkg_resources')
    session.install('.')
    session.run('mypy', '--explicit-package-bases', 'google')
@nox.session
def update_lower_bounds(session):
    """Rewrite constraints.txt so its pins match the lower bounds in setup.py."""
    session.install('google-cloud-testutils')
    session.install('.')
    checker_args = (
        'lower-bound-checker',
        'update',
        '--package-name', PACKAGE_NAME,
        '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE),
    )
    session.run(*checker_args)
@nox.session
def check_lower_bounds(session):
    """Verify that the lower bounds in setup.py are reflected in constraints.txt."""
    session.install('google-cloud-testutils')
    session.install('.')
    checker_args = (
        'lower-bound-checker',
        'check',
        '--package-name', PACKAGE_NAME,
        '--constraints-file', str(LOWER_BOUND_CONSTRAINTS_FILE),
    )
    session.run(*checker_args)
@nox.session(python='3.6')
def docs(session):
    """Build this library's HTML documentation with Sphinx."""
    session.install("-e", ".")
    session.install("sphinx<3.0.0", "alabaster", "recommonmark")

    build_dir = os.path.join("docs", "_build")
    # Start from a clean tree so stale output never leaks into the docs.
    shutil.rmtree(build_dir, ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b", "html",
        "-d", os.path.join(build_dir, "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join(build_dir, "html", ""),
    )
| apache-2.0 |
chylli/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/message.py | 261 | 9669 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): We should just make these methods all "pure-virtual" and move
# all implementation out, into reflection.py for now.
"""Contains an abstract base class for protocol messages."""
__author__ = 'robinson@google.com (Will Robinson)'
# Base class for all protocol-buffer errors raised by this module.
class Error(Exception): pass
# Raised when serialized data cannot be parsed into a message.
class DecodeError(Error): pass
# Raised when a message cannot be serialized (see SerializeToString).
class EncodeError(Error): pass
class Message(object):
    """Abstract base class for protocol messages.

    Protocol message classes are almost always generated by the protocol
    compiler; the generated types subclass Message and implement the
    methods declared below.  Generated classes additionally expose an
    ``Extensions`` attribute (supporting ``__getitem__``/``__setitem__``)
    and a static ``RegisterExtension(extension_field)`` method.
    """

    # Subclasses carry their own storage; the base keeps no instance dict.
    __slots__ = []

    # Populated by generated subclasses with the message's descriptor.
    DESCRIPTOR = None

    def __deepcopy__(self, memo=None):
        # A deep copy is simply a fresh instance with our content merged in.
        duplicate = type(self)()
        duplicate.MergeFrom(self)
        return duplicate

    def __eq__(self, other_msg):
        raise NotImplementedError

    def __ne__(self, other_msg):
        # Delegate to __eq__; writing ``self != other_msg`` here would
        # recurse infinitely.
        return not self == other_msg

    def __hash__(self):
        raise TypeError('unhashable object')

    def __str__(self):
        raise NotImplementedError

    def __unicode__(self):
        raise NotImplementedError

    def MergeFrom(self, other_msg):
        """Merge the contents of *other_msg* into this message.

        Singular fields that are set in *other_msg* overwrite the
        corresponding fields here; repeated fields are appended; singular
        sub-messages and groups are merged recursively.

        Args:
          other_msg: Message to merge into the current message.
        """
        raise NotImplementedError

    def CopyFrom(self, other_msg):
        """Replace this message's content with that of *other_msg*.

        Clears the current message, then merges *other_msg* via MergeFrom.

        Args:
          other_msg: Message to copy into the current one.
        """
        if self is other_msg:
            # Copying a message onto itself is a no-op.
            return
        self.Clear()
        self.MergeFrom(other_msg)

    def Clear(self):
        """Clear all data that was set in the message."""
        raise NotImplementedError

    def SetInParent(self):
        """Mark this message as present in its parent, even while empty.

        Presence is normally recorded automatically when a field of a
        sub-message is assigned; use this only to make an empty
        sub-message present.
        """
        raise NotImplementedError

    def IsInitialized(self):
        """Return True if all required fields of the message are set."""
        raise NotImplementedError

    def MergeFromString(self, serialized):
        """Merge serialized protocol buffer data into this message.

        For a field already present in this message: repeated fields are
        appended to, scalars are overwritten, and non-repeated composites
        are merged recursively.

        Args:
          serialized: Any object that allows us to call buffer(serialized)
            to access a string of bytes using the buffer interface.

        Returns:
          The number of bytes read from *serialized*.  For non-group
          messages this is always len(serialized); for groups it is
          generally less, because reading stops at an END_GROUP tag
          (whose own bytes are not counted).
        """
        raise NotImplementedError

    def ParseFromString(self, serialized):
        """Like MergeFromString(), except we clear the object first."""
        self.Clear()
        self.MergeFromString(serialized)

    def SerializeToString(self):
        """Serialize the protocol message to a binary string.

        Returns:
          A binary string representation of the message, provided all
          required fields are set (i.e. the message is initialized).

        Raises:
          message.EncodeError: if the message isn't initialized.
        """
        raise NotImplementedError

    def SerializePartialToString(self):
        """Serialize to a binary string without checking initialization.

        Returns:
          A string representation of the partial message.
        """
        raise NotImplementedError

    def ListFields(self):
        """Return (FieldDescriptor, value) pairs for all non-empty fields.

        A singular field is non-empty if HasField() would return True; a
        repeated field is non-empty if it contains at least one element.
        Pairs are ordered by field number.
        """
        raise NotImplementedError

    def HasField(self, field_name):
        """Return whether *field_name* is set; raises ValueError if the
        name is not defined in the message descriptor."""
        raise NotImplementedError

    def ClearField(self, field_name):
        raise NotImplementedError

    def HasExtension(self, extension_handle):
        raise NotImplementedError

    def ClearExtension(self, extension_handle):
        raise NotImplementedError

    def ByteSize(self):
        """Return the serialized size of this message, calling ByteSize()
        recursively on all contained messages."""
        raise NotImplementedError

    def _SetListener(self, message_listener):
        """Internal method used by the protocol message implementation.

        Registers *message_listener* (or unregisters, when None) to be
        called on certain state transitions, so that parents and ancestors
        can maintain "has" bits and byte-size-dirty bits whenever a child
        or descendant object is modified.  The listener must implement the
        MessageListener interface in internal/message_listener.py; any
        previously registered listener is discarded.  Clients should not
        call this directly.
        """
        raise NotImplementedError
| bsd-3-clause |
rob356/SickRage | lib/github/tests/Authentication.py | 39 | 3397 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import github
class Authentication(Framework.BasicTestCase):
    """Exercises every credential style accepted by the github.Github entry point."""

    def testNoAuthentication(self):
        gh = github.Github()
        self.assertEqual(gh.get_user("jacquev6").name, "Vincent Jacques")

    def testBasicAuthentication(self):
        gh = github.Github(self.login, self.password)
        self.assertEqual(gh.get_user("jacquev6").name, "Vincent Jacques")

    def testOAuthAuthentication(self):
        gh = github.Github(self.oauth_token)
        self.assertEqual(gh.get_user("jacquev6").name, "Vincent Jacques")

    # Warning: no real secret key is available, so the requests for this test are forged
    def testSecretKeyAuthentication(self):
        gh = github.Github(client_id=self.client_id, client_secret=self.client_secret)
        self.assertListKeyEqual(gh.get_organization("BeaverSoftware").get_repos("public"), lambda r: r.name, ["FatherBeaver", "PyGithub"])

    def testUserAgent(self):
        gh = github.Github(user_agent="PyGithubTester")
        self.assertEqual(gh.get_user("jacquev6").name, "Vincent Jacques")

    def testAuthorizationHeaderWithLogin(self):
        # See special case in Framework.fixAuthorizationHeader
        gh = github.Github("fake_login", "fake_password")
        try:
            gh.get_user().name
        except github.GithubException:
            pass

    def testAuthorizationHeaderWithToken(self):
        # See special case in Framework.fixAuthorizationHeader
        gh = github.Github("ZmFrZV9sb2dpbjpmYWtlX3Bhc3N3b3Jk")
        try:
            gh.get_user().name
        except github.GithubException:
            pass
| gpl-3.0 |
CasparLi/calibre | src/calibre/ebooks/mobi/reader/mobi6.py | 11 | 36864 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (absolute_import, print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import shutil, os, re, struct, textwrap, cStringIO
from lxml import html, etree
from calibre import (xml_entity_to_unicode, entity_to_unicode)
from calibre.utils.cleantext import clean_ascii_chars
from calibre.ebooks import DRMError, unit_convert
from calibre.ebooks.chardet import ENCODING_PATS
from calibre.ebooks.mobi import MobiError
from calibre.ebooks.mobi.huffcdic import HuffReader
from calibre.ebooks.compression.palmdoc import decompress_doc
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.opf2 import OPFCreator, OPF
from calibre.ebooks.metadata.toc import TOC
from calibre.ebooks.mobi.reader.headers import BookHeader
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.imghdr import what
class TopazError(ValueError):
    """Raised when the input is an Amazon Topaz book, which this reader cannot process."""
    pass
class MobiReader(object):
    # Matches an <mbp:pagebreak> tag (opening, closing, or self-closing),
    # together with any immediately following duplicate pagebreak tags, so
    # a run of them collapses into a single match.
    PAGE_BREAK_PAT = re.compile(
        r'<\s*/{0,1}\s*mbp:pagebreak((?:\s+[^/>]*){0,1})/{0,1}\s*>\s*(?:<\s*/{0,1}\s*mbp:pagebreak\s*/{0,1}\s*>)*',
        re.IGNORECASE)
    # <img> attributes that may carry a MOBI record index for the image data.
    IMAGE_ATTRS = ('lowrecindex', 'recindex', 'hirecindex')
    def __init__(self, filename_or_stream, log, user_encoding=None, debug=None,
            try_extra_data_fix=False):
        """Parse the PalmDB container and MOBI header of a book.

        Args:
            filename_or_stream: path to a MOBI file, or a binary stream
                (rewound to the start before reading).
            log: calibre logger used for debug/warning output.
            user_encoding: optional codec overriding the one declared in
                the book header.
            debug: when not None, downstream code emits extra debug output.
            try_extra_data_fix: passed through to BookHeader to work around
                books with bad extra-data flags.

        Raises:
            TopazError: if the data starts with the Topaz magic 'TPZ'.
            MobiError: if the PalmDB type is neither BOOKMOBI nor TEXTREAD.
        """
        self.log = log
        self.debug = debug
        self.embedded_mi = None
        # Default stylesheet emitted alongside the extracted HTML; the
        # per-tag rules collected in tag_css_rules are appended to it.
        self.base_css_rules = textwrap.dedent('''
                body { text-align: justify }

                blockquote { margin: 0em 0em 0em 2em; }

                p { margin: 0em; text-indent: 1.5em }

                .bold { font-weight: bold }

                .italic { font-style: italic }

                .underline { text-decoration: underline }

                .mbp_pagebreak {
                    page-break-after: always; margin: 0; display: block
                }
                ''')
        self.tag_css_rules = {}
        self.left_margins = {}
        self.text_indents = {}

        if hasattr(filename_or_stream, 'read'):
            stream = filename_or_stream
            stream.seek(0)
        else:
            stream = open(filename_or_stream, 'rb')

        raw = stream.read()
        if raw.startswith('TPZ'):
            raise TopazError(_('This is an Amazon Topaz book. It cannot be processed.'))
        # First 72 bytes are the PalmDB header; bytes 0-31 hold the
        # NUL-padded database name, bytes 0x3C-0x43 the type/creator ident.
        self.header = raw[0:72]
        self.name = self.header[:32].replace('\x00', '')
        self.num_sections, = struct.unpack('>H', raw[76:78])
        self.ident = self.header[0x3C:0x3C + 8].upper()
        if self.ident not in ['BOOKMOBI', 'TEXTREAD']:
            raise MobiError('Unknown book type: %s' % repr(self.ident))
        self.sections = []
        self.section_headers = []
        # Each PalmDB record entry is 8 bytes: 4-byte offset, 1-byte flags,
        # 3-byte unique id.
        for i in range(self.num_sections):
            offset, a1, a2, a3, a4 = struct.unpack('>LBBBB', raw[78 + i * 8:78 + i * 8 + 8])
            flags, val = a1, a2 << 16 | a3 << 8 | a4
            self.section_headers.append((offset, flags, val))

        def section(section_number):
            # A record runs from its offset to the next record's offset
            # (or to end-of-file for the last record).
            if section_number == self.num_sections - 1:
                end_off = len(raw)
            else:
                end_off = self.section_headers[section_number + 1][0]
            off = self.section_headers[section_number][0]
            return raw[off:end_off]

        for i in range(self.num_sections):
            self.sections.append((section(i), self.section_headers[i]))

        self.book_header = bh = BookHeader(self.sections[0][0], self.ident,
            user_encoding, self.log, try_extra_data_fix=try_extra_data_fix)
        self.name = self.name.decode(self.book_header.codec, 'replace')
        self.kf8_type = None
        k8i = getattr(self.book_header.exth, 'kf8_header', None)

        # Ancient PRC files from Baen can have random values for
        # mobi_version, so be conservative
        if (self.book_header.mobi_version == 8 and hasattr(self.book_header,
            'skelidx')):
            self.kf8_type = 'standalone'
        elif k8i is not None:  # Check for joint mobi 6 and kf 8 file
            try:
                raw = self.sections[k8i-1][0]
            except:
                raw = None
            if raw == b'BOUNDARY':
                try:
                    self.book_header = BookHeader(self.sections[k8i][0],
                            self.ident, user_encoding, self.log)
                    self.book_header.kf8_first_image_index = self.book_header.first_image_index + k8i
                    self.book_header.mobi6_records = bh.records

                    # Need the first_image_index from the mobi 6 header as well
                    for x in ('first_image_index',):
                        setattr(self.book_header, x, getattr(bh, x))

                    # We need to do this because the MOBI 6 text extract code
                    # does not know anything about the kf8 offset
                    if hasattr(self.book_header, 'huff_offset'):
                        self.book_header.huff_offset += k8i

                    self.kf8_type = 'joint'
                    self.kf8_boundary = k8i-1
                except:
                    # Fall back to treating the file as plain MOBI 6.
                    self.book_header = bh
def check_for_drm(self):
if self.book_header.encryption_type != 0:
try:
name = self.book_header.exth.mi.title
except:
name = self.name
if not name:
name = self.name
raise DRMError(name)
    def extract_content(self, output_dir, parse_cache):
        """Decode the book text, repair the markup, and write the output tree.

        Writes index.html, styles.css, the extracted images, and an
        OPF/NCX pair into *output_dir*; the parsed lxml root is stored in
        *parse_cache* keyed by the html file path.

        Raises:
            DRMError: if the book is encrypted (via check_for_drm).
        """
        output_dir = os.path.abspath(output_dir)
        self.check_for_drm()
        processed_records = self.extract_text()
        if self.debug is not None:
            parse_cache['calibre_raw_mobi_markup'] = self.mobi_html
        self.add_anchors()
        self.processed_html = self.processed_html.decode(self.book_header.codec,
            'ignore')
        # Repair common MOBI malformations: doubled closing brackets and
        # closing tags missing their '>' before the next tag.
        self.processed_html = self.processed_html.replace('</</', '</')
        self.processed_html = re.sub(r'</([a-zA-Z]+)<', r'</\1><',
                self.processed_html)
        self.processed_html = self.processed_html.replace(u'\ufeff', '')
        # Remove tags of the form <xyz: ...> as they can cause issues further
        # along the pipeline
        self.processed_html = re.sub(r'</{0,1}[a-zA-Z]+:\s+[^>]*>', '',
                self.processed_html)
        for pat in ENCODING_PATS:
            self.processed_html = pat.sub('', self.processed_html)
        self.processed_html = re.sub(r'&(\S+?);', xml_entity_to_unicode,
            self.processed_html)
        self.extract_images(processed_records, output_dir)
        self.replace_page_breaks()
        self.cleanup_html()

        self.log.debug('Parsing HTML...')
        self.processed_html = clean_ascii_chars(self.processed_html)
        try:
            root = html.fromstring(self.processed_html)
            if len(root.xpath('//html')) > 5:
                # Suspiciously many <html> roots: retry with control chars
                # stripped, which often un-confuses the parser.
                root = html.fromstring(self.processed_html.replace('\x0c',
                    '').replace('\x14', ''))
        except:
            self.log.warning('MOBI markup appears to contain random bytes. Stripping.')
            self.processed_html = self.remove_random_bytes(self.processed_html)
            root = html.fromstring(self.processed_html)
        if root.xpath('descendant::p/descendant::p'):
            # Nested <p> elements mean lxml mis-parsed the soup; retry with
            # the more forgiving BeautifulSoup-based parser.
            from calibre.utils.soupparser import fromstring
            self.log.warning('Malformed markup, parsing using BeautifulSoup')
            try:
                root = fromstring(self.processed_html)
            except Exception:
                self.log.warning('MOBI markup appears to contain random bytes. Stripping.')
                self.processed_html = self.remove_random_bytes(self.processed_html)
                root = fromstring(self.processed_html)
            if len(root.xpath('body/descendant::*')) < 1:
                # There are probably stray </html>s in the markup
                self.processed_html = self.processed_html.replace('</html>',
                        '')
                root = fromstring(self.processed_html)

        if root.tag != 'html':
            self.log.warn('File does not have opening <html> tag')
            nroot = html.fromstring('<html><head></head><body></body></html>')
            bod = nroot.find('body')
            for child in list(root):
                child.getparent().remove(child)
                bod.append(child)
            root = nroot

        htmls = list(root.xpath('//html'))
        if len(htmls) > 1:
            self.log.warn('Markup contains multiple <html> tags, merging.')
            # Merge all <head> and <body> sections
            for h in htmls:
                p = h.getparent()
                if hasattr(p, 'remove'):
                    p.remove(h)
            bodies, heads = root.xpath('//body'), root.xpath('//head')
            for x in root:
                root.remove(x)
            head, body = map(root.makeelement, ('head', 'body'))
            for h in heads:
                for x in h:
                    h.remove(x)
                    head.append(x)
            for b in bodies:
                for x in b:
                    b.remove(x)
                    body.append(x)
            root.append(head), root.append(body)

        # Scripts cannot do anything useful in the converted book.
        for x in root.xpath('//script'):
            x.getparent().remove(x)

        head = root.xpath('//head')
        if head:
            head = head[0]
        else:
            head = root.makeelement('head', {})
            root.insert(0, head)
        head.text = '\n\t'
        link = head.makeelement('link', {'type':'text/css',
            'href':'styles.css', 'rel':'stylesheet'})
        head.insert(0, link)
        link.tail = '\n\t'
        title = head.xpath('descendant::title')
        m = head.makeelement('meta', {'http-equiv':'Content-Type',
            'content':'text/html; charset=utf-8'})
        head.insert(0, m)
        if not title:
            # Synthesise a <title> from the book header when missing.
            title = head.makeelement('title', {})
            try:
                title.text = self.book_header.title
            except ValueError:
                title.text = clean_ascii_chars(self.book_header.title)
            title.tail = '\n\t'
            head.insert(0, title)
            head.text = '\n\t'

        self.upshift_markup(root)
        guides = root.xpath('//guide')
        guide = guides[0] if guides else None
        metadata_elems = root.xpath('//metadata')
        if metadata_elems and self.book_header.exth is None:
            self.read_embedded_metadata(root, metadata_elems[0], guide)
        for elem in guides + metadata_elems:
            elem.getparent().remove(elem)
        htmlfile = os.path.join(output_dir, 'index.html')
        try:
            # Guide hrefs are document-internal anchors; prefix them with
            # the output file name so they resolve after extraction.
            for ref in guide.xpath('descendant::reference'):
                if 'href' in ref.attrib:
                    ref.attrib['href'] = os.path.basename(htmlfile) + ref.attrib['href']
        except AttributeError:
            # guide may be None.
            pass
        parse_cache[htmlfile] = root
        self.htmlfile = htmlfile
        ncx = cStringIO.StringIO()
        opf, ncx_manifest_entry = self.create_opf(htmlfile, guide, root)
        self.created_opf_path = os.path.splitext(htmlfile)[0] + '.opf'
        opf.render(open(self.created_opf_path, 'wb'), ncx,
            ncx_manifest_entry=ncx_manifest_entry)
        ncx = ncx.getvalue()
        if ncx:
            ncx_path = os.path.join(os.path.dirname(htmlfile), 'toc.ncx')
            open(ncx_path, 'wb').write(ncx)

        # Emit the base stylesheet plus all per-tag rules collected during
        # upshift_markup().
        with open('styles.css', 'wb') as s:
            s.write(self.base_css_rules + '\n\n')
            for cls, rule in self.tag_css_rules.items():
                if isinstance(rule, unicode):
                    rule = rule.encode('utf-8')
                s.write('.%s { %s }\n\n' % (cls, rule))

        if self.book_header.exth is not None or self.embedded_mi is not None:
            self.log.debug('Creating OPF...')
            ncx = cStringIO.StringIO()
            opf, ncx_manifest_entry = self.create_opf(htmlfile, guide, root)
            opf.render(open(os.path.splitext(htmlfile)[0] + '.opf', 'wb'), ncx,
                ncx_manifest_entry)
            ncx = ncx.getvalue()
            if ncx:
                open(os.path.splitext(htmlfile)[0] + '.ncx', 'wb').write(ncx)
    def read_embedded_metadata(self, root, elem, guide):
        """Read book metadata from an embedded <metadata> element.

        Wraps *elem* in an OPF <package> envelope and parses it into
        self.embedded_mi.  If *guide* references a cover anchor, the first
        <img> at or after that anchor becomes the cover and is removed
        from the document tree.
        """
        raw = '<?xml version="1.0" encoding="utf-8" ?>\n<package>' + \
                html.tostring(elem, encoding='utf-8') + '</package>'
        stream = cStringIO.StringIO(raw)
        opf = OPF(stream)
        self.embedded_mi = opf.to_book_metadata()
        if guide is not None:
            for ref in guide.xpath('descendant::reference'):
                if 'cover' in ref.get('type', '').lower():
                    href = ref.get('href', '')
                    if href.startswith('#'):
                        href = href[1:]
                    anchors = root.xpath('//*[@id="%s"]' % href)
                    if anchors:
                        cpos = anchors[0]
                        reached = False
                        # Walk the document in order; the first <img> at or
                        # after the cover anchor is the cover image.
                        for elem in root.iter():
                            if elem is cpos:
                                reached = True
                            if reached and elem.tag == 'img':
                                cover = elem.get('src', None)
                                self.embedded_mi.cover = cover
                                elem.getparent().remove(elem)
                                break
                        break
def cleanup_html(self):
self.log.debug('Cleaning up HTML...')
self.processed_html = re.sub(r'<div height="0(pt|px|ex|em|%){0,1}"></div>', '', self.processed_html)
if self.book_header.ancient and '<html' not in self.mobi_html[:300].lower():
self.processed_html = '<html><p>' + self.processed_html.replace('\n\n', '<p>') + '</html>'
self.processed_html = self.processed_html.replace('\r\n', '\n')
self.processed_html = self.processed_html.replace('> <', '>\n<')
self.processed_html = self.processed_html.replace('<mbp: ', '<mbp:')
self.processed_html = re.sub(r'<\?xml[^>]*>', '', self.processed_html)
self.processed_html = re.sub(r'<\s*(/?)\s*o:p[^>]*>', r'', self.processed_html)
# Swap inline and block level elements, and order block level elements according to priority
# - lxml and beautifulsoup expect/assume a specific order based on xhtml spec
self.processed_html = re.sub(
r'(?i)(?P<styletags>(<(h\d+|i|b|u|em|small|big|strong|tt)>\s*){1,})(?P<para><p[^>]*>)', '\g<para>'+'\g<styletags>', self.processed_html)
self.processed_html = re.sub(
r'(?i)(?P<para></p[^>]*>)\s*(?P<styletags>(</(h\d+|i|b|u|em|small|big|strong|tt)>\s*){1,})', '\g<styletags>'+'\g<para>', self.processed_html)
self.processed_html = re.sub(
r'(?i)(?P<blockquote>(</(blockquote|div)[^>]*>\s*){1,})(?P<para></p[^>]*>)', '\g<para>'+'\g<blockquote>', self.processed_html)
self.processed_html = re.sub(
r'(?i)(?P<para><p[^>]*>)\s*(?P<blockquote>(<(blockquote|div)[^>]*>\s*){1,})', '\g<blockquote>'+'\g<para>', self.processed_html)
bods = htmls = 0
for x in re.finditer(ur'</body>|</html>', self.processed_html):
if x == '</body>':
bods +=1
else:
htmls += 1
if bods > 1 and htmls > 1:
break
if bods > 1:
self.processed_html = self.processed_html.replace('</body>', '')
if htmls > 1:
self.processed_html = self.processed_html.replace('</html>', '')
    def remove_random_bytes(self, html):
        # Strip control characters and stray high bytes that MOBI files
        # sometimes contain and that break lxml's parser.
        return re.sub('\x14|\x15|\x19|\x1c|\x1d|\xef|\x12|\x13|\xec|\x08|\x01|\x02|\x03|\x04|\x05|\x06|\x07',
                '', html)
def ensure_unit(self, raw, unit='px'):
if re.search(r'\d+$', raw) is not None:
raw += unit
return raw
    def upshift_markup(self, root):
        """Convert MOBI presentational markup on *root* into CSS classes.

        Walks every element once, translating height/width/align/color
        attributes and legacy tags (<i>, <b>, <u>, <font>, <hr>) into CSS
        rules accumulated in self.tag_css_rules, rewriting image recindex
        attributes into image file paths, and relocating filepos anchors
        onto the block elements that follow them.
        """
        self.log.debug('Converting style information to CSS...')
        # HTML <font size="..."> keyword values mapped to numeric sizes.
        size_map = {
            'xx-small': '0.5',
            'x-small': '1',
            'small': '2',
            'medium': '3',
            'large': '4',
            'x-large': '5',
            'xx-large': '6',
            }

        def barename(x):
            # Strip any namespace prefix ("ns:tag" -> "tag").
            return x.rpartition(':')[-1]

        mobi_version = self.book_header.mobi_version
        for x in root.xpath('//ncx'):
            x.getparent().remove(x)
        svg_tags = []
        forwardable_anchors = []
        pagebreak_anchors = []
        BLOCK_TAGS = {'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'div', 'p'}
        for i, tag in enumerate(root.iter(etree.Element)):
            tag.attrib.pop('xmlns', '')
            for x in tag.attrib:
                if ':' in x:
                    del tag.attrib[x]
            if tag.tag and barename(tag.tag) == 'svg':
                svg_tags.append(tag)
            if tag.tag and barename(tag.tag.lower()) in \
                ('country-region', 'place', 'placetype', 'placename',
                    'state', 'city', 'street', 'address', 'content', 'form'):
                # Microsoft smart-tag and similar junk elements: neutralise
                # them into plain div/span and drop all their attributes.
                tag.tag = 'div' if tag.tag in ('content', 'form') else 'span'
                for key in tag.attrib.keys():
                    tag.attrib.pop(key)
                continue
            styles, attrib = [], tag.attrib
            if 'style' in attrib:
                style = attrib.pop('style').strip()
                if style:
                    styles.append(style)
            if 'height' in attrib:
                height = attrib.pop('height').strip()
                if height and '<' not in height and '>' not in height and \
                    re.search(r'\d+', height):
                    if tag.tag in ('table', 'td', 'tr'):
                        pass
                    elif tag.tag == 'img':
                        tag.set('height', height)
                    else:
                        if tag.tag == 'div' and not tag.text and \
                                (not tag.tail or not tag.tail.strip()) and \
                                not len(list(tag.iterdescendants())):
                            # Paragraph spacer
                            # Insert nbsp so that the element is never
                            # discarded by a renderer
                            tag.text = u'\u00a0'  # nbsp
                            styles.append('height: %s' %
                                    self.ensure_unit(height))
                        else:
                            styles.append('margin-top: %s' % self.ensure_unit(height))
            if 'width' in attrib:
                width = attrib.pop('width').strip()
                if width and re.search(r'\d+', width):
                    if tag.tag in ('table', 'td', 'tr'):
                        pass
                    elif tag.tag == 'img':
                        tag.set('width', width)
                    else:
                        # MOBI uses width as a text indent on ordinary tags.
                        ewidth = self.ensure_unit(width)
                        styles.append('text-indent: %s' % ewidth)
                        try:
                            ewidth_val = unit_convert(ewidth, 12, 500, 166)
                            self.text_indents[tag] = ewidth_val
                        except:
                            pass
                        if width.startswith('-'):
                            styles.append('margin-left: %s' % self.ensure_unit(width[1:]))
                            try:
                                ewidth_val = unit_convert(ewidth[1:], 12, 500, 166)
                                self.left_margins[tag] = ewidth_val
                            except:
                                pass
            if 'align' in attrib:
                align = attrib.pop('align').strip()
                if align:
                    align = align.lower()
                    if align == 'baseline':
                        styles.append('vertical-align: '+align)
                    else:
                        styles.append('text-align: %s' % align)
            if tag.tag == 'hr':
                if mobi_version == 1:
                    # In MOBI 1, <hr> marks a page break.
                    tag.tag = 'div'
                    styles.append('page-break-before: always')
                    styles.append('display: block')
                    styles.append('margin: 0')
            elif tag.tag == 'i':
                tag.tag = 'span'
                tag.attrib['class'] = 'italic'
            elif tag.tag == 'u':
                tag.tag = 'span'
                tag.attrib['class'] = 'underline'
            elif tag.tag == 'b':
                tag.tag = 'span'
                tag.attrib['class'] = 'bold'
            elif tag.tag == 'font':
                sz = tag.get('size', '').lower()
                try:
                    float(sz)
                except ValueError:
                    # Keyword size ("small", "x-large", ...): map to numeric.
                    if sz in size_map.keys():
                        attrib['size'] = size_map[sz]
            elif tag.tag == 'img':
                recindex = None
                for attr in self.IMAGE_ATTRS:
                    recindex = attrib.pop(attr, None) or recindex
                if recindex is not None:
                    try:
                        recindex = '%05d'%int(recindex)
                    except:
                        pass
                    attrib['src'] = 'images/%s.jpg' % recindex
                for attr in ('width', 'height'):
                    if attr in attrib:
                        val = attrib[attr]
                        if val.lower().endswith('em'):
                            try:
                                nval = float(val[:-2])
                                nval *= 16 * (168.451/72)  # Assume this was set using the Kindle profile
                                attrib[attr] = "%dpx"%int(nval)
                            except:
                                del attrib[attr]
                        elif val.lower().endswith('%'):
                            del attrib[attr]
            elif tag.tag == 'pre':
                if not tag.text:
                    tag.tag = 'div'

            if (attrib.get('class', None) == 'mbp_pagebreak' and tag.tag ==
                    'div' and 'filepos-id' in attrib):
                pagebreak_anchors.append(tag)

            if 'color' in attrib:
                styles.append('color: ' + attrib.pop('color'))
            if 'bgcolor' in attrib:
                styles.append('background-color: ' + attrib.pop('bgcolor'))

            if 'filepos-id' in attrib:
                attrib['id'] = attrib.pop('filepos-id')
                if 'name' in attrib and attrib['name'] != attrib['id']:
                    attrib['name'] = attrib['id']
            if 'filepos' in attrib:
                filepos = attrib.pop('filepos')
                try:
                    attrib['href'] = "#filepos%d" % int(filepos)
                except ValueError:
                    pass
            if (tag.tag == 'a' and attrib.get('id', '').startswith('filepos') and
                    not tag.text and len(tag) == 0 and (tag.tail is None or not
                        tag.tail.strip()) and getattr(tag.getnext(), 'tag',
                            None) in BLOCK_TAGS):
                # This is an empty anchor immediately before a block tag, move
                # the id onto the block tag instead
                forwardable_anchors.append(tag)

            if styles:
                # Deduplicate rules: reuse an existing class when an
                # identical rule has already been generated.
                ncls = None
                rule = '; '.join(styles)
                for sel, srule in self.tag_css_rules.items():
                    if srule == rule:
                        ncls = sel
                        break
                if ncls is None:
                    ncls = 'calibre_%d' % i
                    self.tag_css_rules[ncls] = rule
                cls = attrib.get('class', '')
                cls = cls + (' ' if cls else '') + ncls
                attrib['class'] = cls

        for tag in svg_tags:
            # We ignore the svg tag, but keep any images it contains by
            # hoisting them up to the svg tag's position.
            images = tag.xpath('descendant::img[@src]')
            parent = tag.getparent()
            if images and hasattr(parent, 'find'):
                index = parent.index(tag)
                for img in images:
                    img.getparent().remove(img)
                    img.tail = img.text = None
                    parent.insert(index, img)
            if hasattr(parent, 'remove'):
                parent.remove(tag)

        for tag in pagebreak_anchors:
            # Move the anchor id off the pagebreak div onto a fresh <a>
            # placed just after it, so links land past the break.
            anchor = tag.attrib['id']
            del tag.attrib['id']
            if 'name' in tag.attrib:
                del tag.attrib['name']
            p = tag.getparent()
            a = p.makeelement('a')
            a.attrib['id'] = anchor
            p.insert(p.index(tag)+1, a)
            if getattr(a.getnext(), 'tag', None) in BLOCK_TAGS:
                forwardable_anchors.append(a)

        for tag in forwardable_anchors:
            block = tag.getnext()
            tag.getparent().remove(tag)
            if 'id' in block.attrib:
                # Block already has an id; nest the anchor inside instead.
                tag.tail = block.text
                block.text = None
                block.insert(0, tag)
            else:
                block.attrib['id'] = tag.attrib['id']

        # WebKit fails to navigate to anchors located on <br> tags
        for br in root.xpath('/body/br[@id]'):
            br.tag = 'div'
def get_left_whitespace(self, tag):
def whitespace(tag):
lm = ti = 0.0
if tag.tag == 'p':
ti = unit_convert('1.5em', 12, 500, 166)
if tag.tag == 'blockquote':
lm = unit_convert('2em', 12, 500, 166)
lm = self.left_margins.get(tag, lm)
ti = self.text_indents.get(tag, ti)
try:
lm = float(lm)
except:
lm = 0.0
try:
ti = float(ti)
except:
ti = 0.0
return lm + ti
parent = tag
ans = 0.0
while parent is not None:
ans += whitespace(parent)
parent = parent.getparent()
return ans
    def create_opf(self, htmlfile, guide=None, root=None):
        """Build an OPF package (metadata, manifest, spine, guide, TOC) for
        the extracted HTML.

        :param htmlfile: path to the extracted HTML file.
        :param guide: optional guide entries from the MOBI; a 'toc' entry, if
            present, is used to locate and build the NCX TOC.
        :param root: parsed lxml root of the HTML, required to extract TOC
            entries when a guide 'toc' reference exists.
        :return: tuple of (OPFCreator instance, ncx manifest entry or None).
        """
        mi = getattr(self.book_header.exth, 'mi', self.embedded_mi)
        if mi is None:
            mi = MetaInformation(self.book_header.title, [_('Unknown')])
        opf = OPFCreator(os.path.dirname(htmlfile), mi)
        # Pick a cover: EXTH cover offset first, then metadata cover, then
        # guess the first extracted image.
        if hasattr(self.book_header.exth, 'cover_offset'):
            opf.cover = u'images/%05d.jpg' % (self.book_header.exth.cover_offset + 1)
        elif mi.cover is not None:
            opf.cover = mi.cover
        else:
            opf.cover = u'images/%05d.jpg' % 1
            # The guessed cover may not actually exist on disk.
            if not os.path.exists(os.path.join(os.path.dirname(htmlfile),
                    *opf.cover.split('/'))):
                opf.cover = None

        # Copy the cover to a stable name so downstream code can find it.
        cover = opf.cover
        cover_copied = None
        if cover is not None:
            cover = cover.replace('/', os.sep)
            if os.path.exists(cover):
                ncover = u'images'+os.sep+u'calibre_cover.jpg'
                if os.path.exists(ncover):
                    os.remove(ncover)
                shutil.copyfile(cover, ncover)
                cover_copied = os.path.abspath(ncover)
                opf.cover = ncover.replace(os.sep, '/')

        manifest = [(htmlfile, 'application/xhtml+xml'),
            (os.path.abspath(u'styles.css'), 'text/css')]
        bp = os.path.dirname(htmlfile)
        added = set([])
        for i in getattr(self, 'image_names', []):
            path = os.path.join(bp, 'images', i)
            added.add(path)
            manifest.append((path, 'image/jpeg'))
        if cover_copied is not None:
            manifest.append((cover_copied, 'image/jpeg'))

        opf.create_manifest(manifest)
        opf.create_spine([os.path.basename(htmlfile)])
        toc = None
        if guide is not None:
            opf.create_guide(guide)
            for ref in opf.guide:
                if ref.type.lower() == 'toc':
                    toc = ref.href()

        ncx_manifest_entry = None
        if toc:
            ncx_manifest_entry = 'toc.ncx'
            # Find the element the guide's toc fragment points at; TOC
            # entries are the <a> links that follow it in document order.
            elems = root.xpath('//*[@id="%s"]' % toc.partition('#')[-1])
            tocobj = None
            ent_pat = re.compile(r'&(\S+?);')
            if elems:
                tocobj = TOC()
                found = False
                reached = False
                for x in root.iter():
                    if x == elems[-1]:
                        reached = True
                        continue
                    if reached and x.tag == 'a':
                        href = x.get('href', '')
                        # Only internal links (no scheme) are TOC entries.
                        if href and re.match('\w+://', href) is None:
                            try:
                                text = u' '.join([t.strip() for t in
                                    x.xpath('descendant::text()')])
                            except:
                                text = ''
                            text = ent_pat.sub(entity_to_unicode, text)
                            item = tocobj.add_item(toc.partition('#')[0], href[1:],
                                text)
                            # Remember indentation for later nesting recovery.
                            item.left_space = int(self.get_left_whitespace(x))
                            found = True
                    # A page break after at least one entry ends the TOC.
                    if reached and found and x.get('class', None) == 'mbp_pagebreak':
                        break
            if tocobj is not None:
                tocobj = self.structure_toc(tocobj)
                opf.set_toc(tocobj)

        return opf, ncx_manifest_entry
def structure_toc(self, toc):
indent_vals = set()
for item in toc:
indent_vals.add(item.left_space)
if len(indent_vals) > 6 or len(indent_vals) < 2:
# Too many or too few levels, give up
return toc
indent_vals = sorted(indent_vals)
last_found = [None for i in indent_vals]
newtoc = TOC()
def find_parent(level):
candidates = last_found[:level]
for x in reversed(candidates):
if x is not None:
return x
return newtoc
for item in toc:
level = indent_vals.index(item.left_space)
parent = find_parent(level)
last_found[level] = parent.add_item(item.href, item.fragment,
item.text)
return newtoc
    def sizeof_trailing_entries(self, data):
        """Return the total number of trailing bytes at the end of a text
        record, as described by the MOBI extra data flags.

        Each set bit above bit 0 in extra_flags corresponds to one trailing
        entry whose length is stored as a backward variable-width integer at
        the record's end; bit 0 marks multibyte character overlap bytes.
        """
        def sizeof_trailing_entry(ptr, psize):
            # Decode a variable-width integer stored *backwards* at the end
            # of ptr[:psize]: 7 data bits per byte, the high bit flags the
            # final byte, capped at 4 bytes (28 bits of value).
            bitpos, result = 0, 0
            while True:
                v = ord(ptr[psize-1])
                result |= (v & 0x7F) << bitpos
                bitpos += 7
                psize -= 1
                if (v & 0x80) != 0 or (bitpos >= 28) or (psize == 0):
                    return result

        num = 0
        size = len(data)
        # Bit 0 is handled separately below; every remaining set bit is one
        # trailing entry, consumed from the end of the shrinking region.
        flags = self.book_header.extra_flags >> 1
        while flags:
            if flags & 1:
                num += sizeof_trailing_entry(data, size - num)
            flags >>= 1
        if self.book_header.extra_flags & 1:
            # Multibyte overlap: the low two bits of the last remaining byte
            # give the overlap count (plus one for the size byte itself).
            num += (ord(data[size - num - 1]) & 0x3) + 1
        return num
def text_section(self, index):
data = self.sections[index][0]
trail_size = self.sizeof_trailing_entries(data)
return data[:len(data)-trail_size]
    def extract_text(self, offset=1):
        """Decompress all text records into ``self.mobi_html``.

        :param offset: index of the first text record (the preceding record
            is the header).
        :return: list of record indices consumed here (text plus huffman
            tables), so image extraction can skip them later.
        """
        self.log.debug('Extracting text...')
        text_sections = [self.text_section(i) for i in xrange(offset,
            min(self.book_header.records + offset, len(self.sections)))]
        processed_records = list(range(offset-1, self.book_header.records +
            offset))

        self.mobi_html = b''

        if self.book_header.compression_type == 'DH':
            # HUFF/CDIC compression: the huffman dictionaries live in their
            # own records, which also count as processed.
            huffs = [self.sections[i][0] for i in
                xrange(self.book_header.huff_offset,
                    self.book_header.huff_offset + self.book_header.huff_number)]
            processed_records += list(xrange(self.book_header.huff_offset,
                self.book_header.huff_offset + self.book_header.huff_number))
            huff = HuffReader(huffs)
            unpack = huff.unpack

        elif self.book_header.compression_type == '\x00\x02':
            # PalmDoc compression.
            unpack = decompress_doc

        elif self.book_header.compression_type == '\x00\x01':
            # No compression.
            unpack = lambda x: x

        else:
            raise MobiError('Unknown compression algorithm: %s' % repr(self.book_header.compression_type))
        self.mobi_html = b''.join(map(unpack, text_sections))
        if self.mobi_html.endswith(b'#'):
            self.mobi_html = self.mobi_html[:-1]

        if self.book_header.ancient and '<html' not in self.mobi_html[:300].lower():
            # Ancient non-HTML documents: turn bare carriage returns into
            # paragraph breaks.
            self.mobi_html = self.mobi_html.replace('\r ', '\n\n ')
        self.mobi_html = self.mobi_html.replace('\0', '')
        if self.book_header.codec == 'cp1252':
            self.mobi_html = self.mobi_html.replace('\x1e', '')  # record separator
            self.mobi_html = self.mobi_html.replace('\x02', '')  # start of text
        return processed_records
def replace_page_breaks(self):
self.processed_html = self.PAGE_BREAK_PAT.sub(
r'<div \1 class="mbp_pagebreak" />',
self.processed_html)
    def add_anchors(self):
        """Materialize anchors for filepos-based links.

        MOBI links address raw byte offsets rather than element ids, so for
        every offset referenced by a ``filepos`` attribute an
        ``<a id="fileposNNN">`` anchor is inserted (or, when the offset lands
        inside a start tag, a ``filepos-id`` attribute is added to that tag
        instead, to be converted to an id later).
        """
        self.log.debug('Adding anchors...')
        positions = set([])
        link_pattern = re.compile(r'''<[^<>]+filepos=['"]{0,1}(\d+)[^<>]*>''',
            re.IGNORECASE)
        for match in link_pattern.finditer(self.mobi_html):
            positions.add(int(match.group(1)))
        pos = 0
        processed_html = cStringIO.StringIO()
        end_tag_re = re.compile(r'<\s*/')
        for end in sorted(positions):
            if end == 0:
                continue
            oend = end
            # Nearest tag delimiters at/after the target offset.
            l = self.mobi_html.find('<', end)
            r = self.mobi_html.find('>', end)
            anchor = '<a id="filepos%d"></a>'
            if r > -1 and (r < l or l == end or l == -1):
                # The offset falls inside a tag.  If it is a non-self-closing
                # start tag, attach a filepos-id attribute to it rather than
                # inserting a (misplaced) anchor element.
                p = self.mobi_html.rfind('<', 0, end + 1)
                if pos < end and p > -1 and \
                   not end_tag_re.match(self.mobi_html[p:r]) and \
                   not self.mobi_html[p:r + 1].endswith('/>'):
                    anchor = ' filepos-id="filepos%d"'
                    end = r
                else:
                    # Closing or self-closing tag: insert the anchor after it.
                    end = r + 1
            processed_html.write(self.mobi_html[pos:end] + (anchor % oend))
            pos = end
        processed_html.write(self.mobi_html[pos:])
        processed_html = processed_html.getvalue()

        # Remove anchors placed inside entities
        self.processed_html = re.sub(r'&([^;]*?)(<a id="filepos\d+"></a>)([^;]*);',
            r'&\1\3;\2', processed_html)
    def extract_images(self, processed_records, output_dir):
        """Decode image records into ``output_dir/images/NNNNN.jpg``.

        :param processed_records: record indices already consumed (text,
            huffman tables, ...); skipped here and extended in place with
            every record visited.
        :param output_dir: destination directory; an ``images`` subdirectory
            is created beneath it.  Successfully extracted file names are
            collected in ``self.image_names``.
        """
        self.log.debug('Extracting images...')
        output_dir = os.path.abspath(os.path.join(output_dir, 'images'))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        image_index = 0

        self.image_names = []
        start = getattr(self.book_header, 'first_image_index', -1)
        if start > self.num_sections or start < 0:
            # BAEN PRC files have bad headers
            start = 0
        for i in range(start, self.num_sections):
            if i in processed_records:
                continue
            processed_records.append(i)
            data = self.sections[i][0]
            # The counter advances even for skipped records so file names
            # stay aligned with recindex references in the HTML.
            image_index += 1
            if data[:4] in {b'FLIS', b'FCIS', b'SRCS', b'\xe9\x8e\r\n',
                    b'RESC', b'BOUN', b'FDST', b'DATP', b'AUDI', b'VIDE'}:
                # This record is a known non image type, not need to try to
                # load the image
                continue

            path = os.path.join(output_dir, '%05d.jpg' % image_index)
            try:
                if what(None, data) not in {'jpg', 'jpeg', 'gif', 'png', 'bmp', 'webp'}:
                    continue
                save_cover_data_to(data, path, minify_to=(10000, 10000))
            except Exception:
                # Unidentifiable or corrupt image data: skip it silently.
                continue
            self.image_names.append(os.path.basename(path))
def test_mbp_regex():
    """Self test: run MobiReader.PAGE_BREAK_PAT over a table of tricky
    pagebreak markup fragments and raise if any substitution result differs
    from the expected value."""
    # dict.items() instead of the Python-2-only iteritems(): identical
    # behavior on Python 2, and keeps this helper portable.
    for raw, m in {
        '<mbp:pagebreak></mbp:pagebreak>':'',
        '<mbp:pagebreak xxx></mbp:pagebreak>yyy':' xxxyyy',
        '<mbp:pagebreak> </mbp:pagebreak>':'',
        '<mbp:pagebreak>xxx':'xxx',
        '<mbp:pagebreak/>xxx':'xxx',
        '<mbp:pagebreak sdf/ >xxx':' sdfxxx',
        '<mbp:pagebreak / >':' ',
        '</mbp:pagebreak>':'',
        '</mbp:pagebreak sdf>':' sdf',
        '</mbp:pagebreak><mbp:pagebreak></mbp:pagebreak>xxx':'xxx',
    }.items():
        ans = MobiReader.PAGE_BREAK_PAT.sub(r'\1', raw)
        if ans != m:
            raise Exception('%r != %r for %r'%(ans, m, raw))
| gpl-3.0 |
cesarmarinhorj/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py | 121 | 3174 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
import unittest2 as unittest
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.host_mock import MockHost
from webkitpy.port import test
from webkitpy.layout_tests.servers.apache_http_server import LayoutTestApacheHttpd
from webkitpy.layout_tests.servers.http_server_base import ServerError
class TestLayoutTestApacheHttpd(unittest.TestCase):
    def test_start_cmd(self):
        """Start and stop an Apache server against a fully mocked host and
        verify that the generated httpd.conf reflects the requested number
        of servers."""
        # Fails on win - see https://bugs.webkit.org/show_bug.cgi?id=84726
        if sys.platform in ('cygwin', 'win32'):
            return

        def fake_pid(_):
            # Simulate apache writing its pid file so start() sees success.
            host.filesystem.write_text_file('/tmp/WebKit/httpd.pid', '42')
            return True

        host = MockHost()
        host.executive = MockExecutive(should_log=True)
        test_port = test.TestPort(host)
        host.filesystem.write_text_file(test_port._path_to_apache_config_file(), '')

        server = LayoutTestApacheHttpd(test_port, "/mock/output_dir", number_of_servers=4)
        # Stub out the environment checks so start() never touches real
        # ports or processes.
        server._check_that_all_ports_are_available = lambda: True
        server._is_server_running_on_all_ports = lambda: True
        server._wait_for_action = fake_pid
        oc = OutputCapture()
        try:
            oc.capture_output()
            server.start()
            server.stop()
        finally:
            _, _, logs = oc.restore_output()
            self.assertIn("StartServers 4", logs)
            self.assertIn("MinSpareServers 4", logs)
            self.assertIn("MaxSpareServers 4", logs)
            self.assertTrue(host.filesystem.exists("/mock/output_dir/httpd.conf"))
| bsd-3-clause |
hsiaoyi0504/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
    """Yield one l1_min_c sanity check per (loss, X, Y, intercept)
    combination (nose test generator)."""
    losses = ['squared_hinge', 'log']

    Xs = {'sparse': sparse_X, 'dense': dense_X}
    Ys = {'two-classes': Y1, 'multi-class': Y2}
    intercepts = {'no-intercept': {'fit_intercept': False},
                  'fit-intercept': {'fit_intercept': True,
                                    'intercept_scaling': 10}}

    for loss in losses:
        for X_label, X in Xs.items():
            for Y_label, Y in Ys.items():
                for intercept_label, intercept_params in intercepts.items():
                    # Bind the loop variables as lambda defaults: lambdas
                    # close over names by reference, so if the runner calls
                    # the yielded checks only after the generator has been
                    # exhausted, every check would otherwise see the *last*
                    # combination.
                    check = lambda X=X, Y=Y, loss=loss, \
                        params=intercept_params: check_l1_min_c(X, Y, loss,
                                                                **params)
                    check.description = ('Test l1_min_c loss=%r %s %s %s' %
                                         (loss, X_label, Y_label,
                                          intercept_label))
                    yield check
def test_l2_deprecation():
    """loss="l2" must behave as an alias of "squared_hinge" and emit a
    DeprecationWarning."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        assert_equal(l1_min_c(dense_X, Y1, "l2"),
                     l1_min_c(dense_X, Y1, "squared_hinge"))
        assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    """Verify l1_min_c returns the exact boundary: at C = min_c the fitted
    L1-penalized model is entirely zero, and just above it something becomes
    non-zero."""
    min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)

    estimators = {
        'log': LogisticRegression(penalty='l1'),
        'squared_hinge': LinearSVC(loss='squared_hinge',
                                   penalty='l1', dual=False),
    }
    clf = estimators[loss]
    clf.fit_intercept = fit_intercept
    clf.intercept_scaling = intercept_scaling

    # At exactly min_c every coefficient and intercept must be zero ...
    clf.C = min_c
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) == 0).all())
    assert_true((np.asarray(clf.intercept_) == 0).all())

    # ... and slightly above min_c the model must become non-trivial.
    clf.C = min_c * 1.01
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) != 0).any() or
                (np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
    """l1_min_c must raise ValueError for a degenerate problem (identical
    points with different labels)."""
    X = [[0, 0], [0, 0]]
    y = [0, 1]
    l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
    """l1_min_c must raise ValueError for an unsupported loss name."""
    l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
benthomasson/ansible | lib/ansible/module_utils/ipa.py | 97 | 6567 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible.module_utils.urls import fetch_url
class IPAClient(object):
    """Minimal JSON-RPC client for the FreeIPA web API.

    Performs form-based session login and posts JSON-RPC requests; all
    errors are reported through the Ansible module's ``fail_json``.
    """

    def __init__(self, module, host, port, protocol):
        self.host = host
        self.port = port
        self.protocol = protocol
        self.module = module
        self.headers = None  # session headers, populated by login()

    def get_base_url(self):
        """Return the IPA API base URL, e.g. ``https://host/ipa``."""
        return '%s://%s/ipa' % (self.protocol, self.host)

    def get_json_url(self):
        """Return the JSON-RPC endpoint URL."""
        return '%s/session/json' % self.get_base_url()

    def login(self, username, password):
        """Authenticate against ``/session/login_password`` and store the
        session cookie headers for subsequent JSON-RPC calls."""
        url = '%s/session/login_password' % self.get_base_url()
        data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
        headers = {'referer': self.get_base_url(),
                   'Content-Type': 'application/x-www-form-urlencoded',
                   'Accept': 'text/plain'}
        try:
            resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers)
            status_code = info['status']
            if status_code not in [200, 201, 204]:
                self._fail('login', info['msg'])

            self.headers = {'referer': self.get_base_url(),
                            'Content-Type': 'application/json',
                            'Accept': 'application/json',
                            'Cookie': resp.info().get('Set-Cookie')}
        except Exception:
            e = get_exception()
            self._fail('login', str(e))

    def _fail(self, msg, e):
        """Abort the module run with '<msg>: <error>'.

        ``e`` may be an IPA error dict (with a 'message' key) or a plain
        string.  The old ``'message' in e`` test also matched any *string*
        containing the substring 'message' and then crashed on ``e.get``;
        restrict the dict handling to actual dicts.
        """
        if isinstance(e, dict) and 'message' in e:
            err_string = e.get('message')
        else:
            err_string = e
        self.module.fail_json(msg='%s: %s' % (msg, err_string))

    def _post_json(self, method, name, item=None):
        """POST one JSON-RPC request and return the unwrapped result.

        Returns the first element of a list result (or {} if empty), the
        bare result otherwise, or None when the response has no result.
        """
        if item is None:
            item = {}

        url = '%s/session/json' % self.get_base_url()
        data = {'method': method, 'params': [[name], item]}
        try:
            resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), headers=self.headers)
            status_code = info['status']
            if status_code not in [200, 201, 204]:
                self._fail(method, info['msg'])
        except Exception:
            e = get_exception()
            self._fail('post %s' % method, str(e))

        if PY3:
            charset = resp.headers.get_content_charset('latin-1')
        else:
            response_charset = resp.headers.getparam('charset')
            if response_charset:
                charset = response_charset
            else:
                charset = 'latin-1'
        # The payload is already decoded to text by to_text(); json.loads
        # must not be passed the deprecated ``encoding`` keyword (a no-op
        # for text input, removed entirely in Python 3.9).
        resp = json.loads(to_text(resp.read(), encoding=charset))
        err = resp.get('error')
        if err is not None:
            self._fail('response %s' % method, err)

        if 'result' in resp:
            result = resp.get('result')
            if 'result' in result:
                result = result.get('result')
                if isinstance(result, list):
                    if len(result) > 0:
                        return result[0]
                    else:
                        return {}
            return result
        return None

    def get_diff(self, ipa_data, module_data):
        """Return the keys whose values differ between the data on the IPA
        server and the data requested by the module.

        Scalar module values are compared against list server values by
        wrapping them in a list; list-vs-list comparison is order
        insensitive.
        """
        result = []
        for key in module_data.keys():
            mod_value = module_data.get(key, None)
            if isinstance(mod_value, list):
                default = []
            else:
                default = None
            ipa_value = ipa_data.get(key, default)
            if isinstance(ipa_value, list) and not isinstance(mod_value, list):
                mod_value = [mod_value]
            if isinstance(ipa_value, list) and isinstance(mod_value, list):
                mod_value = sorted(mod_value)
                ipa_value = sorted(ipa_value)
            if mod_value != ipa_value:
                result.append(key)
        return result

    def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
        """Synchronize a list-valued attribute.

        Removes entries present on the server but not requested, adds
        requested entries that are missing; honours check mode.  Returns
        True if anything changed (or would change).
        """
        changed = False

        # Remove entries present on the server but not requested.
        diff = list(set(ipa_list) - set(module_list))
        if len(diff) > 0:
            changed = True
            if not self.module.check_mode:
                if item:
                    remove_method(name=name, item={item: diff})
                else:
                    remove_method(name=name, item=diff)

        # Add requested entries that are missing on the server.
        diff = list(set(module_list) - set(ipa_list))
        if len(diff) > 0:
            changed = True
            if not self.module.check_mode:
                if item:
                    add_method(name=name, item={item: diff})
                else:
                    add_method(name=name, item=diff)

        return changed
| gpl-3.0 |
bop/bauhaus | lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/coordseq.py | 624 | 3112 | from ctypes import c_double, c_int, c_uint, POINTER
from django.contrib.gis.geos.libgeos import GEOM_PTR, CS_PTR
from django.contrib.gis.geos.prototypes.errcheck import last_arg_byref, GEOSException
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Error-checking routines specific to coordinate sequences. ##
def check_cs_ptr(result, func, cargs):
    """ctypes errcheck: raise when a GEOS routine returned a null
    coordinate-sequence pointer, otherwise pass the pointer through."""
    if result:
        return result
    raise GEOSException('Error encountered checking Coordinate Sequence returned from GEOS C function "%s".' % func.__name__)
def check_cs_op(result, func, cargs):
    """ctypes errcheck: a status code of 0 from a coordinate-sequence
    operation means failure; any other status is returned unchanged."""
    if result == 0:
        raise GEOSException('Could not set value on coordinate sequence')
    return result
def check_cs_get(result, func, cargs):
    "Checking the coordinate sequence retrieval."
    # Validate the status code first (raises on failure) ...
    check_cs_op(result, func, cargs)
    # Object in by reference, return its value.
    return last_arg_byref(cargs)
## Coordinate sequence prototype generation functions. ##
def cs_int(func):
    "For coordinate sequence routines that return an integer."
    # Prototype: int func(CS_PTR, unsigned int*); the unsigned int is a
    # by-reference output extracted by check_cs_get.
    func.argtypes = [CS_PTR, POINTER(c_uint)]
    func.restype = c_int
    func.errcheck = check_cs_get
    return func
def cs_operation(func, ordinate=False, get=False):
    """Configure a coordinate-sequence getter/setter prototype.

    Getters take a double* output and use the by-reference errcheck;
    setters take a plain double.  Ordinate routines carry an extra
    unsigned int (the ordinate index).
    """
    if get:
        func.errcheck = check_cs_get
        dbl_param = POINTER(c_double)
    else:
        func.errcheck = check_cs_op
        dbl_param = c_double

    argtypes = [CS_PTR, c_uint]
    if ordinate:
        argtypes.append(c_uint)
    argtypes.append(dbl_param)

    func.argtypes = argtypes
    func.restype = c_int
    return func
def cs_output(func, argtypes):
    "For routines that return a coordinate sequence."
    # The returned CS_PTR is null-checked by check_cs_ptr.
    func.argtypes = argtypes
    func.restype = CS_PTR
    func.errcheck = check_cs_ptr
    return func
## Coordinate Sequence ctypes prototypes ##
# Each GEOSFunc below is a thread-safe wrapper around the named GEOS C API
# routine; the helper functions above attach argtypes/restype/errcheck.

# Coordinate Sequence constructors & cloning.
cs_clone = cs_output(GEOSFunc('GEOSCoordSeq_clone'), [CS_PTR])
create_cs = cs_output(GEOSFunc('GEOSCoordSeq_create'), [c_uint, c_uint])
get_cs = cs_output(GEOSFunc('GEOSGeom_getCoordSeq'), [GEOM_PTR])

# Getting, setting ordinate
cs_getordinate = cs_operation(GEOSFunc('GEOSCoordSeq_getOrdinate'), ordinate=True, get=True)
cs_setordinate = cs_operation(GEOSFunc('GEOSCoordSeq_setOrdinate'), ordinate=True)

# For getting, x, y, z
cs_getx = cs_operation(GEOSFunc('GEOSCoordSeq_getX'), get=True)
cs_gety = cs_operation(GEOSFunc('GEOSCoordSeq_getY'), get=True)
cs_getz = cs_operation(GEOSFunc('GEOSCoordSeq_getZ'), get=True)

# For setting, x, y, z
cs_setx = cs_operation(GEOSFunc('GEOSCoordSeq_setX'))
cs_sety = cs_operation(GEOSFunc('GEOSCoordSeq_setY'))
cs_setz = cs_operation(GEOSFunc('GEOSCoordSeq_setZ'))

# These routines return size & dimensions.
cs_getsize = cs_int(GEOSFunc('GEOSCoordSeq_getSize'))
cs_getdims = cs_int(GEOSFunc('GEOSCoordSeq_getDimensions'))
| mit |
liamgh/liamgreenhughes-sl4a-tf101 | python-build/python-libs/gdata/src/gdata/tlslite/integration/AsyncStateMachine.py | 238 | 7198 | """
A state machine for using TLS Lite with asynchronous I/O.
"""
class AsyncStateMachine:
    """
    This is an abstract class that's used to integrate TLS Lite with
    asyncore and Twisted.

    This class signals wantsReadsEvent() and wantsWriteEvent().  When
    the underlying socket has become readable or writeable, the event
    should be passed to this class by calling inReadEvent() or
    inWriteEvent().  This class will then try to read or write through
    the socket, and will update its state appropriately.

    This class will forward higher-level events to its subclass.  For
    example, when a complete TLS record has been received,
    outReadEvent() will be called with the decrypted data.
    """

    def __init__(self):
        self._clear()

    def _clear(self):
        # These store the various asynchronous operations (i.e.
        # generators).  Only one of them, at most, is ever active at a
        # time.
        self.handshaker = None
        self.closer = None
        self.reader = None
        self.writer = None

        # This stores the result from the last call to the
        # currently active operation.  If 0 it indicates that the
        # operation wants to read, if 1 it indicates that the
        # operation wants to write.  If None, there is no active
        # operation.
        self.result = None

    def _checkAssert(self, maxActive=1):
        # This checks that only one operation, at most, is
        # active, and that self.result is set appropriately.
        activeOps = 0
        if self.handshaker:
            activeOps += 1
        if self.closer:
            activeOps += 1
        if self.reader:
            activeOps += 1
        if self.writer:
            activeOps += 1

        # Identity comparison with None (PEP 8) instead of '== None'.
        if self.result is None:
            if activeOps != 0:
                raise AssertionError()
        elif self.result in (0, 1):
            if activeOps != 1:
                raise AssertionError()
        else:
            raise AssertionError()
        if activeOps > maxActive:
            raise AssertionError()

    def wantsReadEvent(self):
        """If the state machine wants to read.

        If an operation is active, this returns whether or not the
        operation wants to read from the socket.  If an operation is
        not active, this returns None.

        @rtype: bool or None
        @return: If the state machine wants to read.
        """
        if self.result is not None:
            return self.result == 0
        return None

    def wantsWriteEvent(self):
        """If the state machine wants to write.

        If an operation is active, this returns whether or not the
        operation wants to write to the socket.  If an operation is
        not active, this returns None.

        @rtype: bool or None
        @return: If the state machine wants to write.
        """
        if self.result is not None:
            return self.result == 1
        return None

    def outConnectEvent(self):
        """Called when a handshake operation completes.

        May be overridden in subclass.
        """
        pass

    def outCloseEvent(self):
        """Called when a close operation completes.

        May be overridden in subclass.
        """
        pass

    def outReadEvent(self, readBuffer):
        """Called when a read operation completes.

        May be overridden in subclass."""
        pass

    def outWriteEvent(self):
        """Called when a write operation completes.

        May be overridden in subclass."""
        pass

    def inReadEvent(self):
        """Tell the state machine it can read from the socket."""
        try:
            self._checkAssert()
            if self.handshaker:
                self._doHandshakeOp()
            elif self.closer:
                self._doCloseOp()
            elif self.reader:
                self._doReadOp()
            elif self.writer:
                self._doWriteOp()
            else:
                # No operation active: start a read of the next record.
                self.reader = self.tlsConnection.readAsync(16384)
                self._doReadOp()
        except:
            # Reset state on any failure, then let the caller see the error.
            self._clear()
            raise

    def inWriteEvent(self):
        """Tell the state machine it can write to the socket."""
        try:
            self._checkAssert()
            if self.handshaker:
                self._doHandshakeOp()
            elif self.closer:
                self._doCloseOp()
            elif self.reader:
                self._doReadOp()
            elif self.writer:
                self._doWriteOp()
            else:
                self.outWriteEvent()
        except:
            # Reset state on any failure, then let the caller see the error.
            self._clear()
            raise

    def _doHandshakeOp(self):
        # next() builtin (Python 2.6+) works with both Python 2 and 3
        # generators, unlike the Python-2-only .next() method.
        try:
            self.result = next(self.handshaker)
        except StopIteration:
            self.handshaker = None
            self.result = None
            self.outConnectEvent()

    def _doCloseOp(self):
        try:
            self.result = next(self.closer)
        except StopIteration:
            self.closer = None
            self.result = None
            self.outCloseEvent()

    def _doReadOp(self):
        self.result = next(self.reader)
        if self.result not in (0, 1):
            # Anything other than a want-read/want-write code is the
            # decrypted data itself.
            readBuffer = self.result
            self.reader = None
            self.result = None
            self.outReadEvent(readBuffer)

    def _doWriteOp(self):
        try:
            self.result = next(self.writer)
        except StopIteration:
            self.writer = None
            self.result = None

    def setHandshakeOp(self, handshaker):
        """Start a handshake operation.

        @type handshaker: generator
        @param handshaker: A generator created by using one of the
        asynchronous handshake functions (i.e. handshakeServerAsync, or
        handshakeClientxxx(..., async=True).
        """
        try:
            self._checkAssert(0)
            self.handshaker = handshaker
            self._doHandshakeOp()
        except:
            self._clear()
            raise

    def setServerHandshakeOp(self, **args):
        """Start a handshake operation.

        The arguments passed to this function will be forwarded to
        L{tlslite.TLSConnection.TLSConnection.handshakeServerAsync}.
        """
        handshaker = self.tlsConnection.handshakeServerAsync(**args)
        self.setHandshakeOp(handshaker)

    def setCloseOp(self):
        """Start a close operation.
        """
        try:
            self._checkAssert(0)
            self.closer = self.tlsConnection.closeAsync()
            self._doCloseOp()
        except:
            self._clear()
            raise

    def setWriteOp(self, writeBuffer):
        """Start a write operation.

        @type writeBuffer: str
        @param writeBuffer: The string to transmit.
        """
        try:
            self._checkAssert(0)
            self.writer = self.tlsConnection.writeAsync(writeBuffer)
            self._doWriteOp()
        except:
            self._clear()
            raise
| apache-2.0 |
tchellomello/home-assistant | tests/components/http/test_view.py | 13 | 2094 | """Tests for Home Assistant View."""
from aiohttp.web_exceptions import (
HTTPBadRequest,
HTTPInternalServerError,
HTTPUnauthorized,
)
import pytest
import voluptuous as vol
from homeassistant.components.http.view import (
HomeAssistantView,
request_handler_factory,
)
from homeassistant.exceptions import ServiceNotFound, Unauthorized
from tests.async_mock import AsyncMock, Mock
@pytest.fixture
def mock_request():
    """Mock an aiohttp request whose hass instance is running."""
    return Mock(app={"hass": Mock(is_stopping=False)}, match_info={})
@pytest.fixture
def mock_request_with_stopping():
    """Mock an aiohttp request whose hass instance is stopping."""
    return Mock(app={"hass": Mock(is_stopping=True)}, match_info={})
async def test_invalid_json(caplog):
    """Serializing non-JSON data (NaN) must raise HTTP 500 and log the value."""
    view = HomeAssistantView()

    with pytest.raises(HTTPInternalServerError):
        view.json(float("NaN"))

    assert str(float("NaN")) in caplog.text
async def test_handling_unauthorized(mock_request):
    """An Unauthorized error from a handler must map to HTTP 401."""
    with pytest.raises(HTTPUnauthorized):
        await request_handler_factory(
            Mock(requires_auth=False), AsyncMock(side_effect=Unauthorized)
        )(mock_request)
async def test_handling_invalid_data(mock_request):
    """A voluptuous validation error from a handler must map to HTTP 400."""
    with pytest.raises(HTTPBadRequest):
        await request_handler_factory(
            Mock(requires_auth=False), AsyncMock(side_effect=vol.Invalid("yo"))
        )(mock_request)
async def test_handling_service_not_found(mock_request):
    """A ServiceNotFound error from a handler must map to HTTP 500."""
    with pytest.raises(HTTPInternalServerError):
        await request_handler_factory(
            Mock(requires_auth=False),
            AsyncMock(side_effect=ServiceNotFound("test", "test")),
        )(mock_request)
async def test_not_running(mock_request_with_stopping):
    """Test we get a 503 when not running."""
    # While hass is stopping, requests are rejected before the handler's
    # exception could surface.
    response = await request_handler_factory(
        Mock(requires_auth=False), AsyncMock(side_effect=Unauthorized)
    )(mock_request_with_stopping)

    assert response.status == 503
| apache-2.0 |
rofehr/linux-wetek | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and print them as the
# space-separated "index_hex=value" pairs the sysfs adsl_config attribute
# expects.
count = 0
while True:
    raw = sys.stdin.read(4)
    if not raw:
        break
    if len(raw) != 4:
        # Trailing partial value: terminate the output line, report, bail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(raw)))
        sys.exit(1)

    if count > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(count, struct.unpack("<I", raw)[0]))
    count += 1

sys.stdout.write("\n")
| gpl-2.0 |
mrquim/mrquimrepo | repo/plugin.video.salts/scrapers/scraper.py | 4 | 29936 | """
Stream All The Sources Addon
Copyright (C) 2014 k3l3vra
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import abc
import cookielib
import datetime
import os
import re
import urllib
import urllib2
import urlparse
from salts_lib import cloudflare
from salts_lib import cf_captcha
import kodi
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import Q_ORDER
from salts_lib.constants import SHORT_MONS
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import DEFAULT_TIMEOUT
from salts_lib.db_utils import DB_Connection
from salts_lib.utils2 import i18n, ungz
# urlresolver is optional at import time: if it cannot be imported, surface a
# Kodi notification instead of letting the ImportError kill the whole addon.
try:
    import urlresolver
except:
    kodi.notify(msg=i18n('smu_failed'), duration=5000)
# Module-level logger shared by all scrapers in this file.
logger = log_utils.Logger.get_logger()
# Default (empty) base url; Scraper.base_url is initialized from this and is
# expected to be overridden per scraper.
BASE_URL = ''
# Profile directory where per-scraper cookie jars are persisted
# (see Scraper._set_cookies: '<name>_cookies.lwp' files live here).
COOKIEPATH = kodi.translate_path(kodi.get_profile())
MONTHS = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
# Hard cap (5 MB) on the number of bytes read from any HTTP response.
MAX_RESPONSE = 1024 * 1024 * 5
# True when the user enabled interactive Cloudflare captcha solving.
CF_CAPCHA_ENABLED = kodi.get_setting('cf_captcha') == 'true'
class ScrapeError(Exception):
    """Exception type for scraper-specific failures."""
    pass
class NoRedirection(urllib2.HTTPErrorProcessor):
    """urllib2 processor that returns 3xx responses as-is instead of following them."""
    def http_response(self, request, response):  # @UnusedVariable
        logger.log('Stopping Redirect', log_utils.LOGDEBUG)
        return response
    # HTTPS responses get identical treatment.
    https_response = http_response
# Alias: an abstract static method is declared with plain abc.abstractmethod.
abstractstaticmethod = abc.abstractmethod
class abstractclassmethod(classmethod):
    """classmethod variant that also marks the wrapped callable abstract.

    Needed because Python 2 has no built-in way to combine @classmethod with
    @abc.abstractmethod in a single decorator.
    """
    __isabstractmethod__ = True
    def __init__(self, callable):
        # Tag the underlying function so ABCMeta treats it as abstract.
        callable.__isabstractmethod__ = True
        super(abstractclassmethod, self).__init__(callable)
class Scraper(object):
    """
    Abstract base class for all source scrapers.

    Concrete scrapers implement provides(), get_name(), get_sources() and
    search(); this base class supplies shared plumbing: cached HTTP fetches
    (with Cloudflare/sucuri handling), per-scraper persistent cookie jars,
    and DB-backed "related url" lookups.
    """
    __metaclass__ = abc.ABCMeta
    base_url = BASE_URL
    __db_connection = None
    worker_id = None
    debrid_resolvers = None
    # Matches one row of an Apache-style directory index: link, title, date, size.
    row_pattern = '\s*<a\s+href="(?P<link>[^"]+)">(?P<title>[^<]+)</a>\s+(?P<date>\d+-[a-zA-Z]+-\d+ \d+:\d+)\s+(?P<size>-|\d+)'
    def __init__(self, timeout=DEFAULT_TIMEOUT):
        # Bug fix: the body used to be just "pass", silently discarding the
        # timeout even though _http_get() reads self.timeout; any subclass
        # relying on the base __init__ hit an AttributeError. Subclasses that
        # set self.timeout themselves are unaffected by this assignment.
        self.timeout = timeout
    @abstractclassmethod
    def provides(cls):
        """
        Must return a list/set/frozenset of VIDEO_TYPES that are supported by this scraper. Is a class method so that instances of the class
        don't have to be instantiated to determine they are not useful
        * Datatypes set or frozenset are preferred as existence checking is faster with sets
        """
        raise NotImplementedError
    @abstractclassmethod
    def get_name(cls):
        """
        Must return a string that is a name that will be used through out the UI and DB to refer to urls from this source
        Should be descriptive enough to be recognized but short enough to be presented in the UI
        """
        raise NotImplementedError
    def resolve_link(self, link):
        """
        Must return a string that is a urlresolver resolvable link given a link that this scraper supports
        link: a url fragment associated with this site that can be resolved to a hoster link
        * The purpose is many streaming sites provide the actual hoster link in a separate page from link
        on the video page.
        * This method is called for the user selected source before calling urlresolver on it.
        """
        if link.startswith('//'):
            # Scheme-relative url: assume plain http.
            return 'http:' + link
        elif not link.startswith('http'):
            # Relative fragment: anchor it on this scraper's base url.
            return scraper_utils.urljoin(self.base_url, link)
        else:
            return link
    def format_source_label(self, item):
        """
        Must return a string that is to be the label to be used for this source in the "Choose Source" dialog
        item: one element of the list that is returned from get_sources for this scraper
        """
        # Each optional key contributes one decorated fragment to the label.
        label = '[%s]' % (item['quality'])
        if '3D' in item and item['3D']:
            label += ' (3D)'
        if 'format' in item:
            label += ' (%s)' % (item['format'])
        if 'version' in item:
            label += ' %s' % (item['version'])
        label += ' %s' % (item['host'])
        if 'views' in item and item['views'] is not None:
            label += ' (%s views)' % (item['views'])
        if 'rating' in item and item['rating'] is not None:
            label += ' (%s/100)' % (item['rating'])
        if 'size' in item:
            label += ' (%s)' % (item['size'])
        if 'subs' in item and item['subs']:
            label += ' (%s)' % (item['subs'])
        if 'extra' in item:
            label += ' [%s]' % (item['extra'])
        return label
    @abc.abstractmethod
    def get_sources(self, video):
        """
        Must return a list of dictionaries that are potential link to hoster sites (or links to links to hoster sites)
        Each dictionary must contain elements of at least:
        * multi-part: True if this source is one part of a whole
        * class: a reference to an instance of the scraper itself
        * host: the hostname of the hoster
        * url: the url that is a link to a hoster, or a link to a page that this scraper can resolve to a link to a hoster
        * quality: one of the QUALITIES values, or None if unknown; users can sort sources by quality
        * views: count of the views from the site for this source or None is unknown; Users can sort sources by views
        * rating: a value between 0 and 100; 0 being worst, 100 the best, or None if unknown. Users can sort sources by rating.
        * direct: True if url is a direct link to a media file; False if not. If not present; assumption is direct
        * other keys are allowed as needed if they would be useful (e.g. for format_source_label)
        video is an object of type ScraperVideo:
        video_type: one of VIDEO_TYPES for whatever the sources should be for
        title: the title of the tv show or movie
        year: the year of the tv show or movie
        season: only present for tv shows; the season number of the video for which sources are requested
        episode: only present for tv shows; the episode number of the video for which sources are requested
        ep_title: only present for tv shows; the episode title if available
        """
        raise NotImplementedError
    def get_url(self, video):
        """
        Must return a url for the site this scraper is associated with that is related to this video.
        video is an object of type ScraperVideo:
        video_type: one of VIDEO_TYPES this url is for (e.g. EPISODE urls might be different than TVSHOW urls)
        title: the title of the tv show or movie
        year: the year of the tv show or movie
        season: only present for season or episode VIDEO_TYPES; the season number for the url being requested
        episode: only present for season or episode VIDEO_TYPES; the episode number for the url being requested
        ep_title: only present for tv shows; the episode title if available
        * Generally speaking, domain should not be included
        """
        return self._default_get_url(video)
    @abc.abstractmethod
    def search(self, video_type, title, year, season=''):
        """
        Must return a list of results returned from the site associated with this scraper when doing a search using the input parameters
        If it does return results, it must be a list of dictionaries. Each dictionary must contain at least the following:
        * title: title of the result
        * year: year of the result
        * url: a url fragment that is the url on the site associated with this scraper for this season result item
        video_type: one of the VIDEO_TYPES being searched for. Only tvshows and movies are expected generally
        title: the title being search for
        year: the year being search for
        season: the season being searched for (only required if video_type == VIDEO_TYPES.SEASON)
        * Method must be provided, but can raise NotImplementedError if search not available on the site
        """
        raise NotImplementedError
    @classmethod
    def get_settings(cls):
        """
        Returns a list of settings to be used for this scraper. Settings are automatically checked for updates every time scrapers are imported
        The list returned by each scraper is aggregated into a big settings.xml string, and then if it differs from the current settings xml in the Scrapers category
        the existing settings.xml fragment is removed and replaced by the new string
        """
        name = cls.get_name()
        return [
            ' <setting id="%s-enable" type="bool" label="%s %s" default="true" visible="true"/>' % (name, name, i18n('enabled')),
            ' <setting id="%s-base_url" type="text" label=" %s" default="%s" visible="eq(-1,true)"/>' % (name, i18n('base_url'), cls.base_url),
            ' <setting id="%s-sub_check" type="bool" label=" %s" default="true" visible="eq(-2,true)"/>' % (name, i18n('page_existence')),
        ]
    @classmethod
    def has_proxy(cls):
        """Return True if this scraper routes requests through a proxy (default: no)."""
        return False
    def _default_get_url(self, video):
        """Resolve a video to a site url, consulting the local DB cache first
        and falling back to a live search; episode urls are derived from the
        show/season url via _get_episode_url()."""
        url = None
        temp_video_type = video.video_type
        if video.video_type == VIDEO_TYPES.EPISODE:
            # Episodes are located via their show (or season) page first.
            if VIDEO_TYPES.TVSHOW in self.provides():
                temp_video_type = VIDEO_TYPES.TVSHOW
            elif VIDEO_TYPES.SEASON in self.provides():
                temp_video_type = VIDEO_TYPES.SEASON
        season = video.season if temp_video_type == VIDEO_TYPES.SEASON else ''
        if temp_video_type != VIDEO_TYPES.EPISODE:
            result = self.db_connection().get_related_url(temp_video_type, video.title, video.year, self.get_name(), season)
            if result:
                url = result[0][0]
                logger.log('Got local related url: |%s|%s|%s|%s|%s|%s|' % (temp_video_type, video.title, video.year, season, self.get_name(), url), log_utils.LOGDEBUG)
            else:
                results = self.search(temp_video_type, video.title, video.year, season)
                if results:
                    url = results[0]['url']
                    self.db_connection().set_related_url(temp_video_type, video.title, video.year, self.get_name(), url, season)
        if isinstance(url, unicode): url = url.encode('utf-8')
        if video.video_type == VIDEO_TYPES.EPISODE:
            if url == FORCE_NO_MATCH:
                # Sentinel meaning "this show is known not to exist here".
                url = None
            elif url or temp_video_type == VIDEO_TYPES.EPISODE:
                result = self.db_connection().get_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), video.season, video.episode)
                if result:
                    url = result[0][0]
                    if isinstance(url, unicode): url = url.encode('utf-8')
                    logger.log('Got local related url: |%s|%s|%s|' % (video, self.get_name(), url), log_utils.LOGDEBUG)
                else:
                    url = self._get_episode_url(url, video)
                    if url:
                        self.db_connection().set_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), url, video.season, video.episode)
        return url
    def _http_get(self, url, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True, method=None, require_debrid=False, read_error=False, cache_limit=8):
        """Fetch a url via _cached_http_get(); if the page carries a sucuri
        anti-bot cookie, re-fetch once with that cookie set (uncached)."""
        html = self._cached_http_get(url, self.base_url, self.timeout, params=params, data=data, multipart_data=multipart_data,
                                     headers=headers, cookies=cookies, allow_redirect=allow_redirect, method=method, require_debrid=require_debrid,
                                     read_error=read_error, cache_limit=cache_limit)
        sucuri_cookie = scraper_utils.get_sucuri_cookie(html)
        if sucuri_cookie:
            logger.log('Setting sucuri cookie: %s' % (sucuri_cookie), log_utils.LOGDEBUG)
            if cookies is not None:
                # Bug fix: this used to be "cookies = cookies.update(...)";
                # dict.update() returns None, so the retry lost ALL cookies.
                cookies.update(sucuri_cookie)
            else:
                cookies = sucuri_cookie
            html = self._cached_http_get(url, self.base_url, self.timeout, params=params, data=data, multipart_data=multipart_data,
                                         headers=headers, cookies=cookies, allow_redirect=allow_redirect, method=method, require_debrid=require_debrid,
                                         read_error=read_error, cache_limit=0)
        return html
    def _cached_http_get(self, url, base_url, timeout, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True,
                         method=None, require_debrid=False, read_error=False, cache_limit=8):
        """Workhorse HTTP GET/POST with DB caching, cookie-jar handling and
        Cloudflare/captcha fallbacks. Returns the response body ('' on error)."""
        if require_debrid:
            # Lazily discover universal (debrid) resolvers once per process.
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [resolver for resolver in urlresolver.relevant_resolvers() if resolver.isUniversal()]
            if not Scraper.debrid_resolvers:
                logger.log('%s requires debrid: %s' % (self.__module__, Scraper.debrid_resolvers), log_utils.LOGDEBUG)
                return ''
        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else base_url
        if params:
            if url == base_url and not url.endswith('/'):
                url += '/'
            # Merge explicit params with any query string already on the url.
            parts = urlparse.urlparse(url)
            if parts.query:
                params.update(scraper_utils.parse_query(url))
                url = urlparse.urlunparse((parts.scheme, parts.netloc, parts.path, parts.params, '', parts.fragment))
            url += '?' + urllib.urlencode(params)
        logger.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            if isinstance(data, basestring):
                data = data
            else:
                data = urllib.urlencode(data, True)
        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data
        # Serve from the DB cache when a fresh-enough copy exists.
        _created, _res_header, html = self.db_connection().get_cached_url(url, data, cache_limit)
        if html:
            logger.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
            return html
        try:
            self.cj = self._set_cookies(base_url, cookies)
            if isinstance(url, unicode): url = url.encode('utf-8')
            request = urllib2.Request(url, data=data)
            headers = headers.copy()
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_header('Accept-Encoding', 'gzip')
            request.add_unredirected_header('Host', request.get_host())
            if referer: request.add_unredirected_header('Referer', referer)
            if 'Referer' in headers: del headers['Referer']
            if 'Host' in headers: del headers['Host']
            for key, value in headers.iteritems(): request.add_header(key, value)
            self.cj.add_cookie_header(request)
            # NOTE(review): install_opener mutates global urllib2 state, so
            # concurrent scrapers can race on redirect behavior.
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)
            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                logger.log('Response Cookies: %s - %s' % (url, scraper_utils.cookies_as_str(self.cj)), log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
                # Redirects suppressed: hand the target url back to the caller.
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    redir_url = response.info().getheader('Location')
                    if redir_url.startswith('='):
                        redir_url = redir_url[1:]
                    return redir_url
            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                logger.log('Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)
            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    html = ungz(response.read(MAX_RESPONSE))
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            # HTTP errors may still carry a useful body (and CF challenges).
            if e.info().get('Content-Encoding') == 'gzip':
                html = ungz(e.read(MAX_RESPONSE))
            else:
                html = e.read(MAX_RESPONSE)
            if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
                html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(), self.get_name())
                if not html:
                    return ''
            elif e.code == 503 and 'cf-browser-verification' in html:
                html = cloudflare.solve(url, self.cj, scraper_utils.get_ua(), extra_headers=headers)
                if not html:
                    return ''
            else:
                logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
                if not read_error:
                    return ''
        except Exception as e:
            logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
            return ''
        self.db_connection().cache_url(url, html, data)
        return html
    def _set_cookies(self, base_url, cookies):
        """Load this scraper's persistent cookie jar, merge in the supplied
        cookie dict for base_url's host, save, and return the jar."""
        cookie_file = os.path.join(COOKIEPATH, '%s_cookies.lwp' % (self.get_name()))
        cj = cookielib.LWPCookieJar(cookie_file)
        try: cj.load(ignore_discard=True)
        except: pass  # first run: no jar on disk yet
        if kodi.get_setting('cookie_debug') == 'true':
            logger.log('Before Cookies: %s - %s' % (self, scraper_utils.cookies_as_str(cj)), log_utils.LOGDEBUG)
        domain = urlparse.urlsplit(base_url).hostname
        for key in cookies:
            c = cookielib.Cookie(0, key, str(cookies[key]), port=None, port_specified=False, domain=domain, domain_specified=True,
                                 domain_initial_dot=False, path='/', path_specified=True, secure=False, expires=None, discard=False, comment=None,
                                 comment_url=None, rest={})
            cj.set_cookie(c)
        cj.save(ignore_discard=True)
        if kodi.get_setting('cookie_debug') == 'true':
            logger.log('After Cookies: %s - %s' % (self, scraper_utils.cookies_as_str(cj)), log_utils.LOGDEBUG)
        return cj
    def _default_get_episode_url(self, html, video, episode_pattern, title_pattern='', airdate_pattern=''):
        """Find an episode url in a show page via (in order of preference)
        S/E regex, airdate regex, then episode-title match, honoring the
        user's force-title and fallback settings."""
        logger.log('Default Episode Url: |%s|%s|' % (self.get_name(), video), log_utils.LOGDEBUG)
        if not html: return
        try: html = html[0].content
        except AttributeError: pass  # plain string html is fine as-is
        force_title = scraper_utils.force_title(video)
        if not force_title:
            if episode_pattern:
                match = re.search(episode_pattern, html, re.DOTALL | re.I)
                if match:
                    return scraper_utils.pathify_url(match.group(1))
            if kodi.get_setting('airdate-fallback') == 'true' and airdate_pattern and video.ep_airdate:
                # Expand the {placeholders} in the airdate pattern.
                airdate_pattern = airdate_pattern.replace('{year}', str(video.ep_airdate.year))
                airdate_pattern = airdate_pattern.replace('{month}', str(video.ep_airdate.month))
                airdate_pattern = airdate_pattern.replace('{p_month}', '%02d' % (video.ep_airdate.month))
                airdate_pattern = airdate_pattern.replace('{month_name}', MONTHS[video.ep_airdate.month - 1])
                airdate_pattern = airdate_pattern.replace('{short_month}', SHORT_MONS[video.ep_airdate.month - 1])
                airdate_pattern = airdate_pattern.replace('{day}', str(video.ep_airdate.day))
                airdate_pattern = airdate_pattern.replace('{p_day}', '%02d' % (video.ep_airdate.day))
                logger.log('Air Date Pattern: %s' % (airdate_pattern), log_utils.LOGDEBUG)
                match = re.search(airdate_pattern, html, re.DOTALL | re.I)
                if match:
                    return scraper_utils.pathify_url(match.group(1))
        else:
            logger.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)
        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and title_pattern:
            norm_title = scraper_utils.normalize_title(video.ep_title)
            for match in re.finditer(title_pattern, html, re.DOTALL | re.I):
                episode = match.groupdict()
                if norm_title == scraper_utils.normalize_title(episode['title']):
                    return scraper_utils.pathify_url(episode['url'])
    def _blog_proc_results(self, html, post_pattern, date_format, video_type, title, year):
        """Turn blog-style post listings into search results, filtering by
        title, year, SxxExx and/or airdate extracted from each post title."""
        results = []
        search_date = ''
        search_sxe = ''
        if video_type == VIDEO_TYPES.EPISODE:
            match = re.search('(.*?)\s*(S\d+E\d+)\s*', title)
            if match:
                show_title, search_sxe = match.groups()
            else:
                match = re.search('(.*?)\s*(\d{4})[._ -]?(\d{2})[._ -]?(\d{2})\s*', title)
                if match:
                    show_title, search_year, search_month, search_day = match.groups()
                    search_date = '%s-%s-%s' % (search_year, search_month, search_day)
                    search_date = scraper_utils.to_datetime(search_date, "%Y-%m-%d").date()
                else:
                    show_title = title
        else:
            show_title = title
        today = datetime.date.today()
        # Hoisted out of the loop: the filter setting is loop-invariant.
        try: filter_days = int(kodi.get_setting('%s-filter' % (self.get_name())))
        except ValueError: filter_days = 0
        filter_delta = datetime.timedelta(days=filter_days)
        for match in re.finditer(post_pattern, html, re.DOTALL):
            post_data = match.groupdict()
            post_title = post_data['post_title']
            post_title = re.sub('<[^>]*>', '', post_title)
            if 'quality' in post_data:
                post_title += '- [%s]' % (post_data['quality'])
            if filter_days and date_format and 'date' in post_data:
                post_data['date'] = post_data['date'].strip()
                post_date = scraper_utils.to_datetime(post_data['date'], date_format).date()
                if not post_date:
                    # Bug fix: the original format string had four %s
                    # placeholders but only three arguments (TypeError).
                    logger.log('Failed date Check in %s: |%s|%s|' % (self.get_name(), post_data['date'], date_format), log_utils.LOGWARNING)
                    post_date = today
                if today - post_date > filter_delta:
                    continue
            match_year = ''
            match_date = ''
            match_sxe = ''
            match_title = full_title = post_title
            if video_type == VIDEO_TYPES.MOVIE:
                meta = scraper_utils.parse_movie_link(post_title)
                match_year = meta['year']
            else:
                meta = scraper_utils.parse_episode_link(post_title)
                match_sxe = 'S%02dE%02d' % (int(meta['season']), int(meta['episode']))
                match_date = meta['airdate']
                match_title = meta['title']
                full_title = '%s (%sp) [%s]' % (meta['title'], meta['height'], meta['extra'])
            norm_title = scraper_utils.normalize_title(show_title)
            match_norm_title = scraper_utils.normalize_title(match_title)
            title_match = norm_title and (match_norm_title in norm_title or norm_title in match_norm_title)
            year_match = not year or not match_year or year == match_year
            sxe_match = not search_sxe or (search_sxe == match_sxe)
            date_match = not search_date or (search_date == match_date)
            logger.log('Blog Results: |%s|%s|%s| - |%s|%s|%s| - |%s|%s|%s| - |%s|%s|%s| (%s)' %
                       (match_norm_title, norm_title, title_match, year, match_year, year_match,
                        search_date, match_date, date_match, search_sxe, match_sxe, sxe_match, self.get_name()),
                       log_utils.LOGDEBUG)
            if title_match and year_match and date_match and sxe_match:
                quality = scraper_utils.height_get_quality(meta['height'])
                result = {'url': scraper_utils.pathify_url(post_data['url']), 'title': scraper_utils.cleanse_title(full_title), 'year': match_year, 'quality': quality}
                results.append(result)
        return results
    def _blog_get_url(self, video, delim='.'):
        """Locate a video's url on a blog-style site: try the DB cache, then
        search (SxxExx first, airdate as fallback), optionally picking the
        highest-quality result instead of the first."""
        url = None
        result = self.db_connection().get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
        if result:
            url = result[0][0]
            logger.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url), log_utils.LOGDEBUG)
        else:
            try: select = int(kodi.get_setting('%s-select' % (self.get_name())))
            except: select = 0
            if video.video_type == VIDEO_TYPES.EPISODE:
                temp_title = re.sub('[^A-Za-z0-9 ]', '', video.title)
                if not scraper_utils.force_title(video):
                    search_title = '%s S%02dE%02d' % (temp_title, int(video.season), int(video.episode))
                    if isinstance(video.ep_airdate, datetime.date):
                        fallback_search = '%s %s' % (temp_title, video.ep_airdate.strftime('%Y{0}%m{0}%d'.format(delim)))
                    else:
                        fallback_search = ''
                else:
                    if not video.ep_title: return None
                    search_title = '%s %s' % (temp_title, video.ep_title)
                    fallback_search = ''
            else:
                search_title = video.title
                fallback_search = ''
            results = self.search(video.video_type, search_title, video.year)
            if not results and fallback_search:
                results = self.search(video.video_type, fallback_search, video.year)
            if results:
                # TODO: First result isn't always the most recent...
                best_result = results[0]
                if select != 0:
                    best_qorder = 0
                    for result in results:
                        if 'quality' in result:
                            quality = result['quality']
                        else:
                            # Derive quality from the result title when absent.
                            match = re.search('\((\d+p)\)', result['title'])
                            if match:
                                quality = scraper_utils.height_get_quality(match.group(1))
                            else:
                                match = re.search('\[(.*)\]$', result['title'])
                                q_str = match.group(1) if match else ''
                                quality = scraper_utils.blog_get_quality(video, q_str, '')
                        logger.log('result: |%s|%s|%s|' % (result, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                        if Q_ORDER[quality] > best_qorder:
                            logger.log('Setting best as: |%s|%s|%s|' % (result, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                            best_result = result
                            best_qorder = Q_ORDER[quality]
                url = best_result['url']
                self.db_connection().set_related_url(video.video_type, video.title, video.year, self.get_name(), url, video.season, video.episode)
        return url
    def db_connection(self):
        """Lazily create and return this scraper's DB_Connection instance."""
        if self.__db_connection is None:
            self.__db_connection = DB_Connection()
        return self.__db_connection
    def _get_cookies(self):
        """Return this scraper's persisted cookies as a plain name->value dict."""
        cj = self._set_cookies(self.base_url, {})
        cookies = dict((cookie.name, cookie.value) for cookie in cj)
        return cookies
    def _get_stream_cookies(self):
        """Return the scraper's cookies url-quoted as a 'k=v; k=v' header value."""
        cookies = ['%s=%s' % (key, value) for key, value in self._get_cookies().iteritems()]
        return urllib.quote('; '.join(cookies))
| gpl-2.0 |
petteyg/intellij-community | python/helpers/epydoc/markup/__init__.py | 90 | 23129 | #
# epydoc package file
#
# A python documentation Module
# Edward Loper
#
# $Id: __init__.py 1577 2007-03-09 23:26:21Z dvarrazzo $
#
"""
Markup language support for docstrings. Each submodule defines a
parser for a single markup language. These parsers convert an
object's docstring to a L{ParsedDocstring}, a standard intermediate
representation that can be used to generate output.
C{ParsedDocstring}s support the following operations:
- output generation (L{to_plaintext()<ParsedDocstring.to_plaintext>},
L{to_html()<ParsedDocstring.to_html>}, and
L{to_latex()<ParsedDocstring.to_latex>}).
- Summarization (L{summary()<ParsedDocstring.summary>}).
- Field extraction (L{split_fields()<ParsedDocstring.split_fields>}).
  - Index term extraction (L{index_terms()<ParsedDocstring.index_terms>}).
The L{parse()} function provides a single interface to the
C{epydoc.markup} package: it takes a docstring and the name of a
markup language; delegates to the appropriate parser; and returns the
parsed docstring (along with any errors or warnings that were
generated).
The C{ParsedDocstring} output generation methods (C{to_M{format}()})
use a L{DocstringLinker} to link the docstring output with the rest of
the documentation that epydoc generates. C{DocstringLinker}s are
currently responsible for translating two kinds of crossreference:
- index terms (L{translate_indexterm()
<DocstringLinker.translate_indexterm>}).
- identifier crossreferences (L{translate_identifier_xref()
<DocstringLinker.translate_identifier_xref>}).
A parsed docstring's fields can be extracted using the
L{ParsedDocstring.split_fields()} method. This method divides a
docstring into its main body and a list of L{Field}s, each of which
encodes a single field. The field's bodies are encoded as
C{ParsedDocstring}s.
Markup errors are represented using L{ParseError}s. These exception
classes record information about the cause, location, and severity of
each error.
@sort: parse, ParsedDocstring, Field, DocstringLinker
@group Errors and Warnings: ParseError
@group Utility Functions: parse_type_of
@var SCRWIDTH: The default width with which text will be wrapped
when formatting the output of the parser.
@type SCRWIDTH: C{int}
@var _parse_warnings: Used by L{_parse_warn}.
"""
__docformat__ = 'epytext en'
import re, types, sys
from epydoc import log
from epydoc.util import plaintext_to_html, plaintext_to_latex
import epydoc
from epydoc.compat import *
##################################################
## Contents
##################################################
#
# 1. parse() dispatcher
# 2. ParsedDocstring abstract base class
# 3. Field class
# 4. Docstring Linker
# 5. ParseError exceptions
# 6. Misc helpers
#
##################################################
## Dispatcher
##################################################
# Maps a lower-cased markup language name to either a parse function or,
# initially, the dotted name of the module providing parse_docstring;
# parse() replaces the string with the imported function on first use.
_markup_language_registry = {
    'restructuredtext': 'epydoc.markup.restructuredtext',
    'epytext': 'epydoc.markup.epytext',
    'plaintext': 'epydoc.markup.plaintext',
    'javadoc': 'epydoc.markup.javadoc',
}
def register_markup_language(name, parse_function):
    """
    Register a new markup language named C{name}, which can be parsed
    by the function C{parse_function}.
    @param name: The name of the markup language.  C{name} should be a
        simple identifier, such as C{'epytext'} or C{'restructuredtext'}.
        Markup language names are case insensitive.
    @param parse_function: A function which can be used to parse the
        markup language, and returns a L{ParsedDocstring}.  It should
        have the following signature:
        >>> def parse(s, errors):
        ...     'returns a ParsedDocstring'
        Where:
        - C{s} is the string to parse.  (C{s} will be a unicode
            string.)
        - C{errors} is a list; any errors that are generated
            during docstring parsing should be appended to this
            list (as L{ParseError} objects).
    """
    # Names are case-insensitive, so store under the lower-cased key.
    normalized_name = name.lower()
    _markup_language_registry[normalized_name] = parse_function
# Names of the markup languages that parse() has dispatched to so far.
MARKUP_LANGUAGES_USED = set()
def parse(docstring, markup='plaintext', errors=None, **options):
"""
Parse the given docstring, and use it to construct a
C{ParsedDocstring}. If any fatal C{ParseError}s are encountered
while parsing the docstring, then the docstring will be rendered
as plaintext, instead.
@type docstring: C{string}
@param docstring: The docstring to encode.
@type markup: C{string}
@param markup: The name of the markup language that is used by
the docstring. If the markup language is not supported, then
the docstring will be treated as plaintext. The markup name
is case-insensitive.
@param errors: A list where any errors generated during parsing
will be stored. If no list is specified, then fatal errors
will generate exceptions, and non-fatal errors will be
ignored.
@type errors: C{list} of L{ParseError}
@rtype: L{ParsedDocstring}
@return: A L{ParsedDocstring} that encodes the contents of
C{docstring}.
@raise ParseError: If C{errors} is C{None} and an error is
encountered while parsing.
"""
# Initialize errors list.
raise_on_error = (errors is None)
if errors == None: errors = []
# Normalize the markup language name.
markup = markup.lower()
# Is the markup language valid?
if not re.match(r'\w+', markup):
_parse_warn('Bad markup language name %r. Treating '
'docstrings as plaintext.' % markup)
import epydoc.markup.plaintext as plaintext
return plaintext.parse_docstring(docstring, errors, **options)
# Is the markup language supported?
if markup not in _markup_language_registry:
_parse_warn('Unsupported markup language %r. Treating '
'docstrings as plaintext.' % markup)
import epydoc.markup.plaintext as plaintext
return plaintext.parse_docstring(docstring, errors, **options)
# Get the parse function.
parse_docstring = _markup_language_registry[markup]
# If it's a string, then it names a function to import.
if isinstance(parse_docstring, basestring):
try: exec('from %s import parse_docstring' % parse_docstring)
except ImportError, e:
_parse_warn('Error importing %s for markup language %s: %s' %
(parse_docstring, markup, e))
import epydoc.markup.plaintext as plaintext
return plaintext.parse_docstring(docstring, errors, **options)
_markup_language_registry[markup] = parse_docstring
# Keep track of which markup languages have been used so far.
MARKUP_LANGUAGES_USED.add(markup)
# Parse the docstring.
try: parsed_docstring = parse_docstring(docstring, errors, **options)
except KeyboardInterrupt: raise
except Exception, e:
if epydoc.DEBUG: raise
log.error('Internal error while parsing a docstring: %s; '
'treating docstring as plaintext' % e)
import epydoc.markup.plaintext as plaintext
return plaintext.parse_docstring(docstring, errors, **options)
# Check for fatal errors.
fatal_errors = [e for e in errors if e.is_fatal()]
if fatal_errors and raise_on_error: raise fatal_errors[0]
if fatal_errors:
import epydoc.markup.plaintext as plaintext
return plaintext.parse_docstring(docstring, errors, **options)
return parsed_docstring
# only issue each warning once:
_parse_warnings = {}

def _parse_warn(estr):
    """
    Log a warning message, suppressing duplicates: each distinct
    message is emitted at most once per process.
    """
    # Mutating the module-level dict needs no `global` declaration.
    if estr not in _parse_warnings:
        _parse_warnings[estr] = 1
        log.warning(estr)
##################################################
## ParsedDocstring
##################################################
class ParsedDocstring:
"""
A standard intermediate representation for parsed docstrings that
can be used to generate output. Parsed docstrings are produced by
markup parsers (such as L{epytext.parse} or L{javadoc.parse}).
C{ParsedDocstring}s support several kinds of operation:
- output generation (L{to_plaintext()}, L{to_html()}, and
L{to_latex()}).
- Summarization (L{summary()}).
- Field extraction (L{split_fields()}).
- Index term extraction (L{index_terms()}.
The output generation methods (C{to_M{format}()}) use a
L{DocstringLinker} to link the docstring output with the rest
of the documentation that epydoc generates.
Subclassing
===========
The only method that a subclass is I{required} to implement is
L{to_plaintext()}; but it is often useful to override the other
methods. The default behavior of each method is described below:
- C{to_I{format}}: Calls C{to_plaintext}, and uses the string it
returns to generate verbatim output.
- C{summary}: Returns C{self} (i.e., the entire docstring).
- C{split_fields}: Returns C{(self, [])} (i.e., extracts no
fields).
- C{index_terms}: Returns C{[]} (i.e., extracts no index terms).
If and when epydoc adds more output formats, new C{to_I{format}}
methods will be added to this base class; but they will always
be given a default implementation.
"""
def split_fields(self, errors=None):
"""
Split this docstring into its body and its fields.
@return: A tuple C{(M{body}, M{fields})}, where C{M{body}} is
the main body of this docstring, and C{M{fields}} is a list
of its fields. If the resulting body is empty, return
C{None} for the body.
@rtype: C{(L{ParsedDocstring}, list of L{Field})}
@param errors: A list where any errors generated during
splitting will be stored. If no list is specified, then
errors will be ignored.
@type errors: C{list} of L{ParseError}
"""
# Default behavior:
return self, []
def summary(self):
"""
@return: A pair consisting of a short summary of this docstring and a
boolean value indicating whether there is further documentation
in addition to the summary. Typically, the summary consists of the
first sentence of the docstring.
@rtype: (L{ParsedDocstring}, C{bool})
"""
# Default behavior:
return self, False
def concatenate(self, other):
"""
@return: A new parsed docstring containing the concatination
of this docstring and C{other}.
@raise ValueError: If the two parsed docstrings are
incompatible.
"""
return ConcatenatedDocstring(self, other)
def __add__(self, other): return self.concatenate(other)
def to_html(self, docstring_linker, **options):
"""
Translate this docstring to HTML.
@param docstring_linker: An HTML translator for crossreference
links into and out of the docstring.
@type docstring_linker: L{DocstringLinker}
@param options: Any extra options for the output. Unknown
options are ignored.
@return: An HTML fragment that encodes this docstring.
@rtype: C{string}
"""
# Default behavior:
plaintext = plaintext_to_html(self.to_plaintext(docstring_linker))
return '<pre class="literalblock">\n%s\n</pre>\n' % plaintext
def to_latex(self, docstring_linker, **options):
"""
Translate this docstring to LaTeX.
@param docstring_linker: A LaTeX translator for crossreference
links into and out of the docstring.
@type docstring_linker: L{DocstringLinker}
@param options: Any extra options for the output. Unknown
options are ignored.
@return: A LaTeX fragment that encodes this docstring.
@rtype: C{string}
"""
# Default behavior:
plaintext = plaintext_to_latex(self.to_plaintext(docstring_linker))
return '\\begin{alltt}\n%s\\end{alltt}\n\n' % plaintext
def to_plaintext(self, docstring_linker, **options):
"""
Translate this docstring to plaintext.
@param docstring_linker: A plaintext translator for
crossreference links into and out of the docstring.
@type docstring_linker: L{DocstringLinker}
@param options: Any extra options for the output. Unknown
options are ignored.
@return: A plaintext fragment that encodes this docstring.
@rtype: C{string}
"""
raise NotImplementedError, 'ParsedDocstring.to_plaintext()'
def index_terms(self):
"""
@return: The list of index terms that are defined in this
docstring. Each of these items will be added to the index
page of the documentation.
@rtype: C{list} of C{ParsedDocstring}
"""
# Default behavior:
return []
##################################################
## Concatenated Docstring
##################################################
class ConcatenatedDocstring:
    """
    A parsed docstring formed by concatenating several other parsed
    docstrings.  Every operation is delegated to the component
    docstrings, and their results are combined.
    """
    def __init__(self, *parsed_docstrings):
        # Silently drop None components, so callers may pass optional
        # docstrings without checking them first.
        self._parsed_docstrings = [pds for pds in parsed_docstrings
                                   if pds is not None]

    def split_fields(self, errors=None):
        """
        Split each component docstring into a body and a field list;
        return the concatenated bodies and the combined field list.
        """
        bodies = []
        fields = []
        for doc in self._parsed_docstrings:
            # BUGFIX: forward `errors` so that errors generated while
            # splitting the components are reported to the caller
            # (previously the argument was accepted but silently
            # ignored).
            b, f = doc.split_fields(errors)
            bodies.append(b)
            fields.extend(f)
        return ConcatenatedDocstring(*bodies), fields

    def summary(self):
        """Return the summary of the first component docstring."""
        return self._parsed_docstrings[0].summary()

    def to_html(self, docstring_linker, **options):
        """Concatenate the HTML renderings of all components."""
        htmlstring = ''
        for doc in self._parsed_docstrings:
            htmlstring += doc.to_html(docstring_linker, **options)
        return htmlstring

    def to_latex(self, docstring_linker, **options):
        """Concatenate the LaTeX renderings of all components."""
        latexstring = ''
        for doc in self._parsed_docstrings:
            latexstring += doc.to_latex(docstring_linker, **options)
        return latexstring

    def to_plaintext(self, docstring_linker, **options):
        """Concatenate the plaintext renderings of all components."""
        textstring = ''
        for doc in self._parsed_docstrings:
            textstring += doc.to_plaintext(docstring_linker, **options)
        return textstring

    def index_terms(self):
        """Return the combined index terms of all components."""
        terms = []
        for doc in self._parsed_docstrings:
            terms += doc.index_terms()
        return terms
##################################################
## Fields
##################################################
class Field:
    """
    The contents of a docstring's field.  Docstring fields are used
    to describe specific aspects of an object, such as a parameter of
    a function or the author of a module.  Each field consists of a
    tag, an optional argument, and a body:
      - The tag specifies the type of information that the field
        encodes.
      - The argument specifies the object that the field describes.
        The argument may be C{None} or a C{string}.
      - The body contains the field's information.

    Tags are automatically downcased and stripped; and arguments are
    automatically stripped.
    """
    def __init__(self, tag, arg, body):
        # Tags are case-insensitive; normalize them once up front.
        self._tag = tag.lower().strip()
        self._arg = None if arg is None else arg.strip()
        self._body = body

    def tag(self):
        """
        @return: This field's tag.
        @rtype: C{string}
        """
        return self._tag

    def arg(self):
        """
        @return: This field's argument, or C{None} if this field has
            no argument.
        @rtype: C{string} or C{None}
        """
        return self._arg

    def body(self):
        """
        @return: This field's body.
        @rtype: L{ParsedDocstring}
        """
        return self._body

    def __repr__(self):
        if self._arg is None:
            return '<Field @%s: ...>' % self._tag
        return '<Field @%s %s: ...>' % (self._tag, self._arg)
##################################################
## Docstring Linker (resolves crossreferences)
##################################################
class DocstringLinker:
    """
    A translator for crossreference links into and out of a
    C{ParsedDocstring}.  C{DocstringLinker} is used by
    C{ParsedDocstring} to convert these crossreference links into
    appropriate output formats.  For example,
    C{DocstringLinker.to_html} expects a C{DocstringLinker} that
    converts crossreference links to HTML.
    """
    def translate_indexterm(self, indexterm):
        """
        Translate an index term to the appropriate output format.  The
        output will typically include a crossreference anchor.

        @type indexterm: L{ParsedDocstring}
        @param indexterm: The index term to translate.
        @rtype: C{string}
        @return: The translated index term.
        """
        # BUGFIX: use the call form of `raise`, valid in both Python 2
        # and Python 3 (the comma form is a SyntaxError under Python 3).
        raise NotImplementedError('DocstringLinker.translate_indexterm()')

    def translate_identifier_xref(self, identifier, label=None):
        """
        Translate a crossreference link to a Python identifier to the
        appropriate output format.  The output will typically include
        a reference or pointer to the crossreference target.

        @type identifier: C{string}
        @param identifier: The name of the Python identifier that
            should be linked to.
        @type label: C{string} or C{None}
        @param label: The label that should be used for the identifier,
            if it's different from the name of the identifier.
        @rtype: C{string}
        @return: The translated crossreference link.
        """
        # BUGFIX: the error message previously named a nonexistent
        # method ('translate_xref'); report the real method name.
        raise NotImplementedError('DocstringLinker.translate_identifier_xref()')
##################################################
## ParseError exceptions
##################################################
class ParseError(Exception):
    """
    The base class for errors generated while parsing docstrings.

    @ivar _linenum: The line on which the error occured within the
        docstring.  The linenum of the first line is 0.
    @type _linenum: C{int}
    @ivar _offset: The line number where the docstring begins.  This
        offset is added to C{_linenum} when displaying the line number
        of the error.  Default value: 1.
    @type _offset: C{int}
    @ivar _descr: A description of the error.
    @type _descr: C{string}
    @ivar _fatal: True if this is a fatal error.
    @type _fatal: C{boolean}
    """
    def __init__(self, descr, linenum=None, is_fatal=1):
        """
        @type descr: C{string}
        @param descr: A description of the error.
        @type linenum: C{int}
        @param linenum: The line on which the error occured within
            the docstring.  The linenum of the first line is 0.
        @type is_fatal: C{boolean}
        @param is_fatal: True if this is a fatal error.
        """
        self._descr = descr
        self._linenum = linenum
        self._fatal = is_fatal
        self._offset = 1

    def is_fatal(self):
        """
        @return: true if this is a fatal error.  If an error is fatal,
            then epydoc should ignore the output of the parser, and
            parse the docstring as plaintext.
        @rtype: C{boolean}
        """
        return self._fatal

    def linenum(self):
        """
        @return: The line number on which the error occured (including
            any offset).  If the line number is unknown, then return
            C{None}.
        @rtype: C{int} or C{None}
        """
        if self._linenum is None: return None
        else: return self._offset + self._linenum

    def set_linenum_offset(self, offset):
        """
        Set the line number offset for this error.  This offset is the
        line number where the docstring begins.  This offset is added
        to C{_linenum} when displaying the line number of the error.

        @param offset: The new line number offset.
        @type offset: C{int}
        @rtype: C{None}
        """
        self._offset = offset

    def descr(self):
        # Plain accessor for the error description.
        return self._descr

    def __str__(self):
        """
        Return a string representation of this C{ParseError}.  This
        multi-line string contains a description of the error, and
        specifies where it occured.

        @return: the informal representation of this C{ParseError}.
        @rtype: C{string}
        """
        if self._linenum is not None:
            return 'Line %s: %s' % (self._linenum+self._offset, self.descr())
        else:
            return self.descr()

    def __repr__(self):
        """
        Return the formal representation of this C{ParseError}.
        C{ParseError}s have formal representations of the form::
           <ParseError on line 12>

        @return: the formal representation of this C{ParseError}.
        @rtype: C{string}
        """
        if self._linenum is None:
            # BUGFIX: the closing '>' was missing in this branch.
            return '<ParseError on line %d>' % self._offset
        else:
            return '<ParseError on line %d>' % (self._linenum+self._offset)

    def __cmp__(self, other):
        """
        Compare two C{ParseError}s, based on their line number.
          - Return -1 if C{self.linenum<other.linenum}
          - Return +1 if C{self.linenum>other.linenum}
          - Return 0 if C{self.linenum==other.linenum}.
        The return value is undefined if C{other} is not a
        ParseError.

        @rtype: C{int}
        """
        if not isinstance(other, ParseError): return -1000
        # Avoid the Python 2-only builtin cmp(): this arithmetic form
        # yields the same -1/0/+1 result and also runs under Python 3.
        a = self._linenum + self._offset
        b = other._linenum + other._offset
        return (a > b) - (a < b)
##################################################
## Misc helpers
##################################################
# These are used by multiple markup parsers
def parse_type_of(obj):
    """
    @return: A C{ParsedDocstring} that encodes the type of the given
        object.
    @rtype: L{ParsedDocstring}
    @param obj: The object whose type should be returned as DOM document.
    @type obj: any
    """
    # Deferred imports avoid a circular dependency with the epytext
    # markup module.  This is a bit hackish; oh well. :)
    from epydoc.markup.epytext import ParsedEpytextDocstring
    from xml.dom.minidom import Document

    doc = Document()

    def child(parent, tag_name):
        # Create an element and attach it to its parent in one step.
        node = doc.createElement(tag_name)
        parent.appendChild(node)
        return node

    para = child(child(doc, 'epytext'), 'para')

    if type(obj) is types.InstanceType:
        # Old-style class instance: emit a crossreference link naming
        # the instance's class.
        link = child(para, 'link')
        name = child(link, 'name')
        target = child(link, 'target')
        name.appendChild(doc.createTextNode(str(obj.__class__.__name__)))
        target.appendChild(doc.createTextNode(str(obj.__class__)))
    else:
        code = child(para, 'code')
        code.appendChild(doc.createTextNode(type(obj).__name__))
    return ParsedEpytextDocstring(doc)
| apache-2.0 |
scalable-networks/gnuradio-3.7.0.1 | grc/python/extract_docs.py | 4 | 2185 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import re
def _extract(key):
"""
Extract the documentation from the python __doc__ strings.
If multiple modules match, combine the docs.
Args:
key: the block key
Returns:
a string with documentation
"""
#extract matches
try:
module_name, constructor_name = key.split('_', 1)
module = __import__('gnuradio.'+module_name)
module = getattr(module, module_name)
except ImportError:
try:
module_name, constructor_name = key.split('_', 1)
module = __import__(module_name)
except: return ''
except:
return ''
pattern = constructor_name.replace('_', '_*').replace('x', '\w')
pattern_matcher = re.compile('^%s\w*$'%pattern)
matches = filter(lambda x: pattern_matcher.match(x), dir(module))
#combine all matches
doc_strs = list()
for match in matches:
try:
title = ' --- ' + match + ' --- '
doc_strs.append('\n\n'.join([title, getattr(module, match).__doc__]).strip())
except: pass
return '\n\n'.join(doc_strs)
# Cache of key -> documentation string, filled lazily by extract().
_docs_cache = dict()

def extract(key):
    """
    Call the private extract and cache the result.

    Args:
        key: the block key

    Returns:
        a string with documentation
    """
    # BUGFIX: dict.has_key() was removed in Python 3 (and is deprecated
    # in Python 2); the `in` operator is the portable spelling.
    if key not in _docs_cache:
        _docs_cache[key] = _extract(key)
    return _docs_cache[key]
if __name__ == '__main__':
    import sys
    # Use the call form of print: for a single argument this behaves
    # identically under Python 2 (parenthesized expression) and
    # Python 3 (function call).
    print(extract(sys.argv[1]))
| gpl-3.0 |
tardyp/buildbot | master/buildbot/steps/package/deb/lintian.py | 5 | 2885 | # This program is free software; you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright Marius Rieder <marius.rieder@durchmesser.ch>
"""
Steps and objects related to lintian
"""
from twisted.internet import defer
from buildbot import config
from buildbot.process import buildstep
from buildbot.process import logobserver
from buildbot.process.results import FAILURE
from buildbot.process.results import SUCCESS
from buildbot.process.results import WARNINGS
from buildbot.steps.package import util as pkgutil
class MaxQObserver(logobserver.LogLineObserver):
    """Log observer that counts 'TEST FAILURE:' lines seen on stdout."""

    def __init__(self):
        super().__init__()
        # number of failure lines observed so far
        self.failures = 0

    def outLineReceived(self, line):
        # Every matching stdout line bumps the failure counter.
        if line.startswith('TEST FAILURE:'):
            self.failures += 1
class DebLintian(buildstep.ShellMixin, buildstep.BuildStep):
    """Build step that runs ``lintian`` against a Debian package or
    .changes file and grades the build from its output.

    The step fails when lintian exits non-zero or reports errors, and
    yields a warnings result when only warnings are reported.
    """
    name = "lintian"
    description = "Lintian running"
    descriptionDone = "Lintian"

    # Path of the file to check (required; may be set as a class
    # attribute by subclasses or passed to the constructor).
    fileloc = None
    # Lintian tag names to silence via --suppress-tags.
    suppressTags = []

    # Lintian findings should warn about, not flunk, the overall build.
    flunkOnFailure = False
    warnOnFailure = True

    def __init__(self, fileloc=None, suppressTags=None, **kwargs):
        """
        @param fileloc: path of the .deb/.changes file to run lintian on
        @param suppressTags: iterable of lintian tag names to suppress
        """
        # Let ShellMixin consume its own kwargs before BuildStep sees them.
        kwargs = self.setupShellMixin(kwargs)
        super().__init__(**kwargs)
        if fileloc:
            self.fileloc = fileloc
        if suppressTags:
            self.suppressTags = suppressTags

        if not self.fileloc:
            config.error("You must specify a fileloc")

        self.command = ["lintian", "-v", self.fileloc]

        if self.suppressTags:
            for tag in self.suppressTags:
                self.command += ['--suppress-tags', tag]

        # WEObserver collects warning/error lines from lintian's stdio.
        self.obs = pkgutil.WEObserver()
        self.addLogObserver('stdio', self.obs)

    @defer.inlineCallbacks
    def run(self):
        """Execute lintian remotely and map its findings to a result code."""
        cmd = yield self.makeRemoteShellCommand()
        yield self.runCommand(cmd)

        stdio_log = yield self.getLog('stdio')
        yield stdio_log.finish()

        warnings = self.obs.warnings
        errors = self.obs.errors

        # Attach the collected findings as separate complete logs.
        if warnings:
            yield self.addCompleteLog('%d Warnings' % len(warnings), "\n".join(warnings))
        if errors:
            yield self.addCompleteLog('%d Errors' % len(errors), "\n".join(errors))

        # Errors (or a non-zero lintian exit) fail the step; warnings
        # alone downgrade it to WARNINGS.
        if cmd.rc != 0 or errors:
            return FAILURE
        if warnings:
            return WARNINGS
        return SUCCESS
| gpl-2.0 |
notationist17/MuseScore | thirdparty/freetype/src/tools/docmaker/sources.py | 44 | 13003 | #
# sources.py
#
# Convert source code comments to multi-line blocks (library file).
#
# Copyright 2002-2015 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This library file contains definitions of classes needed to decompose C
# source code files into a series of multi-line `blocks'. There are two
# kinds of blocks.
#
# - Normal blocks, which contain source code or ordinary comments.
#
# - Documentation blocks, which have restricted formatting, and whose text
# always start with a documentation markup tag like `<Function>',
# `<Type>', etc.
#
# The routines to process the content of documentation blocks are contained
# in file `content.py'; the classes and methods found here only deal with
# text parsing and basic documentation block extraction.
#
import fileinput, re, sys, os, string
################################################################
##
## SOURCE BLOCK FORMAT CLASS
##
## A simple class containing compiled regular expressions to detect
## potential documentation format block comments within C source code.
##
## The `column' pattern must contain a group to `unbox' the content of
## documentation comment blocks.
##
## Later on, paragraphs are converted to long lines, which simplifies the
## regular expressions that act upon the text.
##
class SourceBlockFormat:
    """A compiled description of one documentation-comment block layout."""

    def __init__( self, id, start, column, end ):
        """Create a block pattern, used to recognize special documentation
           blocks."""
        self.id = id
        # All three patterns are compiled in VERBOSE mode so that the
        # module-level templates can carry inline comments.
        self.start, self.column, self.end = [
            re.compile( pattern, re.VERBOSE )
            for pattern in ( start, column, end )]
#
# Format 1 documentation comment blocks.
#
# /************************************/ (at least 2 asterisks)
# /* */
# /* */
# /* */
# /************************************/ (at least 2 asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# Format 2 documentation comment blocks.
#
# /************************************ (at least 2 asterisks)
# *
# * (1 asterisk)
# *
# */ (1 or more asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?![*/]) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
#
# The list of supported documentation block formats. We could add new ones
# quite easily.
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# The following regular expressions correspond to markup tags within the
# documentation comment blocks. They are equivalent despite their different
# syntax.
#
# A markup tag consists of letters or character `-', to be found in group 1.
#
# Notice that a markup tag _must_ begin a new paragraph.
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# The list of supported markup tags. We could add new ones quite easily.
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# A regular expression to detect a cross reference, after markup tags have
# been stripped off. Group 1 is the reference, group 2 the rest of the
# line.
#
# A cross reference consists of letters, digits, or characters `-' and `_'.
#
re_crossref = re.compile( r'@((?:\w|-)*)(.*)' ) # @foo
#
# Two regular expressions to detect italic and bold markup, respectively.
# Group 1 is the markup, group 2 the rest of the line.
#
# Note that the markup is limited to words consisting of letters, digits,
# the characters `_' and `-', or an apostrophe (but not as the first
# character).
#
re_italic = re.compile( r"_((?:\w|-)(?:\w|'|-)*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*((?:\w|-)(?:\w|'|-)*)\*(.*)" ) # *bold*
#
# This regular expression code to identify an URL has been taken from
#
# http://mail.python.org/pipermail/tutor/2002-September/017228.html
#
# (with slight modifications).
#
urls = r'(?:https?|telnet|gopher|file|wais|ftp)'
ltrs = r'\w'
gunk = r'/#~:.?+=&%@!\-'
punc = r'.:?\-'
any = "%(ltrs)s%(gunk)s%(punc)s" % { 'ltrs' : ltrs,
'gunk' : gunk,
'punc' : punc }
url = r"""
(
\b # start at word boundary
%(urls)s : # need resource and a colon
[%(any)s] +? # followed by one or more of any valid
# character, but be conservative and
# take only what you need to...
(?= # [look-ahead non-consumptive assertion]
[%(punc)s]* # either 0 or more punctuation
(?: # [non-grouping parentheses]
[^%(any)s] | $ # followed by a non-url char
# or end of the string
)
)
)
""" % {'urls' : urls,
'any' : any,
'punc' : punc }
re_url = re.compile( url, re.VERBOSE | re.MULTILINE )
#
# A regular expression that stops collection of comments for the current
# block.
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # /* */
#
# A regular expression to find possible C identifiers while outputting
# source code verbatim, covering things like `*foo' or `(bar'. Group 1 is
# the prefix, group 2 the identifier -- since we scan lines from left to
# right, sequentially splitting the source code into prefix and identifier
# is fully sufficient for our purposes.
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# A regular expression that matches a list of reserved C source keywords.
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
\#include |
\#define |
\#undef |
\#if |
\#ifdef |
\#ifndef |
\#else |
\#endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## There are two important fields in a `SourceBlock' object.
##
## self.lines
## A list of text lines for the corresponding block.
##
## self.content
## For documentation comment blocks only, this is the block content
## that has been `unboxed' from its decoration. This is `None' for all
## other blocks (i.e., sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock:
    """One multi-line block of a C source file: either ordinary source
    lines, or a documentation comment whose `unboxed' text is stored
    in `self.content'."""

    def __init__( self, processor, filename, lineno, lines ):
        """Record the block's origin and, for documentation comment
        blocks, extract the comment content and check for a markup tag."""
        self.processor = processor
        self.filename  = filename
        self.lineno    = lineno
        self.lines     = lines[:]
        # the format active in the processor when this block was created
        self.format    = processor.format
        self.content   = []

        # plain source block: nothing to unbox
        if self.format is None:
            return

        # extract comment lines (group 1 of the column pattern holds
        # the text between the comment decorations)
        lines = []
        for line0 in self.lines:
            m = self.format.column.match( line0 )
            if m:
                lines.append( m.group( 1 ) )

        # now, look for a markup tag; only blocks that start with one
        # count as documentation blocks
        for l in lines:
            # BUGFIX (portability): str.strip() replaces the Python
            # 2-only string.strip() helper removed in Python 3.
            l = l.strip()
            if len( l ) > 0:
                for tag in re_markup_tags:
                    if tag.match( l ):
                        self.content = lines
                        return

    def location( self ):
        """Return a human-readable '(filename:lineno)' origin string."""
        return "(" + self.filename + ":" + repr( self.lineno ) + ")"

    # debugging only -- not used in normal operations
    def dump( self ):
        """Print the block's unboxed content, or its raw lines."""
        # print() with a single argument behaves the same under
        # Python 2 and Python 3; the dead `fmt` computation that was
        # never printed has been removed.
        if self.content:
            print( "{{{content start---" )
            for l in self.content:
                print( l )
            print( "---content end}}}" )
            return

        for line in self.lines:
            print( line )
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The `SourceProcessor' is in charge of reading a C source file and
## decomposing it into a series of different `SourceBlock' objects.
##
## A SourceBlock object consists of the following data.
##
## - A documentation comment block using one of the layouts above. Its
## exact format will be discussed later.
##
## - Normal sources lines, including comments.
##
##
class SourceProcessor:
    """Reads a C source file and decomposes it into a list of
    `SourceBlock' objects (documentation comment blocks and runs of
    ordinary source lines)."""

    def __init__( self ):
        """Initialize a source processor."""
        self.blocks = []        # completed SourceBlock objects
        self.filename = None    # file currently being parsed
        self.format = None      # active SourceBlockFormat, or None
        self.lines = []         # lines accumulated for the next block

    def reset( self ):
        """Reset a block processor and clean up all its blocks."""
        self.blocks = []
        self.format = None

    def parse_file( self, filename ):
        """Parse a C source file and add its blocks to the processor's
           list."""
        self.reset()

        self.filename = filename

        fileinput.close()
        self.format = None
        self.lineno = 0
        self.lines = []

        for line in fileinput.input( filename ):
            # strip trailing newlines, important on Windows machines!
            # NOTE(review): only '\n' (\012) is removed; a '\r' from a
            # CRLF file would remain -- confirm inputs are LF-only.
            if line[-1] == '\012':
                line = line[0:-1]

            if self.format == None:
                self.process_normal_line( line )
            else:
                if self.format.end.match( line ):
                    # A normal block end.  Add it to `lines' and create a
                    # new block
                    self.lines.append( line )
                    self.add_block_lines()
                elif self.format.column.match( line ):
                    # A normal column line.  Add it to `lines'.
                    self.lines.append( line )
                else:
                    # An unexpected block end.  Create a new block, but
                    # don't process the line.
                    self.add_block_lines()

                    # we need to process the line again
                    self.process_normal_line( line )

        # record the last lines
        self.add_block_lines()

    def process_normal_line( self, line ):
        """Process a normal line and check whether it is the start of a new
           block."""
        # A line matching a format's start pattern flushes the current
        # accumulation and switches the processor into that format.
        for f in re_source_block_formats:
            if f.start.match( line ):
                self.add_block_lines()
                self.format = f
                self.lineno = fileinput.filelineno()

        self.lines.append( line )

    def add_block_lines( self ):
        """Add the current accumulated lines and create a new block."""
        if self.lines != []:
            block = SourceBlock( self,
                                 self.filename,
                                 self.lineno,
                                 self.lines )

            self.blocks.append( block )
            self.format = None
            self.lines = []

    # debugging only, not used in normal operations
    def dump( self ):
        """Print all blocks in a processor."""
        for b in self.blocks:
            b.dump()
# eof
| gpl-2.0 |
ozFri/rhizi | rhizi/tests/util.py | 1 | 11087 | # This file is part of rhizi, a collaborative knowledge graph editor.
# Copyright (C) 2014-2015 Rhizi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Various test utilities
"""
import string
import random
import logging
from time import sleep
import socket
import sys
import os
import urllib2
import subprocess
import atexit
from glob import glob
import tarfile
import functools
from unittest import TestCase
from .. import db_controller as dbc
from ..model.graph import Topo_Diff
from ..model.model import Link, RZDoc
from ..neo4j_util import generate_random_id__uuid, generate_random_rzdoc_id
from ..rz_kernel import RZ_Kernel
from ..rz_mesh import init_ws_interface
from ..rz_server import init_webapp
from ..rz_user import User_Signup_Request
from ..rz_config import RZ_Config
from .. import rz_api
from ..db_op import DBO_factory__default, DBO_raw_query_set
from ..rz_user_db import Fake_User_DB
from .neo4j_test_util import rand_label
def parentdir(x):
return os.path.realpath(os.path.join(x, '..'))
def generate_parent_paths():
    """Yield the current working directory followed by each successive
    parent directory, stopping once the filesystem root is reached."""
    def _up(path):
        # same resolution that parentdir() performs
        return os.path.realpath(os.path.join(path, '..'))

    current = os.getcwd()
    yield current
    parent = _up(current)
    while parent != current:
        yield parent
        current, parent = parent, _up(parent)
def find_dir_up_or_abort(dirname):
    """Walk from the cwd toward the filesystem root and return the first
    existing directory named *dirname*; raise when none is found."""
    for base in generate_parent_paths():
        candidate = os.path.join(base, dirname)
        # isdir() implies existence, so a separate exists() check is
        # unnecessary.
        if os.path.isdir(candidate):
            return candidate
    raise Exception("{} not found".format(dirname))
def env(name, default):
    """Look up *name* in the process environment, falling back to *default*."""
    if name in os.environ:
        return os.environ[name]
    return default
# Root of the git checkout containing this test suite.
REPO_ROOT = parentdir(find_dir_up_or_abort('.git'))
# Port of the debugging SMTP server (overridable via the environment).
MTA_PORT = int(env('RHIZI_TESTS__MTA_PORT', 10025))
# Neo4j release used by the test fixture, and where to fetch/unpack it.
NEO4J_VERSION = '2.3.0'
NEO4J_URL = "http://neo4j.com/artifact.php?name=neo4j-community-{}-unix.tar.gz".format(NEO4J_VERSION)
ASSET_DIRECTORY = env('RHIZI_TESTS__ASSET_DIRECTORY', os.path.join(REPO_ROOT, 'assets'))
NEO4J_ARCHIVE = os.path.join(ASSET_DIRECTORY, 'neo4j-community-{}-unix.tar.gz'.format(NEO4J_VERSION))
NEO4J_SUBDIR = 'neo4j-community-{}'.format(NEO4J_VERSION)
NEO4J_DEST = os.path.join(ASSET_DIRECTORY, NEO4J_SUBDIR)
NEO4J_BIN = os.path.join(NEO4J_DEST, "bin", "neo4j")
# Port the test Neo4j server listens on (overridable via the environment).
NEO4J_PORT = int(env('RHIZI_TESTS__NEO4J_PORT', 28800))
NEO4J_STDOUT = os.path.join(NEO4J_DEST, "stdout.log")
MTA_STDOUT = os.path.join(ASSET_DIRECTORY, "mta.log")
NEO4J_SERVER_CONF = os.path.join(NEO4J_DEST, 'conf', 'neo4j-server.properties')
# str.format() template rendered by neo4j_write_server_conf();
# `{port}` is the only placeholder.
NEO4J_SERVER_CONF_TEMPLATE = \
"""org.neo4j.server.database.location=data/graph.db
org.neo4j.server.db.tuning.properties=conf/neo4j.properties
dbms.security.auth_enabled=false
org.neo4j.server.webserver.port={port}
org.neo4j.server.webserver.https.enabled=false
org.neo4j.server.http.log.enabled=true
org.neo4j.server.http.log.config=conf/neo4j-http-logging.xml
org.neo4j.server.webadmin.rrdb.location=data/rrd
"""
def httpget(src, dst):
    """Download URL *src* to local path *dst*, printing crude progress.

    Progress is shown as a percentage when the server supplies a
    Content-Length header, otherwise as a raw byte count.
    """
    src_sock = urllib2.urlopen(src)
    total_length = int(src_sock.headers.get('content-length', 0))
    read_length = 0
    # binary mode: the payload is a tarball, not text
    with open(dst, 'wb+') as fd:
        s = None
        while s != '':
            s = src_sock.read(4096)
            fd.write(s)
            # BUGFIX: accumulate the number of bytes read; previously
            # read_length was never updated, so the progress display
            # stayed stuck at 0.
            read_length += len(s)
            if total_length > 0:
                # floor division keeps the percentage an int in both
                # Python 2 and Python 3
                sys.stdout.write("\r{}%".format((100 * read_length) // total_length))
            else:
                sys.stdout.write("\r{}".format(read_length))
            sys.stdout.flush()
def install_neo4j():
    """Download and unpack the Neo4j distribution into ASSET_DIRECTORY,
    unless it has already been installed."""
    if os.path.exists(NEO4J_DEST):
        return  # already unpacked
    if not os.path.exists(NEO4J_ARCHIVE):
        httpget(NEO4J_URL, NEO4J_ARCHIVE)

    def snapshot():
        # current entries of the asset directory
        return set(glob(os.path.join(ASSET_DIRECTORY, '*')))

    before = snapshot()
    tarfile.open(NEO4J_ARCHIVE).extractall(ASSET_DIRECTORY)
    created = {os.path.basename(entry) for entry in snapshot() - before}
    # sanity-check that the archive produced exactly the expected directory
    if created != {NEO4J_SUBDIR}:
        print("error extracting neo4j: expected {}, got {}".format(NEO4J_SUBDIR, created))
        raise Exception("Neo4J extracted to unexpected directory place")
# Child processes started by launch(); terminated by kill_subprocesses().
subprocesses = []

def launch(args, stdout_filename):
    """Start *args* as a child process with combined stdout/stderr
    appended to *stdout_filename*; record it for later cleanup."""
    log_fd = open(stdout_filename, 'a+')
    log_fd.write('-------------------\n')
    child = subprocess.Popen(args, stdout=log_fd, stderr=subprocess.STDOUT)
    print("[{}]: launched {}".format(child.pid, repr(args)))
    subprocesses.append(child)
    return child
def write_template(filename, template, **kw):
    """Render *template* with str.format(**kw) and write it to *filename*."""
    rendered = template.format(**kw)
    with open(filename, 'w+') as fd:
        fd.write(rendered)
def wait_for_port(port):
    """Poll localhost:*port* until a TCP connection succeeds, or about
    100 attempts (0.1s apart) have been made.

    Returns None either way; callers treat this as best-effort.
    """
    timeout = 100
    while timeout > 0:
        s = socket.socket()
        try:
            s.connect(('localhost', port))
        except socket.error:
            pass
        else:
            # connection succeeded -- the service is up
            return
        finally:
            # BUGFIX: always close the probe socket; previously every
            # attempt (including the successful one) leaked its fd.
            s.close()
        timeout -= 1
        sleep(0.1)
    # removed: dead local `dt = 1`, which was never used
def abort_if_port_open(port):
    """Raise if something is already listening on localhost:*port*."""
    s = socket.socket()
    try:
        # BUGFIX: catch only socket errors; the previous bare `except:`
        # also swallowed KeyboardInterrupt/SystemExit.
        s.connect(('localhost', port))
    except socket.error:
        pass  # nothing listening -- the port is free
    else:
        raise Exception("TCP port {} is open and should not have been".format(port))
    finally:
        # BUGFIX: close the probe socket, which was previously leaked.
        s.close()
def neo4j_write_server_conf():
    """Render the Neo4j server properties file with the test port."""
    write_template(filename=NEO4J_SERVER_CONF,
                   template=NEO4J_SERVER_CONF_TEMPLATE,
                   port=NEO4J_PORT)
def once(f):
    """Decorator: invoke *f* at most once; subsequent calls return the
    first call's result without re-invoking *f*."""
    cached = []

    @functools.wraps(f)
    def wrapper(*args, **kw):
        if not cached:
            cached.append(f(*args, **kw))
        return cached[0]
    return wrapper
@once
def launch_neo4j():
    """Start a local Neo4j console process (at most once per test run),
    unless an external instance is configured via the environment."""
    if env('RHIZI_TESTS__EXTERNAL_NEO4J_PROCESS', False):
        return
    install_neo4j()
    abort_if_port_open(NEO4J_PORT)
    neo4j_write_server_conf()
    launch([NEO4J_BIN, "console"], stdout_filename=NEO4J_STDOUT)
    wait_for_port(NEO4J_PORT)
    # removed: dead local assignment `neo4j_started = True` (never read)
@once
def launch_mta():
    """Start a debugging SMTP server (at most once per test run), unless
    an external MTA is configured via the environment."""
    if env('RHIZI_TESTS__EXTERNAL_MTA_PROCESS', False):
        return
    cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=MTA_PORT)
    launch(cmd.split(), stdout_filename=MTA_STDOUT)
def kill_subprocesses():
    """atexit hook: kill every child started by launch(), then terminate
    any that survive the first pass."""
    def each_ignoring_os_error(action):
        # A child may already have exited; ignore the resulting OSError
        # and keep going with the remaining processes.
        for p in subprocesses:
            try:
                action(p)
            except OSError:
                pass

    # BUGFIX: the original applied map() for its side effects; under
    # Python 3 map() is lazy, so the kill/terminate calls never ran.
    # An explicit loop works identically under both Python versions.
    each_ignoring_os_error(lambda p: p.kill())
    sleep(0.01)
    awake = [p for p in subprocesses if p.poll() is None]
    if len(awake) == 0:
        return
    print("terminating {} processes".format(len(awake)))
    each_ignoring_os_error(lambda p: p.terminate())

atexit.register(kill_subprocesses)
def gen_random_name(size=8, char_set=string.ascii_uppercase + string.digits):
    """
    used for random node generation
    """
    chars = [random.choice(char_set) for _ in range(size)]
    return ''.join(chars)
def generate_random_node_dict(n_type, nid=None):
    """
    @param n_type: is converted to a label set
    @param nid: node id to use; a random UUID-based id is generated
        when omitted
    @return: a dict based node object representation and the generated node id
    """
    # idiomatic identity test instead of the Yoda-style `None == nid`
    if nid is None:
        nid = generate_random_id__uuid()

    return {'__label_set': [n_type],
            'id': nid,
            'name': gen_random_name()}, nid
def generate_random_link_dict(l_type, src_id, dst_id, lid=None):
    """
    @param l_type: is converted to a single item type array
    @param lid: link id to use; a random UUID-based id is generated
        when omitted
    @return: a dict based link object representation and the generated link id
    """
    # idiomatic identity test instead of the Yoda-style `None == lid`
    if lid is None:
        lid = generate_random_id__uuid()

    ret_dict = Link.link_ptr(src_id, dst_id)
    ret_dict['__type'] = [l_type]
    ret_dict['id'] = lid
    return ret_dict, lid
def generate_random_diff__topo__minimal(test_label):
    """
    @return: a ~minimal Topo_Diff containing three nodes and two links
    """
    # Three random nodes, with links fanning out from the first node to the
    # other two.
    node_a, id_a = generate_random_node_dict(test_label)
    node_b, id_b = generate_random_node_dict(test_label)
    node_c, id_c = generate_random_node_dict(test_label)
    link_ab, _ = generate_random_link_dict(test_label, id_a, id_b)
    link_ac, _ = generate_random_link_dict(test_label, id_a, id_c)
    return Topo_Diff(node_set_add=[node_a, node_b, node_c],
                     link_set_add=[link_ab, link_ac])
def generate_random_RZDoc(rzdoc_name=None):
    """Create an RZDoc with the given (or a random) name and a random id."""
    name = rzdoc_name if rzdoc_name is not None else gen_random_name()
    doc = RZDoc(name)
    doc.id = generate_random_rzdoc_id()
    return doc
def gen_random_user_signup():
    """Build a User_Signup_Request whose fields share one random name seed."""
    suffix = gen_random_name()
    return User_Signup_Request(rz_username='rz_username_%s' % (suffix,),
                               email_address='%s@localhost' % (suffix,),
                               first_name='firstname%s' % (suffix,),
                               last_name='lastname%s' % (suffix,),
                               pw_plaintxt='aaaa12345')
# Lazily-initialized test fixtures, populated by initialize_test_kernel().
db_ctl = None    # DB_Controller connected to the test neo4j instance
kernel = None    # RZ_Kernel under test
cfg = None       # RZ_Config; also doubles as the "already initialized" flag
user_db = None   # Fake_User_DB wired into the webapp
webapp = None    # flask webapp connected to the kernel
def get_connection():
    # Lazy accessor: boot the whole test stack on first use (cfg stays None
    # until initialize_test_kernel() has run), then return shared handles.
    if cfg is None:
        initialize_test_kernel()
    return db_ctl, kernel
def initialize_test_kernel():
    """Boot the full test stack once: neo4j + MTA child processes, a DB
    controller, an RZ kernel, a fake user DB and the flask webapp, storing
    everything in the module-level globals above.

    Destructive: wipes the entire test database before bootstrapping.
    """
    global db_ctl
    global kernel
    global cfg
    global user_db
    global webapp
    sys.stderr.write("initializing neo4j\n")
    launch_neo4j()
    launch_mta()
    sys.stderr.write("connecting to db\n")
    # Test configuration: no access control, pointed at the local test
    # neo4j and debugging MTA started above.
    cfg = RZ_Config.generate_default()
    cfg.access_control = False
    cfg.neo4j_url = 'http://localhost:{}'.format(NEO4J_PORT)
    cfg.mta_host = 'localhost'
    cfg.mta_port = MTA_PORT
    db_ctl = dbc.DB_Controller(cfg.db_base_url)
    rz_api.db_ctl = db_ctl
    log = logging.getLogger('rhizi')
    log.setLevel(logging.DEBUG)
    log_handler_c = logging.FileHandler('rhizi-tests.log')
    log_handler_c.setFormatter(logging.Formatter(u'%(asctime)s [%(levelname)s] %(name)s %(message)s'))
    log.addHandler(log_handler_c)
    # clear db !!! -- removes every node and relationship in the test DB
    db_ctl.exec_op(DBO_raw_query_set(['match n optional match (n)-[l]-(m) delete n,l return count(n),count(l)']))
    # bootstrap kernel
    kernel = RZ_Kernel()
    kernel.db_ctl = db_ctl
    kernel.db_op_factory = DBO_factory__default()
    kernel.start()
    while not kernel.is_DB_status__ok(): # wait for kernel to initialize...
        sleep(0.3)
        sys.stderr.write(".\n")
    user_db = Fake_User_DB()
    webapp = init_webapp(cfg, kernel)
    webapp.user_db = user_db
    kernel.db_op_factory = webapp # assist kernel with DB initialization
    init_ws_interface(cfg, kernel, webapp)
class RhiziTestBase(TestCase):
    """Base TestCase sharing the lazily-booted test stack (config, DB
    controller, kernel, user DB, webapp) via class attributes."""
    @classmethod
    def setUpClass(cls):  # renamed from `clz` -- `cls` is the convention
        db_ctl, kernel = get_connection()
        cls.cfg = cfg
        cls.db_ctl = db_ctl
        cls.user_db = user_db
        rz_api.db_ctl = cls.db_ctl
        cls.kernel = kernel
        cls.webapp = webapp
def test_main():
    # Manual entry point: just brings up neo4j and blocks forever so the
    # server can be poked at interactively (full kernel init is skipped).
    #initialize_test_kernel()
    launch_neo4j()
    print("sleeping")
    while True:
        sleep(1)
if __name__ == '__main__':
    test_main()
| agpl-3.0 |
tvanesse/theforce | Filters.py | 1 | 1721 | # -*- coding: utf-8 -*-
from CSVHandler import CSVFile, Field
class Filter(object):
    """
    Abstract base for filters: wraps an input CSVFile and produces a
    filtered version of it via output().  Subclasses must implement both
    __str__ and output().
    """

    def __init__(self, csvin):
        self._input = csvin

    def __str__(self):
        raise NotImplementedError

    @property
    def inputCSV(self):
        # 'input' itself would shadow the Python builtin, hence this name.
        return self._input

    def output(self):
        """
        Abstract. Subclasses extending Filter return their filtered result
        here.
        """
        raise NotImplementedError
class FieldSelector(Filter):
    """
    A Filter that extracts a single named field (column) from a CSVFile.
    """

    def __init__(self, field=None, csvin=None):
        super(FieldSelector, self).__init__(csvin)
        self._field = field

    def __str__(self):
        return "Field selector"

    @property
    def field(self):
        return self._field

    @field.setter
    def field(self, value):
        self._field = value

    def output(self):
        """
        Returns a Field object out of a bigger CSVFile. The selected
        field is stored in self._field.
        """
        header = self._field
        return Field(header=header, values=self._input.data[header])
class FindAndReplace(Filter):
    # Stub: find-and-replace filter; output() is not implemented yet.
    def __str__(self):
        return "Find and Replace"
class Remover(Filter):
    # Stub: removal filter; output() is not implemented yet.
    def __str__(self):
        return "Remover"
class Inserter(Filter):
    # Stub: insertion filter; output() is not implemented yet.
    def __str__(self):
        return "Inserter"
# ----- TESTING ----- #
if __name__ == "__main__":
    import sys
    import os
    # BUG FIX: guard against a missing argument -- sys.argv[1] previously
    # raised IndexError when the script was run without a file path.
    if len(sys.argv) < 2:
        print("Usage: python Filters.py <csv-file>")
    elif os.path.isfile(os.path.abspath(sys.argv[1])):
        csv_file = CSVFile(os.path.abspath(sys.argv[1]))
        filter_test = FieldSelector(csvin=csv_file, field="X_CURRENT_TARGET_SP")
        print(csv_file)
        print(filter_test.output())
    else:
        print("Error, the file does not exist")
| gpl-3.0 |
colloquium/spacewalk | backend/server/importlib/blacklistImport.py | 1 | 4044 | #
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Blacklists import
#
from importLib import Import
class BlacklistObsoletesImport(Import):
    """Importer for package-obsoletes blacklist entries: resolves symbolic
    names/EVRs/arches to database ids, then pushes the batch through the
    backend."""
    def __init__(self, batch, backend):
        Import.__init__(self, batch, backend)
        # Lookup caches, filled by preprocess() and resolved by fix().
        self.names = {}
        self.evrs = {}
        self.package_arches = {}

    def preprocess(self):
        # Collect every distinct name, EVR tuple and package arch so each
        # can be resolved against the DB in a single pass.
        for entry in self.batch:
            self.names[entry['name']] = None
            self.names[entry['ignored-name']] = None
            evr = self._make_evr(entry)
            entry['evr'] = evr
            self.evrs[evr] = None
            self.package_arches[entry['package-arch']] = None

    def fix(self):
        self.backend.lookupPackageNames(self.names)
        self.backend.lookupEVRs(self.evrs)
        self.backend.lookupPackageArches(self.package_arches)
        # Replace the symbolic values on each entry with the resolved ids.
        for entry in self.batch:
            entry['name_id'] = self.names[entry['name']]
            entry['evr_id'] = self.evrs[entry['evr']]
            entry['package_arch_id'] = self.package_arches[entry['package-arch']]
            entry['ignore_name_id'] = self.names[entry['ignored-name']]

    def submit(self):
        self.backend.processBlacklistObsoletes(self.batch)
        self.backend.commit()

    def _make_evr(self, entry):
        # Empty strings become None so the DB lookup treats them as NULL.
        values = []
        for label in ('epoch', 'version', 'release'):
            val = entry[label]
            values.append(None if val == '' else val)
        return tuple(values)
if __name__ == '__main__':
    # Ad-hoc manual test: push a handful of blacklist entries through the
    # importer against a live Oracle-backed Satellite schema.
    from spacewalk.server import rhnSQL
    # NOTE(review): hard-coded developer DB credentials -- do not reuse.
    rhnSQL.initDB('satuser/satuser@satdev')
    from importLib import BlacklistObsoletes
    batch = [
        BlacklistObsoletes().populate({
            'name' : 'zebra',
            'epoch' : '',
            'version' : '0.91a',
            'release' : '6',
            'package-arch' : 'i386',
            'ignored-name' : 'gated',
        }),
        BlacklistObsoletes().populate({
            'name' : 'zebra',
            'epoch' : '',
            'version' : '0.91a',
            'release' : '3',
            'package-arch' : 'i386',
            'ignored-name' : 'gated',
        }),
        BlacklistObsoletes().populate({
            'name' : 'zebra',
            'epoch' : '',
            'version' : '0.91a',
            'release' : '3',
            'package-arch' : 'alpha',
            'ignored-name' : 'gated',
        }),
        BlacklistObsoletes().populate({
            'name' : 'gated',
            'epoch' : '',
            'version' : '3.6',
            'release' : '10',
            'package-arch' : 'i386',
            'ignored-name' : 'zebra',
        }),
        BlacklistObsoletes().populate({
            'name' : 'gated',
            'epoch' : '',
            'version' : '3.6',
            'release' : '10',
            'package-arch' : 'alpha',
            'ignored-name' : 'zebra',
        }),
        BlacklistObsoletes().populate({
            'name' : 'gated',
            'epoch' : '',
            'version' : '3.6',
            'release' : '12',
            'package-arch' : 'i386',
            'ignored-name' : 'zebra',
        }),
    ]
    from backendOracle import OracleBackend
    ob = OracleBackend()
    importer = BlacklistObsoletesImport(batch, ob)
    importer.run()
| gpl-2.0 |
yunikkk/omim | 3party/protobuf/python/google/protobuf/internal/descriptor_database_test.py | 73 | 2924 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_database."""
__author__ = 'matthewtoia@google.com (Matt Toia)'
from google.apputils import basetest
from google.protobuf import descriptor_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf import descriptor_database
class DescriptorDatabaseTest(basetest.TestCase):

  def testAdd(self):
    """An added descriptor is findable by file name and by any contained symbol."""
    db = descriptor_database.DescriptorDatabase()
    file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
        factory_test2_pb2.DESCRIPTOR.serialized_pb)
    db.Add(file_desc_proto)
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(file_desc_proto, db.FindFileByName(
        'google/protobuf/internal/factory_test2.proto'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Message'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Enum'))
    self.assertEqual(file_desc_proto, db.FindFileContainingSymbol(
        'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum'))
if __name__ == '__main__':
  basetest.main()  # run via Google's apputils test runner
| apache-2.0 |
thawatchai/mrkimontour | appengine-django/lib/django/views/generic/list.py | 471 | 7630 | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import InvalidPage, Paginator
from django.db.models.query import QuerySet
from django.http import Http404
from django.utils import six
from django.utils.translation import ugettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
class MultipleObjectMixin(ContextMixin):
    """
    A mixin for views manipulating multiple objects.
    """
    allow_empty = True
    queryset = None
    model = None
    paginate_by = None
    paginate_orphans = 0
    context_object_name = None
    paginator_class = Paginator
    page_kwarg = 'page'
    ordering = None
    def get_queryset(self):
        """
        Return the list of items for this view.
        The return value must be an iterable and may be an instance of
        `QuerySet` in which case `QuerySet` specific behavior will be enabled.
        """
        if self.queryset is not None:
            queryset = self.queryset
            if isinstance(queryset, QuerySet):
                # .all() yields a fresh queryset so results cached on the
                # class attribute are not shared between requests.
                queryset = queryset.all()
        elif self.model is not None:
            queryset = self.model._default_manager.all()
        else:
            raise ImproperlyConfigured(
                "%(cls)s is missing a QuerySet. Define "
                "%(cls)s.model, %(cls)s.queryset, or override "
                "%(cls)s.get_queryset()." % {
                    'cls': self.__class__.__name__
                }
            )
        ordering = self.get_ordering()
        if ordering:
            if isinstance(ordering, six.string_types):
                # Accept a single field name as well as a tuple/list.
                ordering = (ordering,)
            queryset = queryset.order_by(*ordering)
        return queryset
    def get_ordering(self):
        """
        Return the field or fields to use for ordering the queryset.
        """
        return self.ordering
    def paginate_queryset(self, queryset, page_size):
        """
        Paginate the queryset, if needed.

        Returns a (paginator, page, object_list, has_other_pages) tuple.
        Raises Http404 for out-of-range or non-integer page numbers
        (except the literal 'last', which maps to the final page).
        """
        paginator = self.get_paginator(
            queryset, page_size, orphans=self.get_paginate_orphans(),
            allow_empty_first_page=self.get_allow_empty())
        page_kwarg = self.page_kwarg
        # URLconf kwarg wins over the querystring; default to page 1.
        page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
        try:
            page_number = int(page)
        except ValueError:
            if page == 'last':
                page_number = paginator.num_pages
            else:
                raise Http404(_("Page is not 'last', nor can it be converted to an int."))
        try:
            page = paginator.page(page_number)
            return (paginator, page, page.object_list, page.has_other_pages())
        except InvalidPage as e:
            raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
                'page_number': page_number,
                'message': str(e)
            })
    def get_paginate_by(self, queryset):
        """
        Get the number of items to paginate by, or ``None`` for no pagination.
        """
        return self.paginate_by
    def get_paginator(self, queryset, per_page, orphans=0,
                      allow_empty_first_page=True, **kwargs):
        """
        Return an instance of the paginator for this view.
        """
        return self.paginator_class(
            queryset, per_page, orphans=orphans,
            allow_empty_first_page=allow_empty_first_page, **kwargs)
    def get_paginate_orphans(self):
        """
        Returns the maximum number of orphans extend the last page by when
        paginating.
        """
        return self.paginate_orphans
    def get_allow_empty(self):
        """
        Returns ``True`` if the view should display empty lists, and ``False``
        if a 404 should be raised instead.
        """
        return self.allow_empty
    def get_context_object_name(self, object_list):
        """
        Get the name of the item to be used in the context.
        """
        if self.context_object_name:
            return self.context_object_name
        elif hasattr(object_list, 'model'):
            # Querysets get an automatic "<model>_list" context name.
            return '%s_list' % object_list.model._meta.model_name
        else:
            return None
    def get_context_data(self, **kwargs):
        """
        Get the context for this view.
        """
        queryset = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(queryset)
        context_object_name = self.get_context_object_name(queryset)
        if page_size:
            paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
            context = {
                'paginator': paginator,
                'page_obj': page,
                'is_paginated': is_paginated,
                'object_list': queryset
            }
        else:
            context = {
                'paginator': None,
                'page_obj': None,
                'is_paginated': False,
                'object_list': queryset
            }
        if context_object_name is not None:
            # Expose the (possibly paginated) list under its friendly name too.
            context[context_object_name] = queryset
        context.update(kwargs)
        return super(MultipleObjectMixin, self).get_context_data(**context)
class BaseListView(MultipleObjectMixin, View):
    """
    A base view for displaying a list of objects.
    """
    def get(self, request, *args, **kwargs):
        self.object_list = self.get_queryset()
        if not self.get_allow_empty():
            # When pagination is enabled and object_list is a queryset,
            # it's better to do a cheap query than to load the unpaginated
            # queryset in memory.
            will_paginate = self.get_paginate_by(self.object_list) is not None
            if will_paginate and hasattr(self.object_list, 'exists'):
                is_empty = not self.object_list.exists()
            else:
                is_empty = len(self.object_list) == 0
            if is_empty:
                raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.")
                        % {'class_name': self.__class__.__name__})
        return self.render_to_response(self.get_context_data())
class MultipleObjectTemplateResponseMixin(TemplateResponseMixin):
    """
    Mixin for responding with a template and list of objects.
    """
    template_name_suffix = '_list'
    def get_template_names(self):
        """
        Return a list of template names to be used for the request. Must return
        a list. May not be called if render_to_response is overridden.
        """
        try:
            names = super(MultipleObjectTemplateResponseMixin, self).get_template_names()
        except ImproperlyConfigured:
            # If template_name isn't specified, it's not a problem --
            # we just start with an empty list.
            names = []
        # If the list is a queryset, we'll invent a template name based on the
        # app and model name. This name gets put at the end of the template
        # name list so that user-supplied names override the automatically-
        # generated ones.
        if hasattr(self.object_list, 'model'):
            opts = self.object_list.model._meta
            # Produces "<app_label>/<model_name>_list.html" by default.
            names.append("%s/%s%s.html" % (opts.app_label, opts.model_name, self.template_name_suffix))
        return names
class ListView(MultipleObjectTemplateResponseMixin, BaseListView):
    """
    Render some list of objects, set by `self.model` or `self.queryset`.
    `self.queryset` can actually be any iterable of items, not just a queryset.
    """
    # All behavior is inherited; this class only combines the template
    # selection mixin with the base list view.
| gpl-2.0 |
wooyek/glosuj-w-lomiankach | src/flask_multi_view/tests.py | 1 | 2740 | # coding=utf-8
#
# Copyright 2010 Brave Labs sp. z o.o.
# All rights reserved.
#
# This source code and all resulting intermediate files are CONFIDENTIAL and
# PROPRIETY TRADE SECRETS of Brave Labs sp. z o.o.
# Use is subject to license terms. See NOTICE file of this project for details.
import unittest
import flask
from flask import Response
from flask_multi_view.extensions.sqlachemy import ModelView
from jinja2 import Template
from .basic import NextMixin, RenderMixin
class RenderMixinTests(unittest.TestCase):
    def test_render(self):
        # render() must raise until a template is assigned, after which it
        # should produce a flask Response.
        mixin = RenderMixin()
        app = flask.Flask(__name__)
        with app.test_request_context('/'):
            self.assertRaises(AssertionError, mixin.render)
        # NOTE(review): the request context + assertRaises below duplicate
        # the block above -- confirm whether a different setup was intended.
        with app.test_request_context('/'):
            self.assertRaises(AssertionError, mixin.render)
            mixin.template = Template("foo bar")
            response = mixin.render()
            self.assertIsInstance(response, Response)
class NextMixinTests(unittest.TestCase):
    # Exercises NextMixin.get_next() resolution: an explicit ?next= query
    # argument wins, then a hardwired `next` attribute, then the referrer.
    def test_next_provided(self):
        app = flask.Flask(__name__)
        with app.test_request_context('/?next=a'):
            mixin = NextMixin()
            self.assertEqual(mixin.get_next(), 'a')
        with app.test_request_context('/?next=a', headers=[('HTTP_REFERER', 'c')]):
            mixin = NextMixin()
            self.assertEqual(mixin.get_next(), 'a')
    def test_next_hardwired(self):
        app = flask.Flask(__name__)
        with app.test_request_context('/?next=a', headers=[('HTTP_REFERER', 'c')]):
            mixin = NextMixin()
            mixin.next = "b"
            self.assertEqual(mixin.get_next(), 'b')
    def test_no_next(self):
        app = flask.Flask(__name__)
        with app.test_request_context('/', ):
            mixin = NextMixin()
            self.assertIsNone(mixin.get_next())
            # NOTE(review): the string below is passed as a *positional
            # argument* to mixin.get, not as an assertion message -- confirm
            # this is intentional.
            self.assertRaises(AttributeError, mixin.get, "super 'get' method should not be implemented")
    def test_next_refferrer(self):
        app = flask.Flask(__name__)
        with app.test_request_context('/', headers=[('HTTP_REFERER', 'c')]):
            mixin = NextMixin()
            self.assertIsNone(mixin.get_next())
            # With no `next`, get() should redirect back carrying the
            # referrer as the next target.
            responce = mixin.get()
            self.assertEqual(responce.status_code, 302)
            self.assertEqual(responce.headers['Location'], 'http://localhost/?next=c')
class FormTests(unittest.TestCase):
    def test_form(self):
        """page_size is shared across instances while forms is per-instance."""
        a = ModelView()
        b = ModelView()
        self.assertEqual(id(a.page_size), id(b.page_size))
        self.assertNotEqual(id(a.forms), id(b.forms))
        a.forms["update"] = object()
        # dict.has_key() was removed in Python 3; assertNotIn is the
        # idiomatic (and Py2/Py3-compatible) membership assertion.
        self.assertNotIn("update", b.forms)
| mit |
MeddlerOnTheRoof/mean | node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
# (gypd never expands them; downstream consumers see `<(NAME)` verbatim.)
_generator_identity_variables = [
  'CONFIGURATION_NAME',
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'LIB_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
  'SHARED_LIB_DIR',
  'SHARED_LIB_PREFIX',
  'SHARED_LIB_SUFFIX',
  'STATIC_LIB_PREFIX',
  'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
# Map each passthrough variable to a reference to itself.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one pretty-printed .gypd file for each input .gyp file.

  Arguments follow the standard gyp generator-module interface;
  `params['options'].suffix` is inserted before the .gypd extension.
  """
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'
    # Record only the first target seen for each output file.
    if output_file not in output_files:
      output_files[output_file] = input_file
  for output_file, input_file in output_files.iteritems():
    # `with` guarantees the file handle is closed even if pprint raises
    # (the original leaked the handle on error).
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
| mit |
leductan-nguyen/RaionPi | src/octoprint/server/apps/__init__.py | 1 | 4070 | # coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The RaionPi Project - Released under terms of the AGPLv3 License"
import rsa
from flask import Blueprint, request, make_response, jsonify
import octoprint.server
import octoprint.plugin
from octoprint.server.util import noCachingResponseHandler, corsResponseHandler
from octoprint.settings import settings as s
# Blueprint for the app-session authentication endpoints (/auth).
apps = Blueprint("apps", __name__)
apps.after_request(noCachingResponseHandler)  # auth responses must never be cached
apps.after_request(corsResponseHandler)
@apps.route("/auth", methods=["GET"])
def getSessionKey():
	# Hand out a fresh, unverified app session key plus its expiry; the app
	# must sign it and POST it back (see verifySessionKey) to activate it.
	unverified_key, valid_until = octoprint.server.appSessionManager.create()
	return jsonify(unverifiedKey=unverified_key, validUntil=valid_until)
@apps.route("/auth", methods=["POST"])
def verifySessionKey():
	"""Verify a signed app session key and promote it to a verified key.

	Expects JSON with appid, key, optional appversion and an RSA signature
	(_sig) over "appid:appversion:key".  Returns the verified key and its
	expiry on success, an error response otherwise.
	"""
	if "application/json" not in request.headers["Content-Type"]:
		# BUG FIX: previously returned a (None, None, response) tuple,
		# which is not a valid flask view return value; return the
		# response object directly.
		return make_response("Expected content-type JSON", 400)
	data = request.json
	for key in ("appid", "key", "_sig"):
		if key not in data:
			return make_response("Missing argument: {key}".format(key=key), 400)
	appid = str(data["appid"])
	if "appversion" not in data:
		appversion = "any"
	else:
		appversion = str(data["appversion"])
	key = str(data["key"])
	# calculate message that was signed
	message = "{appid}:{appversion}:{key}".format(**locals())
	# decode signature
	import base64
	signature = data["_sig"]
	signature = base64.decodestring("\n".join([signature[x:x+64] for x in range(0, len(signature), 64)]))
	# fetch and validate app information
	lookup_key = appid + ":" + appversion
	apps = _get_registered_apps()
	if lookup_key not in apps or not apps[lookup_key]["enabled"] or "pubkey" not in apps[lookup_key]:
		octoprint.server.appSessionManager.remove(key)
		return make_response("Invalid app: {lookup_key}".format(lookup_key=lookup_key), 401)
	pubkey_string = apps[lookup_key]["pubkey"]
	pubkey_string = "\n".join([pubkey_string[x:x+64] for x in range(0, len(pubkey_string), 64)])
	try:
		pubkey = rsa.PublicKey.load_pkcs1("-----BEGIN RSA PUBLIC KEY-----\n" + pubkey_string + "\n-----END RSA PUBLIC KEY-----\n")
	except Exception:
		# Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
		# still propagate.
		octoprint.server.appSessionManager.remove(key)
		return make_response("Invalid pubkey stored in server", 500)
	# verify signature
	try:
		rsa.verify(message, signature, pubkey)
	except rsa.VerificationError:
		octoprint.server.appSessionManager.remove(key)
		return make_response("Invalid signature", 401)
	# generate new session key and return it
	result = octoprint.server.appSessionManager.verify(key)
	if not result:
		return make_response("Invalid key or already verified", 401)
	verified_key, valid_until = result
	return jsonify(key=verified_key, validUntil=valid_until)
__registered_apps = None
def _get_registered_apps():
	"""Return the registered-apps map ("<id>:<version>" -> info dict),
	merging configured apps with those contributed by plugins through the
	octoprint.accesscontrol.appkey hook.  The result is cached module-wide
	(see clear_registered_app)."""
	global __registered_apps
	if __registered_apps is not None:
		return __registered_apps
	apps = s().get(["api", "apps"], merged=True)
	for app, app_data in apps.items():
		if "enabled" not in app_data:
			apps[app]["enabled"] = True
	hooks = octoprint.server.pluginManager.get_hooks("octoprint.accesscontrol.appkey")
	for name, hook in hooks.items():
		try:
			additional_apps = hook()
		except Exception:
			# Narrowed from a bare `except:` -- a misbehaving plugin must
			# not break app key handling, but SystemExit/KeyboardInterrupt
			# should still propagate.
			import logging
			logging.getLogger(__name__).exception("Error while retrieving additional appkeys from plugin {name}".format(**locals()))
			continue
		any_version_enabled = dict()
		for app_data in additional_apps:
			id, version, pubkey = app_data
			key = id + ":" + version
			if key in apps:
				continue
			if id not in any_version_enabled:
				any_version_enabled[id] = False
			if version == "any":
				any_version_enabled[id] = True
			apps[key] = dict(
				pubkey=pubkey,
				enabled=True
			)
		for id, enabled in any_version_enabled.items():
			if enabled:
				continue
			# No "any" registration for this id: record a disabled stub.
			apps[id + ":any"] = dict(
				pubkey=None,
				enabled=False
			)
	__registered_apps = apps
	return apps
def clear_registered_app():
	# Drop the module-level cache so the next _get_registered_apps() call
	# re-reads settings and plugin hooks.
	global __registered_apps
	__registered_apps = None
| agpl-3.0 |
iGivefirst/cassandra-dbapi2 | cql/apivalues.py | 11 | 2017 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import exceptions
# dbapi Error hierarchy (PEP 249).  StandardError is the Python 2 base
# exception class (this module targets Python 2).
class Warning(exceptions.StandardError): pass
class Error (exceptions.StandardError):
    # Optional server-side error code carried alongside the message.
    def __init__(self, msg, code=None):
        exceptions.StandardError.__init__(self, msg)
        self.code = code
class InterfaceError(Error): pass
class DatabaseError (Error): pass
class DataError (DatabaseError): pass
class OperationalError (DatabaseError): pass
class IntegrityError (DatabaseError): pass
class InternalError (DatabaseError): pass
class ProgrammingError (DatabaseError): pass
class NotSupportedError(DatabaseError): pass
class NotAuthenticated (DatabaseError): pass
# Module constants (PEP 249 module globals)
apilevel = 1.0
threadsafety = 1 # Threads may share the module, but not connections/cursors.
paramstyle = 'named'
# Module Type Objects and Constructors
Binary = buffer
try:
    from uuid import UUID # new in Python 2.5
except ImportError:
    # Minimal fallback for Python < 2.5: wraps the raw 16 bytes and can
    # extract the 60-bit timestamp field of a time-based (version 1) UUID.
    class UUID:
        def __init__(self, bytes):
            self.bytes = bytes
        def get_time(self):
            # Fold the 16 bytes into one big integer, then reassemble the
            # time_hi/time_mid/time_low fields into a single timestamp.
            thisint = reduce(lambda a, b: a<<8 | b, map(ord, self.bytes), 0)
            return ((thisint >> 64 & 0x0fff) << 48 |
                    (thisint >> 80 & 0xffff) << 32 |
                    (thisint >> 96))
| apache-2.0 |
themrmax/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 105 | 4300 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
fixed_n_classes=None, n_runs=5, seed=42):
"""Compute score for 2 random uniform cluster labelings.
Both random labelings have the same number of clusters for each value
possible value in ``n_clusters_range``.
When fixed_n_classes is not None the first labeling is considered a ground
truth class assignment with fixed number of classes.
"""
random_labels = np.random.RandomState(seed).randint
scores = np.zeros((len(n_clusters_range), n_runs))
if fixed_n_classes is not None:
labels_a = random_labels(low=0, high=fixed_n_classes, size=n_samples)
for i, k in enumerate(n_clusters_range):
for j in range(n_runs):
if fixed_n_classes is None:
labels_a = random_labels(low=0, high=k, size=n_samples)
labels_b = random_labels(low=0, high=k, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]
# 2 independent random clusterings with equal cluster number
n_samples = 100
# BUG FIX: `np.int` (a deprecated alias of the builtin) was removed in
# NumPy 1.24; the builtin `int` is the correct dtype.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)
plt.figure(1)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))
    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)
plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
plt.ylim(ymin=-0.05, ymax=1.05)
# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters
n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10
plt.figure(2)
plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))
    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)
plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(ymin=-0.05, ymax=1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
rogerthat-platform/rogerthat-backend | src/rogerthat/api/activity.py | 1 | 2012 | # -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import logging
from rogerthat.rpc import users
from rogerthat.rpc.rpc import expose
from rogerthat.to.activity import LogCallRequestTO, LogCallResponseTO, LogLocationsResponseTO, LogLocationsRequestTO
from rogerthat.utils import foreach
from mcfw.rpc import returns, arguments
@expose(('api',))
@returns(LogCallResponseTO)
@arguments(request=LogCallRequestTO)
def logCall(request):
    """Acknowledge a call-log record uploaded by a client.

    Echoes the record's id back in the response so the mobile client knows
    which record was received.
    """
    response = LogCallResponseTO()
    response.recordId = request.record.id
    return response
@expose(('api',))
@returns(LogLocationsResponseTO)
@arguments(request=LogLocationsRequestTO)
def logLocations(request):
    """Persist a batch of location records reported by the current mobile.

    Records with no location payload at all are logged as errors and skipped;
    records whose raw location cannot be resolved are silently dropped.

    NOTE(review): despite @returns(LogLocationsResponseTO) this function
    returns None — confirm the rpc layer tolerates that.
    """
    records = request.records
    # Imported here rather than at module level — presumably to avoid a
    # circular import with rogerthat.bizz.location; TODO confirm.
    from rogerthat.bizz.location import get_location, post, CannotDetermineLocationException
    user = users.get_current_user()
    mobile = users.get_current_mobile()

    def logLocation(record):
        # A record must carry either a resolved geoPoint or a rawLocation blob.
        if not record.geoPoint and not record.rawLocation:
            logging.error("Received location record without location details!\nuser = %s\nmobile = %s" \
                % (user, mobile.id))
        else:
            try:
                # Resolve the raw location server-side when the client could not.
                geoPoint = record.geoPoint if record.geoPoint else get_location(record.rawLocation)
                post(user, geoPoint, record.timestamp, request.recipients)
            except CannotDetermineLocationException:
                # Deliberate best effort: unresolvable locations are dropped.
                pass

    foreach(logLocation, records)
| apache-2.0 |
embeddedarm/android_external_chromium_org | tools/oopif/iframe_server.py | 172 | 7285 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test server for generating nested iframes with different sites.
Very simple python server for creating a bunch of iframes. The page generation
is randomized based on query parameters. See the __init__ function of the
Params class for a description of the parameters.
This server relies on gevent. On Ubuntu, install it via:
sudo apt-get install python-gevent
Run the server using
python iframe_server.py
To use the server, run chrome as follows:
google-chrome --host-resolver-rules='map *.invalid 127.0.0.1'
Change 127.0.0.1 to be the IP of the machine this server is running on. Then
in this chrome instance, navigate to any domain in .invalid
(eg., http://1.invalid:8090) to run this test.
"""
import colorsys
import copy
import random
import urllib
import urlparse
from gevent import pywsgi # pylint: disable=F0401
MAIN_PAGE = """
<html>
<head>
<style>
body {
background-color: %(color)s;
}
</style>
</head>
<body>
<center>
<h1><a href="%(url)s">%(site)s</a></h1>
<p><small>%(url)s</small>
</center>
<br />
%(iframe_html)s
</body>
</html>
"""
IFRAME_FRAGMENT = """
<iframe src="%(src)s" width="%(width)s" height="%(height)s">
</iframe>
"""
class Params(object):
    """Container for the page-generation parameters parsed from a query dict."""

    def __init__(self, query_dict):
        # query_dict maps names to *lists* of values (urlparse.parse_qs
        # format); each parameter takes the first value, with a fallback.
        def first(key, default):
            return query_dict.get(key, [default])[0]

        # Basic params:
        #   nframes is how many frames per page.
        #   nsites is how many sites to randomly choose out of.
        #   depth is how deep to make the frame tree.
        #   pattern specifies how the sites are laid out per depth. An empty
        #     string uses a random N = [0, nsites] each time to generate an
        #     N.invalid URL. Otherwise specify single letters like 'ABCA' and
        #     frame A.invalid will embed B.invalid will embed C.invalid will
        #     embed A.
        #   jitter is the amount of randomness applied to nframes and nsites.
        #     Should be from [0,1]. 0.0 means no jitter.
        #   size_jitter is like jitter, but for width and height.
        self.nframes = int(first('nframes', 4))
        self.nsites = int(first('nsites', 10))
        self.depth = int(first('depth', 1))
        self.jitter = float(first('jitter', 0))
        self.size_jitter = float(first('size_jitter', 0.5))
        self.pattern = first('pattern', '')
        self.pattern_pos = int(first('pattern_pos', 0))

        # Size parameters. Values are percentages of the parent frame.
        self.width = int(first('width', 60))
        self.height = int(first('height', 50))

        # Carry the random seed in the URL so generated pages are reproducible.
        self.seed = int(first('seed', random.randint(0, 2147483647)))
def get_site(urlpath):
    """Approximate the "site" (scheme + registered domain) of a parsed URL.

    The registered domain is approximated by keeping the last two labels of
    the DNS name, which breaks for multi-label suffixes such as co.uk.
    """
    host = urlpath.netloc.partition(':')[0]
    registered_domain = '.'.join(host.split('.')[-2:])
    return '%s://%s' % (urlpath.scheme, registered_domain)
def generate_host(rand, params):
    """Generate the host name used as an iframe's source.

    Uses the reserved .invalid TLD to ensure DNS can never resolve to a real
    address. With a configured pattern, host labels are taken round-robin
    from it (this mutates params.pattern_pos); otherwise a random site index
    in [1, nsites-after-jitter] is used.
    """
    if not params.pattern:
        upper = apply_jitter(rand, params.jitter, params.nsites)
        return '%s.invalid' % rand.randint(1, upper)
    label = params.pattern[params.pattern_pos]
    params.pattern_pos = (params.pattern_pos + 1) % len(params.pattern)
    return '%s.invalid' % label
def apply_jitter(rand, jitter, n):
    """Randomly shrink n by up to a fraction ``jitter`` of itself.

    A jitter of <= 0.001 means "no jitter" and returns n unchanged; a
    reduction that hits exactly zero is bumped to 1.
    """
    if jitter <= 0.001:
        return n
    reduced = n - int(n * rand.uniform(0, jitter))
    return reduced if reduced else 1
def get_color_for_site(site):
    """Generate a stable (and pretty-ish) color for a site.

    BUGFIX: this previously used the builtin hash(), which is randomized per
    process for strings in Python 3 (PYTHONHASHSEED), so the "stable" color
    changed on every server restart. An MD5 digest gives a value that is
    stable across runs and interpreters. Floor division (//) replaces
    int(val / N) so huge digest values are not truncated through a float.
    """
    import hashlib
    val = int(hashlib.md5(site.encode('utf-8')).hexdigest(), 16)
    # The constants below are arbitrarily chosen empirically to look "pretty."
    # HSV is used because it is easier to control the color than RGB.
    # Reducing the H to 0.6 produces a good range of colors. Preserving
    # > 0.5 saturation and value means the colors won't be too washed out.
    h = (val % 100) / 100.0 * 0.6
    s = 1.0 - ((val // 100) % 100) / 200.0
    v = 1.0 - ((val // 10000) % 100) / 200.0
    (r, g, b) = colorsys.hsv_to_rgb(h, s, v)
    return 'rgb(%d, %d, %d)' % (int(r * 255), int(g * 255), int(b * 255))
def make_src(scheme, netloc, path, params):
    """Construct a URL whose query string recreates ``params`` when fetched."""
    # Drop a bare "/" so the generated URLs stay short.
    normalized_path = '' if path == '/' else path
    query = urllib.urlencode(params.__dict__)
    return '%s://%s%s?%s' % (scheme, netloc, normalized_path, query)
def make_iframe_html(urlpath, params):
    """Produces the HTML fragment for one child iframe.

    Returns '' once the recursion depth is exhausted. The child URL keeps the
    current scheme/path/port but swaps in a generated host, and the frame's
    width/height get size_jitter applied.
    """
    if (params.depth <= 0):
        return ''
    # Ensure a stable random number per iframe (seeded from params.seed).
    rand = random.Random()
    rand.seed(params.seed)
    netloc_paths = urlpath.netloc.split(':')
    # Index 0 is the host; a port component, if present, is preserved and
    # re-joined below.
    netloc_paths[0] = generate_host(rand, params)
    width = apply_jitter(rand, params.size_jitter, params.width)
    height = apply_jitter(rand, params.size_jitter, params.height)
    iframe_params = {
        'src': make_src(urlpath.scheme, ':'.join(netloc_paths),
                        urlpath.path, params),
        'width': '%d%%' % width,
        'height': '%d%%' % height,
    }
    return IFRAME_FRAGMENT % iframe_params
def create_html(environ):
    """Creates the current HTML page. Also parses out query parameters.

    Rebuilds the request URL from the WSGI environ, derives the page color
    from the site, and recursively generates child iframes (depth - 1 each).
    NOTE(review): uses xrange, so this module targets Python 2 only.
    """
    urlpath = urlparse.urlparse('%s://%s%s?%s' % (
        environ['wsgi.url_scheme'],
        environ['HTTP_HOST'],
        environ['PATH_INFO'],
        environ['QUERY_STRING']))
    site = get_site(urlpath)
    params = Params(urlparse.parse_qs(urlpath.query))
    rand = random.Random()
    rand.seed(params.seed)
    iframe_htmls = []
    for frame in xrange(0, apply_jitter(rand, params.jitter, params.nframes)):
        # Copy current parameters into iframe and make modifications
        # for the recursive generation.
        iframe_params = copy.copy(params)
        iframe_params.depth = params.depth - 1
        # Base the new seed off the current seed, but have it skip enough that
        # different frame trees are unlikely to collide. Numbers and skips
        # not chosen in any scientific manner at all.
        iframe_params.seed = params.seed + (frame + 1) * (
            1000000 + params.depth + 333)
        iframe_htmls.append(make_iframe_html(urlpath, iframe_params))
    # Template gets all params plus the derived color/iframes/site/url.
    template_params = dict(params.__dict__)
    template_params.update({
        'color': get_color_for_site(site),
        'iframe_html': '\n'.join(iframe_htmls),
        'site': site,
        'url': make_src(urlpath.scheme, urlpath.netloc, urlpath.path, params),
    })
    return MAIN_PAGE % template_params
def application(environ, start_response):
    """WSGI entry point: serve the generated page; favicons get an empty body."""
    start_response('200 OK', [('Content-Type', 'text/html')])
    is_favicon = environ['PATH_INFO'] == '/favicon.ico'
    yield '' if is_favicon else create_html(environ)
# Bind on all interfaces, port 8090; gevent's WSGIServer drives the
# generator-based app above. serve_forever() blocks until interrupted.
server = pywsgi.WSGIServer(('', 8090), application)
server.serve_forever()
| bsd-3-clause |
sdague/home-assistant | tests/components/homekit_controller/test_config_flow.py | 6 | 21662 | """Tests for homekit_controller config flow."""
from unittest import mock
import aiohomekit
from aiohomekit.model import Accessories, Accessory
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
from homeassistant.components.homekit_controller import config_flow
from homeassistant.helpers import device_registry
import tests.async_mock
from tests.async_mock import patch
from tests.common import MockConfigEntry, mock_device_registry
PAIRING_START_FORM_ERRORS = [
(KeyError, "pairing_failed"),
]
PAIRING_START_ABORT_ERRORS = [
(aiohomekit.AccessoryNotFoundError, "accessory_not_found_error"),
(aiohomekit.UnavailableError, "already_paired"),
]
PAIRING_TRY_LATER_ERRORS = [
(aiohomekit.BusyError, "busy_error"),
(aiohomekit.MaxTriesError, "max_tries_error"),
(IndexError, "protocol_error"),
]
PAIRING_FINISH_FORM_ERRORS = [
(aiohomekit.exceptions.MalformedPinError, "authentication_error"),
(aiohomekit.MaxPeersError, "max_peers_error"),
(aiohomekit.AuthenticationError, "authentication_error"),
(aiohomekit.UnknownError, "unknown_error"),
(KeyError, "pairing_failed"),
]
PAIRING_FINISH_ABORT_ERRORS = [
(aiohomekit.AccessoryNotFoundError, "accessory_not_found_error")
]
INVALID_PAIRING_CODES = [
"aaa-aa-aaa",
"aaa-11-aaa",
"111-aa-aaa",
"aaa-aa-111",
"1111-1-111",
"a111-11-111",
" 111-11-111",
"111-11-111 ",
"111-11-111a",
"1111111",
"22222222",
]
VALID_PAIRING_CODES = [
"114-11-111",
"123-45-679",
"123-45-679 ",
"11121111",
"98765432",
" 98765432 ",
]
def _setup_flow_handler(hass, pairing=None):
    """Build a HomekitControllerFlowHandler wired to mock discovery/pairing.

    The returned flow has a mocked controller whose find_ip_by_device_id
    yields a discovery object that pairs successfully and hands back
    ``pairing`` as the finished pairing.
    """
    flow = config_flow.HomekitControllerFlowHandler()
    flow.hass = hass
    flow.context = {}

    # start_pairing returns a coroutine (finish_pairing); awaiting that with
    # the pin completes the pairing and yields the (mock) pairing object.
    finish_pairing = tests.async_mock.AsyncMock(return_value=pairing)

    discovery = mock.Mock()
    discovery.device_id = "00:00:00:00:00:00"
    discovery.start_pairing = tests.async_mock.AsyncMock(return_value=finish_pairing)

    flow.controller = mock.Mock()
    flow.controller.pairings = {}
    flow.controller.find_ip_by_device_id = tests.async_mock.AsyncMock(
        return_value=discovery
    )

    return flow
@pytest.mark.parametrize("pairing_code", INVALID_PAIRING_CODES)
def test_invalid_pairing_codes(pairing_code):
    """Test ensure_pin_format raises for an invalid pin code."""
    with pytest.raises(aiohomekit.exceptions.MalformedPinError):
        config_flow.ensure_pin_format(pairing_code)


@pytest.mark.parametrize("pairing_code", VALID_PAIRING_CODES)
def test_valid_pairing_codes(pairing_code):
    """Test ensure_pin_format corrects format for a valid pin in an alternative format."""
    valid_pin = config_flow.ensure_pin_format(pairing_code).split("-")
    # A normalized pin is always three groups: XXX-XX-XXX.
    assert len(valid_pin) == 3
    assert len(valid_pin[0]) == 3
    assert len(valid_pin[1]) == 2
    assert len(valid_pin[2]) == 3
def get_flow_context(hass, result):
    """Get the flow context from the result of async_init or async_configure."""
    contexts = (
        progress["context"]
        for progress in hass.config_entries.flow.async_progress()
        if progress["flow_id"] == result["flow_id"]
    )
    # Raises StopIteration if no matching flow is in progress, exactly like
    # the bare next() this replaces.
    return next(contexts)
def get_device_discovery_info(device, upper_case_props=False, missing_csharp=False):
    """Turn an aiohomekit-format zeroconf entry into a homeassistant one."""
    record = device.info

    props = {
        "md": record["md"],
        "pv": record["pv"],
        "id": device.device_id,
        "c#": record["c#"],
        "s#": record["s#"],
        "ff": record["ff"],
        "ci": record["ci"],
        "sf": 0x01,  # record["sf"],
        "sh": "",
    }
    # Some devices omit the configuration-number property entirely.
    if missing_csharp:
        del props["c#"]
    # Some devices advertise their mDNS properties in upper case.
    if upper_case_props:
        props = {name.upper(): value for (name, value) in props.items()}

    return {
        "host": record["address"],
        "port": record["port"],
        "hostname": record["name"],
        "type": "_hap._tcp.local.",
        "name": record["name"],
        "properties": props,
    }
def setup_mock_accessory(controller):
    """Add a bridge accessory with a single lightbulb to a test controller."""
    bridge = Accessories()

    accessory = Accessory.create_with_info(
        name="Koogeek-LS1-20833F",
        manufacturer="Koogeek",
        model="LS1",
        serial_number="12345",
        firmware_revision="1.1",
    )

    # One minimal lightbulb service (with an ON characteristic) so the
    # accessory is a valid, enumerable HomeKit device.
    service = accessory.add_service(ServicesTypes.LIGHTBULB)
    on_char = service.add_char(CharacteristicsTypes.ON)
    on_char.value = 0

    bridge.add_accessory(accessory)

    return controller.add_device(bridge)
@pytest.mark.parametrize("upper_case_props", [True, False])
@pytest.mark.parametrize("missing_csharp", [True, False])
async def test_discovery_works(hass, controller, upper_case_props, missing_csharp):
    """Test the happy path: zeroconf discovery followed by a successful pairing."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device, upper_case_props, missing_csharp)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "source": "zeroconf",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
    }

    # User initiates pairing - device enters pairing mode and displays code
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["step_id"] == "pair"

    # Pairing doesn't error and results in a config entry
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
    assert result["data"] == {}
async def test_abort_duplicate_flow(hass, controller):
    """A second discovery of a device with a pairing flow in progress aborts."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"

    # Same device is discovered again while the first flow is still pending
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_in_progress"


async def test_pair_already_paired_1(hass, controller):
    """Discovery of a device flagged as already paired (sf bit clear) aborts."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Flag device as already paired
    discovery_info["properties"]["sf"] = 0x0

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_paired"


async def test_id_missing(hass, controller):
    """Discovery data without the HomeKit id property aborts the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Remove id from device
    del discovery_info["properties"]["id"]

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "abort"
    assert result["reason"] == "invalid_properties"
async def test_discovery_ignored_model(hass, controller):
    """Discovery of Home Assistant's own exported HomeKit bridge is ignored."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Register a device entry that marks this MAC as our own exported bridge.
    config_entry = MockConfigEntry(domain=config_flow.HOMEKIT_BRIDGE_DOMAIN, data={})
    formatted_mac = device_registry.format_mac("AA:BB:CC:DD:EE:FF")

    dev_reg = mock_device_registry(hass)
    dev_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        identifiers={
            (
                config_flow.HOMEKIT_BRIDGE_DOMAIN,
                config_entry.entry_id,
                config_flow.HOMEKIT_BRIDGE_SERIAL_NUMBER,
            )
        },
        connections={(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)},
        model=config_flow.HOMEKIT_BRIDGE_MODEL,
    )

    discovery_info["properties"]["id"] = "AA:BB:CC:DD:EE:FF"

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "abort"
    assert result["reason"] == "ignored_model"


async def test_discovery_invalid_config_entry(hass, controller):
    """There is already a config entry for the pairing id but it's invalid."""
    MockConfigEntry(
        domain="homekit_controller",
        data={"AccessoryPairingID": "00:00:00:00:00:00"},
        unique_id="00:00:00:00:00:00",
    ).add_to_hass(hass)

    # We just added a mock config entry so it must be visible in hass
    assert len(hass.config_entries.async_entries()) == 1

    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )

    # Discovery of a HKID that is in a pairable state but for which there is
    # already a config entry - in that case the stale config entry is
    # automatically removed.
    config_entry_count = len(hass.config_entries.async_entries())
    assert config_entry_count == 0

    # And new config flow should continue allowing user to set up a new pairing
    assert result["type"] == "form"


async def test_discovery_already_configured(hass, controller):
    """Discovery of a paired device that already has a config entry aborts."""
    MockConfigEntry(
        domain="homekit_controller",
        data={"AccessoryPairingID": "00:00:00:00:00:00"},
        unique_id="00:00:00:00:00:00",
    ).add_to_hass(hass)

    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Set device as already paired
    discovery_info["properties"]["sf"] = 0x00

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
@pytest.mark.parametrize("exception,expected", PAIRING_START_ABORT_ERRORS)
async def test_pair_abort_errors_on_start(hass, controller, exception, expected):
    """Errors raised while entering pairing mode that abort the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )

    # User initiates pairing - device refuses to enter pairing mode
    test_exc = exception("error")
    with patch.object(device, "start_pairing", side_effect=test_exc):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])

    assert result["type"] == "abort"
    assert result["reason"] == expected


@pytest.mark.parametrize("exception,expected", PAIRING_TRY_LATER_ERRORS)
async def test_pair_try_later_errors_on_start(hass, controller, exception, expected):
    """Transient start errors show a retry form, after which pairing succeeds."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )

    # User initiates pairing - device refuses to enter pairing mode but may be successful after entering pairing mode or rebooting
    test_exc = exception("error")
    with patch.object(device, "start_pairing", side_effect=test_exc):
        result2 = await hass.config_entries.flow.async_configure(result["flow_id"])

    assert result2["step_id"] == expected
    assert result2["type"] == "form"

    # Device is rebooted or placed into pairing mode as they have been instructed

    # We start pairing again
    result3 = await hass.config_entries.flow.async_configure(
        result2["flow_id"], user_input={"any": "key"}
    )

    # .. and successfully complete pair
    result4 = await hass.config_entries.flow.async_configure(
        result3["flow_id"], user_input={"pairing_code": "111-22-333"}
    )

    assert result4["type"] == "create_entry"
    assert result4["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_START_FORM_ERRORS)
async def test_pair_form_errors_on_start(hass, controller, exception, expected):
    """Errors while starting pairing re-display the form; a retry then succeeds."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }

    # User initiates pairing - device refuses to enter pairing mode
    test_exc = exception("error")
    with patch.object(device, "start_pairing", side_effect=test_exc):
        result = await hass.config_entries.flow.async_configure(
            result["flow_id"], user_input={"pairing_code": "111-22-333"}
        )
    assert result["type"] == "form"
    assert result["errors"]["pairing_code"] == expected

    # The flow context is unchanged by the failed attempt
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }

    # User gets back the form
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["errors"] == {}

    # User re-tries entering pairing code
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_ABORT_ERRORS)
async def test_pair_abort_errors_on_finish(hass, controller, exception, expected):
    """Errors raised while finishing pairing that abort the flow."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }

    # User initiates pairing - this triggers the device to show a pairing code
    # and then HA to show a pairing form
    finish_pairing = tests.async_mock.AsyncMock(side_effect=exception("error"))
    with patch.object(device, "start_pairing", return_value=finish_pairing):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }

    # User enters pairing code
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "abort"
    assert result["reason"] == expected


@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_FORM_ERRORS)
async def test_pair_form_errors_on_finish(hass, controller, exception, expected):
    """Errors raised while finishing pairing that re-display the form."""
    device = setup_mock_accessory(controller)
    discovery_info = get_device_discovery_info(device)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "zeroconf"}, data=discovery_info
    )
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }

    # User initiates pairing - this triggers the device to show a pairing code
    # and then HA to show a pairing form
    finish_pairing = tests.async_mock.AsyncMock(side_effect=exception("error"))
    with patch.object(device, "start_pairing", return_value=finish_pairing):
        result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }

    # User enters pairing code
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "form"
    assert result["errors"]["pairing_code"] == expected

    # Flow context is preserved so the user can retry
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "zeroconf",
    }
async def test_user_works(hass, controller):
    """Test a user-initiated flow that discovers devices and pairs one."""
    setup_mock_accessory(controller)

    # Device is discovered
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "user"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "user"

    assert get_flow_context(hass, result) == {
        "source": "user",
    }

    # User picks the discovered device from the form
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"device": "TestDevice"}
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"

    assert get_flow_context(hass, result) == {
        "source": "user",
        "unique_id": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
    }

    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"


async def test_user_no_devices(hass, controller):
    """Test user initiated pairing where no devices discovered."""
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "user"}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"


async def test_user_no_unpaired_devices(hass, controller):
    """Test user initiated pairing where no unpaired devices discovered."""
    device = setup_mock_accessory(controller)

    # Pair the mock device so that it shows as paired in discovery
    finish_pairing = await device.start_pairing(device.device_id)
    await finish_pairing(device.pairing_code)

    # Device discovery is requested
    result = await hass.config_entries.flow.async_init(
        "homekit_controller", context={"source": "user"}
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"
async def test_unignore_works(hass, controller):
    """Test that unignoring a device triggers rediscovery and pairing."""
    device = setup_mock_accessory(controller)

    # Device is unignored
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": "unignore"},
        data={"unique_id": device.device_id},
    )
    assert result["type"] == "form"
    assert result["step_id"] == "pair"
    assert get_flow_context(hass, result) == {
        "hkid": "00:00:00:00:00:00",
        "title_placeholders": {"name": "TestDevice"},
        "unique_id": "00:00:00:00:00:00",
        "source": "unignore",
    }

    # User initiates pairing by clicking on 'configure' - device enters pairing mode and displays code
    result = await hass.config_entries.flow.async_configure(result["flow_id"])
    assert result["type"] == "form"
    assert result["step_id"] == "pair"

    # Pairing finalized
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], user_input={"pairing_code": "111-22-333"}
    )
    assert result["type"] == "create_entry"
    assert result["title"] == "Koogeek-LS1-20833F"


async def test_unignore_ignores_missing_devices(hass, controller):
    """Unignore-triggered rediscovery handles devices that have gone away."""
    setup_mock_accessory(controller)

    # Device is unignored, but with an id no discovered device matches
    result = await hass.config_entries.flow.async_init(
        "homekit_controller",
        context={"source": "unignore"},
        data={"unique_id": "00:00:00:00:00:01"},
    )
    assert result["type"] == "abort"
    assert result["reason"] == "no_devices"
| apache-2.0 |
falkolab/titanium_mobile | support/common/localecompiler.py | 32 | 5750 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Localization Compiler
#
import os, sys, codecs, shutil
from xml.dom.minidom import parse
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.join(template_dir,'../'))
from tiapp import *
ignoreFiles = ['.gitignore', '.cvsignore']
ignoreDirs = ['.git','.svn', 'CVS']
class LocaleCompiler(object):
def __init__(self,name,dir,platform,mode='simulator',outdir=None):
self.dir = os.path.join(dir,'i18n')
self.platform = platform
self.name = name
self.mode = mode
self.outdir = outdir
self.iphone_dir = os.path.join(dir,'build','iphone','build')
self.android_dir = os.path.join(dir,'build','android','res')
if self.outdir!=None:
self.android_dir = self.outdir
def get_locale(self,file):
return os.path.basename(os.path.dirname(file))
def get_ios_dir(self):
if self.outdir!=None: return self.outdir
if self.mode == 'development': # simulator
return os.path.join(self.iphone_dir,'Debug-iphonesimulator','%s.app' % self.name)
elif self.mode == 'test': # adhoc install
return os.path.join(self.iphone_dir,'Debug-iphoneos','%s.app' % self.name)
else: # distribution
return os.path.join(self.iphone_dir,'Release-iphoneos','%s.app' % self.name)
def getText(self,nodelist):
rc = u""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def isApp(self,file):
return (os.path.basename(file) == "app.xml")
def isStrings(self,file):
return (os.path.basename(file) == "strings.xml")
def localization_file_name_ios(self,file):
if self.isApp(file):
return "InfoPlist.strings"
return "Localizable.strings"
def compile_for_ios(self,file):
locale = self.get_locale(file)
build_dir = self.get_ios_dir()
lproj_dir = os.path.join(build_dir,'%s.lproj' % locale)
if not os.path.exists(lproj_dir): os.makedirs(lproj_dir)
locale_file = os.path.join(lproj_dir,self.localization_file_name_ios(file))
f = codecs.open(locale_file,'w','utf-16')
f.write(u'/**\n * Appcelerator Titanium\n * this is a generated file - DO NOT EDIT\n */\n\n')
dom = parse(file)
appkeys = { 'appname' : 'CFBundleDisplayName' }
for node in dom.documentElement.childNodes:
if node.nodeType != 1: continue
name = node.attributes['name'].nodeValue
if self.isApp(file):
name = appkeys[name]
if name is None:
pass
value = self.getText(node.childNodes)
# TODO: translate any more symbols?
value = value.replace("%s",'%@')
f.write(u'"%s" = "%s";\n' % (name,value))
f.close()
if self.mode!='development': #only compile if not simulator
os.system("/usr/bin/plutil -convert binary1 \"%s\"" % locale_file)
print "[DEBUG] compiled ios file: %s" % locale_file
def compile_for_android(self,file):
#TODO: Add android support for app.xml
if not self.isStrings(file):
return
locale = self.get_locale(file)
# for andoird, we can simply copy into the right directory
if locale == 'en' or locale.lower() == 'en-us':
dir = os.path.join(self.android_dir,'values')
else:
if len(locale) == 5 and locale[2] == '-':
# Android en-US -> en-rUS (need the r)
locale = locale[0:3] + 'r' + locale[-2:]
dir = os.path.join(self.android_dir,'values-%s' % locale)
to_ = os.path.join(dir,'strings.xml')
if not os.path.exists(dir):
os.makedirs(dir)
shutil.copy(file, to_)
#
# Merge strings.xml from /i18n/ and build/android/res/values/
# (TIMOB-12663)
#
elif os.path.isfile(to_):
sfile = open(file, 'r')
dfile = open(to_, 'r')
scontent = sfile.read()
dcontent = dfile.read()
sfile.close()
dfile.close()
sindex = scontent.find('</resources>')
dindex = dcontent.find('>', dcontent.find('<resources')) + 1
content_to_write = scontent[:sindex] + dcontent[dindex:]
wfile = open(to_, 'w')
wfile.write(content_to_write)
wfile.close()
else:
shutil.copy(file, to_)
print "[DEBUG] compiled android file: %s" % file
def compile(self):
if not os.path.exists(self.dir): return
print "[INFO] Compiling localization files"
sys.stdout.flush()
for dirname,dirs,files in os.walk(self.dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for f in files:
if f in ignoreFiles: continue
if not f.endswith('.xml'): continue
file = os.path.join(dirname,f)
if self.platform == 'ios' or self.platform == 'iphone' or self.platform == 'ipad' or self.platform == 'universal':
self.compile_for_ios(file)
elif self.platform == 'android':
self.compile_for_android(file)
elif self.platform == 'blackberry':
# TODO
pass
if __name__ == "__main__":
    # CLI usage: localecompiler.py <project_dir> <platform> [mode] [outdir]
    if len(sys.argv)==1 or len(sys.argv) < 3:
        print "Appcelerator Locale Compiler"
        print "Usage: %s <project_dir> <platform> [mode] [outdir]" % os.path.basename(sys.argv[0])
        sys.exit(1)

    path = os.path.expanduser(sys.argv[1])
    if not os.path.exists(path):
        print "Project directory not found: %s" % path
        sys.exit(1)

    # A valid Titanium project must contain tiapp.xml and a Resources dir.
    tiapp_xml_path = os.path.join(path,'tiapp.xml')
    if not os.path.exists(tiapp_xml_path):
        print "Project directory doesn't look like a valid Titanium project: %s" % path
        sys.exit(1)

    resources_dir = os.path.join(path,'Resources')
    if not os.path.exists(resources_dir):
        print "Project directory doesn't look like a valid Titanium project: %s" % path
        sys.exit(1)

    platform = sys.argv[2]
    tiapp = TiAppXML(tiapp_xml_path)
    app_name = tiapp.properties['name']

    # Optional positional arguments: build mode and output directory.
    mode = 'simulator'
    outdir = None
    if len(sys.argv) > 3:
        mode = sys.argv[3]
    if len(sys.argv) > 4:
        outdir = os.path.expanduser(sys.argv[4])

    c = LocaleCompiler(app_name,path,platform,mode,outdir)
    c.compile()
| apache-2.0 |
nutszebra/trainer | nutszebra_utility.py | 1 | 25948 | from __future__ import division, print_function, absolute_import, unicode_literals
import collections
import collections.abc
import itertools
import json
import mmap
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import threading
from operator import itemgetter
from os.path import expanduser

import six
import six.moves.cPickle as pickle
from six.moves.urllib_parse import urlparse
from tqdm import tqdm
class Command(object):
    """Run a shell command on a worker thread, killing it after a timeout."""

    def __init__(self, cmd):
        self.cmd = cmd
        self.process = None

    def run(self, timeout=3600):
        """Execute the command; terminate it if *timeout* seconds elapse.

        Returns the child's return code (negative when killed by a signal).
        """
        def _spawn():
            # communicate() keeps the worker blocked until the child exits.
            self.process = subprocess.Popen(self.cmd, shell=True)
            self.process.communicate()

        worker = threading.Thread(target=_spawn)
        worker.start()
        worker.join(timeout)
        if worker.is_alive():
            # Timed out: kill the child, then wait for the worker to wind down.
            self.process.terminate()
            worker.join()
        return self.process.returncode
class Utility(object):
"""Some utility functions are defined here
Attributes:
alphabet_lowercase (list): store all alphabet in lowercase
alphabet_capital (list): store all alphabet in capital
numbers (list): store all digits in string
reg_text (list): regular expression to search for text file
reg_jpg (list): regular expression to search for jpg
reg_png (list): regular expression to search for png
reg_pickle (list): regular expression to search for pickle
reg_json (list): regular expression to search for json
home (str): home, e.g. /home/nutszebra
nutszebra_path (list): path to nutszebra_utility
inf (inf): inf
"""
alphabet_lowercase = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
alphabet_capital = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
reg_text = [r'.*\.txt$', r'.*\.text$', r'.*\.TEXT$']
reg_jpg = [r'.*\.jpg$', r'.*\.jpeg$', r'.*\.JPEG$', r'.*\.Jpeg$']
reg_png = [r'.*\.png$', r'.*\.Png$', r'.*\.PNG$']
reg_pickle = [r'.*\.pkl$', r'.*\.pickle$', r'.*\.PKL$', r'.*\.Pickle$']
reg_json = [r'.*\.json$', r'.*\.Json$', r'.*\.JSON$']
home = expanduser("~")
nutszebra_path = './'
inf = float('inf')
def __init__(self):
pass
@staticmethod
def slice_and_paste_prefix(seq, separator='/', start=0, end=None, prefix=''):
"""Slice strings and then put prefix
Edited date:
160606
Example:
::
seq = u'/home/ubuntu/img/shoes/sneakers/m693084193.jpg'
answer = self.slice_and_paste_prefix(seq, separator='/', start=0, end=None, prefix='')
>>> print(answer)
u'/home/ubuntu/img/shoes/sneakers/m693084193.jpg'
answer = self.slice_and_paste_prefix(seq, separator='/', start=1, end=None, prefix='')
>>> print(answer)
u'home/ubuntu/img/shoes/sneakers/m693084193.jpg'
answer = self.slice_and_paste_prefix(seq, separator='/', start=2, end=None, prefix='')
>>> print(answer)
u'ubuntu/img/shoes/sneakers/m693084193.jpg'
answer = self.slice_and_paste_prefix(seq, separator='/', start=-3, end=None, prefix='/home/suguru/aws_CodeNext/fashion/mercari/')
>>> print(answer)
u'/home/suguru/aws_CodeNext/fashion/mercari/shoes/sneakers/m693084193.jpg'
Args:
seq (str): strings to slice
separator (str): separator to slplit seq
start (str): the index to start slice
end (Optional[int, None]): the index to end slice
prefix (str): prefix is added to sliced strings
Returns:
str: sliced sequence with prefix
"""
tmp = seq.split(separator)
if end is None:
return prefix + separator.join(tmp[start:end])
else:
return prefix + separator.join(tmp[start:end])
@staticmethod
def flat_two_dimensional_list(data):
# http://d.hatena.ne.jp/xef/20121027/p2
return list(itertools.chain.from_iterable(data))
@staticmethod
def pwd():
return os.path.abspath('./')
@staticmethod
def input():
return six.moves.input()
@staticmethod
def input_cast(cast=int):
return six.moves.map(cast, Utility.input().split())
def input_multiple_lines(self):
# http://nemupm.hatenablog.com/entry/2015/01/03/234840
return sys.stdin.readlines()
@staticmethod
def set_recursion_limit(n=1 * 10 ** 8):
sys.setrecursionlimit(n)
return True
@staticmethod
def get_recursion_limit():
return sys.getrecursionlimit()
@staticmethod
def download_file(url, destination, file_name, timeout=3600, header='--header="Accept: text/html" --user-agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0" '):
"""Download file
Edited date:
160429
Test:
160429
Example:
::
self.download_file('http://img5.zozo.jp/goodsimages/502/11989502/11989502B_16_D_500.jpg', '.', 'test.jpg', timeout=10)
Args:
url (str): url to download
destination (str): put the downloaded file onto the destination
file_name (str): file name
timeout (Optional[int, float]): If downloading is not finished in timeout seconds, stop downloading
header (Optional[str]): header for wget
"""
if destination[-1] is not '/':
destination = destination + '/'
cmd = 'wget ' + header + url + ' -O ' + destination + file_name + ' -q'
command = Command(cmd)
command.run(timeout=timeout)
def untar_gz(self, tar_gz_file, destination='./', timeout=3600):
"""Untar gz file
Edited date:
160429
Test:
160429
Example:
::
self.untra_gz('./test.gz', '.', timeout=10)
Args:
tar_gz_file (str): gz file
destination (str): untar gz file onto the destination
file_name (str): file name
timeout (Optional[int, float]): If untar_gz is not finished in timeout seconds, stop untra_gz
"""
cmd = 'tar -zxvf ' + tar_gz_file + ' -C ' + destination
command = Command(cmd)
command.run(timeout=timeout)
def tar_gz(self, tar_gz_file, destination='./', timeout=3600):
"""tar gz file
Edited date:
160429
Test:
160429
Example:
::
self.untra_gz('./test.gz', '.', timeout=10)
Args:
tar_gz_file (str): file to apply tar command
destination (str): file that tar is applied onto the destination
file_name (str): file name
timeout (Optional[int, float]): If untar_gz is not finished in timeout seconds, stop untra_gz
"""
cmd = 'tar -zcvf ' + tar_gz_file + '.tar.gz ' + tar_gz_file + ' -C ' + destination
command = Command(cmd)
command.run(timeout=timeout)
def url_to_directory(self, url, whole_flag=False):
"""Convert url to directory name
Edited date:
160429
Test:
160429
Example:
::
answer = self.url_to_directory('http://img5.zozo.jp/goodsimages/502/11989502/11989502B_16_D_500.jpg')
>>> print(answer)
'___a___goodsimages___a___502___a___11989502___a___11989502B_16_D_500___b___jpg'
answer = self.url_to_directory('http://img5.zozo.jp/goodsimages/502/11989502/11989502B_16_D_500.jpg', whole_flag=True)
>>> print(answer)
'http___e______a______a___img5___b___zozo___b___jp___a___goodsimages___a___502___a___11989502___a___11989502B_16_D_500___b___jpg
Args:
url (str): url to convert to directory name
whole_flag (bool): if whole_flag is True, whole url will be converted to the directory name.
Returns:
str: converted url
"""
if whole_flag is True:
return self._url_to_directory(url)
scheme, netloc, path, params, query, fragment = urlparse(url)
# ___a___: /
# ___b___: .
# ___c___: ?
# ___d___: =
# ___e___: :
name = ''
name += self._url_to_directory(path)
if query:
name += '___c___' + self._url_to_directory(query)
return name
def _url_to_directory(self, sentence):
"""Private method for self.url_to_directory
Edited date:
160429
Args:
sentence (str): sentence to be converted
Returns:
str: converted sentence
"""
# ___a___: /
# ___b___: .
# ___c___: ?
# ___d___: =
# ___e___: :
return sentence.replace('/', '___a___').replace('.', '___b___').replace('?', '___c___').replace('=', '___d___').replace(':', '___e___')
def directory_to_url(self, directory):
"""Convert directory to url
Edited date:
160429
Test:
160429
Example:
::
answer = self.url_to_directory('http://img5.zozo.jp/goodsimages/502/11989502/11989502B_16_D_500.jpg', whole_flag=True)
>>> print(answer)
'http___e______a______a___img5___b___zozo___b___jp___a___goodsimages___a___502___a___11989502___a___11989502B_16_D_500___b___jpg
answer = self.directory_to_url(answer)
>>> print(answer)
'http://img5.zozo.jp/goodsimages/502/11989502/11989502B_16_D_500.jpg
Args:
directory (str): the directory name that was converted by self.url_to_directory
Returns:
str: converted directory
"""
# ___a___: /
# ___b___: .
# ___c___: ?
# ___d___: =
name = directory.split('/')[-1]
return self._directory_to_url(name)
def _directory_to_url(self, sentence):
"""Private method for self.directory_to_url
Edited date:
160429
Args:
sentence (str): sentence to be converted
Returns:
str: converted sentence
"""
# ___a___: /
# ___b___: .
# ___c___: ?
# ___d___: =
# ___e___: :
return sentence.replace('___e___', ':').replace('___d___', '=').replace('___c___', '?').replace('___b___', '.').replace('___a___', '/')
def copy_file(self, src, dest, sudo=True):
if sudo:
cmd = 'sudo cp ' + src + ' ' + dest
else:
cmd = 'cp ' + src + ' ' + dest
subprocess.call(cmd, shell=True)
@staticmethod
def create_progressbar(end, desc='', stride=1, start=0):
return tqdm(six.moves.range(int(start), int(end), int(stride)), desc=desc, leave=False)
@staticmethod
def log_progressbar(sentence):
tqdm.write(sentence)
return True
@staticmethod
def make_dir_one(path):
"""Make one directory
Examples:
::
path = './test'
self.make_dir_one(path)
Args:
path (str): path to dir that you'd like to create
Returns:
bool: True if successful, False otherwise
"""
if not os.path.exists(path):
os.makedirs(path)
return True
return False
@staticmethod
def make_dir(path):
separated_path = path.split('/')
tmp_path = ''
for directory in separated_path:
tmp_path = tmp_path + directory + '/'
if directory == '.':
continue
Utility.make_dir_one(tmp_path)
return True
@staticmethod
def remove_file(path):
"""Remove file
Examples:
::
path = './test.text'
self.remove_file(path)
Args:
path (str): path to file that you'd like to delete
Returns:
bool: True if successful, False otherwise
"""
if os.path.exists(path):
os.remove(path)
return True
return False
def remove_dir(self, path):
"""Remove directory
Examples:
::
path = './test'
self.remove_dir(path)
Args:
path (str): path to directory that you'd like to delete
Returns:
bool: True if successful, False otherwise
"""
if os.path.exists(path):
os.remove(path)
return True
return False
@staticmethod
def find_files_recursively(base):
"""Get all files recursively beneath base path
Edited date:
160628
Test:
160628
Note:
directory is disregarded
Examples:
::
base = './'
files = self.find_files_recursively(base)
>>> print(files)
['./nutszebra.py',
'./nutszebra_gmail.py',
'./nutszebra_preprocess.py'
...
]
Args:
base (str): base path
Returns:
list: return the list that store all files name
"""
if base[-1] == '/':
base = base[:-1]
all_files = []
walk = os.walk(base, topdown=True, onerror=None, followlinks=False)
for dirpath, dirs, files in walk:
for f in files:
all_files.append(dirpath + '/' + f)
return all_files
@staticmethod
def find_files(path, affix_flag=False):
"""Get all path of files; this function is not recursive
Edited date:
160628
Test:
160628
Note:
directories are included
Examples:
::
path = './'
files = self.find_files(path)
>>> print(files)
['./nutszebra.py',
'./nutszebra_gmail.py',
'./nutszebra_preprocess.py'
...
]
files = self.find_files(path, affix_flag=True)
>>> print(files)
['nutszebra.py',
'nutszebra_gmail.py',
'nutszebra_preprocess.py'
...
]
Args:
path (str): path
affix_flag (Optional[True, False]): if True, only file or directory names are stored
Returns:
list: return list that stores file name paths
"""
if path[-1] == '/':
path = path[:-1]
if affix_flag is False:
return [path + '/' + name for name in os.listdir(path)]
else:
return [name for name in os.listdir(path)]
@staticmethod
def reg_extract(files, reg, casting=None):
"""Extract elements by resular expression
Edited date:
160628
Test:
160628
Note:
| reg can be:
| nutszebra_utility.Utility.reg_jpg
| nutszebra_utility.Utility.reg_png
| nutszebra_utility.Utility.reg_text
| nutszebra_utility.Utility.reg_json
| nutszebra_utility.Utility.reg_pickle
Examples:
::
data = ['1.jpg', '3.png', '.4.swp', 'hi', '10.jpeg', '2.Jpeg', '4.JPEG', '5.PNG', '6.Png', 'gomi', 'hoge', '1.txt', '2.TEXT', '3.text', '5.Json', '6.JSON', '7.json', 'hyohyo', '10.Pickle', 'a', '1.pickle', '11.Pickle']
>>> print(self.reg_extract(data, self.reg_jpg))
['1.jpg', '10.jpeg', '4.JPEG', '2.Jpeg']
>>> print(self.reg_extract(data, self.reg_png))
['3.png', '6.Png', '5.PNG']
>>> print(self.reg_extract(data, self.reg_text))
['1.txt', '3.text', '2.TEXT']
>>> print(self.reg_extract(data, self.reg_json))
['7.json', '5.Json', '6.JSON']
>>> print(self.reg_extract(data, self.reg_pickle))
['1.pickle', '10.Pickle', '11.Pickle']
data = ['value', '0.001', 'value1', '3.23', '4', 'end']
reg = [r'^\d+\.\d+$|^\d+$']
>>> print(self.reg_extract(data, reg))
['0.001', '3.23', '4']
>>> print(self.reg_extract(data, reg, self.cast_int))
[0, 3, 4]
>>> print(self.reg_extract(data, reg, self.cast_float))
[0.001, 3.23, 4.0]
Args:
files (list): files to extract
reg (list): regular expression
casting Optional([nutszebra_utility.Utility.cast_int, nutszebra_utility.Utility.cast_float, nutszebra_utility.Utility.cast_str, None]): extracted elements can be casted
Returns:
list: extracted elements
"""
if casting is None:
return [ff for r in reg for f in files for ff in re.findall(r, f)]
else:
return [casting(ff)[0] for r in reg for f in files for ff in re.findall(r, f)]
@staticmethod
def cast_int(array):
"""Convert values inside array to int
Edited date:
160628
Test:
160628
Examples:
::
data = [1.0, 2.0, 3.0]
>>> print(self.cast_int(data))
[1, 2, 3]
Args:
array (list): array that contains value to be converted
Returns:
list: converted array
"""
if type(array) is str or not isinstance(array, collections.Iterable):
array = [array]
# int('1.0') gives error, thus float first
return [int(float(num)) for num in array]
@staticmethod
def cast_float(array):
"""Convert values inside array to float
Edited date:
160628
Test:
160628
Examples:
::
data = [1, 2, 3]
>>> print(self.cast_float(data))
[1.0, 2.0, 3.0]
Args:
array (list): array that contains value to be converted
Returns:
list: converted array
"""
if type(array) is str or not isinstance(array, collections.Iterable):
array = [array]
return [float(num) for num in array]
@staticmethod
def cast_str(array):
"""Convert values inside array to str
Edited date:
160628
Test:
160628
Examples:
::
data = [1, 2, 3]
>>> print(self.cast_str(data))
['1', '2', '3']
Args:
array (list): array that contains value to be converted
Returns:
list: converted array
"""
if type(array) is str or not isinstance(array, collections.Iterable):
array = [array]
return [str(num) for num in array]
@staticmethod
def save_pickle(data, path):
"""Save as pickle
Edited date:
160628
Test:
160628
Examples:
::
data = [numpy.zeros((10,10)),numpy.zeros((10,10))]
path = 'test.pickle'
self.save_pickle(data, path)
Args:
data : data to save
path (str): path
Returns:
bool: True if successful, False otherwise
"""
with open(path, 'wb') as f:
pickle.dump(data, f)
return True
@staticmethod
def load_pickle(path, encoding=None):
"""Load pickle
Edited date:
160628
Test:
160628
Examples:
::
path = 'test.pickle'
data = self.load_pickle(path)
Args:
path (str): pickle file
Returns:
loaded pickle
"""
with open(path, 'rb') as f:
if encoding is None:
answer = pickle.load(f)
else:
answer = pickle.load(f, encoding=encoding)
return answer
@staticmethod
def save_json(data, path, jap=False):
"""Save as json
Edited date:
160628
Test:
160628
Examples:
::
data = {'a':1, 'b':2}
path = 'test.json'
self.save_json(data, path)
Args:
data (dict): data to save
path (str): path
Returns:
bool: True if successful, False otherwise
"""
with open(path, 'w') as f:
json.dump(data, f, ensure_ascii=not jap)
return True
@staticmethod
def load_json(path):
"""Load json
Edited date:
160628
Test:
160628
Examples:
::
path = 'test.json'
data = self.load_json(path)
Args:
path (str): json file
Returns:
dict: loaded json
"""
with open(path, 'r') as f:
answer = json.load(f)
return answer
@staticmethod
def count_line(path):
"""count lines of text
Edited date:
160626
Test:
160626
Examples:
::
path = 'test.text'
self.count_line(text)
Args:
path (str): text file
Returns:
int: number of lines
"""
f = os.open(path, os.O_RDONLY)
buf = mmap.mmap(f, 0, prot=mmap.PROT_READ)
answer = 0
readline = buf.readline
while readline():
answer += 1
buf.close()
return int(answer)
@staticmethod
def save_text(data, output):
"""Save as text
Edited date:
160626
Test:
160626
Examples:
::
data = ['this', 'is', 'test']
output = 'test.text'
self.save_text(data, output)
Args:
data (list): data to save
output (str): output name
Returns:
bool: True if successful, False otherwise
"""
if not type(data) == list:
data = [data]
with open(output, 'w') as f:
for i in six.moves.range(len(data)):
f.write(data[i] + '\n')
return True
@staticmethod
def load_text(path, count=False):
"""Load text
Edited date:
160626
Test:
160626
Note:
| load big file
| without count: 394.2885489463806
| count: 529.5308997631073
Examples:
::
path = 'test.text'
count = True
data = self.load_text(path)
Args:
path (str): json file
count (bool): if True, firstly count lines of files and initialized list
Returns:
list: content inside text
"""
if count is True:
num = Utility.count_line(path)
content = [0] * num
progressbar = Utility.create_progressbar(num, 'loading text...')
for i, line in six.moves.zip(progressbar, Utility.yield_text(path)):
content[i] = line
return content
else:
content = []
for line in Utility.yield_text(path):
content.append(line)
return content
@staticmethod
def yield_text(path, cast=str):
"""Yield each line of text
Edited date:
160626
Test:
160626
Note:
\n at the end of lines are removed
Args:
path (str): path to text file
cast Optional([int, float, str,...]): casting
Yields:
str: one line of text
"""
with open(path, 'r') as f:
line = f.readline()
while line:
# remove \n
yield cast(line[:-1])
line = f.readline()
@staticmethod
def _itemgetter(order):
"""Give itemgetter for sort
Edited date:
160626
Test:
160626
Args:
order (tuple): sort order is stored
Returns:
function: itemgetter
"""
if type(order) is float:
order = int(order)
if type(order) is int:
order = [order]
return itemgetter(*order)
@staticmethod
def sort_list(array, key=(0), reverse=False):
"""Sort tuples in array
Edited date:
160626
Test:
160626
Examples:
::
data = [('a', 1), ('b', 5), ('c', 2)]
sorted_list = self.sort_list(data, key=(1))
>>> print(sorted_list)
[('a', 1), ('c', 2), ('b', 5)]
data = [('02', 2), ('02', 1), ('01', 3)]
sorted_list = self.sort_list(data, key=(0, 1))
>>> print(sorted_list)
[('01', 3), ('02', 1), ('02', 2)]
Args:
array (list): data to sort
key (int): sort by array[i][key]
reverse (bool): reverse flag
Returns:
list: sorted list
"""
return sorted(array, key=Utility._itemgetter(key), reverse=reverse)
@staticmethod
def get_cpu_number():
"""Get cpu number
Edited date:
160620
Test:
160626
Example:
::
test = self.get_cpu_number()
>>> print(test)
2
int: cpu number
"""
return int(multiprocessing.cpu_count())
| mit |
mancoast/CPythonPyc_test | fail/301_test_functools.py | 3 | 12512 | import functools
import unittest
from test import support
from weakref import proxy
def PythonPartial(func, *args, **keywords):
    """Pure Python approximation of functools.partial().

    Bug fix: the original carried a stray @staticmethod decorator on this
    module-level function, turning it into a staticmethod descriptor object
    that is not callable before Python 3.10.
    """
    def newfunc(*fargs, **fkeywords):
        # Call-site keywords override the pre-bound ones.
        newkeywords = keywords.copy()
        newkeywords.update(fkeywords)
        return func(*(args + fargs), **newkeywords)
    # Mirror functools.partial's introspection attributes.
    newfunc.func = func
    newfunc.args = args
    newfunc.keywords = keywords
    return newfunc
def capture(*args, **kw):
    """Return the positional and keyword arguments exactly as received."""
    captured_args = args
    captured_kwargs = kw
    return captured_args, captured_kwargs
class TestPartial(unittest.TestCase):
thetype = functools.partial
def test_basic_examples(self):
p = self.thetype(capture, 1, 2, a=10, b=20)
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.thetype(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.thetype(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
# attributes should not be writable
if not isinstance(self.thetype, type):
return
self.assertRaises(TypeError, setattr, p, 'func', map)
self.assertRaises(TypeError, setattr, p, 'args', (1, 2))
self.assertRaises(TypeError, setattr, p, 'keywords', dict(a=1, b=2))
def test_argument_checking(self):
self.assertRaises(TypeError, self.thetype) # need at least a func arg
try:
self.thetype(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.thetype(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.thetype(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.thetype(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.thetype(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.thetype(capture, a=1)
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.thetype(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.failUnless(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.thetype(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.failUnless(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.thetype(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.failUnless(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.failUnless(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.thetype(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.thetype(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.thetype(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.thetype(f, y=0), 1)
def test_attributes(self):
p = self.thetype(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_weakref(self):
f = self.thetype(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.thetype(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.thetype(''.join)
self.assertEqual(join(data), '0123456789')
class PartialSubclass(functools.partial):
pass
class TestPartialSubclass(TestPartial):
thetype = PartialSubclass
class TestPythonPartial(TestPartial):
thetype = PythonPartial
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.failUnless(getattr(wrapper, name) is getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
self.failUnless(wrapped_attr[key] is wrapper_attr[key])
def test_default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f)
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__doc__, 'This is a test')
self.assertEqual(wrapper.attr, 'This is also a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.failIf(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assert_(wrapper.__doc__.startswith('max('))
class TestWraps(TestUpdateWrapper):
def test_default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f)
def wrapper():
pass
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__doc__, 'This is a test')
self.assertEqual(wrapper.attr, 'This is also a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.failIf(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce(unittest.TestCase):
func = functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
self.assertEqual(self.func(lambda x, y: x+y, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(lambda x, y: x+y, Squares(10)), 285)
self.assertEqual(self.func(lambda x, y: x+y, Squares(10), 0), 285)
self.assertEqual(self.func(lambda x, y: x+y, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
def test_main(verbose=None):
    """Run every functools test class; optionally check for refcount leaks."""
    import sys
    test_classes = (
        TestPartial,
        TestPartialSubclass,
        TestPythonPartial,
        TestUpdateWrapper,
        TestWraps,
        TestReduce
    )
    support.run_unittest(*test_classes)
    # verify reference counting
    # (sys.gettotalrefcount only exists in debug builds of CPython; run the
    # suite repeatedly and record the total refcount after each pass)
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in range(len(counts)):
            support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        print(counts)
if __name__ == '__main__':
test_main(verbose=True)
| gpl-3.0 |
mpkato/interleaving | tests/test_probabilistic_interleave.py | 1 | 2343 | import interleaving as il
import numpy as np
from collections import defaultdict
from .test_methods import TestMethods
class TestProbabilisticInterleave(TestMethods):
n = 5000 # Number of times of probabilistic tests
def test_sanity(self):
assert il.Probabilistic([[0], [0]]).interleave() == [0]
def test_uniform(self):
ideal = 0.5
counts = [0.0, 0.0]
pm = il.Probabilistic([[0], [1]])
for i in range(self.n):
r = pm.interleave()
counts[r[0]] += 1
for j in [0, 1]:
self.assert_almost_equal(ideal, counts[j] / self.n)
def test_ranking_with_teams(self):
result = defaultdict(int)
pm = il.Probabilistic([[1, 2, 3], [2, 3, 1]])
for i in range(self.n):
result[pm.interleave()] += 1
assert len(result) == 6
def test_memorylessness(self):
result = []
pm = il.Probabilistic([[0, 1], [2, 3]])
for i in range(self.n):
result.extend(pm.interleave())
result = list(set(result))
result.sort()
assert result == [0, 1, 2, 3]
def test_softmax(self):
ideals = {0: 0.86056, 1: 0.10757, 2: 0.03187}
counts = {}
for d in ideals:
counts[d] = 0.0
pm = il.Probabilistic([[0, 1, 2], [0, 1, 2]])
for i in range(self.n):
counts[pm.interleave()[0]] += 1
for d in ideals:
self.assert_almost_equal(ideals[d], counts[d] / self.n)
def test_interaction(self):
ideals = {0: 0.44444, 1: 0.50000, 2: 0.05556}
counts = {}
for d in ideals:
counts[d] = 0.0
pm = il.Probabilistic([[0, 1], [1, 2]])
for i in range(self.n):
counts[pm.interleave()[0]] += 1
for d in ideals:
self.assert_almost_equal(ideals[d], counts[d] / self.n)
def test_uniqueness(self):
pm = il.Probabilistic([[0, 1, 2], [1, 2, 0]])
for i in range(self.n):
ranking = pm.interleave()
ranking.sort()
uniq_ranking = list(set(ranking))
uniq_ranking.sort()
assert ranking == uniq_ranking
def test_no_shortage(self):
rankings = [[0, 1], [0, 1, 2]]
pm = il.Probabilistic(rankings)
assert 2 == len(pm.interleave())
| mit |
toladata/TolaProfile | userprofile/tasks.py | 1 | 2735 | #The tasks goes here
from __future__ import absolute_import, unicode_literals
from celery import shared_task
import requests
import urlparse
import json
#sync with TolaWork
@shared_task
def sync_create_with_tolawork(data, url):
    """Push a newly created profile record to TolaWork.

    Bug fix: the original used JavaScript-style ``{}`` blocks and a trailing
    ``return True,`` (a 1-tuple), which is invalid/incorrect Python.

    Returns:
        bool: True when TolaWork answered with a 2xx status, False otherwise.
    """
    server_name = "tola.work"
    tola_work_url = urlparse.urljoin(url, server_name)
    token = ''  # TODO(review): auth token is empty -- confirm where it should come from
    header = {'Authorization': 'token %s' % token}
    api_response = requests.post(tola_work_url, json.dumps(data), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
@shared_task
def sync_update_with_tolawork(data, url):
    """Push an updated profile record to TolaWork.

    Bug fixes: braces replaced by a Python block, the mangled
    ``status_code == // 100 == 2`` comparison restored to ``// 100 == 2``,
    and ``return True,`` (a 1-tuple) corrected to ``return True``.

    Returns:
        bool: True when TolaWork answered with a 2xx status, False otherwise.
    """
    server_name = "tola.work"
    tola_work_url = urlparse.urljoin(url, server_name)
    token = ''  # TODO(review): auth token is empty -- confirm where it should come from
    header = {'Authorization': 'token %s' % token}
    api_response = requests.post(tola_work_url, json.dumps(data), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
@shared_task
def sync_delete_with_tolawork(data, url):
    """Delete a profile record on TolaWork (brace-style syntax fixed).

    Returns:
        bool: True when TolaWork answered with a 2xx status, False otherwise.
    """
    server_name = "tola.work"
    tola_work_url = urlparse.urljoin(url + "/", server_name)
    token = ''  # TODO(review): auth token is empty -- confirm where it should come from
    header = {'Authorization': 'token %s' % token}
    # NOTE(review): assumes data.id is a string; cast with str() if callers pass ints.
    api_response = requests.delete(tola_work_url + '/' + data.id, headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
#sync with TolaActivity
@shared_task
def sync_create_with_activity(data, url):
    """Push a newly created record to TolaActivity (brace-style syntax fixed).

    Returns:
        bool: True when TolaActivity answered with a 2xx status, False otherwise.
    """
    token = ''  # TODO(review): auth token is empty -- confirm where it should come from
    header = {'Authorization': 'token %s' % token}
    api_response = requests.post(url, json.dumps(data), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
@shared_task
def sync_update_with_activity(data, url):
    """Push an updated record to the TolaActivity endpoint at *url*.

    Returns True on any 2xx response, False otherwise.
    """
    token = ''
    header = {'Authorization': 'token %s' % token}
    api_response = requests.post(url, json.dumps(data), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
@shared_task
def sync_delete_with_activity(data, url):
    """Delete the record identified by ``data.id`` on TolaActivity.

    Returns True on any 2xx response, False otherwise.
    """
    token = ''
    header = {'Authorization': 'token %s' % token}
    # str() guards against a non-string primary key in the concatenation
    api_response = requests.delete(url + '/' + str(data.id), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
#sync with TolaTables
@shared_task
def sync_create_with_table(data, url):
    """Push a newly-created record to the TolaTables endpoint at *url*.

    Returns True on any 2xx response, False otherwise.
    """
    token = ''
    header = {'Authorization': 'token %s' % token}
    api_response = requests.post(url, json.dumps(data), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
@shared_task
def sync_delete_with_table(data, url):
    """Delete the record identified by ``data.id`` on TolaTables.

    Returns True on any 2xx response, False otherwise.
    """
    token = ''
    header = {'Authorization': 'token %s' % token}
    # str() guards against a non-string primary key in the concatenation
    api_response = requests.delete(url + '/' + str(data.id), headers=header)
    if api_response.status_code // 100 == 2:
        return True
    return False
| apache-2.0 |
stonegithubs/odoo | addons/mail/tests/test_mail_group.py | 143 | 3939 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .common import TestMail
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestMailGroup(TestMail):

    @mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
    def test_00_mail_group_access_rights(self):
        """ Testing mail_group access rights and basic mail_thread features """
        # NOTE(review): sequential integration test -- each step mutates
        # database state that later steps depend on; do not reorder.
        cr, uid, user_noone_id, user_employee_id = self.cr, self.uid, self.user_noone_id, self.user_employee_id

        # Do: Bert reads Jobs -> ok, public
        self.mail_group.read(cr, user_noone_id, [self.group_jobs_id])
        # Do: Bert read Pigs -> ko, restricted to employees
        with self.assertRaises(except_orm):
            self.mail_group.read(cr, user_noone_id, [self.group_pigs_id])
        # Do: Raoul read Pigs -> ok, belong to employees
        self.mail_group.read(cr, user_employee_id, [self.group_pigs_id])

        # Do: Bert creates a group -> ko, no access rights
        with self.assertRaises(AccessError):
            self.mail_group.create(cr, user_noone_id, {'name': 'Test'})
        # Do: Raoul creates a restricted group -> ok
        new_group_id = self.mail_group.create(cr, user_employee_id, {'name': 'Test'})
        # Do: Bert added in followers, read -> ok, in followers
        self.mail_group.message_subscribe_users(cr, uid, [new_group_id], [user_noone_id])
        self.mail_group.read(cr, user_noone_id, [new_group_id])

        # Do: Raoul reads Priv -> ko, private
        with self.assertRaises(except_orm):
            self.mail_group.read(cr, user_employee_id, [self.group_priv_id])
        # Do: Raoul added in follower, read -> ok, in followers
        self.mail_group.message_subscribe_users(cr, uid, [self.group_priv_id], [user_employee_id])
        self.mail_group.read(cr, user_employee_id, [self.group_priv_id])

        # Do: Raoul write on Jobs -> ok
        self.mail_group.write(cr, user_employee_id, [self.group_priv_id], {'name': 'modified'})
        # Do: Bert cannot write on Private -> ko (read but no write)
        with self.assertRaises(AccessError):
            self.mail_group.write(cr, user_noone_id, [self.group_priv_id], {'name': 're-modified'})
        # Test: Bert cannot unlink the group
        with self.assertRaises(except_orm):
            self.mail_group.unlink(cr, user_noone_id, [self.group_priv_id])
        # Do: Raoul unlinks the group, there are no followers and messages left
        self.mail_group.unlink(cr, user_employee_id, [self.group_priv_id])
        fol_ids = self.mail_followers.search(cr, uid, [('res_model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
        self.assertFalse(fol_ids, 'unlinked document should not have any followers left')
        # NOTE(review): this check is about messages but the assertion
        # message below says "followers" -- likely a copy-paste slip.
        msg_ids = self.mail_message.search(cr, uid, [('model', '=', 'mail.group'), ('res_id', '=', self.group_priv_id)])
        self.assertFalse(msg_ids, 'unlinked document should not have any followers left')
| agpl-3.0 |
woobe/h2o | py/testdir_release/c4/test_c4_four_billion_rows_fvec.py | 2 | 4142 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_glm, h2o_browse as h2b, h2o_import as h2i, h2o_common, h2o_exec as h2e
print "Assumes you ran ../build_for_clone.py in this directory"
print "Using h2o-nodes.json. Also the sandbox dir"
class releaseTest(h2o_common.ReleaseCommon, unittest.TestCase):

    def test_four_billion_rows_fvec(self):
        """Parse a 4-billion-row csv on an existing h2o cluster, sanity
        check the parsed frame, then run KMeans and a binomial GLM on it."""
        h2o.beta_features = True
        timeoutSecs = 1500

        importFolderPath = "billions"
        csvFilenameList = [
            "four_billion_rows.csv",
        ]
        for csvFilename in csvFilenameList:
            csvPathname = importFolderPath + "/" + csvFilename
            start = time.time()

            # Parse*********************************
            parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
                timeoutSecs=timeoutSecs, pollTimeoutSecs=180)
            elapsed = time.time() - start
            print "Parse result['destination_key']:", parseResult['destination_key']
            print csvFilename, "completed in", elapsed, "seconds.", "%d pct. of timeout" % ((elapsed*100)/timeoutSecs)

            # Inspect*********************************
            # We should be able to see the parse result?
            inspect = h2o_cmd.runInspect(key=parseResult['destination_key'])
            numCols = inspect['numCols']
            numRows = inspect['numRows']
            byteSize = inspect['byteSize']
            print "\n" + csvFilename, \
                " numRows:", "{:,}".format(numRows), \
                " numCols:", "{:,}".format(numCols), \
                " byteSize:", "{:,}".format(byteSize)

            expectedRowSize = numCols * 1 # plus output
            # expectedValueSize = expectedRowSize * numRows
            # hard-coded byte size observed for this fixed dataset
            expectedValueSize = 8001271520
            self.assertEqual(byteSize, expectedValueSize,
                msg='byteSize %s is not expected: %s' % \
                (byteSize, expectedValueSize))

            summaryResult = h2o_cmd.runSummary(key=parseResult['destination_key'], timeoutSecs=timeoutSecs)
            h2o_cmd.infoFromSummary(summaryResult, noPrint=True)

            self.assertEqual(2, numCols,
                msg="generated %s cols (including output). parsed to %s cols" % (2, numCols))
            self.assertEqual(4*1000000000, numRows,
                msg="generated %s rows, parsed to %s rows" % (4*1000000000, numRows))

            # KMeans*********************************
            kwargs = {
                'k': 3,
                'initialization': 'Furthest',
                'max_iter': 4,
                'normalize': 0,
                'destination_key': 'junk.hex',
                'seed': 265211114317615310,
            }
            timeoutSecs = 900
            start = time.time()
            kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)

            # GLM*********************************
            print "\n" + csvFilename
            kwargs = {
                'response': 'C1',
                'n_folds': 0,
                'family': 'binomial',
            }
            # one coefficient is checked a little more
            colX = 1

            # convert to binomial: rewrite C1 in place as (C1 == 1)
            execExpr="A.hex=%s" % parseResult['destination_key']
            h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
            execExpr="A.hex[,%s]=(A.hex[,%s]==%s)" % ('C1', 'C1', 1)
            h2e.exec_expr(execExpr=execExpr, timeoutSecs=180)
            aHack = {'destination_key': "A.hex"}

            # L2
            timeoutSecs = 900
            kwargs.update({'alpha': 0, 'lambda': 0})
            start = time.time()
            glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, **kwargs)
            elapsed = time.time() - start
            print "glm (L2) end on ", csvFilename, 'took', elapsed, 'seconds.', "%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
            h2o_glm.simpleCheckGLM(self, glm, "C" + str(colX), **kwargs)
if __name__ == '__main__':
    # standard h2o test-suite entry point
    h2o.unit_main()
| apache-2.0 |
davehunt/kuma | vendor/packages/logilab/astng/raw_building.py | 25 | 13572 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""this module contains a set of functions to create astng trees from scratch
(build_* functions) or from living object (object_build_* functions)
"""
__docformat__ = "restructuredtext en"
import sys
from os.path import abspath
from inspect import (getargspec, isdatadescriptor, isfunction, ismethod,
ismethoddescriptor, isclass, isbuiltin)
from logilab.astng.node_classes import CONST_CLS
from logilab.astng.nodes import (Module, Class, Const, const_factory, From,
Function, EmptyNode, Name, Arguments)
from logilab.astng.bases import BUILTINS, Generator
from logilab.astng.manager import ASTNGManager
MANAGER = ASTNGManager()
_CONSTANTS = tuple(CONST_CLS) # the keys of CONST_CLS eg python builtin types
def _attach_local_node(parent, node, name):
    """Name *node* and register it in *parent*'s locals."""
    node.name = name # needed by add_local_node
    parent.add_local_node(node)
# sentinel distinguishing "no object supplied" from an explicit None
_marker = object()

def attach_dummy_node(node, name, object=_marker):
    """create a dummy node and register it in the locals of the given
    node with the specified name
    """
    enode = EmptyNode()
    enode.object = object
    _attach_local_node(node, enode, name)

# an EmptyNode carries a real underlying object only when one was passed in
EmptyNode.has_underlying_object = lambda self: self.object is not _marker
def attach_const_node(node, name, value):
    """create a Const node and register it in the locals of the given
    node with the specified name
    """
    # skip names handled specially (e.g. __name__, __doc__);
    # `name not in` replaces the unidiomatic `not name in`
    if name not in node.special_attributes:
        _attach_local_node(node, const_factory(value), name)
def attach_import_node(node, modname, membername):
    """create a From node and register it in the locals of the given
    node with the specified name
    """
    # equivalent of: from <modname> import <membername>
    from_node = From(modname, [(membername, None)])
    _attach_local_node(node, from_node, membername)
def build_module(name, doc=None):
    """Create a bare astng Module node for a non-python-source module."""
    module = Module(name, doc, pure_python=False)
    module.package = False
    module.parent = None
    return module
def build_class(name, basenames=(), doc=None):
    """Create an astng Class node with the given base-class names."""
    klass = Class(name, doc)
    for base_name in basenames:
        base_node = Name()
        base_node.name = base_name
        base_node.parent = klass
        klass.bases.append(base_node)
    return klass
def build_function(name, args=None, defaults=None, flag=0, doc=None):
    """Create an astng Function node with the given argument names and
    default values.
    """
    # first argument is now a list of decorators
    func = Function(name, doc)
    argsnode = Arguments()
    func.args = argsnode
    argsnode.parent = func
    argsnode.args = []
    for arg_name in (args or []):
        arg_node = Name()
        arg_node.name = arg_name
        arg_node.parent = argsnode
        argsnode.args.append(arg_node)
    argsnode.defaults = []
    for default in (defaults or []):
        default_node = const_factory(default)
        default_node.parent = argsnode
        argsnode.defaults.append(default_node)
    argsnode.kwarg = None
    argsnode.vararg = None
    if argsnode.args:
        register_arguments(func)
    return func
def build_from_import(fromname, names):
    """Create an astng From node importing *names* from *fromname*."""
    name_pairs = []
    for name in names:
        name_pairs.append((name, None))
    return From(fromname, name_pairs)
def register_arguments(func, args=None):
    """Register the given argument names in *func*'s locals.

    *args* may contain nested lists of Name nodes, mirroring python 2
    tuple arguments such as ``def func(a, (b, c, d))``.
    """
    if args is None:
        args = func.args.args
    vararg = func.args.vararg
    if vararg:
        func.set_local(vararg, func.args)
    kwarg = func.args.kwarg
    if kwarg:
        func.set_local(kwarg, func.args)
    for arg in args:
        if isinstance(arg, Name):
            func.set_local(arg.name, arg)
        else:
            # nested tuple argument: recurse into its elements
            register_arguments(func, arg.elts)
def object_build_class(node, member, localname):
    """Create an astng Class node for the living class *member*."""
    base_names = []
    for base in member.__bases__:
        base_names.append(base.__name__)
    return _base_class_object_build(node, member, base_names,
                                    localname=localname)
def object_build_function(node, member, localname):
    """create astng for a living function object"""
    # varargs/kwarg names are appended to the positional list because
    # build_function has no separate slots for them
    args, varargs, varkw, defaults = getargspec(member)
    if varargs is not None:
        args.append(varargs)
    if varkw is not None:
        args.append(varkw)
    # member.func_code is the python 2 spelling of __code__
    func = build_function(getattr(member, '__name__', None) or localname, args,
                          defaults, member.func_code.co_flags, member.__doc__)
    node.add_local_node(func, localname)
def object_build_datadescriptor(node, member, name):
    """create astng for a living data descriptor object"""
    # descriptors contribute no base classes, hence the empty list
    return _base_class_object_build(node, member, [], name)
def object_build_methoddescriptor(node, member, localname):
    """create astng for a living method descriptor object"""
    # FIXME get arguments ?
    func = build_function(getattr(member, '__name__', None) or localname,
                          doc=member.__doc__)
    # set node's arguments to None to notice that we have no information, not
    # and empty argument list
    func.args.args = None
    node.add_local_node(func, localname)
def _base_class_object_build(node, member, basenames, name=None, localname=None):
    """create astng for a living class object, with a given set of base names
    (e.g. ancestors)
    """
    klass = build_class(name or getattr(member, '__name__', None) or localname,
                        basenames, member.__doc__)
    klass._newstyle = isinstance(member, type)
    node.add_local_node(klass, localname)
    try:
        # limit the instantiation trick since it's too dangerous
        # (such as infinite test execution...)
        # this at least resolves common case such as Exception.args,
        # OSError.errno
        if issubclass(member, Exception):
            instdict = member().__dict__
        else:
            raise TypeError
    except:
        # any failure while instantiating is deliberately ignored
        pass
    else:
        for name, obj in instdict.items():
            valnode = EmptyNode()
            valnode.object = obj
            valnode.parent = klass
            valnode.lineno = 1
            klass.instance_attrs[name] = [valnode]
    return klass
class InspectBuilder(object):
    """class for building nodes from living object

    this is actually a really minimal representation, including only Module,
    Function and Class nodes and some others as guessed.
    """

    # astng from living objects ###############################################

    def __init__(self):
        # cache of already-built nodes, keyed on the living object itself
        self._done = {}
        self._module = None

    def inspect_build(self, module, modname=None, path=None):
        """build astng from a living module (i.e. using inspect)
        this is used when there is no python source code available (either
        because it's a built-in module or because the .py is not available)
        """
        self._module = module
        if modname is None:
            modname = module.__name__
        try:
            node = build_module(modname, module.__doc__)
        except AttributeError:
            # in jython, java modules have no __doc__ (see #109562)
            node = build_module(modname)
        node.file = node.path = path and abspath(path) or path
        MANAGER.astng_cache[modname] = node
        node.package = hasattr(module, '__path__')
        self._done = {}
        self.object_build(node, module)
        return node

    def object_build(self, node, obj):
        """recursive method which create a partial ast from real objects
        (only function, class, and method are handled)
        """
        if obj in self._done:
            return self._done[obj]
        self._done[obj] = node
        for name in dir(obj):
            try:
                member = getattr(obj, name)
            except AttributeError:
                # damned ExtensionClass.Base, I know you're there !
                attach_dummy_node(node, name)
                continue
            if ismethod(member):
                # python 2: unwrap the bound/unbound method to its function
                member = member.im_func
            if isfunction(member):
                # verify this is not an imported function
                filename = getattr(member.func_code, 'co_filename', None)
                if filename is None:
                    assert isinstance(member, object)
                    object_build_methoddescriptor(node, member, name)
                elif filename != getattr(self._module, '__file__', None):
                    attach_dummy_node(node, name, member)
                else:
                    object_build_function(node, member, name)
            elif isbuiltin(member):
                if self.imported_member(node, member, name):
                    #if obj is object:
                    #    print 'skippp', obj, name, member
                    continue
                object_build_methoddescriptor(node, member, name)
            elif isclass(member):
                if self.imported_member(node, member, name):
                    continue
                if member in self._done:
                    class_node = self._done[member]
                    if not class_node in node.locals.get(name, ()):
                        node.add_local_node(class_node, name)
                else:
                    class_node = object_build_class(node, member, name)
                    # recursion
                    self.object_build(class_node, member)
                if name == '__class__' and class_node.parent is None:
                    class_node.parent = self._done[self._module]
            elif ismethoddescriptor(member):
                assert isinstance(member, object)
                object_build_methoddescriptor(node, member, name)
            elif isdatadescriptor(member):
                assert isinstance(member, object)
                object_build_datadescriptor(node, member, name)
            elif type(member) in _CONSTANTS:
                attach_const_node(node, name, member)
            else:
                # create an empty node so that the name is actually defined
                attach_dummy_node(node, name, member)

    def imported_member(self, node, member, name):
        """verify this is not an imported class or handle it"""
        # /!\ some classes like ExtensionClass doesn't have a __module__
        # attribute ! Also, this may trigger an exception on badly built module
        # (see http://www.logilab.org/ticket/57299 for instance)
        try:
            modname = getattr(member, '__module__', None)
        except:
            # XXX use logging
            print 'unexpected error while building astng from living object'
            import traceback
            traceback.print_exc()
            modname = None
        if modname is None:
            if name in ('__new__', '__subclasshook__'):
                # Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14)
                # >>> print object.__new__.__module__
                # None
                modname = BUILTINS
            else:
                attach_dummy_node(node, name, member)
                return True
        if {'gtk': 'gtk._gtk'}.get(modname, modname) != self._module.__name__:
            # check if it sounds valid and then add an import node, else use a
            # dummy node
            try:
                getattr(sys.modules[modname], name)
            except (KeyError, AttributeError):
                attach_dummy_node(node, name, member)
            else:
                attach_import_node(node, modname, name)
            return True
        return False
### astng boot strapping ################################################### ###

ASTNG_BUILDER = InspectBuilder()

# maps python builtin types to the Class node proxying them
_CONST_PROXY = {}

def astng_boot_strapping():
    """astng boot strapping the builtins module"""
    # this boot strapping is necessary since we need the Const nodes to
    # inspect_build builtins, and then we can proxy Const
    from logilab.common.compat import builtins
    astng_builtin = ASTNG_BUILDER.inspect_build(builtins)
    for cls, node_cls in CONST_CLS.items():
        if cls is type(None):
            # NoneType is not exposed in builtins; build a synthetic class
            proxy = build_class('NoneType')
            proxy.parent = astng_builtin
        else:
            proxy = astng_builtin.getattr(cls.__name__)[0]
        if cls in (dict, list, set, tuple):
            node_cls._proxied = proxy
        else:
            _CONST_PROXY[cls] = proxy

astng_boot_strapping()

# TODO : find a nicer way to handle this situation;
# However __proxied introduced an
# infinite recursion (see https://bugs.launchpad.net/pylint/+bug/456870)
def _set_proxied(const):
    # resolve the proxy lazily, keyed on the constant's python type
    return _CONST_PROXY[const.value.__class__]
Const._proxied = property(_set_proxied)

from types import GeneratorType
Generator._proxied = Class(GeneratorType.__name__, GeneratorType.__doc__)
ASTNG_BUILDER.object_build(Generator._proxied, GeneratorType)
| mpl-2.0 |
GbalsaC/bitnamiP | pyfs/fs/expose/dokan/__init__.py | 7 | 37823 | """
fs.expose.dokan
===============
Expose an FS object to the native filesystem via Dokan.
This module provides the necessary interfaces to mount an FS object into
the local filesystem using Dokan on win32::
http://dokan-dev.net/en/
For simple usage, the function 'mount' takes an FS object and a drive letter,
and exposes the given FS as that drive::
>>> from fs.memoryfs import MemoryFS
>>> from fs.expose import dokan
>>> fs = MemoryFS()
>>> mp = dokan.mount(fs,"Q")
>>> mp.drive
'Q'
>>> mp.path
'Q:\\'
>>> mp.unmount()
The above spawns a new background process to manage the Dokan event loop, which
can be controlled through the returned subprocess.Popen object. To avoid
spawning a new process, set the 'foreground' option::
>>> # This will block until the filesystem is unmounted
>>> dokan.mount(fs,"Q",foreground=True)
Any additional options for the Dokan process can be passed as keyword arguments
to the 'mount' function.
If you require finer control over the creation of the Dokan process, you can
instantiate the MountProcess class directly. It accepts all options available
to subprocess.Popen::
>>> from subprocess import PIPE
>>> mp = dokan.MountProcess(fs,"Q",stderr=PIPE)
>>> dokan_errors = mp.communicate()[1]
If you are exposing an untrusted filesystem, you may like to apply the
wrapper class Win32SafetyFS before passing it into dokan. This will take
a number of steps to avoid suspicious operations on windows, such as
hiding autorun files.
The binding to Dokan is created via ctypes. Due to the very stable ABI of
win32, this should work without further configuration on just about all
systems with Dokan installed.
"""
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the MIT License.
from __future__ import with_statement
import sys
import os
import signal
import errno
import time
import stat as statinfo
import subprocess
import cPickle
import datetime
import ctypes
from collections import deque
from fs.base import threading
from fs.errors import *
from fs.path import *
from fs.local_functools import wraps
from fs.wrapfs import WrapFS
try:
import libdokan
except (NotImplementedError,EnvironmentError,ImportError,NameError,):
is_available = False
sys.modules.pop("fs.expose.dokan.libdokan",None)
libdokan = None
else:
is_available = True
from ctypes.wintypes import LPCWSTR, WCHAR
kernel32 = ctypes.windll.kernel32
import logging
logger = logging.getLogger("fs.expose.dokan")
# Options controlling the behavior of the Dokan filesystem
DOKAN_OPTION_DEBUG = 1
DOKAN_OPTION_STDERR = 2
DOKAN_OPTION_ALT_STREAM = 4
DOKAN_OPTION_KEEP_ALIVE = 8
DOKAN_OPTION_NETWORK = 16
DOKAN_OPTION_REMOVABLE = 32

# Error codes returned by DokanMain
DOKAN_SUCCESS = 0
DOKAN_ERROR = -1
DOKAN_DRIVE_LETTER_ERROR = -2
DOKAN_DRIVER_INSTALL_ERROR = -3
DOKAN_START_ERROR = -4
DOKAN_MOUNT_ERROR = -5

# Misc windows constants
FILE_LIST_DIRECTORY = 0x01
FILE_SHARE_READ = 0x01
FILE_SHARE_WRITE = 0x02
FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
FILE_FLAG_OVERLAPPED = 0x40000000

FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_SYSTEM = 4
# 0x100 per the win32 API; was 4, which duplicated FILE_ATTRIBUTE_SYSTEM
FILE_ATTRIBUTE_TEMPORARY = 256

CREATE_NEW = 1
CREATE_ALWAYS = 2
OPEN_EXISTING = 3
OPEN_ALWAYS = 4
TRUNCATE_EXISTING = 5

FILE_GENERIC_READ = 1179785
FILE_GENERIC_WRITE = 1179926

REQ_GENERIC_READ = 0x80 | 0x08 | 0x01
REQ_GENERIC_WRITE = 0x004 | 0x0100 | 0x002 | 0x0010

ERROR_ACCESS_DENIED = 5
ERROR_LOCK_VIOLATION = 33
ERROR_NOT_SUPPORTED = 50
ERROR_FILE_EXISTS = 80
ERROR_DIR_NOT_EMPTY = 145
ERROR_NOT_LOCKED = 158
ERROR_LOCK_FAILED = 167
ERROR_ALREADY_EXISTS = 183
ERROR_LOCKED = 212
ERROR_INVALID_LOCK_RANGE = 306

# Some useful per-process global information
NATIVE_ENCODING = sys.getfilesystemencoding()

DATETIME_ZERO = datetime.datetime(1,1,1,0,0,0)
DATETIME_STARTUP = datetime.datetime.utcnow()

# 1601-01-01 to 1970-01-01 in 100-nanosecond FILETIME units
FILETIME_UNIX_EPOCH = 116444736000000000
def handle_fs_errors(func):
    """Method decorator to report FS errors in the appropriate way.

    This decorator catches all FS errors and translates them into an
    equivalent OSError, then returns the negated error number.  It also
    makes the function return zero instead of None as an indication of
    successful execution.
    """
    name = func.__name__  # NOTE(review): captured but never used
    func = convert_fs_errors(func)
    @wraps(func)
    def wrapper(*args,**kwds):
        try:
            res = func(*args,**kwds)
        except OSError, e:
            # win32 expects a negative error code; fall back to -1 when
            # the OSError carries no errno
            if e.errno:
                res = -1 * _errno2syserrcode(e.errno)
            else:
                res = -1
        except Exception, e:
            raise
        else:
            if res is None:
                res = 0
        return res
    return wrapper
# During long-running operations, Dokan requires that the DokanResetTimeout
# function be called periodically to indicate the progress is still being
# made. Unfortunately we don't have any facility for the underlying FS
# to make these calls for us, so we have to hack around it.
#
# The idea is to use a single background thread to monitor all active Dokan
# method calls, resetting the timeout until they have completed. Note that
# this completely undermines the point of DokanResetTimeout as it's now
# possible for a deadlock to hang the entire filesystem.
_TIMEOUT_PROTECT_THREAD = None              # lazily-started monitor thread
_TIMEOUT_PROTECT_LOCK = threading.Lock()
_TIMEOUT_PROTECT_COND = threading.Condition(_TIMEOUT_PROTECT_LOCK)
_TIMEOUT_PROTECT_QUEUE = deque()            # (timestamp, dokan info, finished-flag) entries
_TIMEOUT_PROTECT_WAIT_TIME = 4 * 60         # seconds between keep-alive resets
_TIMEOUT_PROTECT_RESET_TIME = 5 * 60 * 1000 # milliseconds granted per reset
def _start_timeout_protect_thread():
    """Ensure the background timeout-protection thread is running.

    Safe to call repeatedly; only the first call actually spawns the
    monitor thread, all later calls are no-ops.
    """
    global _TIMEOUT_PROTECT_THREAD
    with _TIMEOUT_PROTECT_LOCK:
        if _TIMEOUT_PROTECT_THREAD is not None:
            return
        worker = threading.Thread(target=_run_timeout_protect_thread)
        worker.daemon = True
        _TIMEOUT_PROTECT_THREAD = worker
        worker.start()
def _run_timeout_protect_thread():
    """Main loop of the timeout-protection thread.

    Repeatedly pops (timestamp, info, finished) entries off the queue,
    sleeps until the entry is due for a keep-alive, then either drops it
    (call already finished) or resets its dokan timeout and re-queues it.
    """
    while True:
        with _TIMEOUT_PROTECT_COND:
            try:
                (when,info,finished) = _TIMEOUT_PROTECT_QUEUE.popleft()
            except IndexError:
                # queue empty: wait for timeout_protect() to notify us
                _TIMEOUT_PROTECT_COND.wait()
                continue
        if finished:
            continue
        now = time.time()
        wait_time = max(0,_TIMEOUT_PROTECT_WAIT_TIME - now + when)
        time.sleep(wait_time)
        with _TIMEOUT_PROTECT_LOCK:
            if finished:
                continue
            libdokan.DokanResetTimeout(_TIMEOUT_PROTECT_RESET_TIME,info)
            _TIMEOUT_PROTECT_QUEUE.append((now+wait_time,info,finished))
def timeout_protect(func):
    """Method decorator to enable timeout protection during call.

    This decorator adds an entry to the timeout protect queue before executing
    the function, and marks it as finished when the function exits.
    """
    @wraps(func)
    def wrapper(self,*args):
        if _TIMEOUT_PROTECT_THREAD is None:
            _start_timeout_protect_thread()
        # dokan passes the operation-info struct as the last argument
        info = args[-1]
        # mutable list doubles as a "done" flag shared with the monitor thread
        finished = []
        try:
            with _TIMEOUT_PROTECT_COND:
                _TIMEOUT_PROTECT_QUEUE.append((time.time(),info,finished))
                _TIMEOUT_PROTECT_COND.notify()
            return func(self,*args)
        finally:
            with _TIMEOUT_PROTECT_LOCK:
                finished.append(True)
    return wrapper
MIN_FH = 100
class FSOperations(object):
"""Object delegating all DOKAN_OPERATIONS pointers to an FS object."""
    def __init__(self, fs, fsname="Dokan FS", volname="Dokan Volume"):
        """Create an operations object exposing *fs* via Dokan.

        fs      -- the pyfilesystem FS object to expose
        fsname  -- filesystem name reported to windows
        volname -- volume name reported to windows

        Raises OSError when the dokan library is not installed.
        """
        if libdokan is None:
            msg = "dokan library (http://dokan-dev.net/en/) is not available"
            raise OSError(msg)
        self.fs = fs
        self.fsname = fsname
        self.volname = volname
        self._files_by_handle = {}
        self._files_lock = threading.Lock()
        self._next_handle = MIN_FH
        # Windows requires us to implement a kind of "lazy deletion", where
        # a handle is marked for deletion but this is not actually done
        # until the handle is closed. This set monitors pending deletes.
        self._pending_delete = set()
        # Since pyfilesystem has no locking API, we manage file locks
        # in memory. This maps paths to a list of current locks.
        self._active_locks = PathMap()
        # Dokan expects a successful write() to be reflected in the file's
        # reported size, but the FS might buffer writes and prevent this.
        # We explicitly keep track of the size Dokan expects a file to be.
        # This dict is indexed by path, then file handle.
        self._files_size_written = PathMap()
def get_ops_struct(self):
"""Get a DOKAN_OPERATIONS struct mapping to our methods."""
struct = libdokan.DOKAN_OPERATIONS()
for (nm,typ) in libdokan.DOKAN_OPERATIONS._fields_:
setattr(struct,nm,typ(getattr(self,nm)))
return struct
    def _get_file(self, fh):
        """Get the information associated with the given file handle."""
        try:
            return self._files_by_handle[fh]
        except KeyError:
            # handle was never registered, or was already closed
            raise FSError("invalid file handle")
def _reg_file(self, f, path):
"""Register a new file handle for the given file and path."""
self._files_lock.acquire()
try:
fh = self._next_handle
self._next_handle += 1
lock = threading.Lock()
self._files_by_handle[fh] = (f,path,lock)
if path not in self._files_size_written:
self._files_size_written[path] = {}
self._files_size_written[path][fh] = 0
return fh
finally:
self._files_lock.release()
def _rereg_file(self, fh, f):
"""Re-register the file handle for the given file.
This might be necessary if we are required to write to a file
after its handle was closed (e.g. to complete an async write).
"""
self._files_lock.acquire()
try:
(f2,path,lock) = self._files_by_handle[fh]
assert f2.closed
self._files_by_handle[fh] = (f,path,lock)
return fh
finally:
self._files_lock.release()
def _del_file(self, fh):
"""Unregister the given file handle."""
self._files_lock.acquire()
try:
(f,path,lock) = self._files_by_handle.pop(fh)
del self._files_size_written[path][fh]
if not self._files_size_written[path]:
del self._files_size_written[path]
finally:
self._files_lock.release()
def _is_pending_delete(self, path):
"""Check if the given path is pending deletion.
This is true if the path or any of its parents have been marked
as pending deletion, false otherwise.
"""
for ppath in recursepath(path):
if ppath in self._pending_delete:
return True
return False
    def _check_lock(self, path, offset, length, info, locks=None):
        """Check whether the given file range is locked.

        This method implements basic lock checking.  It checks all the locks
        held against the given file, and if any overlap the given byte range
        then it returns -ERROR_LOCKED.  If the range is not locked, it will
        return zero.
        """
        if locks is None:
            with self._files_lock:
                try:
                    locks = self._active_locks[path]
                except KeyError:
                    # no locks at all held on this path
                    return 0
        for (lh,lstart,lend) in locks:
            if info is not None and info.contents.Context == lh:
                # a handle never conflicts with its own locks
                continue
            if lstart >= offset + length:
                continue
            if lend <= offset:
                continue
            return -ERROR_LOCKED
        return 0
    @timeout_protect
    @handle_fs_errors
    def CreateFile(self, path, access, sharing, disposition, flags, info):
        """Dokan callback: open or create the file at *path*.

        The win32 access mask is only partially honoured: when no access
        is requested only metadata is checked, otherwise the open mode is
        derived from the *disposition* flag alone.
        """
        path = normpath(path)
        # Can't open files that are pending delete.
        if self._is_pending_delete(path):
            return -ERROR_ACCESS_DENIED
        # If no access rights are requested, only basic metadata is queried.
        if not access:
            if self.fs.isdir(path):
                info.contents.IsDirectory = True
            elif not self.fs.exists(path):
                raise ResourceNotFoundError(path)
            return
        # This is where we'd convert the access mask into an appropriate
        # mode string. Unfortunately, I can't seem to work out all the
        # details. I swear MS Word is trying to write to files that it
        # opens without asking for write permission.
        # For now, just set the mode based on disposition flag.
        retcode = 0
        if disposition == CREATE_ALWAYS:
            if self.fs.exists(path):
                retcode = ERROR_ALREADY_EXISTS
            mode = "w+b"
        elif disposition == OPEN_ALWAYS:
            if not self.fs.exists(path):
                mode = "w+b"
            else:
                retcode = ERROR_ALREADY_EXISTS
                mode = "r+b"
        elif disposition == OPEN_EXISTING:
            mode = "r+b"
        elif disposition == TRUNCATE_EXISTING:
            if not self.fs.exists(path):
                raise ResourceNotFoundError(path)
            mode = "w+b"
        elif disposition == CREATE_NEW:
            if self.fs.exists(path):
                return -ERROR_ALREADY_EXISTS
            mode = "w+b"
        else:
            mode = "r+b"
        # Try to open the requested file. It may actually be a directory.
        info.contents.Context = 1
        try:
            f = self.fs.open(path,mode)
        except ResourceInvalidError:
            info.contents.IsDirectory = True
        except FSError:
            # Sadly, win32 OSFS will raise all kinds of strange errors
            # if you try to open() a directory. Need to check by hand.
            if self.fs.isdir(path):
                info.contents.IsDirectory = True
            else:
                raise
        else:
            # store the new handle in the dokan context for later callbacks
            info.contents.Context = self._reg_file(f,path)
        return retcode
@timeout_protect
@handle_fs_errors
def OpenDirectory(self, path, info):
path = normpath(path)
if self._is_pending_delete(path):
raise ResourceNotFoundError(path)
if not self.fs.isdir(path):
if not self.fs.exists(path):
raise ResourceNotFoundError(path)
else:
raise ResourceInvalidError(path)
info.contents.IsDirectory = True
@timeout_protect
@handle_fs_errors
def CreateDirectory(self, path, info):
    """Dokan callback: create a new directory at *path*."""
    path = normpath(path)
    # Refuse to resurrect a path that is pending delete.
    if self._is_pending_delete(path):
        return -ERROR_ACCESS_DENIED
    self.fs.makedir(path)
    info.contents.IsDirectory = True
@timeout_protect
@handle_fs_errors
def Cleanup(self, path, info):
    """Dokan callback: close the handle and honour DeleteOnClose.

    Deletions requested via DeleteFile/DeleteDirectory are deferred
    until here; the path is then removed from the pending-delete set.
    """
    path = normpath(path)
    if info.contents.IsDirectory:
        if info.contents.DeleteOnClose:
            self.fs.removedir(path)
            self._pending_delete.remove(path)
    else:
        (file,_,lock) = self._get_file(info.contents.Context)
        lock.acquire()
        try:
            file.close()
            if info.contents.DeleteOnClose:
                self.fs.remove(path)
                self._pending_delete.remove(path)
                # Drop the handle registration only when actually deleted;
                # otherwise ReadFile/WriteFile may re-open it after Cleanup.
                self._del_file(info.contents.Context)
                info.contents.Context = 0
        finally:
            lock.release()
@timeout_protect
@handle_fs_errors
def CloseFile(self, path, info):
    """Dokan callback: final close of the handle; release our registration."""
    # Contexts below MIN_FH are sentinel values, not real file handles.
    if info.contents.Context >= MIN_FH:
        (file,_,lock) = self._get_file(info.contents.Context)
        lock.acquire()
        try:
            file.close()
            self._del_file(info.contents.Context)
        finally:
            lock.release()
        info.contents.Context = 0
@timeout_protect
@handle_fs_errors
def ReadFile(self, path, buffer, nBytesToRead, nBytesRead, offset, info):
    """Dokan callback: read up to *nBytesToRead* bytes at *offset* into *buffer*."""
    path = normpath(path)
    (file,_,lock) = self._get_file(info.contents.Context)
    lock.acquire()
    try:
        # Respect byte-range locks held by other handles.
        errno = self._check_lock(path,offset,nBytesToRead,info)
        if errno:
            return errno
        # This may be called after Cleanup, meaning we
        # need to re-open the file.
        if file.closed:
            file = self.fs.open(path,file.mode)
            self._rereg_file(info.contents.Context,file)
        file.seek(offset)
        data = file.read(nBytesToRead)
        # Copy into the caller-supplied ctypes buffer and report the count.
        ctypes.memmove(buffer,ctypes.create_string_buffer(data),len(data))
        nBytesRead[0] = len(data)
    finally:
        lock.release()
@timeout_protect
@handle_fs_errors
def WriteFile(self, path, buffer, nBytesToWrite, nBytesWritten, offset, info):
    """Dokan callback: write *nBytesToWrite* bytes from *buffer* at *offset*.

    Also tracks the high-water mark of bytes written per handle so that
    GetFileInformation can report sizes not yet flushed by the backend.
    """
    path = normpath(path)
    fh = info.contents.Context
    (file,_,lock) = self._get_file(fh)
    lock.acquire()
    try:
        # Respect byte-range locks held by other handles.
        errno = self._check_lock(path,offset,nBytesToWrite,info)
        if errno:
            return errno
        # This may be called after Cleanup, meaning we
        # need to re-open the file.
        if file.closed:
            file = self.fs.open(path,file.mode)
            self._rereg_file(info.contents.Context,file)
        if info.contents.WriteToEndOfFile:
            file.seek(0,os.SEEK_END)
        else:
            file.seek(offset)
        # Copy out of the caller's ctypes buffer before writing.
        data = ctypes.create_string_buffer(nBytesToWrite)
        ctypes.memmove(data,buffer,nBytesToWrite)
        file.write(data.raw)
        nBytesWritten[0] = len(data.raw)
        try:
            size_written = self._files_size_written[path][fh]
        except KeyError:
            pass
        else:
            # Bump the recorded written size if this write extended it.
            if offset + nBytesWritten[0] > size_written:
                new_size_written = offset + nBytesWritten[0]
                self._files_size_written[path][fh] = new_size_written
    finally:
        lock.release()
@timeout_protect
@handle_fs_errors
def FlushFileBuffers(self, path, info):
    """Dokan callback: flush buffered writes for the open handle."""
    path = normpath(path)
    (file,_,lock) = self._get_file(info.contents.Context)
    lock.acquire()
    try:
        file.flush()
    finally:
        lock.release()
@timeout_protect
@handle_fs_errors
def GetFileInformation(self, path, buffer, info):
    """Dokan callback: fill *buffer* with metadata for *path*.

    The backend's reported size may lag behind unflushed writes, so the
    per-handle written-size records are used to correct it upwards.
    """
    path = normpath(path)
    finfo = self.fs.getinfo(path)
    data = buffer.contents
    self._info2finddataw(path,finfo,data,info)
    try:
        written_size = max(self._files_size_written[path].values())
    except KeyError:
        pass
    else:
        # Report at least as many bytes as we know have been written.
        reported_size = (data.nFileSizeHigh << 32) + data.nFileSizeLow
        if written_size > reported_size:
            data.nFileSizeHigh = written_size >> 32
            data.nFileSizeLow = written_size & 0xffffffff
    data.nNumberOfLinks = 1
@timeout_protect
@handle_fs_errors
def FindFiles(self, path, fillFindData, info):
    """Dokan callback: enumerate all entries of directory *path*."""
    path = normpath(path)
    for (nm,finfo) in self.fs.listdirinfo(path):
        fpath = pathjoin(path,nm)
        # Entries scheduled for deletion must not appear in listings.
        if self._is_pending_delete(fpath):
            continue
        data = self._info2finddataw(fpath,finfo)
        fillFindData(ctypes.byref(data),info)
@timeout_protect
@handle_fs_errors
def FindFilesWithPattern(self, path, pattern, fillFindData, info):
    """Dokan callback: enumerate entries of *path* matching *pattern*.

    Like FindFiles, but filters names through Dokan's own wildcard
    matcher so semantics agree with native win32 pattern matching.
    """
    # NOTE: removed an unused local accumulator ("infolist") that was
    # assigned but never read in the original implementation.
    path = normpath(path)
    for (nm,finfo) in self.fs.listdirinfo(path):
        fpath = pathjoin(path,nm)
        # Entries scheduled for deletion must not appear in listings.
        if self._is_pending_delete(fpath):
            continue
        # Let Dokan decide whether the name matches the wildcard pattern.
        if not libdokan.DokanIsNameInExpression(pattern,nm,True):
            continue
        data = self._info2finddataw(fpath,finfo,None)
        fillFindData(ctypes.byref(data),info)
@timeout_protect
@handle_fs_errors
def SetFileAttributes(self, path, attrs, info):
    """Dokan callback: set win32 attributes on *path* (currently a no-op)."""
    path = normpath(path)
    # TODO: decode various file attributes
@timeout_protect
@handle_fs_errors
def SetFileTime(self, path, ctime, atime, mtime, info):
    """Dokan callback: set access/modification times on *path*."""
    path = normpath(path)
    # setting ctime is not supported
    if atime is not None:
        try:
            atime = _filetime2datetime(atime.contents)
        except ValueError:
            # Unrepresentable FILETIME; treat as "leave unchanged".
            atime = None
    if mtime is not None:
        try:
            mtime = _filetime2datetime(mtime.contents)
        except ValueError:
            mtime = None
    # some programs demand this succeed; fake it
    try:
        self.fs.settimes(path, atime, mtime)
    except UnsupportedError:
        pass
@timeout_protect
@handle_fs_errors
def DeleteFile(self, path, info):
    """Dokan callback: mark the file at *path* for deletion on close."""
    path = normpath(path)
    if not self.fs.isfile(path):
        if not self.fs.exists(path):
            raise ResourceNotFoundError(path)
        else:
            # It's a directory; DeleteDirectory must be used instead.
            raise ResourceInvalidError(path)
    self._pending_delete.add(path)
    # the actual delete takes place in self.CloseFile()
@timeout_protect
@handle_fs_errors
def DeleteDirectory(self, path, info):
    """Dokan callback: mark the directory at *path* for deletion on close."""
    path = normpath(path)
    # A directory only counts as empty if every child is itself
    # already pending deletion.
    for nm in self.fs.listdir(path):
        if not self._is_pending_delete(pathjoin(path,nm)):
            raise DirectoryNotEmptyError(path)
    self._pending_delete.add(path)
    # the actual delete takes place in self.CloseFile()
@timeout_protect
@handle_fs_errors
def MoveFile(self, src, dst, overwrite, info):
    """Dokan callback: rename/move *src* to *dst*."""
    # Close the file if we have an open handle to it.
    if info.contents.Context >= MIN_FH:
        (file,_,lock) = self._get_file(info.contents.Context)
        lock.acquire()
        try:
            file.close()
            self._del_file(info.contents.Context)
        finally:
            lock.release()
    src = normpath(src)
    dst = normpath(dst)
    if info.contents.IsDirectory:
        self.fs.movedir(src,dst,overwrite=overwrite)
    else:
        self.fs.move(src,dst,overwrite=overwrite)
@timeout_protect
@handle_fs_errors
def SetEndOfFile(self, path, length, info):
    """Dokan callback: truncate/extend the open file to *length* bytes."""
    path = normpath(path)
    (file,_,lock) = self._get_file(info.contents.Context)
    lock.acquire()
    try:
        # Remember where the caller was, truncate at the new end,
        # then restore the position (capped at the new length).
        pos = file.tell()
        if length != pos:
            file.seek(length)
        file.truncate()
        if pos < length:
            file.seek(min(pos,length))
    finally:
        lock.release()
@handle_fs_errors
def GetDiskFreeSpaceEx(self, nBytesAvail, nBytesTotal, nBytesFree, info):
    """Dokan callback: report free/total space for the mounted filesystem."""
    # This returns a stupidly large number if no info is available.
    # It's better to pretend an operation is possible and have it fail
    # than to pretend an operation will fail when it's actually possible.
    large_amount = 100 * 1024*1024*1024
    nBytesFree[0] = self.fs.getmeta("free_space",large_amount)
    nBytesTotal[0] = self.fs.getmeta("total_space",2*large_amount)
    nBytesAvail[0] = nBytesFree[0]
@handle_fs_errors
def GetVolumeInformation(self, vnmBuf, vnmSz, sNum, maxLen, flags, fnmBuf, fnmSz, info):
    """Dokan callback: fill in volume name, serial number and fs name."""
    # Copy the volume name, truncated to the caller's buffer size
    # (reserving one wchar for the NUL terminator).
    nm = ctypes.create_unicode_buffer(self.volname[:vnmSz-1])
    sz = (len(nm.value)+1) * ctypes.sizeof(ctypes.c_wchar)
    ctypes.memmove(vnmBuf,nm,sz)
    if sNum:
        sNum[0] = 0
    if maxLen:
        maxLen[0] = 255
    if flags:
        flags[0] = 0
    # Same copy-with-truncation for the filesystem name.
    nm = ctypes.create_unicode_buffer(self.fsname[:fnmSz-1])
    sz = (len(nm.value)+1) * ctypes.sizeof(ctypes.c_wchar)
    ctypes.memmove(fnmBuf,nm,sz)
@timeout_protect
@handle_fs_errors
def SetAllocationSize(self, path, length, info):
    """Dokan callback: reserve space for the file (not supported; no-op)."""
    # I think this is supposed to reserve space for the file
    # but *not* actually move the end-of-file marker.
    # No way to do that in pyfs.
    return 0
@timeout_protect
@handle_fs_errors
def LockFile(self, path, offset, length, info):
    """Dokan callback: take a byte-range lock on *path* for this handle."""
    end = offset + length
    with self._files_lock:
        try:
            locks = self._active_locks[path]
        except KeyError:
            # First lock on this path; start a fresh list.
            locks = self._active_locks[path] = []
        else:
            # Refuse if the range conflicts with existing locks.
            errno = self._check_lock(path,offset,length,None,locks)
            if errno:
                return errno
        locks.append((info.contents.Context,offset,end))
        return 0
@timeout_protect
@handle_fs_errors
def UnlockFile(self, path, offset, length, info):
    """Dokan callback: release a byte-range lock previously taken by LockFile."""
    end = offset + length
    with self._files_lock:
        try:
            locks = self._active_locks[path]
        except KeyError:
            return -ERROR_NOT_LOCKED
        # Only remove locks that match handle, start and end exactly.
        todel = []
        for i,(lh,lstart,lend) in enumerate(locks):
            if info.contents.Context == lh:
                if lstart == offset:
                    if lend == offset + length:
                        todel.append(i)
        if not todel:
            return -ERROR_NOT_LOCKED
        # Delete from the back so earlier indices stay valid.
        for i in reversed(todel):
            del locks[i]
        return 0
@handle_fs_errors
def Unmount(self, info):
    """Dokan callback: filesystem is being unmounted; nothing to clean up."""
    pass
def _info2attrmask(self,path,info,hinfo=None):
    """Convert a file/directory info dict to a win32 file attribute mask.

    Falls back first to the Dokan handle info (*hinfo*), then to an
    explicit isdir() check, when the st_mode entry is missing.
    """
    attrs = 0
    st_mode = info.get("st_mode",None)
    if st_mode:
        if statinfo.S_ISDIR(st_mode):
            attrs |= FILE_ATTRIBUTE_DIRECTORY
        elif statinfo.S_ISREG(st_mode):
            attrs |= FILE_ATTRIBUTE_NORMAL
    if not attrs and hinfo:
        if hinfo.contents.IsDirectory:
            attrs |= FILE_ATTRIBUTE_DIRECTORY
        else:
            attrs |= FILE_ATTRIBUTE_NORMAL
    if not attrs:
        # Last resort: ask the filesystem directly.
        if self.fs.isdir(path):
            attrs |= FILE_ATTRIBUTE_DIRECTORY
        else:
            attrs |= FILE_ATTRIBUTE_NORMAL
    return attrs
def _info2finddataw(self,path,info,data=None,hinfo=None):
    """Convert a file/directory info dict into a WIN32_FIND_DATAW struct.

    Fills *data* in place when given, otherwise allocates a new struct.
    """
    if data is None:
        data = libdokan.WIN32_FIND_DATAW()
    data.dwFileAttributes = self._info2attrmask(path,info,hinfo)
    data.ftCreationTime = _datetime2filetime(info.get("created_time",None))
    data.ftLastAccessTime = _datetime2filetime(info.get("accessed_time",None))
    data.ftLastWriteTime = _datetime2filetime(info.get("modified_time",None))
    # Split the 64-bit size across the two 32-bit win32 fields.
    data.nFileSizeHigh = info.get("size",0) >> 32
    data.nFileSizeLow = info.get("size",0) & 0xffffffff
    data.cFileName = basename(path)
    data.cAlternateFileName = ""
    return data
def _datetime2timestamp(dtime):
"""Convert a datetime object to a unix timestamp."""
t = time.mktime(dtime.timetuple())
t += dtime.microsecond / 1000000.0
return t
# Offset (in seconds) between local time and UTC, captured once at import
# time; used when translating UTC-based FILETIME values to local timestamps.
DATETIME_LOCAL_TO_UTC = _datetime2timestamp(datetime.datetime.utcnow()) - _datetime2timestamp(datetime.datetime.now())
def _timestamp2datetime(tstamp):
"""Convert a unix timestamp to a datetime object."""
return datetime.datetime.fromtimestamp(tstamp)
def _timestamp2filetime(tstamp):
    """Convert a unix timestamp into a win32 FILETIME struct."""
    # FILETIME counts 100-nanosecond ticks since the win32 epoch (1601).
    ticks = FILETIME_UNIX_EPOCH + int(tstamp * 10000000)
    return libdokan.FILETIME(ticks & 0xffffffff, ticks >> 32)
def _filetime2timestamp(ftime):
    """Convert a win32 FILETIME struct into a unix timestamp (float)."""
    # Reassemble the 64-bit tick count, then rebase onto the unix epoch.
    ticks = (ftime.dwHighDateTime << 32) | ftime.dwLowDateTime
    return (ticks - FILETIME_UNIX_EPOCH) / 10000000.0
def _filetime2datetime(ftime):
    """Convert a FILETIME struct into a datetime.datetime object."""
    # A missing struct, or an all-zero FILETIME, means "no timestamp".
    if ftime is None or (ftime.dwLowDateTime == 0 and ftime.dwHighDateTime == 0):
        return DATETIME_ZERO
    return _timestamp2datetime(_filetime2timestamp(ftime))
def _datetime2filetime(dtime):
    """Convert a datetime.datetime object into a win32 FILETIME struct."""
    # None, or the sentinel zero-datetime, maps to the all-zero FILETIME.
    if dtime is None or dtime == DATETIME_ZERO:
        return libdokan.FILETIME(0,0)
    return _timestamp2filetime(_datetime2timestamp(dtime))
def _errno2syserrcode(eno):
    """Convert an errno into a win32 system error code.

    Errnos without a specific mapping are passed through unchanged.
    """
    translations = {
        errno.EEXIST: ERROR_FILE_EXISTS,
        errno.ENOTEMPTY: ERROR_DIR_NOT_EMPTY,
        errno.ENOSYS: ERROR_NOT_SUPPORTED,
        errno.EACCES: ERROR_ACCESS_DENIED,
    }
    return translations.get(eno, eno)
def _normalise_drive_string(drive):
"""Normalise drive string to a single letter."""
if not drive:
raise ValueError("invalid drive letter: %r" % (drive,))
if len(drive) > 3:
raise ValueError("invalid drive letter: %r" % (drive,))
if not drive[0].isalpha():
raise ValueError("invalid drive letter: %r" % (drive,))
if not ":\\".startswith(drive[1:]):
raise ValueError("invalid drive letter: %r" % (drive,))
return drive[0].upper()
def mount(fs, drive, foreground=False, ready_callback=None, unmount_callback=None, **kwds):
    """Mount the given FS at the given drive letter, using Dokan.

    By default, this function spawns a new background process to manage the
    Dokan event loop.  The return value in this case is an instance of the
    'MountProcess' class, a subprocess.Popen subclass.

    If the keyword argument 'foreground' is given, we instead run the Dokan
    main loop in the current process.  In this case the function will block
    until the filesystem is unmounted, then return None.

    If the keyword argument 'ready_callback' is provided, it will be called
    when the filesystem has been mounted and is ready for use.  Any additional
    keyword arguments control the behavior of the final dokan mount point.
    Some interesting options include:

        * numthreads:  number of threads to use for handling Dokan requests
        * fsname:  name to display in explorer etc
        * flags:  DOKAN_OPTIONS bitmask
        * FSOperationsClass:  custom FSOperations subclass to use

    """
    if libdokan is None:
        raise OSError("the dokan library is not available")
    drive = _normalise_drive_string(drive)
    # This function captures the logic of checking whether the Dokan mount
    # is up and running.  Unfortunately I can't find a way to get this
    # via a callback in the Dokan API.  Instead we just check for the drive
    # in a loop, polling the mount proc to make sure it hasn't died.
    def check_alive(mp):
        if mp and mp.poll() != None:
            raise OSError("dokan mount process exited prematurely")
    def check_ready(mp=None):
        # ready_callback=False is a sentinel meaning "don't wait at all".
        if ready_callback is not False:
            check_alive(mp)
            # Poll for up to ~5 seconds (100 * 0.05s) for the drive to appear.
            for _ in xrange(100):
                try:
                    os.stat(drive+":\\")
                except EnvironmentError, e:
                    check_alive(mp)
                    time.sleep(0.05)
                else:
                    check_alive(mp)
                    if ready_callback:
                        return ready_callback()
                    else:
                        return None
            else:
                # for-else: loop exhausted without the drive showing up.
                check_alive(mp)
                raise OSError("dokan mount process seems to be hung")
    # Running in the foreground is the final endpoint for the mount
    # operation, it's where we call DokanMain().
    if foreground:
        numthreads = kwds.pop("numthreads",0)
        flags = kwds.pop("flags",0)
        FSOperationsClass = kwds.pop("FSOperationsClass",FSOperations)
        opts = libdokan.DOKAN_OPTIONS(drive[:1], numthreads, flags)
        ops = FSOperationsClass(fs, **kwds)
        if ready_callback:
            # DokanMain blocks, so readiness is watched from a daemon thread.
            check_thread = threading.Thread(target=check_ready)
            check_thread.daemon = True
            check_thread.start()
        opstruct = ops.get_ops_struct()
        res = libdokan.DokanMain(ctypes.byref(opts),ctypes.byref(opstruct))
        if res != DOKAN_SUCCESS:
            raise OSError("Dokan failed with error: %d" % (res,))
        if unmount_callback:
            unmount_callback()
    # Running in the background, spawn a subprocess and wait for it
    # to be ready before returning.
    else:
        mp = MountProcess(fs, drive, kwds)
        check_ready(mp)
        if unmount_callback:
            # Wrap unmount() so the caller's callback fires after unmounting.
            orig_unmount = mp.unmount
            def new_unmount():
                orig_unmount()
                unmount_callback()
            mp.unmount = new_unmount
        return mp
def unmount(drive):
    """Unmount the given drive.

    This function unmounts the dokan drive mounted at the given drive letter.
    It works but may leave dangling processes; it's better to use the
    "unmount" method on the MountProcess class if you have one.
    """
    # DokanUnmount returns a falsy value on failure.
    if not libdokan.DokanUnmount(drive):
        raise OSError("filesystem could not be unmounted: %s" % (drive,))
class MountProcess(subprocess.Popen):
    """subprocess.Popen subclass managing a Dokan mount.

    This is a subclass of subprocess.Popen, designed for easy management of
    a Dokan mount in a background process.  Rather than specifying the command
    to execute, pass in the FS object to be mounted, the target drive letter
    and a dictionary of options for the Dokan process.

    In order to be passed successfully to the new process, the FS object
    must be pickleable.  Since win32 has no fork() this restriction is not
    likely to be lifted (see also the "multiprocessing" module).

    This class has an extra attribute 'drive' giving the drive of the mounted
    filesystem, and an extra method 'unmount' that will cleanly unmount it
    and terminate the process.
    """

    # This works by spawning a new python interpreter and passing it the
    # pickled (fs,path,opts) tuple on the command-line.  Something like this:
    #
    #    python -c "import MountProcess; MountProcess._do_mount('..data..')
    #

    # Seconds allowed for a clean unmount before giving up.
    unmount_timeout = 5

    def __init__(self, fs, drive, dokan_opts={}, nowait=False, **kwds):
        # NOTE(review): the mutable default dict is never mutated here, so
        # it is harmless, but passing None and defaulting inside would be
        # the safer idiom.
        if libdokan is None:
            raise OSError("the dokan library is not available")
        self.drive = _normalise_drive_string(drive)
        self.path = self.drive + ":\\"
        # Build a one-liner that unpickles our arguments and performs
        # the mount inside the child interpreter.
        cmd = "import cPickle; "
        cmd = cmd + "data = cPickle.loads(%s); "
        cmd = cmd + "from fs.expose.dokan import MountProcess; "
        cmd = cmd + "MountProcess._do_mount(data)"
        cmd = cmd % (repr(cPickle.dumps((fs,drive,dokan_opts,nowait),-1)),)
        cmd = [sys.executable,"-c",cmd]
        super(MountProcess,self).__init__(cmd,**kwds)

    def unmount(self):
        """Cleanly unmount the Dokan filesystem, terminating this subprocess."""
        if not libdokan.DokanUnmount(self.drive):
            raise OSError("the filesystem could not be unmounted: %s" %(self.drive,))
        self.terminate()

    # Older Python versions lack Popen.terminate/kill; emulate via win32.
    if not hasattr(subprocess.Popen, "terminate"):
        def terminate(self):
            """Gracefully terminate the subprocess."""
            kernel32.TerminateProcess(int(self._handle),-1)

    if not hasattr(subprocess.Popen, "kill"):
        def kill(self):
            """Forcibly terminate the subprocess."""
            kernel32.TerminateProcess(int(self._handle),-1)

    @staticmethod
    def _do_mount(data):
        """Perform the specified mount (runs inside the child process)."""
        (fs,drive,opts,nowait) = data
        opts["foreground"] = True
        def unmount_callback():
            fs.close()
        opts["unmount_callback"] = unmount_callback
        if nowait:
            # ready_callback=False tells mount() not to wait for readiness.
            opts["ready_callback"] = False
        mount(fs,drive,**opts)
class Win32SafetyFS(WrapFS):
    """FS wrapper for extra safety when mounting on win32.

    This wrapper class provides some safety features when mounting untrusted
    filesystems on win32.  Specifically:

        * hiding autorun files
        * removing colons from paths
    """

    def __init__(self,wrapped_fs,allow_autorun=False):
        # When allow_autorun is False (the default) autorun files are
        # renamed on the way through so Explorer will not execute them.
        self.allow_autorun = allow_autorun
        super(Win32SafetyFS,self).__init__(wrapped_fs)

    def _encode(self,path):
        """Translate an outward-facing path into the wrapped fs's namespace."""
        path = relpath(normpath(path))
        # Colons are illegal in win32 filenames; stash them reversibly.
        path = path.replace(":","__colon__")
        if not self.allow_autorun:
            # Undo the "_autorun." masking applied by _decode.
            if path.lower().startswith("_autorun."):
                path = path[1:]
        return path

    def _decode(self,path):
        """Translate a wrapped-fs path back into the outward-facing namespace."""
        path = relpath(normpath(path))
        path = path.replace("__colon__",":")
        if not self.allow_autorun:
            # Mask autorun files by prefixing an underscore.
            if path.lower().startswith("autorun."):
                path = "_" + path
        return path
if __name__ == "__main__":
    # Smoke test: expose a scratch OSFS as drive Q: in the foreground
    # until the user unmounts it, then clean up the temp directory.
    import os, os.path
    import tempfile
    from fs.osfs import OSFS
    from fs.memoryfs import MemoryFS
    path = tempfile.mkdtemp()
    try:
        fs = OSFS(path)
        #fs = MemoryFS()
        fs.setcontents("test1.txt","test one")
        flags = DOKAN_OPTION_DEBUG|DOKAN_OPTION_STDERR|DOKAN_OPTION_REMOVABLE
        mount(fs, "Q", foreground=True, numthreads=1, flags=flags)
        fs.close()
    finally:
        OSFS(path).removedir("/",force=True)
| agpl-3.0 |
titom1986/CouchPotatoServer | libs/pyutil/iputil.py | 92 | 9752 | # from the Python Standard Library
import os, re, socket, sys, subprocess
# from Twisted
from twisted.internet import defer, threads, reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.python.procutils import which
from twisted.python import log
try:
    import resource
    def increase_rlimits():
        """Raise the soft RLIMIT_NOFILE limit where the platform allows it."""
        # We'd like to raise our soft resource.RLIMIT_NOFILE, since certain
        # systems (OS-X, probably solaris) start with a relatively low limit
        # (256), and some unit tests want to open up more sockets than this.
        # Most linux systems start with both hard and soft limits at 1024,
        # which is plenty.

        # unfortunately the values to pass to setrlimit() vary widely from
        # one system to another. OS-X reports (256, HUGE), but the real hard
        # limit is 10240, and accepts (-1,-1) to mean raise it to the
        # maximum. Cygwin reports (256, -1), then ignores a request of
        # (-1,-1): instead you have to guess at the hard limit (it appears to
        # be 3200), so using (3200,-1) seems to work. Linux reports a
        # sensible (1024,1024), then rejects (-1,-1) as trying to raise the
        # maximum limit, so you could set it to (1024,1024) but you might as
        # well leave it alone.
        try:
            current = resource.getrlimit(resource.RLIMIT_NOFILE)
        except AttributeError:
            # we're probably missing RLIMIT_NOFILE
            return
        if current[0] >= 1024:
            # good enough, leave it alone
            return
        try:
            if current[1] > 0 and current[1] < 1000000:
                # solaris reports (256, 65536)
                resource.setrlimit(resource.RLIMIT_NOFILE,
                                   (current[1], current[1]))
            else:
                # this one works on OS-X (bsd), and gives us 10240, but
                # it doesn't work on linux (on which both the hard and
                # soft limits are set to 1024 by default).
                resource.setrlimit(resource.RLIMIT_NOFILE, (-1,-1))
                new = resource.getrlimit(resource.RLIMIT_NOFILE)
                if new[0] == current[0]:
                    # probably cygwin, which ignores -1. Use a real value.
                    resource.setrlimit(resource.RLIMIT_NOFILE, (3200,-1))
        except ValueError:
            log.msg("unable to set RLIMIT_NOFILE: current value %s"
                    % (resource.getrlimit(resource.RLIMIT_NOFILE),))
        except:
            # who knows what. It isn't very important, so log it and continue
            log.err()
except ImportError:
    def _increase_rlimits():
        """No-op fallback where the 'resource' module is unavailable (win32)."""
        # TODO: implement this for Windows. Although I suspect the
        # solution might be "be running under the iocp reactor and
        # make this function be a no-op".
        pass
    # pyflakes complains about two 'def FOO' statements in the same time,
    # since one might be shadowing the other. This hack appeases pyflakes.
    increase_rlimits = _increase_rlimits
def get_local_addresses_async(target="198.41.0.4"): # A.ROOT-SERVERS.NET
    """
    Return a Deferred that fires with a list of IPv4 addresses (as dotted-quad
    strings) that are currently configured on this host, sorted in descending
    order of how likely we think they are to work.

    @param target: we want to learn an IP address they could try using to
        connect to us; The default value is fine, but it might help if you
        pass the address of a host that you are actually trying to be
        reachable to.
    """
    addresses = []
    # Put the address we'd use to reach `target` first: it's the best guess.
    local_ip = get_local_ip_for(target)
    if local_ip:
        addresses.append(local_ip)

    if sys.platform == "cygwin":
        d = _cygwin_hack_find_addresses(target)
    else:
        d = _find_addresses_via_config()

    def _collect(res):
        # Append config-derived addresses, skipping duplicates and 0.0.0.0.
        for addr in res:
            if addr != "0.0.0.0" and not addr in addresses:
                addresses.append(addr)
        return addresses
    d.addCallback(_collect)

    return d
def get_local_ip_for(target):
    """Find out what our IP address is for use by a given target.

    @return: the IP address as a dotted-quad string which could be used by
        to connect to us. It might work for them, it might not. If
        there is no suitable address (perhaps we don't currently have an
        externally-visible interface), this will return None.
    """
    try:
        target_ipaddr = socket.gethostbyname(target)
    except socket.gaierror:
        # DNS isn't running, or somehow we encountered an error

        # note: if an interface is configured and up, but nothing is
        # connected to it, gethostbyname("A.ROOT-SERVERS.NET") will take 20
        # seconds to raise socket.gaierror . This is synchronous and occurs
        # for each node being started, so users of
        # test.common.SystemTestMixin (like test_system) will see something
        # like 120s of delay, which may be enough to hit the default trial
        # timeouts. For that reason, get_local_addresses_async() was changed
        # to default to the numerical ip address for A.ROOT-SERVERS.NET, to
        # avoid this DNS lookup. This also makes node startup fractionally
        # faster.
        return None
    # Connecting a UDP socket doesn't send traffic, but it does make the
    # kernel pick the outgoing interface -- which is the address we want.
    udpprot = DatagramProtocol()
    port = reactor.listenUDP(0, udpprot)
    try:
        udpprot.transport.connect(target_ipaddr, 7)
        localip = udpprot.transport.getHost().host
    except socket.error:
        # no route to that host
        localip = None
    port.stopListening() # note, this returns a Deferred
    return localip
# k: result of sys.platform, v: which kind of IP configuration reader we use
# (the value indexes into _tool_map below).
_platform_map = {
    "linux-i386": "linux", # redhat
    "linux-ppc": "linux",  # redhat
    "linux2": "linux",     # debian
    "linux3": "linux",     # debian
    "win32": "win32",
    "irix6-n32": "irix",
    "irix6-n64": "irix",
    "irix6": "irix",
    "openbsd2": "bsd",
    "openbsd3": "bsd",
    "openbsd4": "bsd",
    "openbsd5": "bsd",
    "darwin": "bsd",       # Mac OS X
    "freebsd4": "bsd",
    "freebsd5": "bsd",
    "freebsd6": "bsd",
    "freebsd7": "bsd",
    "freebsd8": "bsd",
    "freebsd9": "bsd",
    "netbsd1": "bsd",
    "netbsd2": "bsd",
    "netbsd3": "bsd",
    "netbsd4": "bsd",
    "netbsd5": "bsd",
    "netbsd6": "bsd",
    "dragonfly2": "bsd",
    "sunos5": "sunos",
    "cygwin": "cygwin",
    }
class UnsupportedPlatformError(Exception):
    """Raised when sys.platform has no entry in _platform_map."""
    pass
# Wow, I'm really amazed at home much mileage we've gotten out of calling
# the external route.exe program on windows...  It appears to work on all
# versions so far.  Still, the real system calls would much be preferred...
# ... thus wrote Greg Smith in time immemorial...
_win32_path = 'route.exe'
_win32_args = ('print',)
# NOTE(review): these patterns are not raw strings; the escapes happen to
# survive as-is, but r'...' would be the safer spelling.
_win32_re = re.compile('^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)

# These work in Redhat 6.x and Debian 2.2 potato
_linux_path = '/sbin/ifconfig'
_linux_re = re.compile('^\s*inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)

# NetBSD 1.4 (submitted by Rhialto), Darwin, Mac OS X
_netbsd_path = '/sbin/ifconfig'
_netbsd_args = ('-a',)
_netbsd_re = re.compile('^\s+inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)

# Irix 6.5
_irix_path = '/usr/etc/ifconfig'

# Solaris 2.x
_sunos_path = '/usr/sbin/ifconfig'

# k: platform string as provided in the value of _platform_map
# v: tuple of (path_to_tool, args, regex,)
_tool_map = {
    "linux": (_linux_path, (), _linux_re,),
    "win32": (_win32_path, _win32_args, _win32_re,),
    "cygwin": (_win32_path, _win32_args, _win32_re,),
    "bsd": (_netbsd_path, _netbsd_args, _netbsd_re,),
    "irix": (_irix_path, _netbsd_args, _netbsd_re,),
    "sunos": (_sunos_path, _netbsd_args, _netbsd_re,),
    }
def _find_addresses_via_config():
    """Return a Deferred firing with local IPv4 addresses, parsed from an OS tool."""
    # The parsing shells out to ifconfig/route, so run it off the reactor thread.
    return threads.deferToThread(_synchronously_find_addresses_via_config)
def _synchronously_find_addresses_via_config():
    """Run the platform's network-config tool and return the parsed addresses."""
    # originally by Greg Smith, hacked by Zooko to conform to Brian's API
    platform = _platform_map.get(sys.platform)
    if not platform:
        raise UnsupportedPlatformError(sys.platform)

    (pathtotool, args, regex,) = _tool_map[platform]

    # If pathtotool is a fully qualified path then we just try that.
    # If it is merely an executable name then we use Twisted's
    # "which()" utility and try each executable in turn until one
    # gives us something that resembles a dotted-quad IPv4 address.

    if os.path.isabs(pathtotool):
        return _query(pathtotool, args, regex)
    else:
        exes_to_try = which(pathtotool)
        for exe in exes_to_try:
            try:
                addresses = _query(exe, args, regex)
            except Exception:
                # This candidate failed to run/parse; fall through to the next.
                addresses = []
            if addresses:
                return addresses
        return []
def _query(path, args, regex):
    """Run the tool at *path* with *args* and pull IPv4 addresses out of its
    stdout using *regex* (which must define an 'address' named group)."""
    # Force a known locale so the tool's output format is predictable.
    env = {'LANG': 'en_US.UTF-8'}
    p = subprocess.Popen([path] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
    (output, err) = p.communicate()
    addresses = []
    # NOTE(review): assumes Python 2 where communicate() yields str;
    # under Python 3 `output` would be bytes and split('\n') would fail.
    outputsplit = output.split('\n')
    for outline in outputsplit:
        m = regex.match(outline)
        if m:
            addr = m.groupdict()['address']
            if addr not in addresses:
                addresses.append(addr)

    return addresses
def _cygwin_hack_find_addresses(target):
    """Cygwin fallback: probe a few well-known hosts instead of parsing tools."""
    addresses = []
    # Try the caller's target plus loopback aliases; collect distinct answers.
    for h in [target, "localhost", "127.0.0.1",]:
        try:
            addr = get_local_ip_for(h)
            if addr not in addresses:
                addresses.append(addr)
        except socket.gaierror:
            pass

    return defer.succeed(addresses)
| gpl-3.0 |
dhermes/google-cloud-python | bigquery/google/cloud/bigquery/__init__.py | 2 | 4670 | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google BigQuery API wrapper.
The main concepts with this API are:
- :class:`~google.cloud.bigquery.client.Client` manages connections to the
BigQuery API. Use the client methods to run jobs (such as a
:class:`~google.cloud.bigquery.job.QueryJob` via
:meth:`~google.cloud.bigquery.client.Client.query`) and manage resources.
- :class:`~google.cloud.bigquery.dataset.Dataset` represents a
collection of tables.
- :class:`~google.cloud.bigquery.table.Table` represents a single "relation".
"""
from pkg_resources import get_distribution
# Package version string, resolved from the installed distribution metadata.
__version__ = get_distribution("google-cloud-bigquery").version
from google.cloud.bigquery.client import Client
from google.cloud.bigquery.dataset import AccessEntry
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery.external_config import BigtableOptions
from google.cloud.bigquery.external_config import BigtableColumnFamily
from google.cloud.bigquery.external_config import BigtableColumn
from google.cloud.bigquery.external_config import CSVOptions
from google.cloud.bigquery.external_config import GoogleSheetsOptions
from google.cloud.bigquery.external_config import ExternalSourceFormat
from google.cloud.bigquery.job import Compression
from google.cloud.bigquery.job import CopyJob
from google.cloud.bigquery.job import CopyJobConfig
from google.cloud.bigquery.job import CreateDisposition
from google.cloud.bigquery.job import DestinationFormat
from google.cloud.bigquery.job import Encoding
from google.cloud.bigquery.job import ExtractJob
from google.cloud.bigquery.job import ExtractJobConfig
from google.cloud.bigquery.job import LoadJob
from google.cloud.bigquery.job import LoadJobConfig
from google.cloud.bigquery.job import QueryJob
from google.cloud.bigquery.job import QueryJobConfig
from google.cloud.bigquery.job import QueryPriority
from google.cloud.bigquery.job import SchemaUpdateOption
from google.cloud.bigquery.job import SourceFormat
from google.cloud.bigquery.job import UnknownJob
from google.cloud.bigquery.job import WriteDisposition
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
from google.cloud.bigquery.query import UDFResource
from google.cloud.bigquery.retry import DEFAULT_RETRY
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import EncryptionConfiguration
from google.cloud.bigquery.table import Table
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.table import Row
from google.cloud.bigquery.table import TimePartitioningType
from google.cloud.bigquery.table import TimePartitioning
__all__ = [
"__version__",
"Client",
# Queries
"QueryJob",
"QueryJobConfig",
"ArrayQueryParameter",
"ScalarQueryParameter",
"StructQueryParameter",
# Datasets
"Dataset",
"DatasetReference",
"AccessEntry",
# Tables
"EncryptionConfiguration",
"Table",
"TableReference",
"Row",
"CopyJob",
"CopyJobConfig",
"ExtractJob",
"ExtractJobConfig",
"LoadJob",
"LoadJobConfig",
"UnknownJob",
"TimePartitioningType",
"TimePartitioning",
# Shared helpers
"SchemaField",
"UDFResource",
"ExternalConfig",
"BigtableOptions",
"BigtableColumnFamily",
"BigtableColumn",
"CSVOptions",
"GoogleSheetsOptions",
"DEFAULT_RETRY",
# Enum Constants
"Compression",
"CreateDisposition",
"DestinationFormat",
"ExternalSourceFormat",
"Encoding",
"QueryPriority",
"SchemaUpdateOption",
"SourceFormat",
"WriteDisposition",
]
def load_ipython_extension(ipython):
    """Called by IPython when this module is loaded as an IPython extension."""
    # Imported lazily so IPython is not a hard dependency of this package.
    from google.cloud.bigquery.magics import _cell_magic

    # Registers the %%bigquery cell magic.
    ipython.register_magic_function(
        _cell_magic, magic_kind="cell", magic_name="bigquery"
    )
| apache-2.0 |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Lib/encodings/cp1254.py | 593 | 13758 | """ Python Character Mapping Codec cp1254 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1254.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp1254 codec backed by the module-level charmap tables.

    def encode(self,input,errors='strict'):
        """Encode a unicode string to cp1254 bytes via the encoding table."""
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        """Decode cp1254 bytes to a unicode string via the decoding table."""
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so each chunk is encoded independently.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Each byte maps to exactly one character, so no state is carried over.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits the cp1254 encode() from Codec; no extra behaviour needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits the cp1254 decode() from Codec; no extra behaviour needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record registered by the codecs machinery."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1254',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# Maps every byte 0x00-0xFF to its unicode character. Generated by
# gencodec.py; u'\ufffe' marks byte values that are undefined in cp1254.
# Do not edit by hand -- regenerate from the vendor mapping file instead.
decoding_table = (
    u'\x00' # 0x00 -> NULL
    u'\x01' # 0x01 -> START OF HEADING
    u'\x02' # 0x02 -> START OF TEXT
    u'\x03' # 0x03 -> END OF TEXT
    u'\x04' # 0x04 -> END OF TRANSMISSION
    u'\x05' # 0x05 -> ENQUIRY
    u'\x06' # 0x06 -> ACKNOWLEDGE
    u'\x07' # 0x07 -> BELL
    u'\x08' # 0x08 -> BACKSPACE
    u'\t' # 0x09 -> HORIZONTAL TABULATION
    u'\n' # 0x0A -> LINE FEED
    u'\x0b' # 0x0B -> VERTICAL TABULATION
    u'\x0c' # 0x0C -> FORM FEED
    u'\r' # 0x0D -> CARRIAGE RETURN
    u'\x0e' # 0x0E -> SHIFT OUT
    u'\x0f' # 0x0F -> SHIFT IN
    u'\x10' # 0x10 -> DATA LINK ESCAPE
    u'\x11' # 0x11 -> DEVICE CONTROL ONE
    u'\x12' # 0x12 -> DEVICE CONTROL TWO
    u'\x13' # 0x13 -> DEVICE CONTROL THREE
    u'\x14' # 0x14 -> DEVICE CONTROL FOUR
    u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16' # 0x16 -> SYNCHRONOUS IDLE
    u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
    u'\x18' # 0x18 -> CANCEL
    u'\x19' # 0x19 -> END OF MEDIUM
    u'\x1a' # 0x1A -> SUBSTITUTE
    u'\x1b' # 0x1B -> ESCAPE
    u'\x1c' # 0x1C -> FILE SEPARATOR
    u'\x1d' # 0x1D -> GROUP SEPARATOR
    u'\x1e' # 0x1E -> RECORD SEPARATOR
    u'\x1f' # 0x1F -> UNIT SEPARATOR
    u' ' # 0x20 -> SPACE
    u'!' # 0x21 -> EXCLAMATION MARK
    u'"' # 0x22 -> QUOTATION MARK
    u'#' # 0x23 -> NUMBER SIGN
    u'$' # 0x24 -> DOLLAR SIGN
    u'%' # 0x25 -> PERCENT SIGN
    u'&' # 0x26 -> AMPERSAND
    u"'" # 0x27 -> APOSTROPHE
    u'(' # 0x28 -> LEFT PARENTHESIS
    u')' # 0x29 -> RIGHT PARENTHESIS
    u'*' # 0x2A -> ASTERISK
    u'+' # 0x2B -> PLUS SIGN
    u',' # 0x2C -> COMMA
    u'-' # 0x2D -> HYPHEN-MINUS
    u'.' # 0x2E -> FULL STOP
    u'/' # 0x2F -> SOLIDUS
    u'0' # 0x30 -> DIGIT ZERO
    u'1' # 0x31 -> DIGIT ONE
    u'2' # 0x32 -> DIGIT TWO
    u'3' # 0x33 -> DIGIT THREE
    u'4' # 0x34 -> DIGIT FOUR
    u'5' # 0x35 -> DIGIT FIVE
    u'6' # 0x36 -> DIGIT SIX
    u'7' # 0x37 -> DIGIT SEVEN
    u'8' # 0x38 -> DIGIT EIGHT
    u'9' # 0x39 -> DIGIT NINE
    u':' # 0x3A -> COLON
    u';' # 0x3B -> SEMICOLON
    u'<' # 0x3C -> LESS-THAN SIGN
    u'=' # 0x3D -> EQUALS SIGN
    u'>' # 0x3E -> GREATER-THAN SIGN
    u'?' # 0x3F -> QUESTION MARK
    u'@' # 0x40 -> COMMERCIAL AT
    u'A' # 0x41 -> LATIN CAPITAL LETTER A
    u'B' # 0x42 -> LATIN CAPITAL LETTER B
    u'C' # 0x43 -> LATIN CAPITAL LETTER C
    u'D' # 0x44 -> LATIN CAPITAL LETTER D
    u'E' # 0x45 -> LATIN CAPITAL LETTER E
    u'F' # 0x46 -> LATIN CAPITAL LETTER F
    u'G' # 0x47 -> LATIN CAPITAL LETTER G
    u'H' # 0x48 -> LATIN CAPITAL LETTER H
    u'I' # 0x49 -> LATIN CAPITAL LETTER I
    u'J' # 0x4A -> LATIN CAPITAL LETTER J
    u'K' # 0x4B -> LATIN CAPITAL LETTER K
    u'L' # 0x4C -> LATIN CAPITAL LETTER L
    u'M' # 0x4D -> LATIN CAPITAL LETTER M
    u'N' # 0x4E -> LATIN CAPITAL LETTER N
    u'O' # 0x4F -> LATIN CAPITAL LETTER O
    u'P' # 0x50 -> LATIN CAPITAL LETTER P
    u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    u'R' # 0x52 -> LATIN CAPITAL LETTER R
    u'S' # 0x53 -> LATIN CAPITAL LETTER S
    u'T' # 0x54 -> LATIN CAPITAL LETTER T
    u'U' # 0x55 -> LATIN CAPITAL LETTER U
    u'V' # 0x56 -> LATIN CAPITAL LETTER V
    u'W' # 0x57 -> LATIN CAPITAL LETTER W
    u'X' # 0x58 -> LATIN CAPITAL LETTER X
    u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    u'[' # 0x5B -> LEFT SQUARE BRACKET
    u'\\' # 0x5C -> REVERSE SOLIDUS
    u']' # 0x5D -> RIGHT SQUARE BRACKET
    u'^' # 0x5E -> CIRCUMFLEX ACCENT
    u'_' # 0x5F -> LOW LINE
    u'`' # 0x60 -> GRAVE ACCENT
    u'a' # 0x61 -> LATIN SMALL LETTER A
    u'b' # 0x62 -> LATIN SMALL LETTER B
    u'c' # 0x63 -> LATIN SMALL LETTER C
    u'd' # 0x64 -> LATIN SMALL LETTER D
    u'e' # 0x65 -> LATIN SMALL LETTER E
    u'f' # 0x66 -> LATIN SMALL LETTER F
    u'g' # 0x67 -> LATIN SMALL LETTER G
    u'h' # 0x68 -> LATIN SMALL LETTER H
    u'i' # 0x69 -> LATIN SMALL LETTER I
    u'j' # 0x6A -> LATIN SMALL LETTER J
    u'k' # 0x6B -> LATIN SMALL LETTER K
    u'l' # 0x6C -> LATIN SMALL LETTER L
    u'm' # 0x6D -> LATIN SMALL LETTER M
    u'n' # 0x6E -> LATIN SMALL LETTER N
    u'o' # 0x6F -> LATIN SMALL LETTER O
    u'p' # 0x70 -> LATIN SMALL LETTER P
    u'q' # 0x71 -> LATIN SMALL LETTER Q
    u'r' # 0x72 -> LATIN SMALL LETTER R
    u's' # 0x73 -> LATIN SMALL LETTER S
    u't' # 0x74 -> LATIN SMALL LETTER T
    u'u' # 0x75 -> LATIN SMALL LETTER U
    u'v' # 0x76 -> LATIN SMALL LETTER V
    u'w' # 0x77 -> LATIN SMALL LETTER W
    u'x' # 0x78 -> LATIN SMALL LETTER X
    u'y' # 0x79 -> LATIN SMALL LETTER Y
    u'z' # 0x7A -> LATIN SMALL LETTER Z
    u'{' # 0x7B -> LEFT CURLY BRACKET
    u'|' # 0x7C -> VERTICAL LINE
    u'}' # 0x7D -> RIGHT CURLY BRACKET
    u'~' # 0x7E -> TILDE
    u'\x7f' # 0x7F -> DELETE
    u'\u20ac' # 0x80 -> EURO SIGN
    u'\ufffe' # 0x81 -> UNDEFINED
    u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
    u'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
    u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
    u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
    u'\u2020' # 0x86 -> DAGGER
    u'\u2021' # 0x87 -> DOUBLE DAGGER
    u'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
    u'\u2030' # 0x89 -> PER MILLE SIGN
    u'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
    u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    u'\u0152' # 0x8C -> LATIN CAPITAL LIGATURE OE
    u'\ufffe' # 0x8D -> UNDEFINED
    u'\ufffe' # 0x8E -> UNDEFINED
    u'\ufffe' # 0x8F -> UNDEFINED
    u'\ufffe' # 0x90 -> UNDEFINED
    u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
    u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
    u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
    u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
    u'\u2022' # 0x95 -> BULLET
    u'\u2013' # 0x96 -> EN DASH
    u'\u2014' # 0x97 -> EM DASH
    u'\u02dc' # 0x98 -> SMALL TILDE
    u'\u2122' # 0x99 -> TRADE MARK SIGN
    u'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
    u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    u'\u0153' # 0x9C -> LATIN SMALL LIGATURE OE
    u'\ufffe' # 0x9D -> UNDEFINED
    u'\ufffe' # 0x9E -> UNDEFINED
    u'\u0178' # 0x9F -> LATIN CAPITAL LETTER Y WITH DIAERESIS
    u'\xa0' # 0xA0 -> NO-BREAK SPACE
    u'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
    u'\xa2' # 0xA2 -> CENT SIGN
    u'\xa3' # 0xA3 -> POUND SIGN
    u'\xa4' # 0xA4 -> CURRENCY SIGN
    u'\xa5' # 0xA5 -> YEN SIGN
    u'\xa6' # 0xA6 -> BROKEN BAR
    u'\xa7' # 0xA7 -> SECTION SIGN
    u'\xa8' # 0xA8 -> DIAERESIS
    u'\xa9' # 0xA9 -> COPYRIGHT SIGN
    u'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
    u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xac' # 0xAC -> NOT SIGN
    u'\xad' # 0xAD -> SOFT HYPHEN
    u'\xae' # 0xAE -> REGISTERED SIGN
    u'\xaf' # 0xAF -> MACRON
    u'\xb0' # 0xB0 -> DEGREE SIGN
    u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
    u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
    u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
    u'\xb4' # 0xB4 -> ACUTE ACCENT
    u'\xb5' # 0xB5 -> MICRO SIGN
    u'\xb6' # 0xB6 -> PILCROW SIGN
    u'\xb7' # 0xB7 -> MIDDLE DOT
    u'\xb8' # 0xB8 -> CEDILLA
    u'\xb9' # 0xB9 -> SUPERSCRIPT ONE
    u'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
    u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
    u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
    u'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
    u'\xbf' # 0xBF -> INVERTED QUESTION MARK
    u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
    u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
    u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\u011e' # 0xD0 -> LATIN CAPITAL LETTER G WITH BREVE
    u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
    u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\xd7' # 0xD7 -> MULTIPLICATION SIGN
    u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
    u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\u0130' # 0xDD -> LATIN CAPITAL LETTER I WITH DOT ABOVE
    u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
    u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
    u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
    u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
    u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
    u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\u011f' # 0xF0 -> LATIN SMALL LETTER G WITH BREVE
    u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
    u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
    u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\xf7' # 0xF7 -> DIVISION SIGN
    u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
    u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
    u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\u0131' # 0xFD -> LATIN SMALL LETTER DOTLESS I
    u'\u015f' # 0xFE -> LATIN SMALL LETTER S WITH CEDILLA
    u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table

# Inverse mapping (unicode -> byte), derived mechanically from the table above.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-2.0 |
hanlind/nova | nova/api/openstack/compute/extension_info.py | 1 | 14469 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _LE
from nova.policies import base as base_policies
from nova.policies import extensions as ext_policies
# URL alias under which this API extension is exposed (/extensions).
ALIAS = 'extensions'
# Module-level logger.
LOG = logging.getLogger(__name__)
# NOTE(cyeoh): The following mappings are currently incomplete
# Having a v2.1 extension loaded can imply that several v2 extensions
# should also appear to be loaded (although they no longer do in v2.1)
v21_to_v2_extension_list_mapping = {
    'os-quota-sets': [{'name': 'UserQuotas', 'alias': 'os-user-quotas',
                       'description': 'Project user quota support.'},
                      {'name': 'ExtendedQuotas',
                       'alias': 'os-extended-quotas',
                       'description': ('Adds ability for admins to delete'
                       ' quota and optionally force the update Quota'
                       ' command.')}],
    'os-cells': [{'name': 'CellCapacities', 'alias': 'os-cell-capacities',
                  'description': ('Adding functionality to get cell'
                  ' capacities.')}],
    'os-baremetal-nodes': [{'name': 'BareMetalExtStatus',
                            'alias': 'os-baremetal-ext-status',
                            'description': ('Add extended status in'
                            ' Baremetal Nodes v2 API.')}],
    'os-block-device-mapping': [{'name': 'BlockDeviceMappingV2Boot',
                                 'alias': 'os-block-device-mapping-v2-boot',
                                 'description': ('Allow boot with the new BDM'
                                 ' data format.')}],
    'os-cloudpipe': [{'name': 'CloudpipeUpdate',
                      'alias': 'os-cloudpipe-update',
                      'description': ('Adds the ability to set the vpn'
                      ' ip/port for cloudpipe instances.')}],
    'servers': [{'name': 'Createserverext', 'alias': 'os-create-server-ext',
                 'description': ('Extended support to the Create Server'
                 ' v1.1 API.')},
                {'name': 'ExtendedIpsMac', 'alias': 'OS-EXT-IPS-MAC',
                 'description': 'Adds mac address parameter to the ip list.'},
                {'name': 'ExtendedIps', 'alias': 'OS-EXT-IPS',
                 'description': 'Adds type parameter to the ip list.'},
                {'name': 'ServerListMultiStatus',
                 'alias': 'os-server-list-multi-status',
                 'description': ('Allow to filter the servers by a set of'
                 ' status values.')},
                {'name': 'ServerSortKeys', 'alias': 'os-server-sort-keys',
                 'description': 'Add sorting support in get Server v2 API.'},
                {'name': 'ServerStartStop', 'alias': 'os-server-start-stop',
                 'description': 'Start/Stop instance compute API support.'}],
    'flavors': [{'name': 'FlavorDisabled', 'alias': 'OS-FLV-DISABLED',
                 'description': ('Support to show the disabled status'
                 ' of a flavor.')},
                {'name': 'FlavorExtraData', 'alias': 'OS-FLV-EXT-DATA',
                 'description': 'Provide additional data for flavors.'},
                {'name': 'FlavorSwap', 'alias': 'os-flavor-swap',
                 'description': ('Support to show the swap status of a'
                 ' flavor.')}],
    'os-services': [{'name': 'ExtendedServicesDelete',
                     'alias': 'os-extended-services-delete',
                     'description': 'Extended services deletion support.'},
                    {'name': 'ExtendedServices', 'alias':
                     'os-extended-services',
                     'description': 'Extended services support.'}],
    'os-evacuate': [{'name': 'ExtendedEvacuateFindHost',
                     'alias': 'os-extended-evacuate-find-host',
                     'description': ('Enables server evacuation without'
                     ' target host. Scheduler will select one to target.')}],
    'os-floating-ips': [{'name': 'ExtendedFloatingIps',
                         'alias': 'os-extended-floating-ips',
                         'description': ('Adds optional fixed_address to the add'
                         ' floating IP command.')}],
    'os-hypervisors': [{'name': 'ExtendedHypervisors',
                        'alias': 'os-extended-hypervisors',
                        'description': 'Extended hypervisors support.'},
                       {'name': 'HypervisorStatus',
                        'alias': 'os-hypervisor-status',
                        'description': 'Show hypervisor status.'}],
    'os-networks': [{'name': 'ExtendedNetworks',
                     'alias': 'os-extended-networks',
                     'description': 'Adds additional fields to networks.'}],
    'os-rescue': [{'name': 'ExtendedRescueWithImage',
                   'alias': 'os-extended-rescue-with-image',
                   'description': ('Allow the user to specify the image to'
                   ' use for rescue.')}],
    'os-extended-status': [{'name': 'ExtendedStatus',
                            'alias': 'OS-EXT-STS',
                            'description': 'Extended Status support.'}],
    'os-used-limits': [{'name': 'UsedLimitsForAdmin',
                        'alias': 'os-used-limits-for-admin',
                        'description': ('Provide data to admin on limited'
                        ' resources used by other tenants.')}],
    'os-volumes': [{'name': 'VolumeAttachmentUpdate',
                    'alias': 'os-volume-attachment-update',
                    'description': ('Support for updating a volume'
                    ' attachment.')}],
    'os-server-groups': [{'name': 'ServerGroupQuotas',
                          'alias': 'os-server-group-quotas',
                          'description': 'Adds quota support to server groups.'}],
}
# v2.1 plugins which should never appear in the v2 extension list
# This should be the v2.1 alias, not the V2.0 alias
v2_extension_suppress_list = ['servers', 'images', 'versions', 'flavors',
                              'os-block-device-mapping-v1', 'os-consoles',
                              'extensions', 'image-metadata', 'ips', 'limits',
                              'server-metadata', 'server-migrations',
                              'os-server-tags'
                              ]
# v2.1 plugins which should appear under a different name in v2
v21_to_v2_alias_mapping = {
    'image-size': 'OS-EXT-IMG-SIZE',
    'os-remote-consoles': 'os-consoles',
    'os-disk-config': 'OS-DCF',
    'os-extended-availability-zone': 'OS-EXT-AZ',
    'os-extended-server-attributes': 'OS-EXT-SRV-ATTR',
    'os-multinic': 'NMN',
    'os-scheduler-hints': 'OS-SCH-HNT',
    'os-server-usage': 'OS-SRV-USG',
    'os-instance-usage-audit-log': 'os-instance_usage_audit_log',
}
# NOTE(sdague): this is the list of extension metadata that we display
# to the user for features that we provide. This exists for legacy
# purposes because applications were once asked to look for these
# things to decide if a feature is enabled. As we remove extensions
# completely from the code we're going to have a static list here to
# keep the surface metadata the same.
hardcoded_extensions = [
    {'name': 'DiskConfig',
     'alias': 'os-disk-config',
     'description': 'Disk Management Extension.'},
    {'name': 'AccessIPs',
     'description': 'Access IPs support.',
     'alias': 'os-access-ips'},
    {'name': 'PreserveEphemeralOnRebuild',
     'description': ('Allow preservation of the '
                     'ephemeral partition on rebuild.'),
     'alias': 'os-preserve-ephemeral-rebuild'},
    {'name': 'Personality',
     'description': 'Personality support.',
     'alias': 'os-personality'},
]
# V2.1 does not support XML but we need to keep an entry in the
# /extensions information returned to the user for backwards
# compatibility
# These placeholder values are reported verbatim in the "namespace" and
# "updated" fields of every extension (see _translate below).
FAKE_XML_URL = "http://docs.openstack.org/compute/ext/fake_xml"
FAKE_UPDATED_DATE = "2014-12-03T00:00:00Z"
class FakeExtension(object):
    """Minimal stand-in for a removed extension, kept only for listings.

    Instances expose the attributes that _translate() reads: name, alias,
    a description (stored as ``__doc__``) and a dummy version of -1.
    """

    def __init__(self, name, alias, description=""):
        self.name = name
        self.alias = alias
        # The description is surfaced through __doc__, matching how real
        # extension classes carry their description in their docstring.
        self.__doc__ = description
        self.version = -1
class ExtensionInfoController(wsgi.Controller):
    """Controller implementing the legacy /extensions discovery API."""

    def __init__(self, extension_info):
        self.extension_info = extension_info

    def _translate(self, ext):
        """Convert an extension object into its API dict representation."""
        ext_data = {}
        ext_data["name"] = ext.name
        ext_data["alias"] = ext.alias
        ext_data["description"] = ext.__doc__
        # XML is unsupported in v2.1; fixed placeholder values keep the
        # response shape backwards compatible.
        ext_data["namespace"] = FAKE_XML_URL
        ext_data["updated"] = FAKE_UPDATED_DATE
        ext_data["links"] = []
        return ext_data

    def _create_fake_ext(self, name, alias, description=""):
        """Build a FakeExtension carrying the given metadata."""
        return FakeExtension(name, alias, description)

    def _add_vif_extension(self, discoverable_extensions):
        """Add the removed OS-EXT-VIF-NET extension for legacy v2 mode."""
        vif_extension = {}
        vif_extension_info = {'name': 'ExtendedVIFNet',
                              'alias': 'OS-EXT-VIF-NET',
                              'description': 'Adds network id parameter'
                              ' to the virtual interface list.'}
        vif_extension[vif_extension_info["alias"]] = self._create_fake_ext(
            vif_extension_info["name"], vif_extension_info["alias"],
            vif_extension_info["description"])
        discoverable_extensions.update(vif_extension)

    def _get_extensions(self, context):
        """Filter extensions list based on policy."""
        discoverable_extensions = dict()

        for item in hardcoded_extensions:
            discoverable_extensions[item['alias']] = self._create_fake_ext(
                item['name'],
                item['alias'],
                item['description']
            )

        for alias, ext in self.extension_info.get_extensions().items():
            action = ':'.join([
                base_policies.COMPUTE_API, alias, 'discoverable'])
            if context.can(action, fatal=False):
                discoverable_extensions[alias] = ext
            else:
                LOG.debug("Filter out extension %s from discover list",
                          alias)

        # Add fake v2 extensions to list
        extra_exts = {}
        for alias in discoverable_extensions:
            if alias in v21_to_v2_extension_list_mapping:
                for extra_ext in v21_to_v2_extension_list_mapping[alias]:
                    extra_exts[extra_ext["alias"]] = self._create_fake_ext(
                        extra_ext["name"], extra_ext["alias"],
                        extra_ext["description"])
        discoverable_extensions.update(extra_exts)

        # Suppress extensions which we don't want to see in v2
        for suppress_ext in v2_extension_suppress_list:
            try:
                del discoverable_extensions[suppress_ext]
            except KeyError:
                pass

        # v2.1 to v2 extension name mapping
        for rename_ext in v21_to_v2_alias_mapping:
            if rename_ext in discoverable_extensions:
                new_name = v21_to_v2_alias_mapping[rename_ext]
                # Deep copy so mutating the alias does not affect the
                # registered extension object.
                mod_ext = copy.deepcopy(
                    discoverable_extensions.pop(rename_ext))
                mod_ext.alias = new_name
                discoverable_extensions[new_name] = mod_ext

        return discoverable_extensions

    @extensions.expected_errors(())
    def index(self, req):
        """Return the list of discoverable extensions."""
        context = req.environ['nova.context']
        context.can(ext_policies.BASE_POLICY_NAME)
        discoverable_extensions = self._get_extensions(context)
        # NOTE(gmann): This is for v2.1 compatible mode where
        # extension list should show all extensions as shown by v2.
        # Here we add VIF extension which has been removed from v2.1 list.
        if req.is_legacy_v2():
            self._add_vif_extension(discoverable_extensions)
        sorted_ext_list = sorted(
            discoverable_extensions.items())

        # NOTE: named ext_list (not "extensions") to avoid shadowing the
        # imported ``extensions`` module inside this function.
        ext_list = []
        for _alias, ext in sorted_ext_list:
            ext_list.append(self._translate(ext))
        return dict(extensions=ext_list)

    @extensions.expected_errors(404)
    def show(self, req, id):
        """Return a single extension looked up by its alias."""
        context = req.environ['nova.context']
        context.can(ext_policies.BASE_POLICY_NAME)
        try:
            # NOTE(dprince): the extensions alias is used as the 'id' for show
            ext = self._get_extensions(context)[id]
        except KeyError:
            raise webob.exc.HTTPNotFound()
        return dict(extension=self._translate(ext))
class ExtensionInfo(extensions.V21APIExtensionBase):
    """Extension information."""

    name = "Extensions"
    alias = ALIAS
    version = 1

    def get_resources(self):
        """Expose the /extensions resource backed by the info controller."""
        controller = ExtensionInfoController(self.extension_info)
        return [extensions.ResourceExtension(ALIAS, controller,
                                             member_name='extension')]

    def get_controller_extensions(self):
        """This plugin contributes no controller extensions."""
        return []
class LoadedExtensionInfo(object):
    """Keep track of all loaded API extensions."""

    def __init__(self):
        # Maps extension alias -> extension object.
        self.extensions = {}

    def register_extension(self, ext):
        """Validate *ext* and record it under its alias.

        Returns False when the extension is invalid; raises NovaException
        when another extension with the same alias was already registered.
        """
        if not self._check_extension(ext):
            return False

        alias = ext.alias
        if alias in self.extensions:
            raise exception.NovaException("Found duplicate extension: %s"
                                          % alias)
        self.extensions[alias] = ext
        return True

    def _check_extension(self, extension):
        """Checks for required methods in extension objects."""
        try:
            extension.is_valid()
        except AttributeError:
            LOG.exception(_LE("Exception loading extension"))
            return False
        else:
            return True

    def get_extensions(self):
        """Return the alias -> extension mapping."""
        return self.extensions
| apache-2.0 |
DayGitH/Python-Challenges | DailyProgrammer/DP20120917B.py | 1 | 2265 | """
[9/17/2012] Challenge #99 [intermediate] (Unemployment map of the United States)
https://www.reddit.com/r/dailyprogrammer/comments/101mi5/9172012_challenge_99_intermediate_unemployment/
[A little while
ago](http://www.reddit.com/r/dailyprogrammer/comments/yj38u/8202012_challenge_89_difficult_coloring_the/) we took
advantage of a [very useful blank map](http://en.wikipedia.org/wiki/File:Blank_US_Map.svg) hosted at Wikipedia. The
advantage of this map is that it is very easy to assign different colors to each state (for details on how to do this,
[see the previous
problem](http://www.reddit.com/r/dailyprogrammer/comments/yj38u/8202012_challenge_89_difficult_coloring_the/)). We only
had some silly fun with it, but it can also obviously be very useful in visualizing information about the country.
[Here is a text-file with unemployment data](https://gist.github.com/3740029) for all US states for each month from
January 1980 to June 2012, stored in [CSV format](http://en.wikipedia.org/wiki/Comma-separated_values). The first
column is the dates, then each column is the data for each state (the order of which is detailed in the headers). I got
this information from the Federal Reserve Bank of St. Louis [FRED database](http://research.stlouisfed.org/fred2/),
which has excellent API access (good work, St. Louis Fed!).
Using this table, make a program that can draw a map of unemployment across the United States at a given date. For
instance, [here is a map of unemployment for July 2005](http://i.imgur.com/O4LP2.png). As you can see, I edited the map
slightly, adding a scale to the left side and a header that includes the date. You can do that too if you wish, but it
is not necessary in any way.
Your map doesn't need to look anything like mine. You can experiment with different colors and different styles. I
selected the colors linearly based on unemployment, but you may want to use a different function to select colors, or
perhaps color all states within a certain range the same (so that all states with 0%-2% are the same color, as are the
states with 2%-4%, 4%-6%, etc). Experiment and see what you like.
Create a map which shows unemployment for February 1995.
"""
def main():
    """Entry point for the challenge solution (not implemented yet)."""
# Run the (currently stubbed) solution when executed as a script.
if __name__ == "__main__":
    main()
| mit |
aaron-goshine/electron | script/update-external-binaries.py | 29 | 1682 | #!/usr/bin/env python
import errno
import sys
import os
from lib.config import get_target_arch
from lib.util import safe_mkdir, rm_rf, extract_zip, tempdir, download
# Release tag of the atom-shell-frameworks repository to download from;
# recorded in external_binaries/.version after a successful fetch.
VERSION = 'v0.8.0'

# Repository root (this script lives in <root>/script/).
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
FRAMEWORKS_URL = 'http://github.com/atom/atom-shell-frameworks/releases' \
                 '/download/' + VERSION
def main():
  """Download prebuilt external binaries for the current platform.

  Skips all work when external_binaries/.version already records VERSION;
  otherwise wipes the directory, downloads the platform-specific archives
  and writes the version stamp.
  """
  os.chdir(SOURCE_ROOT)

  version_file = os.path.join(SOURCE_ROOT, 'external_binaries', '.version')
  # Idiomatic condition (no redundant parentheses): bail out early when the
  # cached binaries are already at the requested version.
  if is_updated(version_file, VERSION):
    return

  rm_rf('external_binaries')
  safe_mkdir('external_binaries')
  if sys.platform == 'darwin':
    download_and_unzip('Mantle')
    download_and_unzip('ReactiveCocoa')
    download_and_unzip('Squirrel')
  elif sys.platform in ['cygwin', 'win32']:
    download_and_unzip('directxsdk-' + get_target_arch())
    download_and_unzip('vs2012-crt-' + get_target_arch())

  # Write the stamp last so a failed download forces a re-fetch next run.
  with open(version_file, 'w') as f:
    f.write(VERSION)
def is_updated(version_file, version):
  """Return True if *version_file*'s first line already records *version*."""
  try:
    with open(version_file, 'r') as f:
      recorded = f.readline().strip()
  except IOError as e:
    # A missing stamp file just means "never downloaded"; anything else
    # (permissions, I/O failure) is a real error worth propagating.
    if e.errno != errno.ENOENT:
      raise
    recorded = ''
  return recorded == version
def download_and_unzip(framework):
  """Fetch *framework*'s release zip and extract it into external_binaries."""
  zip_path = download_framework(framework)
  if not zip_path:
    return
  extract_zip(zip_path, 'external_binaries')
def download_framework(framework):
  """Download the release zip for *framework*; return its local path."""
  archive = framework + '.zip'
  # Fetch into a fresh temporary directory to avoid clobbering other runs.
  destination = os.path.join(tempdir(prefix='electron-'), archive)
  download('Download ' + framework, FRAMEWORKS_URL + '/' + archive,
           destination)
  return destination
# Propagate main()'s return value as the process exit status when run
# as a script.
if __name__ == '__main__':
  sys.exit(main())
| mit |
dpau/thumb | lib/werkzeug/contrib/lint.py | 318 | 12282 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.lint
~~~~~~~~~~~~~~~~~~~~~
.. versionadded:: 0.5
This module provides a middleware that performs sanity checks of the WSGI
application. It checks that :pep:`333` is properly implemented and warns
on some common HTTP errors such as non-empty responses for 304 status
codes.
This module provides a middleware, the :class:`LintMiddleware`. Wrap your
application with it and it will warn about common problems with WSGI and
HTTP while your application is running.
It's strongly recommended to use it during development.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from urlparse import urlparse
from warnings import warn
from werkzeug.datastructures import Headers
from werkzeug.http import is_entity_header
from werkzeug.wsgi import FileWrapper
from werkzeug._compat import string_types
class WSGIWarning(Warning):
    """Warning class for WSGI warnings."""
    # Emitted via warnings.warn for violations of the WSGI protocol.
class HTTPWarning(Warning):
    """Warning class for HTTP warnings."""
    # Emitted for HTTP-level problems, e.g. a body on a 304 response.
def check_string(context, obj, stacklevel=3):
    """Warn with a :class:`WSGIWarning` unless *obj* is a native ``str``.

    :param context: short label for the value being checked, used in the
                    warning message (e.g. ``'write()'``).
    :param obj: the object whose type is verified.
    :param stacklevel: frame offset forwarded to :func:`warnings.warn` so
                       the warning points at the offending caller.
    """
    # Exact type check on purpose: the WSGI spec requires native strings,
    # so subclasses and unicode objects must be flagged too.
    if type(obj) is not str:
        warn(WSGIWarning('%s requires bytestrings, got %s' %
                         (context, obj.__class__.__name__)),
             # Bug fix: stacklevel was accepted but never forwarded, so
             # warnings pointed at this helper instead of the caller.
             stacklevel=stacklevel)
class InputStream(object):
    """Wrapper for ``wsgi.input`` that warns about unsafe usage patterns."""

    def __init__(self, stream):
        self._stream = stream

    def read(self, *args):
        """Proxy ``read``, warning on missing or extra size arguments."""
        nargs = len(args)
        if nargs == 0:
            warn(WSGIWarning('wsgi does not guarantee an EOF marker on the '
                             'input stream, thus making calls to '
                             'wsgi.input.read() unsafe. Conforming servers '
                             'may never return from this call.'),
                 stacklevel=2)
        elif nargs != 1:
            warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
                 stacklevel=2)
        return self._stream.read(*args)

    def readline(self, *args):
        """Proxy ``readline``, warning on argument counts WSGI disallows."""
        nargs = len(args)
        if nargs == 0:
            warn(WSGIWarning('Calls to wsgi.input.readline() without arguments'
                             ' are unsafe. Use wsgi.input.read() instead.'),
                 stacklevel=2)
        elif nargs == 1:
            warn(WSGIWarning('wsgi.input.readline() was called with a size hint. '
                             'WSGI does not support this, although it\'s available '
                             'on all major servers.'),
                 stacklevel=2)
        else:
            raise TypeError('too many arguments passed to wsgi.input.readline()')
        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            # A non-iterable input stream violates WSGI; degrade to empty.
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())

    def close(self):
        # The application must never close the server-owned input stream.
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()
class ErrorStream(object):
    """Wrapper for ``wsgi.errors`` that validates writes.

    Every chunk is type-checked via check_string before being forwarded
    to the real error stream.
    """

    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)

    def flush(self):
        self._stream.flush()

    def writelines(self, seq):
        # Bug fix: the previous code wrote the whole sequence object once
        # per line (``self.write(seq)``) instead of each individual line.
        for line in seq:
            self.write(line)

    def close(self):
        # The application must never close the server-owned error stream.
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()
class GuardedWrite(object):
    """Wraps the ``write`` callable returned by ``start_response``.

    Type-checks each chunk and records its length in *chunks* so the
    response size can later be compared against Content-Length.
    """

    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string('write()', s)
        # Bug fix: ``write`` is itself the callable handed back by
        # start_response (PEP 333); the previous code invoked a
        # nonexistent ``.write`` attribute on it.
        self._write(s)
        self._chunks.append(len(s))
class GuardedIterator(object):
    """Wraps the application iterator to detect protocol violations.

    Tracks whether the response was started, type-checks and measures each
    chunk, and on close() validates entity-header/body rules for 1xx, 204
    and 304 responses plus the Content-Length header.
    """

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        # Python 2 iterator protocol: bind the .next method directly.
        self._next = iter(iterator).next
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'),
                 stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            # NOTE(review): headers appears to be a werkzeug Headers
            # instance (supports get(..., type=int)) -- confirm at call site.
            content_length = headers.get('content-length', type=int)

            if status_code == 304:
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Bug fix: the % formatting was applied to the
                    # HTTPWarning instance (a TypeError at runtime), not
                    # to the message string.
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))

    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                # Interpreter may be shutting down; warning machinery can
                # be gone, so failures here must be swallowed.
                pass
class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.

    Among other things it currently checks for the following problems:

    - invalid status codes
    - non-bytestrings sent to the WSGI server
    - strings returned from the WSGI application
    - non-empty conditional responses
    - unquoted etags
    - relative URLs in the Location header
    - unsafe calls to wsgi.input
    - unclosed iterators

    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.
    ::

        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)

    :param app: the application to wrap
    """

    def __init__(self, app):
        self.app = app

    def check_environ(self, environ):
        """Warn about missing required keys and malformed values in the
        WSGI environ dict."""
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard python dict.'),
                 stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                                 % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)
        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                             % script_name), stacklevel=3)
        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                             % path_info), stacklevel=3)

    def check_start_response(self, status, headers, exc_info):
        """Validate the arguments given to ``start_response`` and return
        the parsed integer status code plus a ``Headers`` object."""
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'), stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            # Bug fix: the %r placeholder was never interpolated; include
            # the offending status string in the warning message.
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status),
                 stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)

        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                # Bug fix: message previously read "must tuple 2-item tuples".
                warn(WSGIWarning('Headers must be 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)

        headers = Headers(headers)
        self.check_headers(headers)

        return status_code, headers

    def check_headers(self, headers):
        """Warn about unquoted ETag values and relative Location headers."""
        etag = headers.get('etag')
        if etag is not None:
            # Bug fix: RFC 7232 spells the weak-validator prefix "W/";
            # accept either case instead of only lowercase 'w/'.
            if etag.startswith(('W/', 'w/')):
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)
        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location header'),
                     stacklevel=4)

    def check_iterator(self, app_iter):
        """Warn if the application returned a bare string, which would be
        streamed one character at a time."""
        if isinstance(app_iter, string_types):
            warn(WSGIWarning('application returned string. Response will '
                             'send character for character to the client '
                             'which will kill the performance. Return a '
                             'list or iterable instead.'), stacklevel=3)

    def __call__(self, *args, **kwargs):
        """WSGI entry point: wrap the input/error streams, validate the
        calling convention and return a guarded response iterator."""
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'), stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args

        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])

        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper

        headers_set = []
        chunks = []

        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                # Bug fix: ``stacklevel`` was previously passed to
                # WSGIWarning() (misplaced closing paren) instead of warn().
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))
            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)

        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
| apache-2.0 |
frappe/frappe | frappe/desk/report_dump.py | 1 | 2788 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
import json
import copy
@frappe.whitelist()
def get_data(doctypes, last_modified):
    """Return a bulk dump of the requested DocTypes for client-side reports.

    Both arguments arrive as JSON strings from the HTTP request:
    ``doctypes`` is a list of doctype names (optionally carrying a
    ``"[variant]"`` suffix) and ``last_modified`` maps doctype -> the
    client's last-known modification timestamp, enabling incremental
    (partial) refreshes instead of full dumps.
    """
    data_map = {}
    # Collect per-doctype column/condition/link definitions contributed
    # by installed apps via the ``dump_report_map`` hook.
    for dump_report_map in frappe.get_hooks().dump_report_map:
        data_map.update(frappe.get_attr(dump_report_map))
    out = {}
    doctypes = json.loads(doctypes)
    last_modified = json.loads(last_modified)
    for d in doctypes:
        args = copy.deepcopy(data_map[d])
        # Strip an optional "[variant]" suffix to get the real doctype name.
        dt = d.find("[") != -1 and d[:d.find("[")] or d
        out[dt] = {}
        if args.get("from"):
            # Multi-table query: qualify the modified column with "item.".
            modified_table = "item."
        else:
            modified_table = ""
        conditions = order_by = ""
        table = args.get("from") or ("`tab%s`" % dt)
        if d in last_modified:
            # Partial refresh: only rows changed since the client's timestamp.
            if not args.get("conditions"):
                args['conditions'] = []
            # NOTE(review): ``last_modified`` comes straight from the request
            # and is concatenated into SQL here — potential SQL injection on
            # this whitelisted endpoint; should be parameterized. TODO confirm.
            args['conditions'].append(modified_table + "modified > '" + last_modified[d] + "'")
            out[dt]["modified_names"] = frappe.db.sql_list("""select %sname from %s
                where %smodified > %s""" % (modified_table, table, modified_table, "%s"), last_modified[d])
        if args.get("force_index"):
            conditions = " force index (%s) " % args["force_index"]
        if args.get("conditions"):
            conditions += " where " + " and ".join(args["conditions"])
        if args.get("order_by"):
            order_by = " order by " + args["order_by"]
        out[dt]["data"] = [list(t) for t in frappe.db.sql("""select %s from %s %s %s""" \
            % (",".join(args["columns"]), table, conditions, order_by))]
        # last modified
        modified_table = table
        if "," in table:
            # Multi-table FROM clause: use the first table (sans alias).
            modified_table = " ".join(table.split(",")[0].split(" ")[:-1])
        tmp = frappe.db.sql("""select `modified`
            from %s order by modified desc limit 1""" % modified_table)
        out[dt]["last_modified"] = tmp and tmp[0][0] or ""
        # Column labels: keep only the alias after " as ", if present.
        out[dt]["columns"] = list(map(lambda c: c.split(" as ")[-1], args["columns"]))
        if args.get("links"):
            out[dt]["links"] = args["links"]
    for d in out:
        unused_links = []
        # only compress full dumps (not partial)
        if out[d].get("links") and (d not in last_modified):
            for link_key in out[d]["links"]:
                link = out[d]["links"][link_key]
                if link[0] in out and (link[0] not in last_modified):
                    # make a map of link ids
                    # to index
                    link_map = {}
                    doctype_data = out[link[0]]
                    col_idx = doctype_data["columns"].index(link[1])
                    for row_idx in range(len(doctype_data["data"])):
                        row = doctype_data["data"][row_idx]
                        link_map[row[col_idx]] = row_idx
                    for row in out[d]["data"]:
                        columns = list(out[d]["columns"])
                        if link_key in columns:
                            col_idx = columns.index(link_key)
                            # replace by id
                            if row[col_idx]:
                                row[col_idx] = link_map.get(row[col_idx])
                else:
                    unused_links.append(link_key)
        for link in unused_links:
            del out[d]["links"][link]
    return out
| mit |
hassanibi/erpnext | erpnext/patches/v7_0/repost_bin_qty_and_item_projected_qty.py | 35 | 1123 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.stock.doctype.bin.bin import update_item_projected_qty
def execute():
    """Patch entry point: rebuild Bin projected quantities, then roll the
    totals up onto the corresponding Items."""
    for step in (repost_bin_qty, repost_item_projected_qty):
        step()
def repost_bin_qty():
    """Recompute ``projected_qty`` for every Bin whose stored value is out
    of sync with the sum of its component quantities."""
    stale_bins = frappe.db.sql(""" select name from `tabBin`
        where (actual_qty + ordered_qty + indented_qty + planned_qty- reserved_qty - reserved_qty_for_production) != projected_qty """, as_dict=1)
    for row in stale_bins:
        doc = frappe.get_doc('Bin', row.name)
        doc.set_projected_qty()
        # Write the corrected value without bumping the modified timestamp.
        doc.db_set("projected_qty", doc.projected_qty, update_modified=False)
def repost_item_projected_qty():
    """Re-derive each Item's ``total_projected_qty`` wherever it no longer
    matches the sum of projected quantities across that Item's Bins."""
    mismatched = frappe.db.sql(""" select
            `tabBin`.item_code as item_code,
            sum(`tabBin`.projected_qty) as projected_qty,
            `tabItem`.total_projected_qty as total_projected_qty
        from
            `tabBin`, `tabItem`
        where `tabBin`.item_code = `tabItem`.name
        group by `tabBin`.item_code having projected_qty <> total_projected_qty """, as_dict=1)
    for row in mismatched:
        update_item_projected_qty(row.item_code)
| gpl-3.0 |
dparks1134/GenomeTk | genometk/metadata_nucleotide.py | 1 | 4848 | ###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
import logging
import biolib.seq_io as seq_io
import biolib.seq_tk as seq_tk
import biolib.genome_tk as genome_tk
class MetadataNucleotide():
    """Calculate metadata derived from nucleotide sequences."""

    def __init__(self):
        """Initialization"""
        self.logger = logging.getLogger('timestamp')

    def generate(self, genome_file, contig_break):
        """Derive metadata across nucleotide sequences.

        Parameters
        ----------
        genome_file : str
            Name of fasta file containing nucleotide sequences.
        contig_break : int
            Minimum number of ambiguous bases for defining contigs.

        Returns
        -------
        dict : d[metadata_field] -> value
            Map of metadata fields to their respective values.
        dict : d[metadata_field] -> description
            Description of each metadata field.
        """
        stats = {}
        descs = {}

        def record(field, value, description):
            # Store a statistic together with its human-readable description.
            stats[field] = value
            descs[field] = description

        # Scaffold-level statistics.
        scaffolds = seq_io.read(genome_file)
        record('scaffold_count', len(scaffolds),
               "Number of scaffolds in genome.")
        record('gc_count', genome_tk.gc_count(scaffolds),
               "Number of G or C bases in genome.")
        record('gc_percentage', genome_tk.gc(scaffolds) * 100.0,
               "GC content of genome.")
        record('genome_size', sum(len(seq) for seq in scaffolds.values()),
               "Total base pairs in genome including nucleotide bases, ambiguous bases, and gaps.")
        record('n50_scaffolds', seq_tk.N50(scaffolds),
               "Scaffold length at which 50% of total bases in assembly are in scaffolds of that length or greater.")
        record('l50_scaffolds', seq_tk.L50(scaffolds, stats['n50_scaffolds']),
               "Number of scaffolds longer than, or equal to, the scaffold N50 length.")
        record('mean_scaffold_length', int(seq_tk.mean_length(scaffolds)),
               "Mean length of scaffolds in base pairs.")
        record('longest_scaffold', seq_tk.max_length(scaffolds),
               "Number of bases in longest scaffold.")

        # Contig-level statistics: contigs are scaffold stretches separated
        # by runs of at least ``contig_break`` ambiguous bases.
        contigs = seq_tk.identify_contigs(scaffolds, 'N' * contig_break)
        record('contig_count', len(contigs),
               "Number of contigs in genome.")
        record('ambiguous_bases', genome_tk.ambiguous_nucleotides(contigs),
               "Number of ambiguous bases in contigs.")
        record('total_gap_length',
               genome_tk.ambiguous_nucleotides(scaffolds) - stats['ambiguous_bases'],
               "Number of ambiguous bases comprising gaps in scaffolds.")
        record('n50_contigs', seq_tk.N50(contigs),
               "Contig length at which 50% of total bases in assembly are in contigs of that length or greater.")
        record('l50_contigs', seq_tk.L50(contigs, stats['n50_contigs']),
               "Number of contigs longer than, or equal to, the contig N50 length.")
        record('mean_contig_length', int(seq_tk.mean_length(contigs)),
               "Mean length of contigs in base pairs.")
        record('longest_contig', seq_tk.max_length(contigs),
               "Number of bases in longest contig.")

        return stats, descs
| gpl-3.0 |
# GUI toolkit imports.  wxPython "classic" (the ``wxPython.wx`` namespace)
# only exists on Python 2, so a failed import means we are running on an
# unsupported interpreter.
try:
    import wx
    import wx.html
    from wxPython.wx import *
except ImportError:
    # Bug fix: this previously read ``raise:`` — a SyntaxError that made
    # the whole module unimportable.
    raise ImportError("Detected Python 3+")

import infernal_wireless_gui
class MyHtmlFrame(wx.Frame):
    """Small frame that renders the static "about" page as HTML."""

    def __init__(self, parent, title):
        wx.Frame.__init__(self, parent, -1, title)
        viewer = wx.html.HtmlWindow(self)
        page = ("This is test implementation of Simple <b>Notepad</b> using "
                "<font color=\"red\">wxPython</font> !!!! "
                "<br>View is created using <font color=\"red\">wxGlade</font>")
        viewer.SetPage(page)
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
# wx.Frame.__init__(self, parent, -1, title, pos=(150, 150), size=(350, 250))
self.flag = 0
# Menu Bar
self.frame_1_menubar = wx.MenuBar()
self.SetMenuBar(self.frame_1_menubar)
self.File = wx.Menu()
self.New = wx.MenuItem(self.File, wx.NewId(), "probe requeset", "", wx.ITEM_NORMAL)
self.File.AppendItem(self.New)
self.Save = wx.MenuItem(self.File, wx.NewId(), "Scan Range", "", wx.ITEM_NORMAL)
self.File.AppendItem(self.Save)
self.frame_1_menubar.Append(self.File, "&File")
self.frame_1_statusbar = self.CreateStatusBar(1, 0)
self.__set_properties()
self.__do_layout()
self.SetStatusText("WPA2 Cracker")
self.Bind(wx.EVT_MENU, self.file_save, self.Save)
self.Bind(wx.EVT_MENU, self.file_new, self.New)
# end wxGlade
def __set_properties(self):
# begin wxGlade: MyFrame.__set_properties
self.SetTitle("WPA2 Enterprise Cracker")
# self.frame_1_toolbar.SetToolBitmapSize((0, 0))
# self.frame_1_toolbar.Realize()
self.frame_1_statusbar.SetStatusWidths([-1])
# statusbar fields
frame_1_statusbar_fields = ["frame_1_statusbar"]
for i in range(len(frame_1_statusbar_fields)):
self.frame_1_statusbar.SetStatusText(frame_1_statusbar_fields[i], i)
# end wxGlade
def __do_layout(self):
# begin wxGlade: MyFrame.__do_layout
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.SetAutoLayout(True)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
sizer_1.SetSizeHints(self)
self.Layout()
# end wxGlade
def file_save(self, event): # wxGlade: MyFrame.<event_handler>
if self.flag == 1:
dialog = wxFileDialog(None, style=wxSAVE)
# Show the dialog and get user input
if dialog.ShowModal() == wxID_OK:
file_path = dialog.GetPath()
file = open(file_path, 'w')
file_content = self.text_ctrl_1.GetValue()
file.write(file_content)
else:
print 'Nothing was selected.'
# Destroy the dialog
self.SetStatusText("Your file has been saved")
dialog.Destroy()
else:
dlg = wxMessageDialog(self, "plz open a new file !!!!", "New File", wxOK | wxICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def open_file(self, event): # wxGlade: MyFrame.<event_handler>
if self.flag == 1:
filters = 'Text files (*.txt)|*.txt'
dialog = wxFileDialog(None, message='Open something....', wildcard=filters, style=wxOPEN | wxMULTIPLE)
if dialog.ShowModal() == wxID_OK:
filename = dialog.GetPath()
file = open(filename, 'r')
file_content = file.read()
self.text_ctrl_1.SetValue(file_content)
else:
print 'Nothing was selected.'
dialog.Destroy()
else:
dlg = wxMessageDialog(self, "plz open a new file !!!!", "New File", wxOK | wxICON_INFORMATION)
dlg.ShowModal()
# Destroy the dialog
self.SetStatusText("Your file is opened")
dlg.Destroy()
def file_new(self, event): # wxGlade: MyFrame.<event_handler>
test = infernal_wireless_gui.Example
test.wireless_scan()
"""Bring up a wx.MessageDialog with a quit message."""
alert = wx.MessageDialog(self, "Do you really want to quit")
response = alert.ShowModal()
alert.Destroy()
if response == wx.ID_OK:
print "The user clicked the 'OK' button."
self.Close()
else:
print "The user clicked the 'Cancel' button."
event.Skip()
def about(self, event):
frm = MyHtmlFrame(None, "About ....")
frm.Show()
class MyNotepad(wx.App):
    """wx application bootstrap: builds and shows the main frame."""

    def OnInit(self):
        wx.InitAllImageHandlers()
        main_frame = MyFrame(None, -1, "WPA2 Entperise Hack")
        self.SetTopWindow(main_frame)
        main_frame.Show()
        return 1
# end of class MyNotepad
# if __name__ == "__main__":
def openit():
app = MyNotepad(0)
app.MainLoop()
openit()
| gpl-3.0 |
clouddocx/boto | boto/provider.py | 102 | 20925 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright 2010 Google Inc.
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
This class encapsulates the provider-specific header differences.
"""
import os
from boto.compat import six
from datetime import datetime
import boto
from boto import config
from boto.compat import expanduser
from boto.pyami.config import Config
from boto.gs.acl import ACL
from boto.gs.acl import CannedACLStrings as CannedGSACLStrings
from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings
from boto.s3.acl import Policy
# Keys used to index the per-provider HeaderInfoMap defined below.
HEADER_PREFIX_KEY = 'header_prefix'
METADATA_PREFIX_KEY = 'metadata_prefix'

# Raw header-name prefixes used by each provider.
AWS_HEADER_PREFIX = 'x-amz-'
GOOG_HEADER_PREFIX = 'x-goog-'

# Logical header identifiers, resolved to concrete, provider-specific
# header names through HeaderInfoMap.
ACL_HEADER_KEY = 'acl-header'
AUTH_HEADER_KEY = 'auth-header'
COPY_SOURCE_HEADER_KEY = 'copy-source-header'
COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header'
COPY_SOURCE_RANGE_HEADER_KEY = 'copy-source-range-header'
DELETE_MARKER_HEADER_KEY = 'delete-marker-header'
DATE_HEADER_KEY = 'date-header'
METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header'
RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header'
SECURITY_TOKEN_HEADER_KEY = 'security-token-header'
STORAGE_CLASS_HEADER_KEY = 'storage-class'
MFA_HEADER_KEY = 'mfa-header'
SERVER_SIDE_ENCRYPTION_KEY = 'server-side-encryption-header'
VERSION_ID_HEADER_KEY = 'version-id-header'
RESTORE_HEADER_KEY = 'restore-header'

# Logical storage-error identifiers, resolved to provider exception
# classes through ErrorMap.
STORAGE_COPY_ERROR = 'StorageCopyError'
STORAGE_CREATE_ERROR = 'StorageCreateError'
STORAGE_DATA_ERROR = 'StorageDataError'
STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError'
STORAGE_RESPONSE_ERROR = 'StorageResponseError'

# Sentinel distinguishing "no credentials passed" from an explicit None.
NO_CREDENTIALS_PROVIDED = object()
class ProfileNotFoundError(ValueError):
    """Raised when a named credential profile cannot be found in the
    shared credentials file or the boto config."""
    pass
class Provider(object):
    """Encapsulates provider-specific differences between storage services
    (AWS S3 vs. Google Cloud Storage): credential discovery, HTTP header
    names, canned ACLs and the exception classes raised for errors."""

    # (access key, secret key, security token, profile) option/env names
    # for each provider; None means the concept does not apply.
    CredentialMap = {
        'aws': ('aws_access_key_id', 'aws_secret_access_key',
                'aws_security_token', 'aws_profile'),
        'google': ('gs_access_key_id', 'gs_secret_access_key',
                   None, None),
    }

    # ACL policy class used when parsing access-control documents.
    AclClassMap = {
        'aws': Policy,
        'google': ACL
    }

    # Valid canned ACL strings accepted by each provider.
    CannedAclsMap = {
        'aws': CannedS3ACLStrings,
        'google': CannedGSACLStrings
    }

    # Short scheme prefix used for host/port/host-header config options.
    HostKeyMap = {
        'aws': 's3',
        'google': 'gs'
    }

    # Whether the provider supports chunked transfer encoding on upload.
    ChunkedTransferSupport = {
        'aws': False,
        'google': True
    }

    # Whether credentials may be fetched from an instance metadata service.
    MetadataServiceSupport = {
        'aws': True,
        'google': False
    }

    # If you update this map please make sure to put "None" for the
    # right-hand-side for any headers that don't apply to a provider, rather
    # than simply leaving that header out (which would cause KeyErrors).
    HeaderInfoMap = {
        'aws': {
            HEADER_PREFIX_KEY: AWS_HEADER_PREFIX,
            METADATA_PREFIX_KEY: AWS_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: AWS_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'AWS',
            COPY_SOURCE_HEADER_KEY: AWS_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX +
            'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: AWS_HEADER_PREFIX +
            'copy-source-range',
            DATE_HEADER_KEY: AWS_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: AWS_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY: AWS_HEADER_PREFIX +
            'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: None,
            SECURITY_TOKEN_HEADER_KEY: AWS_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: AWS_HEADER_PREFIX +
            'server-side-encryption',
            VERSION_ID_HEADER_KEY: AWS_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: AWS_HEADER_PREFIX + 'storage-class',
            MFA_HEADER_KEY: AWS_HEADER_PREFIX + 'mfa',
            RESTORE_HEADER_KEY: AWS_HEADER_PREFIX + 'restore',
        },
        'google': {
            HEADER_PREFIX_KEY: GOOG_HEADER_PREFIX,
            METADATA_PREFIX_KEY: GOOG_HEADER_PREFIX + 'meta-',
            ACL_HEADER_KEY: GOOG_HEADER_PREFIX + 'acl',
            AUTH_HEADER_KEY: 'GOOG1',
            COPY_SOURCE_HEADER_KEY: GOOG_HEADER_PREFIX + 'copy-source',
            COPY_SOURCE_VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX +
            'copy-source-version-id',
            COPY_SOURCE_RANGE_HEADER_KEY: None,
            DATE_HEADER_KEY: GOOG_HEADER_PREFIX + 'date',
            DELETE_MARKER_HEADER_KEY: GOOG_HEADER_PREFIX + 'delete-marker',
            METADATA_DIRECTIVE_HEADER_KEY: GOOG_HEADER_PREFIX +
            'metadata-directive',
            RESUMABLE_UPLOAD_HEADER_KEY: GOOG_HEADER_PREFIX + 'resumable',
            SECURITY_TOKEN_HEADER_KEY: GOOG_HEADER_PREFIX + 'security-token',
            SERVER_SIDE_ENCRYPTION_KEY: None,
            # Note that this version header is not to be confused with
            # the Google Cloud Storage 'x-goog-api-version' header.
            VERSION_ID_HEADER_KEY: GOOG_HEADER_PREFIX + 'version-id',
            STORAGE_CLASS_HEADER_KEY: None,
            MFA_HEADER_KEY: None,
            RESTORE_HEADER_KEY: None,
        }
    }

    # Provider-specific exception classes for each logical storage error.
    ErrorMap = {
        'aws': {
            STORAGE_COPY_ERROR: boto.exception.S3CopyError,
            STORAGE_CREATE_ERROR: boto.exception.S3CreateError,
            STORAGE_DATA_ERROR: boto.exception.S3DataError,
            STORAGE_PERMISSIONS_ERROR: boto.exception.S3PermissionsError,
            STORAGE_RESPONSE_ERROR: boto.exception.S3ResponseError,
        },
        'google': {
            STORAGE_COPY_ERROR: boto.exception.GSCopyError,
            STORAGE_CREATE_ERROR: boto.exception.GSCreateError,
            STORAGE_DATA_ERROR: boto.exception.GSDataError,
            STORAGE_PERMISSIONS_ERROR: boto.exception.GSPermissionsError,
            STORAGE_RESPONSE_ERROR: boto.exception.GSResponseError,
        }
    }

    def __init__(self, name, access_key=None, secret_key=None,
                 security_token=None, profile_name=None):
        """Create a provider and resolve its credentials.

        :param name: provider key, 'aws' or 'google'.
        :param access_key: explicit access key (overrides env/config).
        :param secret_key: explicit secret key (overrides env/config).
        :param security_token: explicit STS security token, if any.
        :param profile_name: named profile to read from the shared
            credentials file or boto config.
        """
        self.host = None
        self.port = None
        self.host_header = None
        self.access_key = access_key
        self.secret_key = secret_key
        self.security_token = security_token
        self.profile_name = profile_name
        self.name = name
        self.acl_class = self.AclClassMap[self.name]
        self.canned_acls = self.CannedAclsMap[self.name]
        # Expiry is only set when credentials come from the metadata
        # service; None means the credentials never auto-refresh.
        self._credential_expiry_time = None

        # Load shared credentials file if it exists
        shared_path = os.path.join(expanduser('~'), '.' + name, 'credentials')
        self.shared_credentials = Config(do_load=False)
        if os.path.isfile(shared_path):
            self.shared_credentials.load_from_path(shared_path)

        self.get_credentials(access_key, secret_key, security_token, profile_name)
        self.configure_headers()
        self.configure_errors()

        # Allow config file to override default host and port.
        host_opt_name = '%s_host' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_opt_name):
            self.host = config.get('Credentials', host_opt_name)
        port_opt_name = '%s_port' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', port_opt_name):
            self.port = config.getint('Credentials', port_opt_name)
        host_header_opt_name = '%s_host_header' % self.HostKeyMap[self.name]
        if config.has_option('Credentials', host_header_opt_name):
            self.host_header = config.get('Credentials', host_header_opt_name)

    def get_access_key(self):
        """Return the access key, refreshing from the metadata server if
        the current credentials are close to expiry."""
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._access_key

    def set_access_key(self, value):
        self._access_key = value

    access_key = property(get_access_key, set_access_key)

    def get_secret_key(self):
        """Return the secret key, refreshing from the metadata server if
        the current credentials are close to expiry."""
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._secret_key

    def set_secret_key(self, value):
        self._secret_key = value

    secret_key = property(get_secret_key, set_secret_key)

    def get_security_token(self):
        """Return the security token, refreshing from the metadata server
        if the current credentials are close to expiry."""
        if self._credentials_need_refresh():
            self._populate_keys_from_metadata_server()
        return self._security_token

    def set_security_token(self, value):
        self._security_token = value

    security_token = property(get_security_token, set_security_token)

    def _credentials_need_refresh(self):
        """Return True when metadata-sourced credentials expire within
        five minutes; always False for static credentials."""
        if self._credential_expiry_time is None:
            return False
        else:
            # The credentials should be refreshed if they're going to expire
            # in less than 5 minutes.
            delta = self._credential_expiry_time - datetime.utcnow()
            # python2.6 does not have timedelta.total_seconds() so we have
            # to calculate this ourselves. This is straight from the
            # datetime docs.
            seconds_left = (
                (delta.microseconds + (delta.seconds + delta.days * 24 * 3600)
                 * 10 ** 6) / 10 ** 6)
            if seconds_left < (5 * 60):
                boto.log.debug("Credentials need to be refreshed.")
                return True
            else:
                return False

    def get_credentials(self, access_key=None, secret_key=None,
                        security_token=None, profile_name=None):
        """Resolve credentials, in precedence order: explicit arguments,
        environment variables, the named profile, the shared credentials
        file, the boto config (including keyring), then — for providers
        that support it — the instance metadata service."""
        access_key_name, secret_key_name, security_token_name, \
            profile_name_name = self.CredentialMap[self.name]

        # Load profile from shared environment variable if it was not
        # already passed in and the environment variable exists
        if profile_name is None and profile_name_name is not None and \
                profile_name_name.upper() in os.environ:
            profile_name = os.environ[profile_name_name.upper()]

        shared = self.shared_credentials

        if access_key is not None:
            self.access_key = access_key
            boto.log.debug("Using access key provided by client.")
        elif access_key_name.upper() in os.environ:
            self.access_key = os.environ[access_key_name.upper()]
            boto.log.debug("Using access key found in environment variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, access_key_name):
                self.access_key = shared.get(profile_name, access_key_name)
                boto.log.debug("Using access key found in shared credential "
                               "file for profile %s." % profile_name)
            elif config.has_option("profile %s" % profile_name,
                                   access_key_name):
                self.access_key = config.get("profile %s" % profile_name,
                                             access_key_name)
                boto.log.debug("Using access key found in config file: "
                               "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', access_key_name):
            self.access_key = shared.get('default', access_key_name)
            boto.log.debug("Using access key found in shared credential file.")
        elif config.has_option('Credentials', access_key_name):
            self.access_key = config.get('Credentials', access_key_name)
            boto.log.debug("Using access key found in config file.")

        if secret_key is not None:
            self.secret_key = secret_key
            boto.log.debug("Using secret key provided by client.")
        elif secret_key_name.upper() in os.environ:
            self.secret_key = os.environ[secret_key_name.upper()]
            boto.log.debug("Using secret key found in environment variable.")
        elif profile_name is not None:
            if shared.has_option(profile_name, secret_key_name):
                self.secret_key = shared.get(profile_name, secret_key_name)
                boto.log.debug("Using secret key found in shared credential "
                               "file for profile %s." % profile_name)
            elif config.has_option("profile %s" % profile_name, secret_key_name):
                self.secret_key = config.get("profile %s" % profile_name,
                                             secret_key_name)
                boto.log.debug("Using secret key found in config file: "
                               "profile %s." % profile_name)
            else:
                raise ProfileNotFoundError('Profile "%s" not found!' %
                                           profile_name)
        elif shared.has_option('default', secret_key_name):
            self.secret_key = shared.get('default', secret_key_name)
            boto.log.debug("Using secret key found in shared credential file.")
        elif config.has_option('Credentials', secret_key_name):
            self.secret_key = config.get('Credentials', secret_key_name)
            boto.log.debug("Using secret key found in config file.")
        elif config.has_option('Credentials', 'keyring'):
            keyring_name = config.get('Credentials', 'keyring')
            try:
                import keyring
            except ImportError:
                boto.log.error("The keyring module could not be imported. "
                               "For keyring support, install the keyring "
                               "module.")
                raise
            self.secret_key = keyring.get_password(
                keyring_name, self.access_key)
            boto.log.debug("Using secret key found in keyring.")

        if security_token is not None:
            self.security_token = security_token
            boto.log.debug("Using security token provided by client.")
        elif ((security_token_name is not None) and
              (access_key is None) and (secret_key is None)):
            # Only provide a token from the environment/config if the
            # caller did not specify a key and secret. Otherwise an
            # environment/config token could be paired with a
            # different set of credentials provided by the caller
            if security_token_name.upper() in os.environ:
                self.security_token = os.environ[security_token_name.upper()]
                boto.log.debug("Using security token found in environment"
                               " variable.")
            elif shared.has_option(profile_name or 'default',
                                   security_token_name):
                self.security_token = shared.get(profile_name or 'default',
                                                 security_token_name)
                boto.log.debug("Using security token found in shared "
                               "credential file.")
            elif profile_name is not None:
                if config.has_option("profile %s" % profile_name,
                                     security_token_name):
                    boto.log.debug("config has option")
                    self.security_token = config.get("profile %s" % profile_name,
                                                     security_token_name)
                    boto.log.debug("Using security token found in config file: "
                                   "profile %s." % profile_name)
            elif config.has_option('Credentials', security_token_name):
                self.security_token = config.get('Credentials',
                                                 security_token_name)
                boto.log.debug("Using security token found in config file.")

        # Fall back to the instance metadata service (AWS only) when no
        # complete key pair was resolved above.
        if ((self._access_key is None or self._secret_key is None) and
                self.MetadataServiceSupport[self.name]):
            self._populate_keys_from_metadata_server()
        self._secret_key = self._convert_key_to_str(self._secret_key)

    def _populate_keys_from_metadata_server(self):
        """Fetch temporary role credentials (key, secret, token, expiry)
        from the EC2 instance metadata service."""
        # get_instance_metadata is imported here because of a circular
        # dependency.
        boto.log.debug("Retrieving credentials from metadata server.")
        from boto.utils import get_instance_metadata
        timeout = config.getfloat('Boto', 'metadata_service_timeout', 1.0)
        attempts = config.getint('Boto', 'metadata_service_num_attempts', 1)
        # The num_retries arg is actually the total number of attempts made,
        # so the config options is named *_num_attempts to make this more
        # clear to users.
        metadata = get_instance_metadata(
            timeout=timeout, num_retries=attempts,
            data='meta-data/iam/security-credentials/')
        if metadata:
            # I'm assuming there's only one role on the instance profile.
            security = list(metadata.values())[0]
            self._access_key = security['AccessKeyId']
            self._secret_key = self._convert_key_to_str(security['SecretAccessKey'])
            self._security_token = security['Token']
            expires_at = security['Expiration']
            self._credential_expiry_time = datetime.strptime(
                expires_at, "%Y-%m-%dT%H:%M:%SZ")
            boto.log.debug("Retrieved credentials will expire in %s at: %s",
                           self._credential_expiry_time - datetime.now(), expires_at)

    def _convert_key_to_str(self, key):
        """Coerce a unicode secret key to a byte string."""
        if isinstance(key, six.text_type):
            # the secret key must be bytes and not unicode to work
            # properly with hmac.new (see http://bugs.python.org/issue5285)
            return str(key)
        return key

    def configure_headers(self):
        """Resolve the logical header identifiers to this provider's
        concrete header names (attributes used throughout boto)."""
        header_info_map = self.HeaderInfoMap[self.name]
        self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
        self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
        self.acl_header = header_info_map[ACL_HEADER_KEY]
        self.auth_header = header_info_map[AUTH_HEADER_KEY]
        self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
        self.copy_source_version_id = header_info_map[
            COPY_SOURCE_VERSION_ID_HEADER_KEY]
        self.copy_source_range_header = header_info_map[
            COPY_SOURCE_RANGE_HEADER_KEY]
        self.date_header = header_info_map[DATE_HEADER_KEY]
        self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
        self.metadata_directive_header = (
            header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
        self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
        self.resumable_upload_header = (
            header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
        self.server_side_encryption_header = header_info_map[SERVER_SIDE_ENCRYPTION_KEY]
        self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
        self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
        self.mfa_header = header_info_map[MFA_HEADER_KEY]
        self.restore_header = header_info_map[RESTORE_HEADER_KEY]

    def configure_errors(self):
        """Resolve the logical error identifiers to this provider's
        concrete exception classes."""
        error_map = self.ErrorMap[self.name]
        self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
        self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
        self.storage_data_error = error_map[STORAGE_DATA_ERROR]
        self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
        self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]

    def get_provider_name(self):
        """Return the short scheme name for this provider ('s3' or 'gs')."""
        return self.HostKeyMap[self.name]

    def supports_chunked_transfer(self):
        """Return True if the provider supports chunked transfer encoding."""
        return self.ChunkedTransferSupport[self.name]
# Static utility method for getting default Provider.
def get_default():
    """Return a :class:`Provider` configured for AWS, the default provider.

    Note: despite the comment above, this is a module-level function, not
    a static method on Provider.
    """
    return Provider('aws')
| mit |
jtyuan/racetrack | src/mem/slicc/ast/StallAndWaitStatementAST.py | 34 | 2361 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# Copyright (c) 2010 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.StatementAST import StatementAST
class StallAndWaitStatementAST(StatementAST):
    """AST node for SLICC's ``stall_and_wait(in_port, address)`` statement.

    Emits C++ that stalls the given input port's message for the given
    address by moving it to a stall buffer.
    """

    def __init__(self, slicc, in_port, address):
        # BUG FIX: the original called super(StatementAST, self), which
        # skips StatementAST.__init__ and invokes its base class directly.
        super(StallAndWaitStatementAST, self).__init__(slicc)
        self.in_port = in_port
        self.address = address

    def __repr__(self):
        # BUG FIX: the original formatted self.variable, an attribute that
        # is never set, so repr() raised AttributeError.
        return "[StallAndWaitStatementAst: %r, %r]" % (self.in_port,
                                                       self.address)

    def generate(self, code, return_type):
        # Type-check the operands before emitting C++.
        self.in_port.assertType("InPort")
        self.address.assertType("Address")
        in_port_code = self.in_port.var.code
        address_code = self.address.var.code
        code('''
    stallBuffer(&($in_port_code), $address_code);
    $in_port_code.stallMessage($address_code);
''')
| bsd-3-clause |
saketkc/statsmodels | statsmodels/stats/tests/test_corrpsd.py | 31 | 16765 | # -*- coding: utf-8 -*-
"""Tests for finding a positive semi-definite correlation or covariance matrix
Created on Mon May 27 12:07:02 2013
Author: Josef Perktold
"""
import numpy as np
import scipy.sparse as sparse
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_equal)
from statsmodels.stats.correlation_tools import (
corr_nearest, corr_clipped, cov_nearest,
_project_correlation_factors, corr_nearest_factor, _spg_optim,
corr_thresholded, cov_nearest_factor_homog, FactoredPSDMatrix)
import warnings
def norm_f(x, y):
    """Frobenius norm (square root of the squared-element sum) of ``x - y``."""
    diff = x - y
    return np.sqrt((diff ** 2).sum())
class Holder(object):
    """Simple attribute bag used to store reference results copied from R."""
    pass
# R library Matrix results
# Reference output of R's Matrix::nearPD for the correlation case
# (keepDiag=TRUE, corr=TRUE); used as expected values in the tests below.
cov1_r = Holder()
#> nc <- nearPD(pr, conv.tol = 1e-7, keepDiag = TRUE, doDykstra =FALSE, corr=TRUE)
#> cat_items(nc, prefix="cov1_r.")
cov1_r.mat = '''<S4 object of class structure("dpoMatrix", package = "Matrix")>'''
cov1_r.eigenvalues = np.array([
    4.197315628646795, 0.7540460243978023, 0.5077608149667492,
    0.3801267599652769, 0.1607508970775889, 4.197315628646795e-08
    ])
cov1_r.corr = '''TRUE'''
cov1_r.normF = 0.0743805226512533
cov1_r.iterations = 11
cov1_r.rel_tol = 8.288594638441735e-08
cov1_r.converged = '''TRUE'''
#> mkarray2(as.matrix(nc$mat), name="cov1_r.mat")
# NOTE: this second assignment intentionally overwrites the S4 placeholder
# string above with the actual matrix values exported from R.
cov1_r.mat = np.array([
     1, 0.487968018215892, 0.642651880010906, 0.4906386709070835,
     0.6440990530811909, 0.8087111845493985, 0.487968018215892, 1,
     0.5141147294352735, 0.2506688108312097, 0.672351311297074,
     0.725832055882795, 0.642651880010906, 0.5141147294352735, 1,
     0.596827778712154, 0.5821917790519067, 0.7449631633814129,
     0.4906386709070835, 0.2506688108312097, 0.596827778712154, 1,
     0.729882058012399, 0.772150225146826, 0.6440990530811909,
     0.672351311297074, 0.5821917790519067, 0.729882058012399, 1,
     0.813191720191944, 0.8087111845493985, 0.725832055882795,
     0.7449631633814129, 0.772150225146826, 0.813191720191944, 1
    ]).reshape(6,6, order='F')
# Reference output of R's Matrix::nearPD for the covariance case
# (diagonal regularized by 0.01, corr=FALSE).
cov_r = Holder()
#nc <- nearPD(pr+0.01*diag(6), conv.tol = 1e-7, keepDiag = TRUE, doDykstra =FALSE, corr=FALSE)
#> cat_items(nc, prefix="cov_r.")
#cov_r.mat = '''<S4 object of class structure("dpoMatrix", package = "Matrix")>'''
cov_r.eigenvalues = np.array([
    4.209897516692652, 0.7668341923072066, 0.518956980021938,
    0.390838551407132, 0.1734728460460068, 4.209897516692652e-08
    ])
cov_r.corr = '''FALSE'''
cov_r.normF = 0.0623948693159157
cov_r.iterations = 11
cov_r.rel_tol = 5.83987595937896e-08
cov_r.converged = '''TRUE'''
#> mkarray2(as.matrix(nc$mat), name="cov_r.mat")
cov_r.mat = np.array([
     1.01, 0.486207476951913, 0.6428524769306785, 0.4886092840296514,
     0.645175579158233, 0.811533860074678, 0.486207476951913, 1.01,
     0.514394615153752, 0.2478398278204047, 0.673852495852274,
     0.7297661648968664, 0.6428524769306785, 0.514394615153752, 1.01,
     0.5971503271420517, 0.582018469844712, 0.7445177382760834,
     0.4886092840296514, 0.2478398278204047, 0.5971503271420517, 1.01,
     0.73161232298669, 0.7766852947049376, 0.645175579158233,
     0.673852495852274, 0.582018469844712, 0.73161232298669, 1.01,
     0.8107916469252828, 0.811533860074678, 0.7297661648968664,
     0.7445177382760834, 0.7766852947049376, 0.8107916469252828, 1.01
    ]).reshape(6,6, order='F')
def test_corr_psd():
    """A matrix that is already positive definite must pass through every
    repair method (nearest, clipped, cov_nearest) essentially unchanged."""
    x = np.array([[1, -0.2, -0.9], [-0.2, 1, -0.2], [-0.9, -0.2, 1]])

    for repaired in (corr_nearest(x, n_fact=100),
                     corr_clipped(x),
                     cov_nearest(x, n_fact=100)):
        assert_almost_equal(x, repaired, decimal=14)

    # Same check after a small diagonal regularization.
    x2 = x + 0.001 * np.eye(3)
    assert_almost_equal(x2, cov_nearest(x2, n_fact=100), decimal=14)
class CheckCorrPSDMixin(object):
    """Shared checks against R's nearPD reference output.

    Concrete subclasses provide ``self.x`` (the input matrix) and
    ``self.res`` (a Holder of R reference values).
    """

    def test_nearest(self):
        # corr_nearest should reproduce the R result closely.
        x = self.x
        res_r = self.res
        y = corr_nearest(x, threshold=1e-7, n_fact=100)
        #print np.max(np.abs(x - y))
        assert_almost_equal(y, res_r.mat, decimal=3)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.0015)
        # R reports eigenvalues in decreasing order; eigvalsh is increasing.
        evals = np.linalg.eigvalsh(y)
        #print 'evals', evals / res_r.eigenvalues[::-1] - 1
        assert_allclose(evals, res_r.eigenvalues[::-1], rtol=0.003, atol=1e-7)
        # The smallest eigenvalue should sit at the requested threshold.
        #print evals[0] / 1e-7 - 1
        assert_allclose(evals[0], 1e-7, rtol=1e-6)

    def test_clipped(self):
        # corr_clipped is a cheaper approximation, so tolerances are looser.
        x = self.x
        res_r = self.res
        y = corr_clipped(x, threshold=1e-7)
        #print np.max(np.abs(x - y)), np.max(np.abs((x - y) / y))
        assert_almost_equal(y, res_r.mat, decimal=1)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.15)
        evals = np.linalg.eigvalsh(y)
        assert_allclose(evals, res_r.eigenvalues[::-1], rtol=0.1, atol=1e-7)
        assert_allclose(evals[0], 1e-7, rtol=0.02)

    def test_cov_nearest(self):
        # cov_nearest with method='nearest' should match corr_nearest.
        x = self.x
        res_r = self.res
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            y = cov_nearest(x, method='nearest', threshold=1e-7)
        #print np.max(np.abs(x - y))
        assert_almost_equal(y, res_r.mat, decimal=2)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.0015)
class TestCovPSD(object):
    """Covariance (corr=FALSE) case, checked against R's nearPD output."""

    @classmethod
    def setup_class(cls):
        # Correlation-like matrix with a slightly negative eigenvalue,
        # regularized on the diagonal so it is treated as a covariance.
        x = np.array([ 1,     0.477, 0.644, 0.478, 0.651, 0.826,
                       0.477, 1,     0.516, 0.233, 0.682, 0.75,
                       0.644, 0.516, 1,     0.599, 0.581, 0.742,
                       0.478, 0.233, 0.599, 1,     0.741, 0.8,
                       0.651, 0.682, 0.581, 0.741, 1,     0.798,
                       0.826, 0.75,  0.742, 0.8,   0.798, 1]).reshape(6,6)
        cls.x = x + 0.01 * np.eye(6)
        cls.res = cov_r

    def test_cov_nearest(self):
        x = self.x
        res_r = self.res
        # 'nearest' should match the R reference tightly ...
        y = cov_nearest(x, method='nearest')
        #print np.max(np.abs(x - y))
        assert_almost_equal(y, res_r.mat, decimal=3)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.001)
        # ... while 'clipped' is only a rough approximation.
        y = cov_nearest(x, method='clipped')
        #print np.max(np.abs(x - y))
        assert_almost_equal(y, res_r.mat, decimal=2)
        d = norm_f(x, y)
        assert_allclose(d, res_r.normF, rtol=0.15)
class TestCorrPSD1(CheckCorrPSDMixin):
    """Correlation (keepDiag) case; the checks come from CheckCorrPSDMixin."""

    @classmethod
    def setup_class(cls):
        x = np.array([ 1,     0.477, 0.644, 0.478, 0.651, 0.826,
                       0.477, 1,     0.516, 0.233, 0.682, 0.75,
                       0.644, 0.516, 1,     0.599, 0.581, 0.742,
                       0.478, 0.233, 0.599, 1,     0.741, 0.8,
                       0.651, 0.682, 0.581, 0.741, 1,     0.798,
                       0.826, 0.75,  0.742, 0.8,   0.798, 1]).reshape(6,6)
        cls.x = x
        cls.res = cov1_r
def test_corrpsd_threshold():
    """The smallest eigenvalue of each repaired matrix must land at the
    requested `threshold` (tightly for 'nearest', loosely for 'clipped')."""
    x = np.array([[1, -0.9, -0.9], [-0.9, 1, -0.9], [-0.9, -0.9, 1]])
    #print np.linalg.eigvalsh(x)

    for threshold in [0, 1e-15, 1e-10, 1e-6]:
        repaired = corr_nearest(x, n_fact=100, threshold=threshold)
        min_eval = np.linalg.eigvalsh(repaired)[0]
        assert_allclose(min_eval, threshold, rtol=1e-6, atol=1e-15)

        repaired = corr_clipped(x, threshold=threshold)
        min_eval = np.linalg.eigvalsh(repaired)[0]
        assert_allclose(min_eval, threshold, rtol=0.25, atol=1e-15)

        repaired = cov_nearest(x, method='nearest', n_fact=100,
                               threshold=threshold)
        min_eval = np.linalg.eigvalsh(repaired)[0]
        assert_allclose(min_eval, threshold, rtol=1e-6, atol=1e-15)

        repaired = cov_nearest(x, n_fact=100, threshold=threshold)
        min_eval = np.linalg.eigvalsh(repaired)[0]
        assert_allclose(min_eval, threshold, rtol=0.25, atol=1e-15)
class Test_Factor(object):
    """Tests for the low-rank-factor correlation/covariance tools
    (corr_nearest_factor, cov_nearest_factor_homog, _spg_optim,
    corr_thresholded, FactoredPSDMatrix).

    Several tests are seeded numeric regressions; expected values are
    hard-coded, so statement order and seeds must not change.
    """

    def test_corr_nearest_factor_arrpack(self):
        # regression results for svds call
        u2 = np.array([[
            6.39407581e-19, 9.15225947e-03, 1.82631698e-02,
            2.72917181e-02, 3.61975557e-02, 4.49413101e-02,
            5.34848732e-02, 6.17916613e-02, 6.98268388e-02,
            7.75575058e-02, 8.49528448e-02, 9.19842264e-02,
            9.86252769e-02, 1.04851906e-01, 1.10642305e-01,
            1.15976906e-01, 1.20838331e-01, 1.25211306e-01,
            1.29082570e-01, 1.32440778e-01, 1.35276397e-01,
            1.37581605e-01, 1.39350201e-01, 1.40577526e-01,
            1.41260396e-01, 1.41397057e-01, 1.40987160e-01,
            1.40031756e-01, 1.38533306e-01, 1.36495727e-01,
            1.33924439e-01, 1.30826443e-01, 1.27210404e-01,
            1.23086750e-01, 1.18467769e-01, 1.13367717e-01,
            1.07802909e-01, 1.01791811e-01, 9.53551023e-02,
            8.85157320e-02, 8.12989329e-02, 7.37322125e-02,
            6.58453049e-02, 5.76700847e-02, 4.92404406e-02,
            4.05921079e-02, 3.17624629e-02, 2.27902803e-02,
            1.37154584e-02, 4.57871801e-03, -4.57871801e-03,
            -1.37154584e-02, -2.27902803e-02, -3.17624629e-02,
            -4.05921079e-02, -4.92404406e-02, -5.76700847e-02,
            -6.58453049e-02, -7.37322125e-02, -8.12989329e-02,
            -8.85157320e-02, -9.53551023e-02, -1.01791811e-01,
            -1.07802909e-01, -1.13367717e-01, -1.18467769e-01,
            -1.23086750e-01, -1.27210404e-01, -1.30826443e-01,
            -1.33924439e-01, -1.36495727e-01, -1.38533306e-01,
            -1.40031756e-01, -1.40987160e-01, -1.41397057e-01,
            -1.41260396e-01, -1.40577526e-01, -1.39350201e-01,
            -1.37581605e-01, -1.35276397e-01, -1.32440778e-01,
            -1.29082570e-01, -1.25211306e-01, -1.20838331e-01,
            -1.15976906e-01, -1.10642305e-01, -1.04851906e-01,
            -9.86252769e-02, -9.19842264e-02, -8.49528448e-02,
            -7.75575058e-02, -6.98268388e-02, -6.17916613e-02,
            -5.34848732e-02, -4.49413101e-02, -3.61975557e-02,
            -2.72917181e-02, -1.82631698e-02, -9.15225947e-03,
            -3.51829569e-17]]).T
        s2 = np.array([ 24.88812183])

        d = 100
        dm = 1

        # Construct a test matrix with exact factor structure
        X = np.zeros((d,dm), dtype=np.float64)
        x = np.linspace(0, 2*np.pi, d)
        for j in range(dm):
            X[:,j] = np.sin(x*(j+1))
        _project_correlation_factors(X)
        X *= 0.7
        mat = np.dot(X, X.T)
        np.fill_diagonal(mat, 1.)

        from scipy.sparse.linalg import svds
        u, s, vt = svds(mat, dm)

        # The singular vector is only defined up to sign.
        #difference in sign
        dsign = np.sign(u[1]) * np.sign(u2[1])

        assert_allclose(u, dsign * u2, rtol=1e-6, atol=1e-14)
        assert_allclose(s, s2, rtol=1e-6)

    def test_corr_nearest_factor(self):
        # Regression values for the first objective values per rank.
        objvals = [np.array([6241.8, 6241.8, 579.4, 264.6, 264.3]),
                   np.array([2104.9, 2104.9, 710.5, 266.3, 286.1])]

        d = 100
        for dm in 1,2:

            # Construct a test matrix with exact factor structure
            X = np.zeros((d,dm), dtype=np.float64)
            x = np.linspace(0, 2*np.pi, d)
            np.random.seed(10)
            for j in range(dm):
                X[:,j] = np.sin(x*(j+1)) + 1e-10 * np.random.randn(d)

            _project_correlation_factors(X)
            assert np.isfinite(X).all()
            X *= 0.7
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, 1.)

            # Try to recover the structure
            rslt = corr_nearest_factor(mat, dm)
            err_msg = 'rank=%d, niter=%d' % (dm, len(rslt.objective_values))
            assert_allclose(rslt.objective_values[:5], objvals[dm - 1],
                            rtol=0.5, err_msg=err_msg)
            assert_equal(rslt.Converged, True, err_msg=err_msg)
            mat1 = rslt.corr.to_matrix()
            assert_allclose(mat, mat1, rtol=0.25, atol=1e-3, err_msg=err_msg)

    # Test that we get the same result if the input is dense or sparse
    def test_corr_nearest_factor_sparse(self):
        d = 100
        for dm in 1,2:

            # Generate a test matrix of factors
            X = np.zeros((d,dm), dtype=np.float64)
            x = np.linspace(0, 2*np.pi, d)
            np.random.seed(10)
            for j in range(dm):
                X[:,j] = np.sin(x*(j+1)) + 1e-10 * np.random.randn(d)

            # Get the correlation matrix
            _project_correlation_factors(X)
            X *= 0.7
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, 1)

            # Threshold it
            mat *= (np.abs(mat) >= 0.4)
            smat = sparse.csr_matrix(mat)

            # Fit the sparse input twice; dense/sparse agreement is
            # checked via the reconstructed matrices.
            rslt = corr_nearest_factor(smat, dm)
            assert_equal(rslt.Converged, True)
            mat_dense = rslt.corr.to_matrix()

            rslt = corr_nearest_factor(smat, dm)
            assert_equal(rslt.Converged, True)
            mat_sparse = rslt.corr.to_matrix()

            assert_allclose(mat_dense, mat_sparse, rtol=0.25,
                            atol=1e-3)

    # Test on a quadratic function.
    def test_spg_optim(self):

        dm = 100
        ind = np.arange(dm)
        indmat = np.abs(ind[:,None] - ind[None,:])
        # Positive definite Toeplitz matrix, so the minimum of x'Mx is 0.
        M = 0.8**indmat

        def obj(x):
            return np.dot(x, np.dot(M, x))

        def grad(x):
            return 2*np.dot(M, x)

        def project(x):
            return x

        x = np.random.normal(size=dm)
        rslt = _spg_optim(obj, grad, x, project)
        xnew = rslt.params
        assert_equal(rslt.Converged, True)
        assert_almost_equal(obj(xnew), 0, decimal=3)

    def test_decorrelate(self):

        d = 30
        dg = np.linspace(1, 2, d)
        root = np.random.normal(size=(d, 4))
        fac = FactoredPSDMatrix(dg, root)
        mat = fac.to_matrix()
        rmat = np.linalg.cholesky(mat)
        # Decorrelating the Cholesky root must give an orthogonal matrix.
        dcr = fac.decorrelate(rmat)
        idm = np.dot(dcr, dcr.T)
        assert_almost_equal(idm, np.eye(d))

        # decorrelate(rhs)'decorrelate(rhs) must equal rhs' mat^-1 rhs.
        rhs = np.random.normal(size=(d, 5))
        mat2 = np.dot(rhs.T, np.linalg.solve(mat, rhs))
        mat3 = fac.decorrelate(rhs)
        mat3 = np.dot(mat3.T, mat3)
        assert_almost_equal(mat2, mat3)

    def test_logdet(self):

        d = 30
        dg = np.linspace(1, 2, d)
        root = np.random.normal(size=(d, 4))
        fac = FactoredPSDMatrix(dg, root)
        mat = fac.to_matrix()

        # Compare the factored log-determinant against numpy's slogdet.
        _, ld = np.linalg.slogdet(mat)
        ld2 = fac.logdet()

        assert_almost_equal(ld, ld2)

    def test_solve(self):

        d = 30
        dg = np.linspace(1, 2, d)
        root = np.random.normal(size=(d, 2))
        fac = FactoredPSDMatrix(dg, root)
        rhs = np.random.normal(size=(d, 5))
        # Compare the factored solver against a direct dense solve.
        sr1 = fac.solve(rhs)
        mat = fac.to_matrix()
        sr2 = np.linalg.solve(mat, rhs)
        assert_almost_equal(sr1, sr2)

    def test_cov_nearest_factor_homog(self):

        d = 100

        for dm in 1,2:

            # Construct a test matrix with exact factor structure
            X = np.zeros((d,dm), dtype=np.float64)
            x = np.linspace(0, 2*np.pi, d)
            for j in range(dm):
                X[:,j] = np.sin(x*(j+1))
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, np.diag(mat) + 3.1)

            # Try to recover the structure
            rslt = cov_nearest_factor_homog(mat, dm)
            mat1 = rslt.to_matrix()
            assert_allclose(mat, mat1, rtol=0.25, atol=1e-3)

    # Check that dense and sparse inputs give the same result
    def test_cov_nearest_factor_homog_sparse(self):

        d = 100

        for dm in 1,2:

            # Construct a test matrix with exact factor structure
            X = np.zeros((d,dm), dtype=np.float64)
            x = np.linspace(0, 2*np.pi, d)
            for j in range(dm):
                X[:,j] = np.sin(x*(j+1))
            mat = np.dot(X, X.T)
            np.fill_diagonal(mat, np.diag(mat) + 3.1)

            # Fit to dense
            rslt = cov_nearest_factor_homog(mat, dm)
            mat1 = rslt.to_matrix()

            # Fit to sparse
            smat = sparse.csr_matrix(mat)
            rslt = cov_nearest_factor_homog(smat, dm)
            mat2 = rslt.to_matrix()

            assert_allclose(mat1, mat2, rtol=0.25, atol=1e-3)

    def test_corr_thresholded(self):

        import datetime
        t1 = datetime.datetime.now()
        X = np.random.normal(size=(2000,10))
        tcor = corr_thresholded(X, 0.2, max_elt=4e6)
        t2 = datetime.datetime.now()
        ss = (t2-t1).seconds
        # Compare against a directly thresholded dense correlation matrix.
        fcor = np.corrcoef(X)
        fcor *= (np.abs(fcor) >= 0.2)
        assert_allclose(tcor.todense(), fcor, rtol=0.25, atol=1e-3)
| bsd-3-clause |
albertjan/pypyjs-presentation | assets/js/pypy.js-0.3.1/lib/modules/test/test_ntpath.py | 8 | 14828 | # coding: utf-8
import ntpath
import os
import sys
from test.test_support import TestFailed
from test import test_support, test_genericpath
import unittest
def tester0(fn, wantResult):
gotResult = eval(fn)
if wantResult != gotResult:
raise TestFailed, "%s should return: %r but returned: %r" \
%(fn, wantResult, gotResult)
def tester(fn, wantResult):
    """Double each backslash in *fn* (so it survives the eval in tester0)
    and then delegate the comparison to tester0."""
    escaped = fn.replace("\\", "\\\\")
    tester0(escaped, wantResult)
class TestNtpath(unittest.TestCase):
    """Behavioral tests for the ntpath (Windows path) module.

    Most checks go through tester()/tester0(), which eval a source-string
    expression and compare against the expected value. This is Python 2
    code (u'' literals, test.test_support).
    """

    def test_splitext(self):
        # Extension splitting, including UNC-style backslash paths.
        tester('ntpath.splitext("foo.ext")', ('foo', '.ext'))
        tester('ntpath.splitext("/foo/foo.ext")', ('/foo/foo', '.ext'))
        tester('ntpath.splitext(".ext")', ('.ext', ''))
        tester('ntpath.splitext("\\foo.ext\\foo")', ('\\foo.ext\\foo', ''))
        tester('ntpath.splitext("foo.ext\\")', ('foo.ext\\', ''))
        tester('ntpath.splitext("")', ('', ''))
        tester('ntpath.splitext("foo.bar.ext")', ('foo.bar', '.ext'))
        tester('ntpath.splitext("xx/foo.bar.ext")', ('xx/foo.bar', '.ext'))
        tester('ntpath.splitext("xx\\foo.bar.ext")', ('xx\\foo.bar', '.ext'))
        tester('ntpath.splitext("c:a/b\\c.d")', ('c:a/b\\c', '.d'))

    def test_splitdrive(self):
        # Drive letters and UNC shares; malformed UNC roots yield no drive.
        tester('ntpath.splitdrive("c:\\foo\\bar")',
               ('c:', '\\foo\\bar'))
        tester('ntpath.splitdrive("c:/foo/bar")',
               ('c:', '/foo/bar'))
        tester('ntpath.splitdrive("\\\\conky\\mountpoint\\foo\\bar")',
               ('\\\\conky\\mountpoint', '\\foo\\bar'))
        tester('ntpath.splitdrive("//conky/mountpoint/foo/bar")',
               ('//conky/mountpoint', '/foo/bar'))
        tester('ntpath.splitdrive("\\\\\\conky\\mountpoint\\foo\\bar")',
               ('', '\\\\\\conky\\mountpoint\\foo\\bar'))
        tester('ntpath.splitdrive("///conky/mountpoint/foo/bar")',
               ('', '///conky/mountpoint/foo/bar'))
        tester('ntpath.splitdrive("\\\\conky\\\\mountpoint\\foo\\bar")',
               ('', '\\\\conky\\\\mountpoint\\foo\\bar'))
        tester('ntpath.splitdrive("//conky//mountpoint/foo/bar")',
               ('', '//conky//mountpoint/foo/bar'))
        # Issue #19911: UNC part containing U+0130
        self.assertEqual(ntpath.splitdrive(u'//conky/MOUNTPOİNT/foo/bar'),
                         (u'//conky/MOUNTPOİNT', '/foo/bar'))

    def test_splitunc(self):
        # splitunc() treats drive-letter paths as having no UNC part.
        tester('ntpath.splitunc("c:\\foo\\bar")',
               ('', 'c:\\foo\\bar'))
        tester('ntpath.splitunc("c:/foo/bar")',
               ('', 'c:/foo/bar'))
        tester('ntpath.splitunc("\\\\conky\\mountpoint\\foo\\bar")',
               ('\\\\conky\\mountpoint', '\\foo\\bar'))
        tester('ntpath.splitunc("//conky/mountpoint/foo/bar")',
               ('//conky/mountpoint', '/foo/bar'))
        tester('ntpath.splitunc("\\\\\\conky\\mountpoint\\foo\\bar")',
               ('', '\\\\\\conky\\mountpoint\\foo\\bar'))
        tester('ntpath.splitunc("///conky/mountpoint/foo/bar")',
               ('', '///conky/mountpoint/foo/bar'))
        tester('ntpath.splitunc("\\\\conky\\\\mountpoint\\foo\\bar")',
               ('', '\\\\conky\\\\mountpoint\\foo\\bar'))
        tester('ntpath.splitunc("//conky//mountpoint/foo/bar")',
               ('', '//conky//mountpoint/foo/bar'))
        self.assertEqual(ntpath.splitunc(u'//conky/MOUNTPO\u0130NT/foo/bar'),
                         (u'//conky/MOUNTPO\u0130NT', u'/foo/bar'))

    def test_split(self):
        tester('ntpath.split("c:\\foo\\bar")', ('c:\\foo', 'bar'))
        tester('ntpath.split("\\\\conky\\mountpoint\\foo\\bar")',
               ('\\\\conky\\mountpoint\\foo', 'bar'))

        # Splitting a root keeps the trailing separator on the head.
        tester('ntpath.split("c:\\")', ('c:\\', ''))
        tester('ntpath.split("\\\\conky\\mountpoint\\")',
               ('\\\\conky\\mountpoint\\', ''))

        tester('ntpath.split("c:/")', ('c:/', ''))
        tester('ntpath.split("//conky/mountpoint/")', ('//conky/mountpoint/', ''))

    def test_isabs(self):
        tester('ntpath.isabs("c:\\")', 1)
        tester('ntpath.isabs("\\\\conky\\mountpoint\\")', 1)
        tester('ntpath.isabs("\\foo")', 1)
        tester('ntpath.isabs("\\foo\\bar")', 1)

    def test_commonprefix(self):
        # commonprefix is character-wise, not component-wise.
        tester('ntpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"])',
               "/home/swen")
        tester('ntpath.commonprefix(["\\home\\swen\\spam", "\\home\\swen\\eggs"])',
               "\\home\\swen\\")
        tester('ntpath.commonprefix(["/home/swen/spam", "/home/swen/spam"])',
               "/home/swen/spam")

    def test_join(self):
        tester('ntpath.join("")', '')
        tester('ntpath.join("", "", "")', '')
        tester('ntpath.join("a")', 'a')
        tester('ntpath.join("/a")', '/a')
        tester('ntpath.join("\\a")', '\\a')
        tester('ntpath.join("a:")', 'a:')
        tester('ntpath.join("a:", "\\b")', 'a:\\b')
        # A rooted second component discards the first.
        tester('ntpath.join("a", "\\b")', '\\b')
        tester('ntpath.join("a", "b", "c")', 'a\\b\\c')
        tester('ntpath.join("a\\", "b", "c")', 'a\\b\\c')
        tester('ntpath.join("a", "b\\", "c")', 'a\\b\\c')
        tester('ntpath.join("a", "b", "\\c")', '\\c')
        tester('ntpath.join("d:\\", "\\pleep")', 'd:\\pleep')
        tester('ntpath.join("d:\\", "a", "b")', 'd:\\a\\b')
        tester("ntpath.join('', 'a')", 'a')
        tester("ntpath.join('', '', '', '', 'a')", 'a')
        tester("ntpath.join('a', '')", 'a\\')
        tester("ntpath.join('a', '', '', '', '')", 'a\\')
        tester("ntpath.join('a\\', '')", 'a\\')
        tester("ntpath.join('a\\', '', '', '', '')", 'a\\')
        tester("ntpath.join('a/', '')", 'a/')

        tester("ntpath.join('a/b', 'x/y')", 'a/b\\x/y')
        tester("ntpath.join('/a/b', 'x/y')", '/a/b\\x/y')
        tester("ntpath.join('/a/b/', 'x/y')", '/a/b/x/y')
        tester("ntpath.join('c:', 'x/y')", 'c:x/y')
        tester("ntpath.join('c:a/b', 'x/y')", 'c:a/b\\x/y')
        tester("ntpath.join('c:a/b/', 'x/y')", 'c:a/b/x/y')
        tester("ntpath.join('c:/', 'x/y')", 'c:/x/y')
        tester("ntpath.join('c:/a/b', 'x/y')", 'c:/a/b\\x/y')
        tester("ntpath.join('c:/a/b/', 'x/y')", 'c:/a/b/x/y')
        tester("ntpath.join('//computer/share', 'x/y')", '//computer/share\\x/y')
        tester("ntpath.join('//computer/share/', 'x/y')", '//computer/share/x/y')
        tester("ntpath.join('//computer/share/a/b', 'x/y')", '//computer/share/a/b\\x/y')

        # A rooted (but driveless) second component keeps the first drive.
        tester("ntpath.join('a/b', '/x/y')", '/x/y')
        tester("ntpath.join('/a/b', '/x/y')", '/x/y')
        tester("ntpath.join('c:', '/x/y')", 'c:/x/y')
        tester("ntpath.join('c:a/b', '/x/y')", 'c:/x/y')
        tester("ntpath.join('c:/', '/x/y')", 'c:/x/y')
        tester("ntpath.join('c:/a/b', '/x/y')", 'c:/x/y')
        tester("ntpath.join('//computer/share', '/x/y')", '//computer/share/x/y')
        tester("ntpath.join('//computer/share/', '/x/y')", '//computer/share/x/y')
        tester("ntpath.join('//computer/share/a', '/x/y')", '//computer/share/x/y')

        # Same drive letter (case-insensitively): paths are merged.
        tester("ntpath.join('c:', 'C:x/y')", 'C:x/y')
        tester("ntpath.join('c:a/b', 'C:x/y')", 'C:a/b\\x/y')
        tester("ntpath.join('c:/', 'C:x/y')", 'C:/x/y')
        tester("ntpath.join('c:/a/b', 'C:x/y')", 'C:/a/b\\x/y')

        # A different drive letter always replaces the first path.
        for x in ('', 'a/b', '/a/b', 'c:', 'c:a/b', 'c:/', 'c:/a/b'):
            for y in ('d:', 'd:x/y', 'd:/', 'd:/x/y'):
                tester("ntpath.join(%r, %r)" % (x, y), y)

    def test_normpath(self):
        tester("ntpath.normpath('A//////././//.//B')", r'A\B')
        tester("ntpath.normpath('A/./B')", r'A\B')
        tester("ntpath.normpath('A/foo/../B')", r'A\B')
        tester("ntpath.normpath('C:A//B')", r'C:A\B')
        tester("ntpath.normpath('D:A/./B')", r'D:A\B')
        tester("ntpath.normpath('e:A/foo/../B')", r'e:A\B')

        tester("ntpath.normpath('C:///A//B')", r'C:\A\B')
        tester("ntpath.normpath('D:///A/./B')", r'D:\A\B')
        tester("ntpath.normpath('e:///A/foo/../B')", r'e:\A\B')

        tester("ntpath.normpath('..')", r'..')
        tester("ntpath.normpath('.')", r'.')
        tester("ntpath.normpath('')", r'.')
        tester("ntpath.normpath('/')", '\\')
        tester("ntpath.normpath('c:/')", 'c:\\')
        # '..' cannot climb above the root.
        tester("ntpath.normpath('/../.././..')", '\\')
        tester("ntpath.normpath('c:/../../..')", 'c:\\')
        tester("ntpath.normpath('../.././..')", r'..\..\..')
        tester("ntpath.normpath('K:../.././..')", r'K:..\..\..')
        tester("ntpath.normpath('C:////a/b')", r'C:\a\b')
        tester("ntpath.normpath('//machine/share//a/b')", r'\\machine\share\a\b')

        # \\.\ and \\?\ prefixes are left untouched.
        tester("ntpath.normpath('\\\\.\\NUL')", r'\\.\NUL')
        tester("ntpath.normpath('\\\\?\\D:/XY\\Z')", r'\\?\D:/XY\Z')

    def test_expandvars(self):
        # Both $var / ${var} (POSIX style) and %var% (Windows style)
        # are expanded; malformed references pass through unchanged.
        with test_support.EnvironmentVarGuard() as env:
            env.clear()
            env["foo"] = "bar"
            env["{foo"] = "baz1"
            env["{foo}"] = "baz2"
            tester('ntpath.expandvars("foo")', "foo")
            tester('ntpath.expandvars("$foo bar")', "bar bar")
            tester('ntpath.expandvars("${foo}bar")', "barbar")
            tester('ntpath.expandvars("$[foo]bar")', "$[foo]bar")
            tester('ntpath.expandvars("$bar bar")', "$bar bar")
            tester('ntpath.expandvars("$?bar")', "$?bar")
            tester('ntpath.expandvars("$foo}bar")', "bar}bar")
            tester('ntpath.expandvars("${foo")', "${foo")
            tester('ntpath.expandvars("${{foo}}")', "baz1}")
            tester('ntpath.expandvars("$foo$foo")', "barbar")
            tester('ntpath.expandvars("$bar$bar")', "$bar$bar")
            tester('ntpath.expandvars("%foo% bar")', "bar bar")
            tester('ntpath.expandvars("%foo%bar")', "barbar")
            tester('ntpath.expandvars("%foo%%foo%")', "barbar")
            tester('ntpath.expandvars("%%foo%%foo%foo%")', "%foo%foobar")
            tester('ntpath.expandvars("%?bar%")', "%?bar%")
            tester('ntpath.expandvars("%foo%%bar")', "bar%bar")
            tester('ntpath.expandvars("\'%foo%\'%bar")', "\'%foo%\'%bar")

    @unittest.skipUnless(test_support.FS_NONASCII, 'need test_support.FS_NONASCII')
    def test_expandvars_nonascii(self):
        # Same expansion rules with a non-ASCII variable name, checked for
        # both byte-string and unicode inputs.
        encoding = sys.getfilesystemencoding()
        def check(value, expected):
            tester0("ntpath.expandvars(%r)" % value, expected)
            tester0("ntpath.expandvars(%r)" % value.decode(encoding),
                    expected.decode(encoding))
        with test_support.EnvironmentVarGuard() as env:
            env.clear()
            unonascii = test_support.FS_NONASCII
            snonascii = unonascii.encode(encoding)
            env['spam'] = snonascii
            env[snonascii] = 'ham' + snonascii
            check('$spam bar', '%s bar' % snonascii)
            check('$%s bar' % snonascii, '$%s bar' % snonascii)
            check('${spam}bar', '%sbar' % snonascii)
            check('${%s}bar' % snonascii, 'ham%sbar' % snonascii)
            check('$spam}bar', '%s}bar' % snonascii)
            check('$%s}bar' % snonascii, '$%s}bar' % snonascii)
            check('%spam% bar', '%s bar' % snonascii)
            check('%{}% bar'.format(snonascii), 'ham%s bar' % snonascii)
            check('%spam%bar', '%sbar' % snonascii)
            check('%{}%bar'.format(snonascii), 'ham%sbar' % snonascii)

    def test_expanduser(self):
        # expanduser consults HOMEPATH/HOMEDRIVE, then HOME, then
        # USERPROFILE, in ntpath's documented precedence.
        tester('ntpath.expanduser("test")', 'test')

        with test_support.EnvironmentVarGuard() as env:
            env.clear()
            tester('ntpath.expanduser("~test")', '~test')

            env['HOMEPATH'] = 'eric\\idle'
            env['HOMEDRIVE'] = 'C:\\'
            tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
            tester('ntpath.expanduser("~")', 'C:\\eric\\idle')

            del env['HOMEDRIVE']
            tester('ntpath.expanduser("~test")', 'eric\\test')
            tester('ntpath.expanduser("~")', 'eric\\idle')

            env.clear()
            env['USERPROFILE'] = 'C:\\eric\\idle'
            tester('ntpath.expanduser("~test")', 'C:\\eric\\test')
            tester('ntpath.expanduser("~")', 'C:\\eric\\idle')

            env.clear()
            env['HOME'] = 'C:\\idle\\eric'
            tester('ntpath.expanduser("~test")', 'C:\\idle\\test')
            tester('ntpath.expanduser("~")', 'C:\\idle\\eric')

            tester('ntpath.expanduser("~test\\foo\\bar")',
                   'C:\\idle\\test\\foo\\bar')
            tester('ntpath.expanduser("~test/foo/bar")',
                   'C:\\idle\\test/foo/bar')
            tester('ntpath.expanduser("~\\foo\\bar")',
                   'C:\\idle\\eric\\foo\\bar')
            tester('ntpath.expanduser("~/foo/bar")',
                   'C:\\idle\\eric/foo/bar')

    def test_abspath(self):
        # ntpath.abspath() can only be used on a system with the "nt" module
        # (reasonably), so we protect this test with "import nt". This allows
        # the rest of the tests for the ntpath module to be run to completion
        # on any platform, since most of the module is intended to be usable
        # from any platform.
        # XXX this needs more tests
        try:
            import nt
        except ImportError:
            # check that the function is there even if we are not on Windows
            ntpath.abspath
        else:
            tester('ntpath.abspath("C:\\")', "C:\\")

    def test_relpath(self):
        currentdir = os.path.split(os.getcwd())[-1]
        tester('ntpath.relpath("a")', 'a')
        tester('ntpath.relpath(os.path.abspath("a"))', 'a')
        tester('ntpath.relpath("a/b")', 'a\\b')
        tester('ntpath.relpath("../a/b")', '..\\a\\b')
        tester('ntpath.relpath("a", "../b")', '..\\'+currentdir+'\\a')
        tester('ntpath.relpath("a/b", "../c")', '..\\'+currentdir+'\\a\\b')
        tester('ntpath.relpath("a", "b/c")', '..\\..\\a')
        tester('ntpath.relpath("//conky/mountpoint/a", "//conky/mountpoint/b/c")', '..\\..\\a')
        tester('ntpath.relpath("a", "a")', '.')
        tester('ntpath.relpath("/foo/bar/bat", "/x/y/z")', '..\\..\\..\\foo\\bar\\bat')
        tester('ntpath.relpath("/foo/bar/bat", "/foo/bar")', 'bat')
        tester('ntpath.relpath("/foo/bar/bat", "/")', 'foo\\bar\\bat')
        tester('ntpath.relpath("/", "/foo/bar/bat")', '..\\..\\..')
        tester('ntpath.relpath("/foo/bar/bat", "/x")', '..\\foo\\bar\\bat')
        tester('ntpath.relpath("/x", "/foo/bar/bat")', '..\\..\\..\\x')
        tester('ntpath.relpath("/", "/")', '.')
        tester('ntpath.relpath("/a", "/a")', '.')
        tester('ntpath.relpath("/a/b", "/a/b")', '.')
        # Drive letters compare case-insensitively.
        tester('ntpath.relpath("c:/foo", "C:/FOO")', '.')
class NtCommonTest(test_genericpath.CommonTest):
    """Run the generic path tests against ntpath.

    `attributes` lists ntpath-specific names the generic suite should
    also expect to exist.
    """
    pathmodule = ntpath
    attributes = ['relpath', 'splitunc']
def test_main():
    """Entry point used by the regression-test driver."""
    suites = (TestNtpath, NtCommonTest)
    test_support.run_unittest(*suites)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| unlicense |
SVADemoAPP/Server | WebContent/WEB-INF/python/BluemixClientTest.py | 1 | 4032 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import requests
import json
import sys
from qpid.messaging import *
happy = 0
sad = 0
ids = {}
def send(payload):
    """POST one location payload to the Presence Insights REST endpoint,
    updating the module-level happy/sad counters and printing periodic
    progress (Python 2 print statements).
    """
    # SECURITY NOTE(review): the endpoint credentials are hard-coded in
    # source; they should be moved to configuration/environment.
    r = requests.post('https://presenceinsights.ng.bluemix.net/conn-huawei-smallcell/v1/tenants/mk3l078f/orgs/l61b0ipq', auth=('1a1d0zn2','aXpgYAnl5hLJ'), headers = { 'content-type': 'application/json' }, data = payload)
    global happy
    global sad
    # 204 No Content is the success response for this API.
    if (r.status_code == 204):
        happy = happy + 1
    else:
        print "sad %d : %r" % (r.status_code,payload)
        sad = sad + 1
    total = happy + sad
    # Progress line every 10 requests; full payload dump every 99.
    if ((total % 10) == 0):
        print "%s unique:%d total:%d happy:%d sad:%d" % (time.strftime("%Y:%m:%d %H:%M:%S"),len(ids),total,happy,sad)
    if ((total % 99) == 0):
        print payload
        print r.content
    sys.stdout.flush()
if __name__ == "__main__":
    # Usage: BluemixClientTest.py <app name> <broker ip> <broker port> <queue id>
    if len(sys.argv) < 5:
        sys.exit(-1)
    print 'app name {}, broke ip {}, broker port {}, queue id {}'.format(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    sys.stdout.flush()
    # Alternate broker URL forms kept for reference.
    #broker = "{}/xxxx@{}:{}".format(sys.argv[1], sys.argv[2], sys.argv[3])
    broker = "{}:{}".format(sys.argv[2], sys.argv[3])
    #broker = "{}/User%40123456@{}:{}".format(sys.argv[1], sys.argv[2], sys.argv[3])
    print broker
    sys.stdout.flush()
    address = "{}".format(sys.argv[4])
    print address
    sys.stdout.flush()
    # SSL/credential options for the qpid AMQP connection.
    # SECURITY NOTE(review): credentials are hard-coded in source.
    conn_options = {
        'username' : 'app7',
        'password' : 'User@123456',
        'transport' : 'ssl',
        'ssl_keyfile' : "ssl_cert_file/MSP.Key.pem",
        'ssl_certfile' : "ssl_cert_file/MSP.pem.cer",
        'ssl_trustfile' : "ssl_cert_file/Wireless Root CA.pem.cer",
        'ssl_skip_hostname_check' : True,
    }
    connection = Connection(broker, **conn_options)
    try:
        #print "sending sample"
        #sys.stdout.flush()
        #send('{ "site": "3x6s0r8l", "locationstream":[ { "IdType":"IP", "Timestamp":1432108029000, "datatype":"coordinates", "location":{"x":448.0,"y":181.0,"z":1}, "userid":["0a8340e5"] } ] }')
        print "open"
        sys.stdout.flush()
        connection.open()
        print "open done"
        sys.stdout.flush()
        session = connection.session()
        print "has a session"
        sys.stdout.flush()
        receiver = session.receiver(address)
        print "session create success"
        sys.stdout.flush()
        # Forever: pull a message off the queue, rewrite its site id, and
        # relay it to the Presence Insights endpoint via send().
        while True:
            message = str(receiver.fetch().content)
            #print type(message)
            #print message
            #sys.stdout.flush()
            session.acknowledge()
            asJson = json.loads(message)
            asJson["site"] = "227w01b0"
            # First user id of the first location record, used as the
            # uniqueness key for the counters.
            ip = asJson["locationstream"][0]['userid'][0]
            if (ip in ids):
                ids[ip] = ids[ip]+1
            else:
                print "new ip %s" % ip
                ids[ip] = 1
            asStr = json.dumps(asJson)
            #print "ZZZ %r" % asStr
            send(asStr)
    except MessagingError, m:
        print "MessagingError", m
        sys.stdout.flush()
    connection.close()
| apache-2.0 |
RobertABT/heightmap | build/scipy/scipy/stats/_discrete_distns.py | 4 | 20332 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
import numpy.random as mtrand
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf)
# Public API of this module: the distribution singletons defined below.
__all__ = [
    'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom',
    'logser', 'poisson', 'planck', 'boltzmann', 'randint',
    'zipf', 'dlaplace', 'skellam'
]
class binom_gen(rv_discrete):
    """A binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `binom` is::
    binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
    for ``k`` in ``{0, 1,..., n}``.
    `binom` takes ``n`` and ``p`` as shape parameters.
    %(example)s
    """
    def _rvs(self, n, p):
        # Sampling delegates to numpy's binomial generator.
        return mtrand.binomial(n, p, self._size)
    def _argcheck(self, n, p):
        # Side effect: pins the distribution's upper support bound to n.
        self.b = n
        return (n >= 0) & (p >= 0) & (p <= 1)
    def _logpmf(self, x, n, p):
        k = floor(x)
        # log C(n, k) via log-gamma to avoid overflow for large n.
        combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
        # xlogy/xlog1py return 0 when k == 0 even if p == 0.
        return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _cdf(self, x, n, p):
        k = floor(x)
        vals = special.bdtr(k, n, p)
        return vals
    def _sf(self, x, n, p):
        k = floor(x)
        # bdtrc is the complemented binomial CDF (more accurate than 1-cdf).
        return special.bdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # Invert the CDF, then step back one if the next-lower integer
        # already reaches quantile q (bdtrik may overshoot by one).
        vals = ceil(special.bdtrik(q, n, p))
        vals1 = np.maximum(vals - 1, 0)
        temp = special.bdtr(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p):
        # Textbook binomial mean / variance / skewness / excess kurtosis.
        q = 1.0-p
        mu = n * p
        var = n * p * q
        g1 = (q-p) / sqrt(n*p*q)
        g2 = (1.0-6*p*q)/(n*p*q)
        return mu, var, g1, g2
    def _entropy(self, n, p):
        # Exact entropy by summing -p*log(p) over the finite support 0..n.
        k = np.r_[0:n + 1]
        vals = self._pmf(k, n, p)
        h = -np.sum(special.xlogy(vals, vals), axis=0)
        return h
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
    """A Bernoulli discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `bernoulli` is::
    bernoulli.pmf(k) = 1-p if k = 0
    = p if k = 1
    for ``k`` in ``{0, 1}``.
    `bernoulli` takes ``p`` as shape parameter.
    %(example)s
    """
    # A Bernoulli variate is a binomial with n fixed at 1, so every method
    # delegates to the binomial implementation with n = 1.
    def _rvs(self, p):
        # Draw from the parent binomial sampler with a single trial.
        return super(bernoulli_gen, self)._rvs(1, p)
    def _argcheck(self, p):
        # Valid for any probability p in [0, 1].
        return (p <= 1) & (p >= 0)
    def _logpmf(self, x, p):
        # log pmf of binom(1, p).
        return binom._logpmf(x, 1, p)
    def _pmf(self, x, p):
        # pmf of binom(1, p): p at x == 1, (1 - p) at x == 0.
        return binom._pmf(x, 1, p)
    def _cdf(self, x, p):
        # CDF of binom(1, p).
        return binom._cdf(x, 1, p)
    def _sf(self, x, p):
        # Survival function of binom(1, p).
        return binom._sf(x, 1, p)
    def _ppf(self, q, p):
        # Quantile function of binom(1, p).
        return binom._ppf(q, 1, p)
    def _stats(self, p):
        # Moments of binom(1, p): mean p, variance p*(1-p), etc.
        return binom._stats(1, p)
    def _entropy(self, p):
        # Binary entropy: -p*log(p) - (1-p)*log(1-p), 0-safe via xlogy.
        return -(special.xlogy(p, p) + special.xlogy(1 - p, 1 - p))
# Bernoulli is binomial with n fixed at 1; ``b=1`` caps the support.
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
    """A negative binomial discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `nbinom` is::
    nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
    for ``k >= 0``.
    `nbinom` takes ``n`` and ``p`` as shape parameters.
    %(example)s
    """
    def _rvs(self, n, p):
        # Sampling delegates to numpy's negative-binomial generator.
        return mtrand.negative_binomial(n, p, self._size)
    def _argcheck(self, n, p):
        return (n >= 0) & (p >= 0) & (p <= 1)
    def _pmf(self, x, n, p):
        return exp(self._logpmf(x, n, p))
    def _logpmf(self, x, n, p):
        # Generalized binomial coefficient via log-gamma; valid for real n.
        coeff = gamln(n+x) - gamln(x+1) - gamln(n)
        return coeff + n*log(p) + x*log(1-p)
    def _cdf(self, x, n, p):
        k = floor(x)
        # CDF expressed through the regularized incomplete beta function.
        return special.betainc(n, k+1, p)
    def _sf_skip(self, x, n, p):
        # skip because special.nbdtrc doesn't work for 0<n<1
        k = floor(x)
        return special.nbdtrc(k, n, p)
    def _ppf(self, q, n, p):
        # Invert the CDF; step back one when the lower integer suffices.
        vals = ceil(special.nbdtrik(q, n, p))
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, n, p)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, n, p):
        # Moments via Q = 1/p and P = (1-p)/p.
        Q = 1.0 / p
        P = Q - 1.0
        mu = n*P
        var = n*P*Q
        g1 = (Q+P)/sqrt(n*P*Q)
        g2 = (1.0 + 6*P*Q) / (n*P*Q)
        return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
    """A geometric discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `geom` is::
    geom.pmf(k) = (1-p)**(k-1)*p
    for ``k >= 1``.
    `geom` takes ``p`` as shape parameter.
    %(example)s
    """
    def _rvs(self, p):
        # Sampling delegates to numpy's geometric generator.
        return mtrand.geometric(p, size=self._size)
    def _argcheck(self, p):
        return (p <= 1) & (p >= 0)
    def _pmf(self, k, p):
        return np.power(1-p, k-1) * p
    def _logpmf(self, k, p):
        return (k-1) * log(1-p) + log(p)
    def _cdf(self, x, p):
        k = floor(x)
        # 1 - (1-p)**k computed in log space for accuracy at small p.
        return -expm1(log1p(-p)*k)
    def _sf(self, x, p):
        return np.exp(self._logsf(x, p))
    def _logsf(self, x, p):
        k = floor(x)
        return k*log1p(-p)
    def _ppf(self, q, p):
        # BUG FIX (precision): use log1p(-q)/log1p(-p) instead of
        # log(1.0-q)/log(1-p).  The two are mathematically identical, but
        # the log1p forms avoid catastrophic cancellation when q is close
        # to 1 or p is small, and match the forms already used in _cdf
        # and _logsf above.
        vals = ceil(log1p(-q) / log1p(-p))
        # Step back one if the next-lower integer already reaches quantile q.
        temp = self._cdf(vals-1, p)
        return np.where((temp >= q) & (vals > 0), vals-1, vals)
    def _stats(self, p):
        # Standard geometric moments (support starting at k = 1).
        mu = 1.0/p
        qr = 1.0-p
        var = qr / p / p
        g1 = (2.0-p) / sqrt(qr)
        g2 = np.polyval([1, -6, 6], p)/(1.0-p)
        return mu, var, g1, g2
# ``a=1``: the geometric distribution's support starts at k = 1.
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
    """A hypergeometric discrete random variable.
    The hypergeometric distribution models drawing objects from a bin.
    M is the total number of objects, n is total number of Type I objects.
    The random variate represents the number of Type I objects in N drawn
    without replacement from the total population.
    %(before_notes)s
    Notes
    -----
    The probability mass function is defined as::
    pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
    for max(0, N - (M-n)) <= k <= min(n, N)
    Examples
    --------
    >>> from scipy.stats import hypergeom
    >>> import matplotlib.pyplot as plt
    Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
    we want to know the probability of finding a given number of dogs if we
    choose at random 12 of the 20 animals, we can initialize a frozen
    distribution and plot the probability mass function:
    >>> [M, n, N] = [20, 7, 12]
    >>> rv = hypergeom(M, n, N)
    >>> x = np.arange(0, n+1)
    >>> pmf_dogs = rv.pmf(x)
    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111)
    >>> ax.plot(x, pmf_dogs, 'bo')
    >>> ax.vlines(x, 0, pmf_dogs, lw=2)
    >>> ax.set_xlabel('# of dogs in our group of chosen animals')
    >>> ax.set_ylabel('hypergeom PMF')
    >>> plt.show()
    Instead of using a frozen distribution we can also use `hypergeom`
    methods directly. To for example obtain the cumulative distribution
    function, use:
    >>> prb = hypergeom.cdf(x, M, n, N)
    And to generate random numbers:
    >>> R = hypergeom.rvs(M, n, N, size=10)
    """
    def _rvs(self, M, n, N):
        # numpy parameterizes as (ngood, nbad, nsample).
        return mtrand.hypergeometric(n, M-n, N, size=self._size)
    def _argcheck(self, M, n, N):
        cond = rv_discrete._argcheck(self, M, n, N)
        cond &= (n <= M) & (N <= M)
        # Side effect: support bounds depend on the shape parameters.
        self.a = max(N-(M-n), 0)
        self.b = min(n, N)
        return cond
    def _logpmf(self, k, M, n, N):
        tot, good = M, n
        bad = tot - good
        # log of choose(good,k)*choose(bad,N-k)/choose(tot,N) via log-gamma.
        return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
            - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
            + gamln(N+1)
    def _pmf(self, k, M, n, N):
        # same as the following but numerically more precise
        # return comb(good, k) * comb(bad, N-k) / comb(tot, N)
        return exp(self._logpmf(k, M, n, N))
    def _stats(self, M, n, N):
        # tot, good, sample_size = M, n, N
        # "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
        M, n, N = 1.*M, 1.*n, 1.*N
        m = M - n
        p = n/M
        mu = N*p
        var = m*n*N*(M - N)*1.0/(M*M*(M-1))
        g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
        g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
        g2 *= (M-1)*M*M
        g2 += 6.*n*N*(M-N)*m*(5.*M-6)
        g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
        return mu, var, g1, g2
    def _entropy(self, M, n, N):
        # NOTE(review): the lower bound here is N - (M - n) without the
        # max(0, .) clamp used in _argcheck; presumably only valid shapes
        # reach this -- confirm before relying on it when N - (M - n) < 0.
        k = np.r_[N - (M - n):min(n, N) + 1]
        vals = self.pmf(k, M, n, N)
        h = -np.sum(special.xlogy(vals, vals), axis=0)
        return h
    def _sf(self, k, M, n, N):
        """More precise calculation, 1 - cdf doesn't cut it."""
        # This for loop is needed because `k` can be an array. If that's the
        # case, the sf() method makes M, n and N arrays of the same shape. We
        # therefore unpack all inputs args, so we can do the manual
        # integration.
        res = []
        for quant, tot, good, draw in zip(k, M, n, N):
            # Manual integration over probability mass function. More accurate
            # than integrate.quad.
            k2 = np.arange(quant + 1, draw + 1)
            res.append(np.sum(self._pmf(k2, tot, good, draw)))
        return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
    """A Logarithmic (Log-Series, Series) discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `logser` is::
    logser.pmf(k) = - p**k / (k*log(1-p))
    for ``k >= 1``.
    `logser` takes ``p`` as shape parameter.
    %(example)s
    """
    def _rvs(self, p):
        # looks wrong for p>0.5, too few k=1
        # trying to use generic is worse, no k=1 at all
        return mtrand.logseries(p, size=self._size)
    def _argcheck(self, p):
        return (p > 0) & (p < 1)
    def _pmf(self, k, p):
        # log(1 - p) < 0 on the valid domain, so the leading minus sign
        # yields a positive mass.
        return -np.power(p, k) * 1.0 / k / log(1 - p)
    def _stats(self, p):
        # Central moments derived from the raw moments mu_k' of the
        # log-series distribution; r = log(1-p) appears throughout.
        r = log(1 - p)
        mu = p / (p - 1.0) / r
        mu2p = -p / r / (p - 1.0)**2
        var = mu2p - mu*mu
        mu3p = -p / r * (1.0+p) / (1.0 - p)**3
        mu3 = mu3p - 3*mu*mu2p + 2*mu**3
        g1 = mu3 / np.power(var, 1.5)
        mu4p = -p / r * (
            1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
        mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
        g2 = mu4 / var**2 - 3.0
        return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
    """A Poisson discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `poisson` is::
    poisson.pmf(k) = exp(-mu) * mu**k / k!
    for ``k >= 0``.
    `poisson` takes ``mu`` as shape parameter.
    %(example)s
    """
    def _rvs(self, mu):
        # Sampling is delegated to numpy's Poisson generator.
        return mtrand.poisson(mu, self._size)
    def _logpmf(self, k, mu):
        # log pmf: k*log(mu) - log(k!) - mu, with log(k!) via log-gamma.
        return k * log(mu) - gamln(k + 1) - mu
    def _pmf(self, k, mu):
        # Exponentiate the log pmf rather than forming mu**k / k! directly.
        return exp(self._logpmf(k, mu))
    def _cdf(self, x, mu):
        # pdtr is the regularized Poisson CDF from scipy.special.
        return special.pdtr(floor(x), mu)
    def _sf(self, x, mu):
        # Complemented CDF; more accurate than 1 - cdf in the far tail.
        return special.pdtrc(floor(x), mu)
    def _ppf(self, q, mu):
        # Invert the CDF, then step back one if the next-lower integer
        # already reaches quantile q (pdtrik may overshoot by one).
        candidate = ceil(special.pdtrik(q, mu))
        lower = np.maximum(candidate - 1, 0)
        return np.where(special.pdtr(lower, mu) >= q, lower, candidate)
    def _stats(self, mu):
        # Mean and variance both equal mu; skew and excess kurtosis decay
        # like 1/sqrt(mu) and 1/mu respectively.
        inv = 1.0 / np.asarray(mu)
        return mu, mu, sqrt(inv), inv
# Module-level singleton instance exposed as ``scipy.stats.poisson``.
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
    """A Planck discrete exponential random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `planck` is::
    planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
    for ``k*lambda_ >= 0``.
    `planck` takes ``lambda_`` as shape parameter.
    %(example)s
    """
    def _argcheck(self, lambda_):
        # Side effect: support is [0, inf) when lambda_ > 0 and (-inf, 0]
        # when lambda_ < 0; lambda_ == 0 is rejected.
        if (lambda_ > 0):
            self.a = 0
            self.b = np.inf
            return 1
        elif (lambda_ < 0):
            self.a = -np.inf
            self.b = 0
            return 1
        else:
            return 0
    def _pmf(self, k, lambda_):
        # Geometric weights normalized over the half-infinite support.
        fact = (1-exp(-lambda_))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_):
        k = floor(x)
        return 1-exp(-lambda_*(k+1))
    def _ppf(self, q, lambda_):
        # Invert the CDF, then step back one when the lower integer suffices.
        vals = ceil(-1.0/lambda_ * log1p(-q)-1)
        vals1 = (vals-1).clip(self.a, np.inf)
        temp = self._cdf(vals1, lambda_)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_):
        mu = 1/(exp(lambda_)-1)
        var = exp(-lambda_)/(expm1(-lambda_))**2
        g1 = 2*cosh(lambda_/2.0)
        g2 = 4+2*cosh(lambda_)
        return mu, var, g1, g2
    def _entropy(self, lambda_):
        # Closed-form entropy; C is the normalizing constant from _pmf.
        l = lambda_
        C = (1-exp(-l))
        return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
    """A Boltzmann (Truncated Discrete Exponential) random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `boltzmann` is::
    boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
    for ``k = 0,..., N-1``.
    `boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
    %(example)s
    """
    def _pmf(self, k, lambda_, N):
        # Geometric weights renormalized over the truncated support 0..N-1.
        fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
        return fact*exp(-lambda_*k)
    def _cdf(self, x, lambda_, N):
        k = floor(x)
        return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
    def _ppf(self, q, lambda_, N):
        # Map q back to the untruncated scale, invert, then step back one
        # when the next-lower integer already reaches quantile q.
        qnew = q*(1-exp(-lambda_*N))
        vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
        vals1 = (vals-1).clip(0.0, np.inf)
        temp = self._cdf(vals1, lambda_, N)
        return np.where(temp >= q, vals1, vals)
    def _stats(self, lambda_, N):
        # Moments via z = exp(-lambda_) and zN = z**N (truncation factor).
        z = exp(-lambda_)
        zN = exp(-lambda_*N)
        mu = z/(1.0-z)-N*zN/(1-zN)
        var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
        trm = (1-zN)/(1-z)
        trm2 = (z*trm**2 - N*N*zN)
        g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
        g1 = g1 / trm2**(1.5)
        g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
        g2 = g2 / trm2 / trm2
        return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
                          longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
    """A uniform discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `randint` is::
    randint.pmf(k) = 1./(high - low)
    for ``k = low, ..., high - 1``.
    `randint` takes ``low`` and ``high`` as shape parameters.
    Note the difference to the numpy ``random_integers`` which
    returns integers on a *closed* interval ``[low, high]``.
    %(example)s
    """
    def _argcheck(self, low, high):
        # Side effect: support is the half-open integer range [low, high).
        self.a, self.b = low, high - 1
        return high > low
    def _pmf(self, k, low, high):
        # Constant mass inside the support, zero outside.
        uniform = np.ones_like(k) / (high - low)
        in_support = (k >= low) & (k < high)
        return np.where(in_support, uniform, 0.)
    def _cdf(self, x, low, high):
        # Number of support points at or below floor(x), normalized.
        return (floor(x) - low + 1.) / (high - low)
    def _ppf(self, q, low, high):
        # Invert the CDF, then step back one when the next-lower integer
        # already reaches quantile q.
        candidate = ceil(q * (high - low) + low) - 1
        lower = (candidate - 1).clip(low, high)
        return np.where(self._cdf(lower, low, high) >= q, lower, candidate)
    def _stats(self, low, high):
        # Moments of the discrete uniform on {low, ..., high - 1}.
        lo, hi = np.asarray(low), np.asarray(high)
        span = hi - lo
        mean = (hi + lo - 1.0) / 2
        variance = (span * span - 1) / 12.0
        kurt = -6.0 / 5.0 * (span * span + 1.0) / (span * span - 1.0)
        return mean, variance, 0.0, kurt
    def _rvs(self, low, high=None):
        """An array of *size* random integers >= ``low`` and < ``high``.
        If ``high`` is ``None``, then range is >=0 and < low
        """
        return mtrand.randint(low, high, self._size)
    def _entropy(self, low, high):
        # Entropy of a uniform distribution over (high - low) outcomes.
        return log(high - low)
# Module-level singleton; support bounds are set per-call in _argcheck.
randint = randint_gen(name='randint', longname='A discrete uniform '
                      '(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
    """A Zipf discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `zipf` is::
    zipf.pmf(k, a) = 1/(zeta(a) * k**a)
    for ``k >= 1``.
    `zipf` takes ``a`` as shape parameter.
    %(example)s
    """
    def _rvs(self, a):
        # Sampling delegates to numpy's Zipf generator.
        return mtrand.zipf(a, size=self._size)
    def _argcheck(self, a):
        return a > 1
    def _pmf(self, k, a):
        # zeta(a, 1) is the Riemann zeta function (Hurwitz zeta at q=1).
        Pk = 1.0 / special.zeta(a, 1) / k**a
        return Pk
    def _munp(self, n, a):
        # n-th raw moment: zeta(a-n)/zeta(a) where it converges (a > n+1),
        # infinite otherwise; _lazywhere avoids evaluating the bad branch.
        return _lazywhere(
            a > n + 1, (a, n),
            lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
            np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
    """A Laplacian discrete random variable.
    %(before_notes)s
    Notes
    -----
    The probability mass function for `dlaplace` is::
    dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
    for ``a > 0``.
    `dlaplace` takes ``a`` as shape parameter.
    %(example)s
    """
    def _pmf(self, k, a):
        return tanh(a/2.0) * exp(-a * abs(k))
    def _cdf(self, x, a):
        k = floor(x)
        # Closed-form CDF, split at zero; _lazywhere evaluates each branch
        # only where its condition holds.
        f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
        f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
        return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
    def _ppf(self, q, a):
        # Invert each CDF branch: q below the mass at/left of zero maps to
        # the negative branch, otherwise to the positive one; then step
        # back one when the lower integer already reaches quantile q.
        const = 1 + exp(a)
        vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
                             -log((1-q) * const) / a))
        vals1 = vals - 1
        return np.where(self._cdf(vals1, a) >= q, vals1, vals)
    def _stats(self, a):
        # Symmetric about zero: mean and skewness are exactly 0.
        ea = exp(a)
        mu2 = 2.*ea/(ea-1.)**2
        mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
        return 0., mu2, 0., mu4/mu2**2 - 3.
    def _entropy(self, a):
        return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
                        name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
    """A Skellam discrete random variable.
    %(before_notes)s
    Notes
    -----
    Probability distribution of the difference of two correlated or
    uncorrelated Poisson random variables.
    Let k1 and k2 be two Poisson-distributed r.v. with expected values
    lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
    parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
    ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
    coefficient between k1 and k2. If the two Poisson-distributed r.v.
    are independent then ``rho = 0``.
    Parameters mu1 and mu2 must be strictly positive.
    For details see: http://en.wikipedia.org/wiki/Skellam_distribution
    `skellam` takes ``mu1`` and ``mu2`` as shape parameters.
    %(example)s
    """
    def _rvs(self, mu1, mu2):
        n = self._size
        # Difference of two independent Poisson draws.
        return mtrand.poisson(mu1, n) - mtrand.poisson(mu2, n)
    def _pmf(self, x, mu1, mu2):
        # PMF via the noncentral chi-square density, branching on sign(x).
        px = np.where(x < 0,
                      _ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                      _ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
        # ncx2.pdf() returns nan's for extremely low probabilities
        return px
    def _cdf(self, x, mu1, mu2):
        x = floor(x)
        # CDF via the noncentral chi-square CDF, branching on sign(x).
        px = np.where(x < 0,
                      _ncx2_cdf(2*mu2, -2*x, 2*mu1),
                      1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
        return px
    def _stats(self, mu1, mu2):
        # Mean mu1-mu2, variance mu1+mu2; standard Skellam shape factors.
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
| mit |
zstackio/zstack-woodpecker | integrationtest/vm/mn_ha/test_all_hosts_force_stop_recover_vm_ha.py | 2 | 4114 | '''
Integration Test all hosts force stop and then recover, check ha vm auto recovery.
@author: SyZhao
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.node_operations as node_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import test_stub
import random
import time
import os
# Module-level state shared between test(), env_recover() and error_cleanup().
vm = None  # guest VM created by test(); destroyed during cleanup
mn_host_list = None  # all management-node hosts discovered by test()
need_recover_mn_host_list = None  # indexes of hosts still stopped at teardown
def test():
    """Force-stop all MN hosts, recover a majority, and verify HA recovery.

    Steps: create an HA guest VM, cold-stop every management-node host,
    confirm the MN VM is down, recover a strict majority of hosts, then
    verify the MN VM restarts on exactly one host, the HA VM returns to
    RUNNING, and a new VM can be created.  Fails the test otherwise.
    """
    global vm
    global mn_host_list
    global need_recover_mn_host_list
    ha_vm = test_stub.create_ha_vm()
    mn_host_list = test_stub.get_mn_host(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_host_num = len(mn_host_list)
    # Integer division (`//` behaves the same on py2 and py3): recover a
    # strict majority of the MN hosts so the consul cluster can re-elect
    # a leader.
    test_mn_host_list = random.sample(range(mn_host_num), (mn_host_num + 1) // 2)
    for host in mn_host_list:
        test_util.test_logger("force stop host [%s]" % (host.ip_))
        test_stub.stop_host(host, test_lib.all_scenario_config, 'cold')
    # Use an explicit list: range() has no .remove() under Python 3.
    need_recover_mn_host_list = list(range(mn_host_num))
    test_util.test_logger("wait 10s for MN VM to stop")
    time.sleep(10)
    mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
    if len(mn_host) != 0:
        test_util.test_fail('MN VM is still running on %d host(s)' % len(mn_host))
    for index in test_mn_host_list:
        test_util.test_logger("recover host [%s]" % (mn_host_list[index].ip_))
        test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        need_recover_mn_host_list.remove(index)
    test_util.test_logger("wait for 20 seconds to see if management node VM starts on any host")
    time.sleep(20)
    new_mn_host_ip = test_stub.get_host_by_consul_leader(test_lib.all_scenario_config, test_lib.scenario_file)
    if new_mn_host_ip == "":
        test_util.test_fail("management node VM not run correctly on [%s] after its former host [%s] down for 20s" % (new_mn_host_ip, mn_host_list[0].ip_))
    # Poll up to ~5 minutes for the MN VM to appear on exactly one host.
    count = 60
    while count > 0:
        new_mn_host = test_stub.get_host_by_mn_vm(test_lib.all_scenario_config, test_lib.scenario_file)
        if len(new_mn_host) == 1:
            test_util.test_logger("management node VM run after its former host down for 30s")
            break
        elif len(new_mn_host) > 1:
            test_util.test_fail("management node VM runs on more than one host after its former host down")
        time.sleep(5)
        count -= 1
    if len(new_mn_host) == 0:
        test_util.test_fail("management node VM does not run after its former host down for 30s")
    elif len(new_mn_host) > 1:
        test_util.test_fail("management node VM runs on more than one host after its former host down")
    #node_ops.wait_for_management_server_start(300)
    test_stub.wrapper_of_wait_for_management_server_start(600)
    test_stub.return_pass_ahead_if_3sites("TEST PASS")
    # BUG FIX: the module is imported as ``test_vm_header``; the original
    # referenced the undefined name ``vm_header`` and raised NameError here.
    ha_vm.set_state(test_vm_header.RUNNING)
    ha_vm.check()
    test_util.test_logger("try to create vm, timeout is 30s")
    time_out = 30
    while time_out > 0:
        try:
            vm = test_stub.create_basic_vm()
            break
        except:
            time.sleep(1)
            time_out -= 1
    if time_out == 0:
        test_util.test_fail('Fail to create vm after mn is ready')
    vm.check()
    vm.destroy()
    test_util.test_pass('Create VM Test Success')
# Called whatever the test result is: brings back any MN hosts that test()
# stopped but never recovered, then waits for the MN HA cluster to settle.
def env_recover():
    if need_recover_mn_host_list:
        for index in need_recover_mn_host_list:
            test_util.test_logger("recover host: %s" % (mn_host_list[index].ip_))
            test_stub.recover_host(mn_host_list[index], test_lib.all_scenario_config, test_lib.deploy_config)
        test_stub.wait_for_mn_ha_ready(test_lib.all_scenario_config, test_lib.scenario_file)
# Called only if an exception happens in test().
def error_cleanup():
    """Best-effort teardown of the guest VM created by test()."""
    global vm
    if not vm:
        return
    try:
        vm.destroy()
    except:
        # Deliberately swallow everything: cleanup must never mask the
        # original test failure.
        pass
| apache-2.0 |
birkin/ts_reporting_project | tech_services_reports/lib/cataloging_report_view_helper.py | 1 | 17136 | # -*- coding: utf-8 -*-
import datetime, json, logging, pprint
from collections import defaultdict
from operator import itemgetter
from django.conf import settings as project_settings
from django.core.urlresolvers import reverse
from tech_services_reports import settings_app
log = logging.getLogger("webapp")
class CatalogingReportViewHelper(object):
    """ Prepares context for cataloging report view. """
    def set_dates( self, year_str, month_num_str=None ):
        """ Sets start and end dates from url vars.
            Called by views.cataloging_report() """
        # Returns (start_date, end_date, report_date_header); the header is
        # None for whole-year reports, else the month name (e.g. 'March').
        report_date_header = None
        if not month_num_str:
            ( year_num, month_num ) = ( int(year_str), 1 )
            start = datetime.date( year_num, month_num, 1 ) # first day of year
            end = datetime.date( year_num, 12, 31 ) # last day of year
        else:
            ( year_num, month_num ) = ( int(year_str), int(month_num_str) )
            start = datetime.date( year_num, month_num, 1 )
            end = self.last_day_of_month( start )
            report_date_header = start.strftime('%B')
        return ( start, end, report_date_header )
    def make_context( self, start, end, report_date_header, scheme, host ):
        """ Manages context creation.
            Called by views.cataloging_report_v2() """
        # ( start, end, report_date_header ) = self.set_dates( year_str, month_num_str )
        context = self.update_context_dates( start, end, report_date_header )
        cr = CatalogingReport(start, end)
        context = self.update_context_data( context, cr )
        context = self.update_context_chart_data( context, cr )
        context['HOME_URL'] = '{sch}://{hst}{url}'.format( sch=scheme, hst=host, url=reverse('index_url') )
        context['csv_url'] = self.get_csv_url( context )
        log.debug( 'type(context), `{typ}`;\n context, ```````{val}```````'.format( typ=type(context), val=pprint.pformat(context) ) )
        return context
    def make_context_v2( self, year_str, month_num_str, scheme, host ):
        """ TODO: prepare a json-serializable context.
            Manages context creation.
            Called by views.cataloging_report_v2() """
        # NOTE(review): stub -- currently returns an empty dict.
        context = {}
        log.debug( 'type(context), `{typ}`;\n context, ```````{val}```````'.format( typ=type(context), val=pprint.pformat(context) ) )
        return context
    def last_day_of_month( self, date_obj ):
        """ Returns the last day of the month for any given Python date object.
            Code from: http://stackoverflow.com/questions/42950/get-last-day-of-the-month-in-python
            Called by set_dates() """
        if date_obj.month == 12:
            new_dt = date_obj.replace( day=31 )
        else:
            # First day of the next month minus one day.
            new_dt = date_obj.replace( month=date_obj.month+1, day=1 ) - datetime.timedelta( days=1 )
        return new_dt
    def update_context_dates( self, start, end, report_date_header ):
        """ Initializes and stores main date info.
            Called by make_context() """
        context = {}
        context['STATIC_URL'] = project_settings.STATIC_URL
        year = start.year
        context['year'] = year
        context['report_date_header'] = report_date_header
        context['report_header'] = settings_app.CAT_STATS_REPORT_HEADER
        context['start'] = start.strftime("%Y-%m-%d")
        context['end'] = end.strftime("%Y-%m-%d")
        return context
    def update_context_data( self, context, cr ):
        """ Updates context with CatalogingReport data.
            Called by make_context() """
        context['by_format'] = cr.by_format()
        context['by_format_and_type'] = cr.by_format_and_type()
        context['by_cataloger'] = cr.by_cataloger()
        context['by_edit_type'] = cr.by_edit_type()
        context['by_cataloger_and_format'] = cr.by_cataloger_and_format()
        context['by_cataloger_and_edit_type'] = cr.by_cataloger_and_edit_type()
        context['total_cataloged'] = cr.total_cataloged
        context['report'] = cr
        context['last_updated'] = cr.last_updated
        return context
    def update_context_chart_data( self, context, cr ):
        """ Updates chart data.
            Called by make_context() """
        # Chart label is currently blank; the commented code below once
        # prefixed it with the report month/year.
        chart_label = ''
        # if context['report_date_header']:
        # log.debug( 'cataloging report_date_header, ```{}```'.format( context['report_date_header'] ) )
        # chart_label += context['report_date_header']
        # chart_label += ' ' + str(context['year'])
        context['by_format_chart_url'] = cr.gchart(
            context['by_format'], chart_label, 'Cataloging by format')
        context['by_edit_type_chart_url'] = cr.gchart(
            context['by_edit_type'], chart_label, 'Cataloging by type', color='3366CC')
        return context
    def get_csv_url( self, context ):
        """ Prepares csv download url.
            TODO: look into why these date-types are different from the accession date-types.
            Called by make_context() """
        log.debug( 'initial context, ```{}```'.format( pprint.pformat(context) ) )
        # start_dt = datetime.datetime.strptime( context['start'], '%Y-%m-%d' )
        # end_dt = datetime.datetime.strptime( context['end'], '%Y-%m-%d' )
        # start_str = start_dt.strftime( '%m/%d/%Y' )
        # end_str = end_dt.strftime( '%m/%d/%Y' )
        csv_url_root = reverse( 'cataloging_csv' )
        url = '{rt}?start={st}&end={en}'.format( rt=csv_url_root, st=context['start'], en=context['end'] )
        log.debug( 'csv_url, ```{}```'.format(url) )
        return url
    ## end class CatalogingReportViewHelper()
class CatalogingReport(object):
""" Prepares cataloging data. """
def __init__(self, start, end, cataloger=None):
from tech_services_reports.models import CatEdit, SummaryCatEdit
from itertools import chain
from django.db import connection
self.connection = connection
#self.catalogs = Cataloging.objects.filter(cat_date__gte=start,
# cat_date__lte=end)
self.edits = CatEdit.objects.filter(edit_date__gte=start,
edit_date__lte=end)
self.summary_edits = SummaryCatEdit.objects.filter(edit_date__gte=start,
edit_date__lte=end)
#Combine edits and summary edits
self.edits = list(chain(self.edits, self.summary_edits))
self.total_edited = len(self.edits)
self.total_cataloged = self.total_edited
self.last_updated = CatEdit.objects.latest('edit_date').edit_date
def _material_string(self, mat_type):
"""Returns full mat type string."""
try:
return settings_app.MAT_TYPE_MAP[mat_type]
except KeyError:
return 'Unknown'
def _cataloger(self, initials):
"""Returns full name of cataloger from initials."""
try:
return settings_app.CATALOGERS[initials]
except KeyError:
return 'Unknown'
def _distinct(self, field):
cursor = self.connection.cursor()
cursor.execute("SELECT DISTINCT %s from tech_services_reports_catedit;" % field)
distinct = cursor.fetchall()
try:
return [r[0] for r in distinct]
except TypeError:
return []
def _value(self, dbobj):
"""Check if this is a summary object or an individual object
and set a value to increment."""
v = 1
if dbobj._meta.model_name == 'summarycatedit':
v = dbobj.number
return v
def distinct_cat_types(self):
return settings_app.DISTINCT_CAT_TYPES
def _summary_csv(self, report):
"""Create list of rows that will be handled by a csv writer."""
out = []
out.append(report['header'])
for label, cell in report['data']:
out.append([label, cell])
totals = []
totals.append('Total')
for total in report['totals']:
totals.append(total)
out.append(totals)
return out
def _multi_summary_csv(self, report):
"""Create list of rows that will be handled by a csv writer."""
out = []
#write header row
out.append([' '] + report['header'])
#write data rows
for row_label, cell_data in report['data'].items():
this_row = []
this_row.append(row_label)
for header in report['header']:
for cell_type, data_point in cell_data.items():
if cell_type.title() == header:
this_row.append(data_point)
out.append(this_row)
total_row = ['Total']
for head in report['header']:
try:
total_row.append(report['cross'][head.lower()])
except KeyError:
total_row.append(0)
out.append(total_row)
return out
def distinct_formats(self):
formats = []
for bib in self.edits:
mt = bib.mat_type
if mt not in formats:
formats.append(mt)
return formats
def by_cataloger(self, report_format='html'):
summary = defaultdict(int)
total = 0
for ed in self.edits:
cataloger = self._cataloger(ed.editor.upper())
v = self._value(ed)
summary[cataloger] += v
total += v
# summary = sorted(summary.iteritems(), key=itemgetter(1), reverse=True)
summary = sorted(summary.items(), key=itemgetter(1), reverse=True)
header = ['Cataloger', 'Count']
report = {'header': header,
'data': summary,
'totals': [total]}
if report_format == 'html':
return report
else:
return self._summary_csv(report)
def by_edit_type(self, report_format='html'):
summary = defaultdict(int)
total = 0
for ed in self.edits:
v = self._value(ed)
ctype = ed.type.lower()
if ctype == 'catalog':
ctype = ed.source
summary[ctype.title()] += v
total += v
# summary = sorted(summary.iteritems(), key=itemgetter(1), reverse=True)
summary = sorted(summary.items(), key=itemgetter(1), reverse=True)
header = ['Edit type', 'Count']
report = {'header': header,
'data': summary,
'totals': [total]}
if report_format == 'csv':
return self._summary_csv(report)
else:
return report
def by_format(self, report_format='html'):
"""Reporting on CatEdits only."""
summary = defaultdict(int)
total = 0
for bib in self.edits:
v = self._value(bib)
mat_string = self._material_string(bib.mat_type)
summary[mat_string] += v
total += v
# summary = sorted(summary.iteritems(), key=itemgetter(1), reverse=True)
summary = sorted(summary.items(), key=itemgetter(1), reverse=True)
header = ['Format', 'Count']
report = {'header': header,
'data': summary,
'totals': [total]}
if report_format == 'csv':
return self._summary_csv(report)
else:
return report
def by_format_and_type(self, report_format='html'):
#Format Original Enriched Copy Total
summary = {}
cross = defaultdict(int)
bibs = self.edits
#Add distinct types from setting
cat_types = self.distinct_cat_types()
header = []
for bib in bibs:
mat_string = self._material_string(bib.mat_type)
format = mat_string
cat = bib.type.lower()
if cat == 'catalog':
cat = bib.source
summary[format] = summary.get(format, {})
for dc in cat_types:
dc = dc.lower()
ov = self._value(bib)
if dc == cat.lower():
v = ov
else:
v = 0
summary[format][dc] = summary[format].get(dc, 0) + v
summary[format]['total'] = summary[format].get('total', 0) + v
cross[dc] += v
cross['total'] += v
#header = [h.title() for h in cross.keys()]
#hspot = header.index('Total')
#header.pop(hspot)
header = [d.title() for d in cat_types]
header.append('Total')
#header.append('Total')
#Sort by totals
report = {'header': header,
'rows': summary.keys(),
'data': summary,
'cross': dict(cross)}
if report_format == 'csv':
return self._multi_summary_csv(report)
else:
return report
def by_cataloger_and_format(self, report_format='html'):
summary = {}
edits = self.edits
distinct_formats = self.distinct_formats()
cross = defaultdict(int)
for edit in edits:
try:
cataloger = settings_app.CATALOGERS[edit.editor.upper()]
except KeyError:
cataloger = edit.editor.upper()
summary[cataloger] = summary.get(cataloger, {})
for df in distinct_formats:
format = self._material_string(df)
ov = self._value(edit)
if df == edit.mat_type:
v = ov
else:
v = 0
summary[cataloger][format] = summary[cataloger].get(format, 0) + v
summary[cataloger]['total'] = summary[cataloger].get('total', 0) + v
cross[format.lower()] += v
cross['total'] += v
header = [self._material_string(d) for d in distinct_formats]
header.append('Total')
report = {'header': header,
'rows': summary.keys(),
'data': summary,
'cross': dict(cross)}
if report_format == 'csv':
return self._multi_summary_csv(report)
else:
return report
def by_cataloger_and_edit_type(self, report_format='html'):
summary = {}
edits = self.edits
distinct = self.distinct_cat_types()
cross = defaultdict(int)
for dt in distinct:
for edit in edits:
try:
cataloger = settings_app.CATALOGERS[edit.editor.upper()]
except KeyError:
cataloger = edit.editor.upper()
summary[cataloger] = summary.get(cataloger, {})
edit_type = edit.type.lower()
if edit_type == 'catalog':
edit_type = edit.source
ov = self._value(edit)
if edit_type == dt:
v = ov
else:
v = 0
summary[cataloger][dt] = summary[cataloger].get(dt, 0) + v
summary[cataloger]['total'] = summary[cataloger].get('total', 0) + v
cross[dt] += v
cross['total'] += v
header = [d.title() for d in distinct]
header.append('Total')
report = {'header': header,
'rows': summary.keys(),
'data': summary,
'cross': dict(cross)}
if report_format == 'csv':
return self._multi_summary_csv(report)
else:
return report
def gchart(self, vals, period, name, color='438043'):
    """Thin wrapper delegating to gchart_url()."""
    url = self.gchart_url(vals, period, name, color=color)
    return url
def gchart_url(self, vals, period, name, color='438043'):
    """Build a Google Chart API pie-chart URL for the given report values.

    TODO: refactor this with identical accession_report_view_helper.py
    function.
    """
    labels = []
    values = []
    for label, value in vals['data']:
        labels.append(label)
        values.append(value)
    try:
        # Scale runs from 0 up to the FIRST value — presumably the data
        # arrives sorted largest-first; verify against callers.
        high = values[0]
    except IndexError:
        # Nothing to chart.
        return
    params = {
        'range': "%s, %s" % (0, high),
        'data_values': ','.join(str(v) for v in values),
        'data_labels': '|'.join(l.replace(' ', '+') for l in labels),
        'period': period.replace(' ', '+'),
        'color': color,
        'chart_name': name.replace(' ', '+'),
    }
    chart_url = ("http://chart.apis.google.com/chart?chs=450x300"
                 "&cht=p"
                 "&chco=%(color)s"
                 "&chds=%(range)s"
                 "&chd=t:%(data_values)s"
                 "&chl=%(data_labels)s"
                 "&chtt=%(period)s+%(chart_name)s"
                 "&chma=40,40,40,40"
                 "&chts=000000,18") % params
    # Substituted values may still carry whitespace (e.g. the range
    # separator); strip newlines and spaces exactly as the original did.
    return chart_url.replace('\n', '').replace(' ', '')
## end class CatalogingReport()
| mit |
tensorflow/tensorflow | tensorflow/lite/experimental/microfrontend/python/kernel_tests/audio_microfrontend_op_test.py | 23 | 5906 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AudioMicrofrontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
from tensorflow.python.framework import ops
# Parameters shared by every test case below; they are passed straight
# through to frontend_op.audio_microfrontend.
SAMPLE_RATE = 1000
WINDOW_SIZE = 25
WINDOW_STEP = 10
NUM_CHANNELS = 2
UPPER_BAND_LIMIT = 450.0
LOWER_BAND_LIMIT = 8.0
SMOOTHING_BITS = 10
class AudioFeatureGenerationTest(tf.test.TestCase):
  """Tests for the audio_microfrontend op.

  Every test feeds the same alternating int16 signal through the frontend
  and checks the produced filterbank values; the cases differ only in the
  stacking/striding/padding options passed to the op, so the common setup
  is factored into _filterbanks().
  """

  def setUp(self):
    super(AudioFeatureGenerationTest, self).setUp()
    # The tests drive the op through its graph-mode interface
    # (Tensor.eval / self.evaluate inside test_session).
    ops.disable_eager_execution()

  def _filterbanks(self, num_steps=4, **kwargs):
    """Run the frontend over the standard test signal.

    Args:
      num_steps: how many WINDOW_STEP advances worth of audio to generate
        beyond the first window.
      **kwargs: extra options forwarded to audio_microfrontend on top of
        the shared module-level defaults.

    Returns:
      The filterbank output tensor (not yet evaluated).
    """
    audio = tf.constant(
        [0, 32767, 0, -32768] * ((WINDOW_SIZE + num_steps * WINDOW_STEP) // 4),
        tf.int16)
    return frontend_op.audio_microfrontend(
        audio,
        sample_rate=SAMPLE_RATE,
        window_size=WINDOW_SIZE,
        window_step=WINDOW_STEP,
        num_channels=NUM_CHANNELS,
        upper_band_limit=UPPER_BAND_LIMIT,
        lower_band_limit=LOWER_BAND_LIMIT,
        smoothing_bits=SMOOTHING_BITS,
        enable_pcan=True,
        **kwargs)

  def testSimple(self):
    with self.test_session():
      filterbanks = self._filterbanks()
      self.assertAllEqual(filterbanks.eval(),
                          [[479, 425], [436, 378], [410, 350], [391, 325]])

  def testSimpleFloatScaled(self):
    with self.test_session():
      filterbanks = self._filterbanks(out_scale=64, out_type=tf.float32)
      self.assertAllEqual(filterbanks.eval(),
                          [[7.484375, 6.640625], [6.8125, 5.90625],
                           [6.40625, 5.46875], [6.109375, 5.078125]])

  def testStacking(self):
    with self.test_session():
      filterbanks = self._filterbanks(right_context=1, frame_stride=2)
      self.assertAllEqual(filterbanks.eval(),
                          [[479, 425, 436, 378], [410, 350, 391, 325]])

  def testStackingWithOverlap(self):
    with self.test_session():
      filterbanks = self._filterbanks(left_context=1, right_context=1)
      self.assertAllEqual(
          self.evaluate(filterbanks),
          [[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
           [436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])

  def testStackingDropFrame(self):
    with self.test_session():
      filterbanks = self._filterbanks(left_context=1, frame_stride=2)
      self.assertAllEqual(filterbanks.eval(),
                          [[479, 425, 479, 425], [436, 378, 410, 350]])

  def testZeroPadding(self):
    with self.test_session():
      filterbanks = self._filterbanks(
          num_steps=7, left_context=2, frame_stride=3, zero_padding=True)
      self.assertAllEqual(
          self.evaluate(filterbanks),
          [[0, 0, 0, 0, 479, 425], [436, 378, 410, 350, 391, 325],
           [374, 308, 362, 292, 352, 275]])
# Allow running this test file directly.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
caithess/taskbuster | functional_tests/test_all_users.py | 1 | 2530 | # -*- coding: utf-8 -*-
from datetime import date
from selenium import webdriver
from django.core.urlresolvers import reverse
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils import formats
from django.utils.translation import activate
class HomeNewVisitorTest(StaticLiveServerTestCase):
    """Browser-level (Selenium/Chrome) checks of the public home page."""

    def setUp(self):
        # One fresh browser per test; a short implicit wait keeps runs fast.
        self.browser = webdriver.Chrome()
        self.browser.implicitly_wait(3)
        activate('en')

    def tearDown(self):
        self.browser.quit()

    def get_full_url(self, namespace):
        """Absolute URL of a named view on the live test server."""
        return self.live_server_url + reverse(namespace)

    def test_home_title(self):
        self.browser.get(self.get_full_url('home'))
        self.assertIn('TaskBuster', self.browser.title)

    def test_h1_css(self):
        self.browser.get(self.get_full_url('home'))
        heading = self.browser.find_element_by_tag_name('h1')
        color = heading.value_of_css_property('color')
        self.assertEqual(color, 'rgba(200, 50, 255, 1)')

    def test_home_files(self):
        # Both static text files must be served (no 404 page).
        for path in ("/robots.txt", "/humans.txt"):
            self.browser.get(self.live_server_url + path)
            self.assertNotIn("Page not found", self.browser.title)

    def test_international_translation(self):
        expected = [('en', 'Welcome to TaskBuster!'),
                    ('es', '¡Bienvenido a TaskBuster!')]
        for lang, heading_text in expected:
            activate(lang)
            self.browser.get(self.get_full_url("home"))
            heading = self.browser.find_element_by_tag_name("h1")
            self.assertEqual(heading.text, heading_text)

    def test_localization(self):
        today = date.today()
        for lang in ('en', 'es'):
            activate(lang)
            self.browser.get(self.get_full_url("home"))
            localized = self.browser.find_element_by_id("local-date")
            unlocalized = self.browser.find_element_by_id("non-local-date")
            self.assertEqual(formats.date_format(today, use_l10n=True),
                             localized.text)
            self.assertEqual(today.strftime('%Y-%m-%d'), unlocalized.text)

    def test_time_zone(self):
        self.browser.get(self.get_full_url("home"))
        tz = self.browser.find_element_by_id("time-tz").text
        utc = self.browser.find_element_by_id("time-utc").text
        ny = self.browser.find_element_by_id("time-ny").text
        self.assertNotEqual(tz, utc)
        self.assertNotIn(ny, [tz, utc])
| mit |
luotao1/Paddle | python/paddle/fluid/dygraph/dygraph_to_static/ast_transformer.py | 1 | 6469 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# gast is a generic AST to represent Python2 and Python3's Abstract Syntax Tree(AST).
# It provides a compatibility layer between the AST of various Python versions,
# as produced by ast.parse from the standard ast module.
# See details in https://github.com/serge-sans-paille/gast/
import gast
from paddle.fluid.dygraph.dygraph_to_static.assert_transformer import AssertTransformer
from paddle.fluid.dygraph.dygraph_to_static.basic_api_transformer import BasicApiTransformer
from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import BreakContinueTransformer
from paddle.fluid.dygraph.dygraph_to_static.break_continue_transformer import BreakTransformOptimizer
from paddle.fluid.dygraph.dygraph_to_static.call_transformer import CallTransformer
from paddle.fluid.dygraph.dygraph_to_static.cast_transformer import CastTransformer
from paddle.fluid.dygraph.dygraph_to_static.ifelse_transformer import IfElseTransformer
from paddle.fluid.dygraph.dygraph_to_static.list_transformer import ListTransformer
from paddle.fluid.dygraph.dygraph_to_static.logical_transformer import LogicalTransformer
from paddle.fluid.dygraph.dygraph_to_static.loop_transformer import LoopTransformer
from paddle.fluid.dygraph.dygraph_to_static.print_transformer import PrintTransformer
from paddle.fluid.dygraph.dygraph_to_static.return_transformer import ReturnTransformer
from paddle.fluid.dygraph.dygraph_to_static.static_analysis import StaticAnalysisVisitor
from paddle.fluid.dygraph.dygraph_to_static.tensor_shape_transformer import TensorShapeTransformer
from paddle.fluid.dygraph.dygraph_to_static import logging_utils
from paddle.fluid.dygraph.dygraph_to_static.utils import ast_to_source_code
from paddle.fluid.dygraph.dygraph_to_static.utils import get_attribute_full_name
# Public API of this module.
__all__ = ['DygraphToStaticAst']

# Decorator names recognized as dygraph-to-static entry points; they are
# stripped from transformed functions in visit_FunctionDef.
DECORATOR_NAMES = ['declarative', 'to_static', 'dygraph_to_static_func']
class DygraphToStaticAst(gast.NodeTransformer):
    """
    Main class to transform Dygraph to Static Graph
    """

    def __init__(self):
        self.translator_logger = logging_utils.TranslatorLogger()

    def get_static_ast(self, root):
        """Run the full dygraph->static transformation over ``root`` and
        return the wrapped (statically analyzed) AST root."""
        # save root for some analysis may need global AST
        self.root = root
        self.static_analysis_visitor = StaticAnalysisVisitor(root)
        self.static_analysis_root = self.static_analysis_visitor.get_node_wrapper_root(
        )
        self.decorate_func_name = None
        self.transfer_from_node_type(self.static_analysis_root)
        return self.static_analysis_root

    def _apply(self, transformer, node_wrapper, log_level):
        # Instantiate and run one transformer pass, then log the
        # intermediate source produced at this pipeline stage.
        transformer(node_wrapper).transform()
        self.translator_logger.log_transformed_code(log_level, self.root,
                                                    transformer.__name__)

    def transfer_from_node_type(self, node_wrapper):
        """Apply every transformer pass, in order, to the wrapped AST."""
        self.translator_logger.log(
            1, "Source code: \n{}".format(ast_to_source_code(self.root)))
        # Generic transformation
        self.visit(node_wrapper.node)

        # The passes below run sequentially, each rewriting the same AST;
        # the list order is significant.
        transformers = [
            BasicApiTransformer,  # Basic Api
            TensorShapeTransformer,  # Tensor.shape -> layers.shape(Tensor)
            ListTransformer,  # List used in control flow
            BreakTransformOptimizer,  # optimize transformation of break in loops
            BreakContinueTransformer,  # break/continue in loops
            ReturnTransformer,  # return in functions
            LogicalTransformer,  # logical and/or/not
            LoopTransformer,  # for/while -> while_op
            IfElseTransformer,  # if/else -> cond_op
            AssertTransformer,  # assert statement
            PrintTransformer,  # print statement
            CallTransformer,  # transform call recursively
            CastTransformer,  # type casting statement
        ]

        for index, transformer in enumerate(transformers):
            self._apply(transformer, node_wrapper, log_level=index + 1)

        self.translator_logger.log_transformed_code(
            logging_utils.LOG_AllTransformer, self.root, "All Transformers")

    def visit_FunctionDef(self, node):
        # The first FunctionDef visited is treated as the decorated entry
        # function; its name is reused by get_module_name().
        if self.decorate_func_name is None:
            self.decorate_func_name = node.name

        self.generic_visit(node)
        # Remove the decorated name of dygraph_to_static
        if hasattr(node, 'decorator_list'):
            # NOTE(review): nothing is ever appended to decorator_list, so
            # ALL decorators are dropped from the output function, and
            # decorators that are neither gast.Name nor gast.Attribute
            # (e.g. call forms like @to_static(...)) pass the checks
            # silently — TODO confirm this is intended.
            decorator_list = []
            for d in node.decorator_list:
                if isinstance(d, gast.Name) and d.id not in DECORATOR_NAMES:
                    raise NotImplementedError(
                        "ProgramTranslator hasn't implemented multiple decorators. Please remove "
                        + d.id + " in " + self.decorate_func_name)
                if isinstance(d, gast.Attribute):
                    full_attribute_name = get_attribute_full_name(d)
                    has_translate_decorator = False
                    for deco in DECORATOR_NAMES:
                        if deco in full_attribute_name:
                            has_translate_decorator = True
                            break
                    if not has_translate_decorator:
                        raise NotImplementedError(
                            "ProgramTranslator hasn't implemented multiple decorators. Please remove "
                            + full_attribute_name + " in " +
                            self.decorate_func_name)
            node.decorator_list = decorator_list
        return node

    def get_module_name(self):
        """
        Return the main function name which will be used as module name
        in ast_to_func.
        """
        # Should consider BaseAPITransformer which add new module name in Yamei's PR.
        assert self.decorate_func_name, "decorate_func_name shall not be None."
        return self.decorate_func_name
| apache-2.0 |
Vegaviet-DevTeam/android_kernel_pantech_ef63-common | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the directories supplied via
    # the PYTHON_EXTBUILD_* environment variables (read at module level
    # below) instead of the distutils defaults.
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same externally-chosen build directory that
    # build_ext writes into.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Extra compile flags for the extension; callers can extend them via $CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Output locations chosen by the surrounding build system.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Source list lives in util/python-ext-sources; skip blank lines and
# '#' comments.
# NOTE(review): uses the Python 2 `file()` builtin and never closes the
# handle — fine for a short-lived setup script, but py2-only.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
Jgarcia-IAS/localizacion | openerp/addons-extra/odoo-pruebas/odoo-server/addons/hr_timesheet_sheet/__init__.py | 434 | 1127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_sheet
import wizard
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
p0cisk/Quantum-GIS | python/plugins/db_manager/dlg_import_vector.py | 1 | 14827 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Name : DB Manager
Description : Database manager plugin for QGIS
Date : Oct 13, 2011
copyright : (C) 2011 by Giuseppe Sucameli
email : brush.tyler@gmail.com
The content of this file is based on
- PG_Manager by Martin Dobias (GPLv2 license)
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from builtins import str
from builtins import range
from qgis.PyQt.QtCore import Qt, QSettings, QFileInfo
from qgis.PyQt.QtWidgets import QDialog, QFileDialog, QMessageBox, QApplication
from qgis.PyQt.QtGui import QCursor
from qgis.core import QgsDataSourceUri, QgsVectorLayer, QgsRasterLayer, QgsMimeDataUtils, QgsMapLayer, QgsProviderRegistry, QgsCoordinateReferenceSystem, QgsVectorLayerImport
from qgis.gui import QgsMessageViewer
from qgis.utils import iface
from .ui.ui_DlgImportVector import Ui_DbManagerDlgImportVector as Ui_Dialog
class DlgImportVector(QDialog, Ui_Dialog):
    """Dialog that imports a vector layer/file into a database table.

    Two working modes: HAS_INPUT_MODE (input layer supplied by the caller)
    and ASK_FOR_INPUT_MODE (user picks a loaded layer or a file).
    """

    HAS_INPUT_MODE, ASK_FOR_INPUT_MODE = list(range(2))

    def __init__(self, inLayer, outDb, outUri, parent=None):
        QDialog.__init__(self, parent)
        self.inLayer = inLayer
        self.db = outDb
        self.outUri = outUri
        self.setupUi(self)

        self.default_pk = "id"
        self.default_geom = "geom"

        self.mode = self.ASK_FOR_INPUT_MODE if self.inLayer is None else self.HAS_INPUT_MODE

        # used to delete the inlayer whether created inside this dialog
        self.inLayerMustBeDestroyed = False

        self.populateSchemas()
        self.populateTables()
        self.populateLayers()
        self.populateEncodings()

        # updates of UI
        self.setupWorkingMode(self.mode)

        self.cboSchema.currentIndexChanged.connect(self.populateTables)

    def setupWorkingMode(self, mode):
        """ hide the widget to select a layer/file if the input layer is already set """
        self.wdgInput.setVisible(mode == self.ASK_FOR_INPUT_MODE)
        self.resize(450, 350)

        self.cboTable.setEditText(self.outUri.table())

        if mode == self.ASK_FOR_INPUT_MODE:
            self.btnChooseInputFile.clicked.connect(self.chooseInputFile)
            # self.cboInputLayer.lineEdit().editingFinished.connect(self.updateInputLayer)
            self.cboInputLayer.editTextChanged.connect(self.inputPathChanged)
            # self.cboInputLayer.currentIndexChanged.connect(self.updateInputLayer)
            self.btnUpdateInputLayer.clicked.connect(self.updateInputLayer)

            self.editPrimaryKey.setText(self.default_pk)
            self.editGeomColumn.setText(self.default_geom)
        else:
            # set default values
            self.checkSupports()
            self.updateInputLayer()

    def checkSupports(self):
        """ update options available for the current input layer """
        allowSpatial = self.db.connector.hasSpatialSupport()
        hasGeomType = self.inLayer and self.inLayer.hasGeometryType()
        isShapefile = self.inLayer and self.inLayer.providerType() == "ogr" and self.inLayer.storageType() == "ESRI Shapefile"

        # Each option is force-unchecked when disabled so a previously
        # checked box cannot silently apply to an unsupported layer.
        self.chkGeomColumn.setEnabled(allowSpatial and hasGeomType)
        if not self.chkGeomColumn.isEnabled():
            self.chkGeomColumn.setChecked(False)

        self.chkSourceSrid.setEnabled(allowSpatial and hasGeomType)
        if not self.chkSourceSrid.isEnabled():
            self.chkSourceSrid.setChecked(False)
        self.chkTargetSrid.setEnabled(allowSpatial and hasGeomType)
        if not self.chkTargetSrid.isEnabled():
            self.chkTargetSrid.setChecked(False)
        self.chkSinglePart.setEnabled(allowSpatial and hasGeomType and isShapefile)
        if not self.chkSinglePart.isEnabled():
            self.chkSinglePart.setChecked(False)
        self.chkSpatialIndex.setEnabled(allowSpatial and hasGeomType)
        if not self.chkSpatialIndex.isEnabled():
            self.chkSpatialIndex.setChecked(False)

    def populateLayers(self):
        """Fill the input combo with the loaded vector layers; each item's
        data is the layer's index in the legend."""
        self.cboInputLayer.clear()
        for index, layer in enumerate(iface.legendInterface().layers()):
            # TODO: add import raster support!
            if layer.type() == QgsMapLayer.VectorLayer:
                self.cboInputLayer.addItem(layer.name(), index)

    def deleteInputLayer(self):
        """ unset the input layer, then destroy it but only if it was created from this dialog """
        if self.mode == self.ASK_FOR_INPUT_MODE and self.inLayer:
            if self.inLayerMustBeDestroyed:
                self.inLayer.deleteLater()
            self.inLayer = None
            self.inLayerMustBeDestroyed = False
            return True
        return False

    def chooseInputFile(self):
        """Open a file dialog and put the chosen path into the input combo."""
        vectorFormats = QgsProviderRegistry.instance().fileVectorFilters()
        # get last used dir and format
        settings = QSettings()
        lastDir = settings.value("/db_manager/lastUsedDir", "")
        lastVectorFormat = settings.value("/UI/lastVectorFileFilter", "")
        # ask for a filename
        filename, lastVectorFormat = QFileDialog.getOpenFileName(self, self.tr("Choose the file to import"),
                                                                 lastDir, vectorFormats, lastVectorFormat)
        if filename == "":
            return
        # store the last used dir and format
        settings.setValue("/db_manager/lastUsedDir", QFileInfo(filename).filePath())
        settings.setValue("/UI/lastVectorFileFilter", lastVectorFormat)

        self.cboInputLayer.setEditText(filename)

    def inputPathChanged(self, path):
        """Typing a path deselects any chosen legend layer while keeping
        the typed text (signals blocked to avoid recursion)."""
        if self.cboInputLayer.currentIndex() < 0:
            return
        self.cboInputLayer.blockSignals(True)
        self.cboInputLayer.setCurrentIndex(-1)
        self.cboInputLayer.setEditText(path)
        self.cboInputLayer.blockSignals(False)

    def reloadInputLayer(self):
        """ create the input layer and update available options """
        if self.mode != self.ASK_FOR_INPUT_MODE:
            return True
        self.deleteInputLayer()

        index = self.cboInputLayer.currentIndex()
        if index < 0:
            # Free text: treat it as a file path and load it via OGR.
            filename = self.cboInputLayer.currentText()
            if filename == "":
                return False

            layerName = QFileInfo(filename).completeBaseName()
            layer = QgsVectorLayer(filename, layerName, "ogr")
            if not layer.isValid() or layer.type() != QgsMapLayer.VectorLayer:
                layer.deleteLater()
                return False

            self.inLayer = layer
            self.inLayerMustBeDestroyed = True
        else:
            # A legend layer was picked; it is owned by the project.
            legendIndex = self.cboInputLayer.itemData(index)
            self.inLayer = iface.legendInterface().layers()[legendIndex]
            self.inLayerMustBeDestroyed = False

        self.checkSupports()
        return True

    def updateInputLayer(self):
        """Refresh table name, pk, geometry column and SRIDs from the
        current input layer. Returns False if no valid input layer."""
        if not self.reloadInputLayer() or not self.inLayer:
            return False

        # update the output table name, pk and geom column
        self.cboTable.setEditText(self.inLayer.name())

        srcUri = QgsDataSourceUri(self.inLayer.source())
        pk = srcUri.keyColumn() if srcUri.keyColumn() else self.default_pk
        self.editPrimaryKey.setText(pk)
        geom = srcUri.geometryColumn() if srcUri.geometryColumn() else self.default_geom
        self.editGeomColumn.setText(geom)

        srcCrs = self.inLayer.crs()
        srid = srcCrs.postgisSrid() if srcCrs.isValid() else 4326
        self.editSourceSrid.setText("%s" % srid)
        self.editTargetSrid.setText("%s" % srid)

        return True

    def populateSchemas(self):
        """Fill the schema combo, preselecting the output URI's schema;
        hides the combo for databases without schema support."""
        if not self.db:
            return

        self.cboSchema.clear()
        schemas = self.db.schemas()
        if schemas is None:
            self.hideSchemas()
            return

        index = -1
        for schema in schemas:
            self.cboSchema.addItem(schema.name)
            if schema.name == self.outUri.schema():
                index = self.cboSchema.count() - 1
        self.cboSchema.setCurrentIndex(index)

    def hideSchemas(self):
        self.cboSchema.setEnabled(False)

    def populateTables(self):
        """Fill the table combo with the tables of the selected schema,
        preserving whatever name the user has typed."""
        if not self.db:
            return

        currentText = self.cboTable.currentText()

        schemas = self.db.schemas()
        if schemas is not None:
            schema_name = self.cboSchema.currentText()
            matching_schemas = [x for x in schemas if x.name == schema_name]
            tables = matching_schemas[0].tables() if len(matching_schemas) > 0 else []
        else:
            tables = self.db.tables()

        self.cboTable.clear()
        for table in tables:
            self.cboTable.addItem(table.name)

        self.cboTable.setEditText(currentText)

    def populateEncodings(self):
        """Offer a fixed set of encodings, defaulting to UTF-8."""
        encodings = ['ISO-8859-1', 'ISO-8859-2', 'UTF-8', 'CP1250']
        for enc in encodings:
            self.cboEncoding.addItem(enc)
        self.cboEncoding.setCurrentIndex(2)

    def accept(self):
        """Validate the form, run the import, then close on success."""
        if self.mode == self.ASK_FOR_INPUT_MODE:
            # create the input layer (if not already done) and
            # update available options
            self.reloadInputLayer()

        # sanity checks
        if self.inLayer is None:
            QMessageBox.information(self, self.tr("Import to database"), self.tr("Input layer missing or not valid"))
            return

        if self.cboTable.currentText() == "":
            QMessageBox.information(self, self.tr("Import to database"), self.tr("Output table name is required"))
            return

        # NOTE(review): QLineEdit.text() does not raise ValueError, so these
        # two SRID validations are dead code; int() conversion happens later
        # inside the try block instead — TODO confirm and fix upstream.
        if self.chkSourceSrid.isEnabled() and self.chkSourceSrid.isChecked():
            try:
                sourceSrid = self.editSourceSrid.text()
            except ValueError:
                QMessageBox.information(self, self.tr("Import to database"),
                                        self.tr("Invalid source srid: must be an integer"))
                return

        if self.chkTargetSrid.isEnabled() and self.chkTargetSrid.isChecked():
            try:
                targetSrid = self.editTargetSrid.text()
            except ValueError:
                QMessageBox.information(self, self.tr("Import to database"),
                                        self.tr("Invalid target srid: must be an integer"))
                return

        # override cursor
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        # store current input layer crs and encoding, so I can restore it
        prevInCrs = self.inLayer.crs()
        prevInEncoding = self.inLayer.dataProvider().encoding()

        try:
            schema = self.outUri.schema() if not self.cboSchema.isEnabled() else self.cboSchema.currentText()
            table = self.cboTable.currentText()

            # get pk and geom field names from the source layer or use the
            # ones defined by the user
            srcUri = QgsDataSourceUri(self.inLayer.source())

            pk = srcUri.keyColumn() if not self.chkPrimaryKey.isChecked() else self.editPrimaryKey.text()
            if not pk:
                pk = self.default_pk

            if self.inLayer.hasGeometryType() and self.chkGeomColumn.isEnabled():
                geom = srcUri.geometryColumn() if not self.chkGeomColumn.isChecked() else self.editGeomColumn.text()
                if not geom:
                    geom = self.default_geom
            else:
                geom = None

            # get output params, update output URI
            self.outUri.setDataSource(schema, table, geom, "", pk)
            uri = self.outUri.uri(False)

            providerName = self.db.dbplugin().providerName()

            options = {}
            if self.chkDropTable.isChecked():
                options['overwrite'] = True

            if self.chkSinglePart.isEnabled() and self.chkSinglePart.isChecked():
                options['forceSinglePartGeometryType'] = True

            outCrs = QgsCoordinateReferenceSystem()
            if self.chkTargetSrid.isEnabled() and self.chkTargetSrid.isChecked():
                targetSrid = int(self.editTargetSrid.text())
                outCrs = QgsCoordinateReferenceSystem(targetSrid)

            # update input layer crs and encoding
            if self.chkSourceSrid.isEnabled() and self.chkSourceSrid.isChecked():
                sourceSrid = int(self.editSourceSrid.text())
                inCrs = QgsCoordinateReferenceSystem(sourceSrid)
                self.inLayer.setCrs(inCrs)

            if self.chkEncoding.isEnabled() and self.chkEncoding.isChecked():
                enc = self.cboEncoding.currentText()
                self.inLayer.setProviderEncoding(enc)

            onlySelected = self.chkSelectedFeatures.isChecked()

            # do the import!
            ret, errMsg = QgsVectorLayerImport.importLayer(self.inLayer, uri, providerName, outCrs, onlySelected, False, options)
        except Exception as e:
            ret = -1
            errMsg = str(e)
        finally:
            # restore input layer crs and encoding
            self.inLayer.setCrs(prevInCrs)
            self.inLayer.setProviderEncoding(prevInEncoding)

            # restore cursor
            QApplication.restoreOverrideCursor()

        if ret != 0:
            output = QgsMessageViewer()
            output.setTitle(self.tr("Import to database"))
            output.setMessageAsPlainText(self.tr("Error %d\n%s") % (ret, errMsg))
            output.showMessage()
            return

        # create spatial index
        if self.chkSpatialIndex.isEnabled() and self.chkSpatialIndex.isChecked():
            self.db.connector.createSpatialIndex((schema, table), geom)

        QMessageBox.information(self, self.tr("Import to database"), self.tr("Import was successful."))
        return QDialog.accept(self)

    def closeEvent(self, event):
        # destroy the input layer instance but only if it was created
        # from this dialog!
        self.deleteInputLayer()
        QDialog.closeEvent(self, event)
if __name__ == '__main__':
    # Standalone preview of the dialog.
    # NOTE(review): DlgImportVector.__init__ requires (inLayer, outDb,
    # outUri); this no-argument call would raise TypeError — TODO confirm.
    import sys

    a = QApplication(sys.argv)
    dlg = DlgImportVector()
    dlg.show()
    sys.exit(a.exec_())
| gpl-2.0 |
salamb/girder | plugins/geospatial/server/geospatial.py | 3 | 21289 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import bson.json_util
import six
from geojson import GeoJSON
from pymongo import GEOSPHERE
from pymongo.errors import OperationFailure
from girder.api import access
from girder.api.describe import Description, describeRoute
from girder.api.rest import loadmodel, Resource, RestException
from girder.constants import AccessType
# Item document field under which the GeoJSON geometry is stored.
GEOSPATIAL_FIELD = 'geo'
class GeospatialItem(Resource):
"""
Geospatial methods added to the API endpoint for items.
"""
    @access.user
    @describeRoute(
        Description('Create new items from a GeoJSON feature or feature'
                    ' collection.')
        .param('folderId', 'The ID of the parent folder.', required=True,
               paramType='query')
        .param('geoJSON', 'A GeoJSON object containing the features or feature'
               ' collection to add.', required=True,
               paramType='query')
        .errorResponse()
        .errorResponse('Invalid GeoJSON was passed in request body.')
        .errorResponse('GeoJSON feature or feature collection was not passed in'
                       ' request body.')
        .errorResponse("GeoJSON feature did not contain a property named"
                       " 'name'.")
        .errorResponse('Property name was invalid.')
        .errorResponse('Write access was denied on the parent folder.', 403)
        .notes("All GeoJSON features must contain a property named 'name' from"
               " which the name of each created item is taken.")
    )
    def create(self, params):
        """
        Create new items from a GeoJSON feature or feature collection. All
        GeoJSON features must contain a property named 'name' from which the
        name of each created item is taken.

        :param params: parameters to the API call, including 'folderId' and
                       'geoJSON'.
        :type params: dict[str, unknown]
        :returns: filtered fields of the created items with properties appended
                  to the 'meta' field and geospatial data appended to the 'geo'
                  field of each item.
        :rtype: list[dict[str, unknown]]
        :raise RestException: on malformed, forbidden, or unauthorized API call.
        """
        self.requireParams(('folderId', 'geoJSON'), params)

        # Parse with BSON-aware JSON, then validate the GeoJSON structure.
        try:
            geospatial = bson.json_util.loads(params['geoJSON'])
            GeoJSON.to_instance(geospatial, strict=True)
        except ValueError:
            raise RestException('Invalid GeoJSON passed in request body.')

        if geospatial['type'] == 'Feature':
            features = [geospatial]
        elif geospatial['type'] == 'FeatureCollection':
            features = geospatial['features']
        else:
            raise RestException('GeoJSON feature or feature collection must be '
                                'passed in request body.')

        # First pass: validate every feature and collect the item data, so
        # that no items are created when any feature is invalid.
        data = []

        for feature in features:
            # NOTE(review): per the GeoJSON spec 'properties' may be null,
            # which would raise TypeError below — TODO confirm to_instance
            # rules that out.
            properties = feature['properties']
            if 'name' not in properties:
                raise RestException("All GeoJSON features must contain a"
                                    " property named 'name'.")
            name = properties['name']
            del properties['name']

            if 'description' in properties:
                description = properties['description']
                del properties['description']
            else:
                description = ''

            # Remaining properties become item metadata keys; reject names
            # that are invalid as stored field names ('.' or leading '$').
            for key in properties:
                if not len(key):
                    raise RestException('Property names must be at least one'
                                        ' character long.')
                if '.' in key or key[0] == '$':
                    raise RestException('The property name %s must not contain'
                                        ' a period or begin with a dollar'
                                        ' sign.' % key)

            data.append({'name': name,
                         'description': description,
                         'metadata': properties,
                         'geometry': feature['geometry']})

        user = self.getCurrentUser()
        folder = self.model('folder').load(
            id=params['folderId'], user=user, level=AccessType.WRITE, exc=True)

        # Second pass: create the items and attach metadata + geometry.
        items = []

        for datum in data:
            newItem = self.model('item').createItem(
                folder=folder, name=datum['name'], creator=user,
                description=datum['description'])
            self.model('item').setMetadata(newItem, datum['metadata'])
            newItem[GEOSPATIAL_FIELD] = {'geometry': datum['geometry']}
            newItem = self.model('item').updateItem(newItem)
            items.append(newItem)

        return [self._filter(item) for item in items]
@access.public
@describeRoute(
Description('Search for an item by geospatial data.')
.param('q', 'Search query as a JSON object.', required=True)
.param('limit', 'Result set size limit (default=50).', required=False,
dataType='integer')
.param('offset', 'Offset into result set (default=0).', required=False,
dataType='integer')
.errorResponse()
)
def find(self, params):
"""
Search for an item by geospatial data.
:param params: parameters to the API call, including 'q'.
:type params: dict[str, unknown]
:returns: filtered fields of the matching items with geospatial data
appended to the 'geo' field of each item.
:rtype: list[dict[str, unknown]]
:raise RestException: on malformed API call.
"""
self.requireParams(('q',), params)
try:
query = bson.json_util.loads(params['q'])
except ValueError:
raise RestException("Invalid JSON passed as 'q' parameter.")
limit, offset, sort = self.getPagingParameters(params, 'lowerName')
return self._find(query, limit, offset, sort)
@access.public
@describeRoute(
Description('Search for items that intersects with a GeoJSON object.')
.param('field', 'Name of field containing GeoJSON on which to search.',
required=True)
.param('geometry', 'Search query condition as a GeoJSON object.',
required=True)
.param('limit', 'Result set size limit (default=50).', required=False,
dataType='integer')
.param('offset', 'Offset into result set (default=0).', required=False,
dataType='integer')
.errorResponse()
)
def intersects(self, params):
"""
Search for items that intersects with a GeoJSON object.
:param params: parameters to the API call, including 'field' and
'geometry'.
:type params: dict[str, unknown]
:returns: filtered fields of the matching items with geospatial data
appended to the 'geo' field of each item.
:rtype: list[dict[str, unknown]]
:raise RestException: on malformed API call.
"""
self.requireParams(('field', 'geometry'), params)
try:
geometry = bson.json_util.loads(params['geometry'])
GeoJSON.to_instance(geometry, strict=True)
except (TypeError, ValueError):
raise RestException("Invalid GeoJSON passed as 'geometry'"
" parameter.")
if params['field'][:3] == '%s.' % GEOSPATIAL_FIELD:
field = params['field'].strip()
else:
field = '%s.%s' % (GEOSPATIAL_FIELD, params['field'].strip())
query = {
field: {
'$geoIntersects': {
'$geometry': geometry
}
}
}
limit, offset, sort = self.getPagingParameters(params, 'lowerName')
return self._find(query, limit, offset, sort)
def _getGeometry(self, params):
try:
geometry = bson.json_util.loads(params['geometry'])
GeoJSON.to_instance(geometry, strict=True)
if geometry['type'] != 'Point':
raise ValueError
return geometry
except (TypeError, ValueError):
raise RestException("Invalid GeoJSON passed as 'geometry'"
" parameter.")
@access.public
@describeRoute(
Description('Search for items that are in proximity to a GeoJSON'
' point.')
.param('field', 'Name of field containing GeoJSON on which to search.',
required=True)
.param('geometry', 'Search query condition as a GeoJSON point.',
required=True)
.param('maxDistance', 'Limits results to items that are at most this'
' distance in meters from the GeoJSON point.',
required=False, dataType='number')
.param('minDistance', 'Limits results to items that are at least this'
' distance in meters from the GeoJSON point.',
required=False, dataType='number')
.param('ensureIndex', 'Create a 2dsphere index on the field on which to'
' search if one does not exist.', required=False,
dataType='boolean')
.param('limit', 'Result set size limit (default=50).', required=False,
dataType='integer')
.param('offset', 'Offset into result set (default=0).', required=False,
dataType='integer')
.errorResponse()
.errorResponse('Field on which to search was not indexed.')
.errorResponse('Index creation was denied.', 403)
.notes("Field on which to search be indexed by a 2dsphere index."
" Anonymous users may not use 'ensureIndex' to create such an"
" index.")
)
def near(self, params):
"""
Search for items that are in proximity to a GeoJSON point. The field on
which to search must be indexed by a '2dsphere' index. Anonymous users
may not use 'ensureIndex' to create such an index.
:param params: parameters to the API call, including 'field' and
'geometry'.
:type params: dict[str, unknown]
:returns: filtered fields of the matching items with geospatial data
appended to the 'geo' field of each item.
:rtype: list[dict[str, unknown]]
:raise RestException: on malformed or forbidden API call.
"""
self.requireParams(('field', 'geometry'), params)
condition = {
'$geometry': self._getGeometry(params)
}
for param in ('maxDistance', 'minDistance'):
if param in params:
try:
distance = float(params[param])
if distance < 0.0:
raise ValueError
except ValueError:
raise RestException("Parameter '%s' must be a number." %
param)
condition['$'+param] = distance
if params['field'][:3] == '%s.' % GEOSPATIAL_FIELD:
field = params['field'].strip()
else:
field = '%s.%s' % (GEOSPATIAL_FIELD, params['field'].strip())
if params.get('ensureIndex', False):
user = self.getCurrentUser()
if not user:
raise RestException('Index creation denied.', 403)
self.model('item').collection.create_index([(field, GEOSPHERE)])
query = {
field: {
'$near': condition
}
}
limit, offset, sort = self.getPagingParameters(params, 'lowerName')
try:
items = self._find(query, limit, offset, sort)
except OperationFailure:
raise RestException("Field '%s' must be indexed by a 2dsphere"
" index." % field)
return items
_RADIUS_OF_EARTH = 6378137.0 # average in meters
@access.public
@describeRoute(
Description('Search for items that are entirely within either a GeoJSON'
' polygon or a circular region.')
.param('field', 'Name of field containing GeoJSON on which to search.',
required=True)
.param('geometry', 'Search query condition as a GeoJSON polygon.',
required=False)
.param('center', 'Center of search radius as a GeoJSON point.',
required=False)
.param('radius', 'Search radius in meters.', required=False,
dataType='number')
.param('limit', 'Result set size limit (default=50).', required=False,
dataType='integer')
.param('offset', 'Offset into result set (default=0).', required=False,
dataType='integer')
.errorResponse()
.notes("Either parameter 'geometry' or both parameters 'center' "
" and 'radius' are required.")
)
def within(self, params):
"""
Search for items that are entirely within either a GeoJSON polygon or a
circular region. Either parameter 'geometry' or both parameters 'center'
and 'radius' are required.
:param params: parameters to the API call, including 'field' and either
'geometry' or both 'center' and 'radius'.
:type params: dict[str, unknown]
:returns: filtered fields of the matching items with geospatial data
appended to the 'geo' field of each item.
:rtype: list[dict[str, unknown]]
:raise RestException: on malformed API call.
"""
self.requireParams(('field',), params)
if 'geometry' in params:
try:
geometry = bson.json_util.loads(params['geometry'])
GeoJSON.to_instance(geometry, strict=True)
if geometry['type'] != 'Polygon':
raise ValueError
except (TypeError, ValueError):
raise RestException("Invalid GeoJSON passed as 'geometry'"
" parameter.")
condition = {
'$geometry': geometry
}
elif 'center' in params and 'radius' in params:
try:
radius = float(params['radius']) / self._RADIUS_OF_EARTH
if radius < 0.0:
raise ValueError
except ValueError:
raise RestException("Parameter 'radius' must be a number.")
try:
center = bson.json_util.loads(params['center'])
GeoJSON.to_instance(center, strict=True)
if center['type'] != 'Point':
raise ValueError
except (TypeError, ValueError):
raise RestException("Invalid GeoJSON passed as 'center'"
" parameter.")
condition = {
'$centerSphere': [center['coordinates'], radius]
}
else:
raise RestException("Either parameter 'geometry' or both parameters"
" 'center' and 'radius' are required.")
if params['field'][:3] == '%s.' % GEOSPATIAL_FIELD:
field = params['field'].strip()
else:
field = '%s.%s' % (GEOSPATIAL_FIELD, params['field'].strip())
limit, offset, sort = self.getPagingParameters(params, 'lowerName')
query = {
field: {
'$geoWithin': condition
}
}
return self._find(query, limit, offset, sort)
    @access.public
    @loadmodel(model='item', level=AccessType.READ)
    @describeRoute(
        Description('Get an item and its geospatial data by ID.')
        .param('id', 'The ID of the item.', paramType='path')
        .errorResponse('ID was invalid.')
        .errorResponse('Read access was denied for the item.', 403)
    )
    def getGeospatial(self, item, params):
        """
        Get an item and its geospatial data by ID.

        The item itself is loaded and access-checked by the ``loadmodel``
        decorator; this method only filters it for output.

        :param item: item to return along with its geospatial data.
        :type item: dict[str, unknown]
        :param params: parameters to the API call, unused.
        :type params: dict[str, unknown]
        :returns: filtered fields of the item with geospatial data appended to
                  its 'geo' field.
        :rtype : dict[str, unknown]
        :raise RestException: on malformed or forbidden API call.
        """
        return self._filter(item)
@access.user
@loadmodel(model='item', level=AccessType.WRITE)
@describeRoute(
Description('Set geospatial fields on an item.')
.notes('Set geospatial fields to null to delete them.')
.param('id', 'The ID of the item.', paramType='path')
.param('body', 'A JSON object containing the geospatial fields to add.',
paramType='body')
.errorResponse('ID was invalid.')
.errorResponse('Invalid JSON was passed in request body.')
.errorResponse('Geospatial key name was invalid.')
.errorResponse('Geospatial field did not contain valid GeoJSON.')
.errorResponse('Write access was denied for the item.', 403)
)
def setGeospatial(self, item, params):
"""
Set geospatial data on an item.
:param item: item on which to set geospatial data.
:type item: dict[str, unknown]
:param params: parameters to the API call, unused.
:type params: dict[str, unknown]
:returns: filtered fields of the item with geospatial data appended to
its 'geo' field.
:rtype : dict[str, unknown]
:raise RestException: on malformed, forbidden, or unauthorized API call.
"""
geospatial = self.getBodyJson()
for k, v in six.viewitems(geospatial):
if '.' in k or k[0] == '$':
raise RestException('Geospatial key name %s must not contain a'
' period or begin with a dollar sign.' % k)
if v:
try:
GeoJSON.to_instance(v, strict=True)
except ValueError:
raise RestException('Geospatial field with key %s does not'
' contain valid GeoJSON: %s' % (k, v))
if GEOSPATIAL_FIELD not in item:
item[GEOSPATIAL_FIELD] = dict()
item[GEOSPATIAL_FIELD].update(six.viewitems(geospatial))
keys = [k for k, v in six.viewitems(item[GEOSPATIAL_FIELD])
if v is None]
for key in keys:
del item[GEOSPATIAL_FIELD][key]
item = self.model('item').updateItem(item)
return self._filter(item)
def _filter(self, item):
"""
Helper to filter the fields of an item and append its geospatial data.
:param item: item whose fields to filter and geospatial data append.
:type item: dict[str, unknown]
:returns: filtered fields of the item with geospatial data appended to
its 'geo' field.
:rtype : dict[str, unknown]
"""
filtered = self.model('item').filter(item)
if GEOSPATIAL_FIELD in item:
filtered[GEOSPATIAL_FIELD] = item[GEOSPATIAL_FIELD]
else:
filtered[GEOSPATIAL_FIELD] = {}
return filtered
def _find(self, query, limit, offset, sort):
"""
Helper to search the geospatial data of items and return the filtered
fields and geospatial data of the matching items.
:param query: geospatial search query.
:type query: dict[str, unknown]
:param limit: maximum number of matching items to return.
:type limit: int
:param offset: offset of matching items to return.
:type offset: int
:param sort: field by which to sort the matching items
:type sort: str
:returns: filtered fields of the matching items with geospatial data
appended to the 'geo' field of each item.
:rtype : list[dict[str, unknown]]
"""
user = self.getCurrentUser()
cursor = self.model('item').find(query, sort=sort)
return [self._filter(result) for result in
self.model('item')
.filterResultsByPermission(cursor, user, AccessType.READ,
limit, offset)]
| apache-2.0 |
Obus/scikit-learn | examples/decomposition/plot_image_denoising.py | 84 | 5820 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using firstly online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer from the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
# Scale 8-bit pixel values into [0, 1).
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
# NOTE(review): this slice mixes `height` and `width`; it works because the
# Lena image is square -- confirm before adapting to non-square inputs.
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
# Normalize the training patches: zero mean and unit variance per component.
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
# 100 dictionary atoms, sparsity penalty alpha=1, 500 online iterations.
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
# Display each learned atom as a 7x7 grayscale tile in a 10x10 grid.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
             'Train time %.1fs on %d patches' % (dt, len(data)),
             fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
    """Plot *image* beside its difference from *reference*.

    The right panel's title carries the Frobenius norm of the residual, so a
    perfect reconstruction shows up as pure noise with a small norm.
    """
    residual = image - reference
    plt.figure(figsize=(5, 3.3))
    # Left panel: the image itself.
    plt.subplot(1, 2, 1)
    plt.title('Image')
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    # Right panel: signed difference to the reference.
    plt.subplot(1, 2, 2)
    plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(residual ** 2)))
    plt.imshow(residual, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
# Center the noisy patches; the mean is kept so it can be added back after
# reconstruction.
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = lena.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    # Sparse-code the noisy patches against the learned dictionary V and
    # reconstruct each patch from its code.
    code = dico.transform(data)
    patches = np.dot(code, V)
    # NOTE(review): the threshold result is rescaled into [0, 1] twice --
    # once before and once after adding the intercept.  The duplication looks
    # accidental; confirm against the upstream example before changing it.
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    # Stitch the denoised patches back into the right half of the image.
    reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
        patches, (width, height // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], lena,
                   title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
JioEducation/edx-platform | common/lib/chem/chem/chemtools.py | 250 | 10721 | """This module originally includes functions for grading Vsepr problems.
Also, may be this module is the place for other chemistry-related grade functions. TODO: discuss it.
"""
import json
import unittest
import itertools
def vsepr_parse_user_answer(user_input):
    """
    Decode the JSON answer generated by vsepr.js into a dictionary.

    The original dictionary has exactly two keys, "geometry" and "atoms",
    e.g. u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}'.
    Key order inside the "atoms" subdict does not matter.

    The "atoms" subdict maps positions in the molecule to their contents:
        c0      - central atom
        p0..pN  - peripheral atoms
        a0..aN  - axial atoms
        e0..eN  - equatorial atoms
        e10, e11, e20, e21 - only for AX6 (octahedral) geometry: two pairs of
            atoms opposite the central atom, the e1 and e2 pairs lying
            crosswise in the equatorial plane.

    Only three key layouts may appear in "atoms":
        (c0, p0..pN), (c0, a0..aN, e0..eN), or
        (c0, a0, a1, e10, e11, e20, e21) when the geometry is AX6.

    :returns: the parsed answer as a dict.
    """
    answer = json.loads(user_input)
    return answer
def vsepr_build_correct_answer(geometry, atoms):
    """
    Assemble a correct-answer dictionary for VSEPR grading.

    Example:
        vsepr_build_correct_answer(
            geometry="AX4E0",
            atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})

    :param geometry: geometry code string, e.g. "AX4E0".
    :param atoms: dict mapping atom positions to their contents.
    :returns: {'geometry': geometry, 'atoms': atoms}
    """
    return dict(geometry=geometry, atoms=atoms)
def vsepr_grade(user_input, correct_answer, convert_to_peripheral=False):
    """
    Compare user_input against correct_answer; return True on a match.

    Comparison succeeds only if all steps succeed:
    1) geometries are equal
    2) central atoms ('c0') are equal
    3) the corresponding subsets of atom positions -- equatorial (e0..eN),
       axial (a0..aN) or peripheral (p0..pN) -- are equal as multisets.

    If convert_to_peripheral is True, axial and equatorial positions in
    user_input are first renamed to peripheral ones (aX and eX -> pX), so the
    correct_answer must be expressed with pX keys.

    Special case for AX6 geometry: user_input["atoms"] contains the
    three-character keys e10, e11, e20, e21.  If correct_answer also uses
    those keys, the axial pair and the two equatorial pairs must match under
    some permutation of the three position sets (e.g. the user's axial pair
    may match the correct answer's e1 pair).  Otherwise the e1X/e2X keys are
    first collapsed to plain eX keys and the common comparison is used.

    :param user_input: parsed user answer (see vsepr_parse_user_answer).
    :param correct_answer: dict built by vsepr_build_correct_answer.
    :param convert_to_peripheral: treat all non-central atoms as peripheral.
    :returns: True if the answer is correct, False otherwise.
    """
    if user_input['geometry'] != correct_answer['geometry']:
        return False
    if user_input['atoms']['c0'] != correct_answer['atoms']['c0']:
        return False
    if convert_to_peripheral:
        # Convert user_input from (a, e, e1, e2) positions to (p);
        # correct_answer must be expressed in (p) when using this flag.
        c0 = user_input['atoms'].pop('c0')
        user_input['atoms'] = {'p' + str(i): v for i, v in enumerate(user_input['atoms'].values())}
        user_input['atoms']['c0'] = c0
    # special case for AX6
    if 'e10' in correct_answer['atoms']:  # need check e1x, e2x symmetry for AX6..
        a_user = {}
        a_correct = {}
        for ea_position in ['a', 'e1', 'e2']:  # collecting positions:
            a_user[ea_position] = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
            a_correct[ea_position] = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
        correct = [sorted(a_correct['a'])] + [sorted(a_correct['e1'])] + [sorted(a_correct['e2'])]
        # The three position sets may be matched in any order.
        for permutation in itertools.permutations(['a', 'e1', 'e2']):
            if correct == [sorted(a_user[permutation[0]])] + [sorted(a_user[permutation[1]])] + [sorted(a_user[permutation[2]])]:
                return True
        return False
    else:  # no need to check e1x,e2x symmetry - convert them to ex
        if 'e10' in user_input['atoms']:  # e1x exists, it is AX6.. case
            # Bug fix: iterate over a snapshot of the items.  The original
            # code deleted and inserted keys while iterating the live dict,
            # which raises RuntimeError under Python 3.
            e_index = 0
            for k, v in list(user_input['atoms'].items()):
                if len(k) == 3:  # e1x or e2x key
                    del user_input['atoms'][k]
                    user_input['atoms']['e' + str(e_index)] = v
                    e_index += 1
        # common case: compare each position subset as a multiset
        for ea_position in ['p', 'a', 'e']:
            # collecting atoms:
            a_user = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
            a_correct = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
            if len(a_user) != len(a_correct):
                return False
            if sorted(a_user) != sorted(a_correct):
                return False
        return True
class Test_Grade(unittest.TestCase):
    '''Unit tests for vsepr_grade with both peripheral and axial/equatorial
    position layouts, including the AX6 pair-symmetry special case.'''
    def test_incorrect_geometry(self):
        # Geometries differ, so grading must fail regardless of atoms.
        correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}')
        self.assertFalse(vsepr_grade(user_answer, correct_answer))
    def test_correct_answer_p(self):
        # Exact match using peripheral (pX) positions only.
        correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX4E0","atoms":{"c0": "N","p0": "H","p1": "(ep)","p2": "H", "p3": "H"}}')
        self.assertTrue(vsepr_grade(user_answer, correct_answer))
    def test_correct_answer_ae(self):
        # AX6 user input (e1X/e2X keys) graded against plain aX/eX answer.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "test", "a1": "(ep)", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
        self.assertTrue(vsepr_grade(user_answer, correct_answer))
    def test_correct_answer_ae_convert_to_p_but_input_not_in_p(self):
        # Conversion to peripheral changes the multiset; must not match.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
        self.assertFalse(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
    def test_correct_answer_ae_convert_to_p(self):
        # Axial/equatorial user input converted to pX and matched.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
        self.assertTrue(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
    def test_correct_answer_e1e2_in_a(self):
        # Identity permutation of the (a, e1, e2) pair sets.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "(ep)","a1": "(ep)","e10": "H","e11": "H","e20": "H","e21": "H"}}')
        self.assertTrue(vsepr_grade(user_answer, correct_answer))
    def test_correct_answer_e1e2_in_e1(self):
        # Axial pair swapped with the e1 pair; still correct by symmetry.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "(ep)","e20": "H","e21": "H"}}')
        self.assertTrue(vsepr_grade(user_answer, correct_answer))
    def test_correct_answer_e1e2_in_e2(self):
        # Axial pair swapped with the e2 pair; still correct by symmetry.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
        self.assertTrue(vsepr_grade(user_answer, correct_answer))
    def test_incorrect_answer_e1e2(self):
        # Lone pairs split across different pairs; no permutation matches.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "H","e20": "H","e21": "(ep)"}}')
        self.assertFalse(vsepr_grade(user_answer, correct_answer))
    def test_incorrect_c0(self):
        # Central atoms differ, so grading must fail.
        correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "H", "e3": "(ep)"})
        user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "H","a0": "test","a1": "(ep)","e0": "H","e1": "H","e2": "(ep)","e3": "H"}}')
        self.assertFalse(vsepr_grade(user_answer, correct_answer))
def suite():
    """Build a TestSuite containing every grading test case."""
    loader = unittest.TestLoader()
    suites = [loader.loadTestsFromTestCase(case) for case in [Test_Grade]]
    return unittest.TestSuite(suites)
if __name__ == "__main__":
    # Run the VSEPR grading test suite with verbose output.
    unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 |
praekelt/rapidpro | temba/channels/migrations/0015_auto_20150703_2048.py | 3 | 3334 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, connection
class Migration(migrations.Migration):
    # Data migration: adds a related_name to ChannelLog.channel and installs
    # a Postgres trigger that keeps per-channel success/error log counts in
    # sync with the channels_channellog table.
    def install_channellog_trigger(apps, schema_editor):
        """
        Installs a Postgres trigger that will increment or decrement our
        success and error log counts based on inserts, deletes and truncates
        of channels_channellog.  Called via migrations.RunPython below, which
        passes (apps, schema_editor).
        """
        #language=SQL
        install_trigger = """
            CREATE OR REPLACE FUNCTION update_channellog_count() RETURNS TRIGGER AS $$
            BEGIN
              -- ChannelLog being added
              IF TG_OP = 'INSERT' THEN
                -- Error, increment our error count
                IF NEW.is_error THEN
                  UPDATE channels_channel SET error_log_count=error_log_count+1 WHERE id=NEW.channel_id;
                -- Success, increment that count instead
                ELSE
                  UPDATE channels_channel SET success_log_count=success_log_count+1 WHERE id=NEW.channel_id;
                END IF;
              -- ChannelLog being removed
              ELSIF TG_OP = 'DELETE' THEN
                -- Error, decrement our error count
                if OLD.is_error THEN
                  UPDATE channels_channel SET error_log_count=error_log_count-1 WHERE id=OLD.channel_id;
                -- Success, decrement that count instead
                ELSE
                  UPDATE channels_channel SET success_log_count=success_log_count-1 WHERE id=OLD.channel_id;
                END IF;
              -- Updating is_error is forbidden
              ELSIF TG_OP = 'UPDATE' THEN
                RAISE EXCEPTION 'Cannot update is_error or channel_id on ChannelLog events';
              -- Table being cleared, reset all counts
              ELSIF TG_OP = 'TRUNCATE' THEN
                UPDATE channels_channel SET error_log_count=0, success_log_count=0;
              END IF;
              RETURN NULL;
            END;
            $$ LANGUAGE plpgsql;
            -- Install INSERT, UPDATE and DELETE triggers
            DROP TRIGGER IF EXISTS when_channellog_changes_then_update_channel_trg on channels_channellog;
            CREATE TRIGGER when_channellog_changes_then_update_channel_trg
               AFTER INSERT OR DELETE OR UPDATE OF is_error, channel_id
               ON channels_channellog
               FOR EACH ROW
               EXECUTE PROCEDURE update_channellog_count();
            -- Install TRUNCATE trigger
            DROP TRIGGER IF EXISTS when_channellog_truncate_then_update_channel_trg on channels_channellog;
            CREATE TRIGGER when_channellog_truncate_then_update_channel_trg
              AFTER TRUNCATE
              ON channels_channellog
              EXECUTE PROCEDURE update_channellog_count();
        """
        cursor = connection.cursor()
        cursor.execute(install_trigger)
    dependencies = [
        ('channels', '0014_create_channellog_index'),
    ]
    operations = [
        migrations.AlterField(
            model_name='channellog',
            name='channel',
            field=models.ForeignKey(related_name='logs', to='channels.Channel', help_text='The channel the message was sent on'),
            preserve_default=True,
        ),
        migrations.RunPython(
            install_channellog_trigger,
        ),
    ]
| agpl-3.0 |
0jpq0/kbengine | kbe/res/scripts/common/Lib/idlelib/GrepDialog.py | 61 | 5143 | import os
import fnmatch
import re # for htest
import sys
from tkinter import StringVar, BooleanVar, Checkbutton # for GrepDialog
from tkinter import Tk, Text, Button, SEL, END # for htest
from idlelib import SearchEngine
import itertools
from idlelib.SearchDialogBase import SearchDialogBase
# Importing OutputWindow fails due to import loop
# EditorWindow -> GrepDialop -> OutputWindow -> EditorWindow
def grep(text, io=None, flist=None):
    """Open the Find-in-Files dialog on the current text selection.

    A single GrepDialog instance is cached on the shared SearchEngine so
    repeated invocations reuse the same dialog.
    """
    root = text._root()
    engine = SearchEngine.get(root)
    if not hasattr(engine, "_grepdialog"):
        engine._grepdialog = GrepDialog(root, engine, flist)
    dialog = engine._grepdialog
    selection = text.get("sel.first", "sel.last")
    dialog.open(text, selection, io)
class GrepDialog(SearchDialogBase):
    """Dialog for searching a pattern across multiple files."""
    title = "Find in Files Dialog"
    icon = "Grep"
    needwrapbutton = 0

    def __init__(self, root, engine, flist):
        """Initialize the dialog.

        root: Tk root widget.
        engine: shared SearchEngine holding the pattern and options.
        flist: file list used by the OutputWindow to open hit locations.
        """
        SearchDialogBase.__init__(self, root, engine)
        self.flist = flist
        self.globvar = StringVar(root)  # glob pattern of files to search
        self.recvar = BooleanVar(root)  # recurse into subdirectories?

    def open(self, text, searchphrase, io=None):
        """Show the dialog, seeding the file glob from io's filename."""
        SearchDialogBase.open(self, text, searchphrase)
        if io:
            path = io.filename or ""
        else:
            path = ""
        dir, base = os.path.split(path)
        head, tail = os.path.splitext(base)
        if not tail:
            tail = ".py"
        self.globvar.set(os.path.join(dir, "*" + tail))

    def create_entries(self):
        """Create the base entries plus the 'In files' glob entry."""
        SearchDialogBase.create_entries(self)
        self.globent = self.make_entry("In files:", self.globvar)[0]

    def create_other_buttons(self):
        """Add the recurse checkbutton, selected by default."""
        f = self.make_frame()[0]
        btn = Checkbutton(f, anchor="w",
                          variable=self.recvar,
                          text="Recurse down subdirectories")
        btn.pack(side="top", fill="both")
        btn.select()

    def create_command_buttons(self):
        """Add the 'Search Files' command button."""
        SearchDialogBase.create_command_buttons(self)
        self.make_button("Search Files", self.default_command, 1)

    def default_command(self, event=None):
        """Run the search with stdout redirected to an OutputWindow."""
        prog = self.engine.getprog()
        if not prog:
            return
        path = self.globvar.get()
        if not path:
            self.top.bell()
            return
        from idlelib.OutputWindow import OutputWindow  # leave here!
        save = sys.stdout
        try:
            sys.stdout = OutputWindow(self.flist)
            self.grep_it(prog, path)
        finally:
            sys.stdout = save

    def grep_it(self, prog, path):
        """Search the files matching *path* for the compiled pattern *prog*.

        Hits are printed as 'filename: lineno: line' so the OutputWindow can
        open the location on right-click.
        """
        dirname, base = os.path.split(path)
        # Local renamed from 'list' to avoid shadowing the builtin.
        filelist = self.findfiles(dirname, base, self.recvar.get())
        filelist.sort()
        self.close()
        pat = self.engine.getpat()
        print("Searching %r in %s ..." % (pat, path))
        hits = 0
        try:
            for fn in filelist:
                try:
                    with open(fn, errors='replace') as f:
                        for lineno, line in enumerate(f, 1):
                            if line[-1:] == '\n':
                                line = line[:-1]
                            if prog.search(line):
                                sys.stdout.write("%s: %s: %s\n" %
                                                 (fn, lineno, line))
                                hits += 1
                except OSError as msg:
                    print(msg)
            print(("Hits found: %s\n"
                   "(Hint: right-click to open locations.)"
                   % hits) if hits else "No hits.")
        except AttributeError:
            # Tk window has been closed, OutputWindow.text = None,
            # so in OW.write, OW.text.insert fails.
            pass

    def findfiles(self, dir, base, rec):
        """Return the files under *dir* whose names match the glob *base*,
        recursing into subdirectories when *rec* is true."""
        try:
            names = os.listdir(dir or os.curdir)
        except OSError as msg:
            print(msg)
            return []
        # Local renamed from 'list' to avoid shadowing the builtin.
        matches = []
        subdirs = []
        for name in names:
            fn = os.path.join(dir, name)
            if os.path.isdir(fn):
                subdirs.append(fn)
            else:
                if fnmatch.fnmatch(name, base):
                    matches.append(fn)
        if rec:
            for subdir in subdirs:
                matches.extend(self.findfiles(subdir, base, rec))
        return matches

    def close(self, event=None):
        """Hide the dialog and release the keyboard/mouse grab."""
        if self.top:
            self.top.grab_release()
            self.top.withdraw()
def _grep_dialog(parent):  # for htest
    """Stand-alone demo of GrepDialog for idlelib's human-verified tests."""
    from idlelib.PyShell import PyShellFileList
    root = Tk()
    root.title("Test GrepDialog")
    # Place the demo window just below the htest parent window.
    dims = [int(part) for part in re.split('[x+]', parent.geometry())]
    root.geometry("+%d+%d" % (dims[2], dims[3] + 150))
    flist = PyShellFileList(root)
    text = Text(root, height=5)
    text.pack()

    def show_grep_dialog():
        # Select everything so grep() has a search phrase to seed with.
        text.tag_add(SEL, "1.0", END)
        grep(text, flist=flist)
        text.tag_remove(SEL, "1.0", END)

    button = Button(root, text="Show GrepDialog", command=show_grep_dialog)
    button.pack()
    root.mainloop()
if __name__ == "__main__":
    # Run the grep unit tests first (without exiting), then the
    # human-verified dialog test.
    import unittest
    unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(_grep_dialog)
| lgpl-3.0 |
kohnle-lernmodule/KITexe201based | exe/engine/linuxconfig.py | 4 | 3054 | # ===========================================================================
# eXe config
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
The LinuxConfig overrides the Config class with Linux specific
configuration
"""
import os
from exe.engine.config import Config
from exe.engine.path import Path
# ===========================================================================
class LinuxConfig(Config):
    """
    The LinuxConfig overrides the Config class with Linux specific
    configuration
    """

    def _overrideDefaultVals(self):
        """
        Install the Linux defaults on top of the generic Config values.
        """
        # Prefer the system-wide install locations when they exist.
        if Path("/usr/share/exe").isdir():
            self.webDir = Path("/usr/share/exe")
            self.jsDir = Path("/usr/share/exe")
            self.localeDir = Path("/usr/share/exe/locale")
            self.mediaProfilePath = Path("/usr/share/exe/mediaprofiles")
        # Per-user state lives under $HOME.
        home = os.environ['HOME']
        self.dataDir = Path(home)
        self.configDir = Path(self.dataDir) / '.exe'
        self.stylesDir = Path(self.configDir) / 'style'
        self.lastDir = Path(home)
        # Media converters - defaults for now
        self.videoMediaConverter_ogv = ""
        self.videoMediaConverter_3gp = (
            '/usr/bin/ffmpeg -i %(infile)s -s qcif -vcodec h263 '
            '-acodec libvo_aacenc -ac 1 -ar 8000 -r 25 -ab 32 -y %(outfile)s')
        self.videoMediaConverter_mpg = (
            "/usr/bin/ffmpeg -i %(infile)s -s qcif -vcodec mpeg1 -acodec wav "
            "-ac 1 -ar 8000 -r 25 -ab 32 -y %(outfile)s")
        self.audioMediaConverter_au = "/usr/bin/sox %(infile)s %(outfile)s"
        self.audioMediaConverter_wav = "/usr/bin/sox %(infile)s %(outfile)s"
        self.audioMediaConverter_mp3 = (
            "/usr/bin/sox %(infile)s -t wav - | "
            "/usr/bin/lame -b 32 - %(outfile)s")
        self.ffmpegPath = "/usr/bin/ffmpeg"

    def _getConfigPathOptions(self):
        """
        Returns the best places for a linux config file, in order
        of preference.
        """
        return [Path(os.environ["HOME"]) / '.exe/exe.conf',
                Path('/etc/exe/exe.conf'),
                self.webDir / 'exe.conf']
# ===========================================================================
| gpl-2.0 |
aurofable/medhack-server | venv/lib/python2.7/site-packages/sqlalchemy/dialects/oracle/zxjdbc.py | 34 | 7661 | # oracle/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Oracle database via the zxjdbc JDBC connector.
JDBC Driver
-----------
The official Oracle JDBC driver is at
http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import base, default
from sqlalchemy.sql import expression
import collections
# Populated lazily from the JVM in OracleDialect_zxjdbc.__init__.
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
    """Date type that strips the time portion zxJDBC hands back."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # zxJDBC returns full timestamps; reduce to the date part.
            return None if value is None else value.date()
        return process
class _ZxJDBCNumeric(sqltypes.Numeric):
    """Numeric type coercing zxJDBC results to Decimal or float."""

    def result_processor(self, dialect, coltype):
        #XXX: does the dialect return Decimal or not???
        # if it does (in all cases), we could use a None processor as well as
        # the to_float generic processor
        if self.asdecimal:
            def process(value):
                if not isinstance(value, decimal.Decimal):
                    value = decimal.Decimal(str(value))
                return value
        else:
            def process(value):
                if isinstance(value, decimal.Decimal):
                    value = float(value)
                return value
        return process
class OracleCompiler_zxjdbc(OracleCompiler):
    """Oracle compiler that renders JDBC-style ``RETURNING ... INTO`` clauses."""

    def returning_clause(self, stmt, returning_cols):
        """Render ``RETURNING <cols> INTO <binds>`` for *stmt*.

        Side effects: records ``(position, DBAPI type)`` pairs on
        ``self.returning_parameters`` (consumed by
        OracleExecutionContext_zxjdbc.get_result_proxy) and registers one
        ReturningParam bind per returned column in ``self.binds`` so the
        custom DataHandler can call registerReturnParameter for it.
        """
        self.returning_cols = list(expression._select_iterables(returning_cols))
        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
                   for c in self.returning_cols]
        if not hasattr(self, 'returning_parameters'):
            self.returning_parameters = []
        binds = []
        for i, col in enumerate(self.returning_cols):
            # JDBC out-parameter positions are 1-based.
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
            self.returning_parameters.append((i + 1, dbtype))
            bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
            self.binds[bindparam.key] = bindparam
            binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))
        return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
    """Execution context that fetches RETURNING values through zxJDBC."""

    def pre_exec(self):
        """Pre-prepare the statement when a RETURNING clause was compiled."""
        if hasattr(self.compiled, 'returning_parameters'):
            # prepare a zxJDBC statement so we can grab its underlying
            # OraclePreparedStatement's getReturnResultSet later
            self.statement = self.cursor.prepare(self.statement)

    def get_result_proxy(self):
        """Return the result proxy.

        For RETURNING statements, read the out-parameters from the JDBC
        return ResultSet and wrap them in a ReturningResultProxy; any
        SQLException is re-raised as a DBAPI-level zxJDBC.Error.  The
        ResultSet and prepared statement are always closed.
        """
        if hasattr(self.compiled, 'returning_parameters'):
            rrs = None
            try:
                try:
                    rrs = self.statement.__statement__.getReturnResultSet()
                    rrs.next()
                except SQLException, sqle:
                    msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
                    if sqle.getSQLState() is not None:
                        msg += ' [SQLState: %s]' % sqle.getSQLState()
                    raise zxJDBC.Error(msg)
                else:
                    row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
                                for index, dbtype in self.compiled.returning_parameters)
                    return ReturningResultProxy(self, row)
            finally:
                if rrs is not None:
                    try:
                        rrs.close()
                    except SQLException:
                        # best-effort close; statement.close below still runs
                        pass
                self.statement.close()
        return base.ResultProxy(self)

    def create_cursor(self):
        # Wrap the cursor's datahandler so ReturningParam binds are routed
        # to registerReturnParameter (see OracleDialect_zxjdbc.__init__).
        cursor = self._dbapi_connection.cursor()
        cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
        return cursor
class ReturningResultProxy(base.FullyBufferedResultProxy):
    """ResultProxy backed by the RETURNING ResultSet results."""

    def __init__(self, context, returning_row):
        self._returning_row = returning_row
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        # Labelled columns expose .name; anonymous expressions fall back
        # to their generated anon_label.
        return [(col.name if hasattr(col, 'name') else col.anon_label, col.type)
                for col in self.context.compiled.returning_cols]

    def _buffer_rows(self):
        # Exactly one row: the tuple of RETURNING out-parameters.
        return collections.deque([self._returning_row])
class ReturningParam(object):
    """A bindparam value representing a RETURNING parameter.

    Specially handled by OracleReturningDataHandler.
    """

    def __init__(self, type):
        self.type = type

    def __eq__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type == other.type

    def __ne__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type != other.type

    def __repr__(self):
        kls = self.__class__
        return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__,
                                                   id(self), self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
    """Oracle dialect for the zxJDBC (Jython) connector."""

    jdbc_db_name = 'oracle'
    jdbc_driver_name = 'oracle.jdbc.OracleDriver'

    statement_compiler = OracleCompiler_zxjdbc
    execution_ctx_cls = OracleExecutionContext_zxjdbc

    # Swap in zxJDBC-aware Date/Numeric result processors.
    colspecs = util.update_copy(
        OracleDialect.colspecs,
        {
            sqltypes.Date : _ZxJDBCDate,
            sqltypes.Numeric: _ZxJDBCNumeric
        }
    )

    def __init__(self, *args, **kwargs):
        """Import the JVM-side classes and build the custom DataHandler.

        The java/com imports only resolve under Jython, which is why they
        happen here at construction time rather than at module import.
        """
        super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
        global SQLException, zxJDBC
        from java.sql import SQLException
        from com.ziclix.python.sql import zxJDBC
        from com.ziclix.python.sql.handler import OracleDataHandler

        class OracleReturningDataHandler(OracleDataHandler):
            """zxJDBC DataHandler that specially handles ReturningParam."""

            def setJDBCObject(self, statement, index, object, dbtype=None):
                # ReturningParam marks an out-parameter position rather
                # than a real bind value.
                if type(object) is ReturningParam:
                    statement.registerReturnParameter(index, object.type)
                elif dbtype is None:
                    OracleDataHandler.setJDBCObject(self, statement, index, object)
                else:
                    OracleDataHandler.setJDBCObject(self, statement, index, object, dbtype)

        self.DataHandler = OracleReturningDataHandler

    def initialize(self, connection):
        super(OracleDialect_zxjdbc, self).initialize(connection)
        # RETURNING support requires the 10.2+ JDBC driver.
        self.implicit_returning = connection.connection.driverversion >= '10.2'

    def _create_jdbc_url(self, url):
        """Build a thin-driver JDBC URL; port defaults to 1521."""
        return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)

    def _get_server_version_info(self, connection):
        """Parse the 'Release x.y.z' banner into an int tuple."""
        version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
        return tuple(int(x) for x in version.split('.'))

dialect = OracleDialect_zxjdbc
| mit |
czgu/metaHack | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py | 170 | 8755 | from binascii import hexlify, unhexlify
from hashlib import md5, sha1
from ..exceptions import SSLError
# Placeholders; overwritten below when the running ssl module provides
# the modern equivalents.
SSLContext = None
HAS_SNI = False
create_default_context = None

import errno
import ssl

try: # Test for SSL features
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import HAS_SNI # Has SNI?
except ImportError:
    pass

try:
    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
    # Raw OpenSSL option values, used when the ssl module predates these
    # constants (Python < 2.7.9 / 3.2).
    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
    OP_NO_COMPRESSION = 0x20000

try:
    from ssl import _DEFAULT_CIPHERS
except ImportError:
    # Fallback cipher list mirroring CPython's private default.
    _DEFAULT_CIPHERS = (
        'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+HIGH:'
        'DH+HIGH:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+HIGH:RSA+3DES:ECDH+RC4:'
        'DH+RC4:RSA+RC4:!aNULL:!eNULL:!MD5'
    )
try:
    from ssl import SSLContext # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object): # Platform-specific: Python 2 & 3.1
        """Minimal stand-in for ssl.SSLContext on old Pythons.

        Collects the settings that wrap_socket() accepts and applies them
        when wrap_socket is finally called.
        """
        # ssl.wrap_socket grew the `ciphers` argument in 2.7.
        supports_set_ciphers = sys.version_info >= (2, 7)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Stored and forwarded to wrap_socket() later.
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, location):
            self.ca_certs = location

        def set_ciphers(self, cipher_suite):
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None):
            # server_hostname is accepted for interface parity but ignored:
            # pre-SSLContext Pythons have no SNI support.
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
            }
            if self.supports_set_ciphers: # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else: # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Checks if given fingerprint matches the supplied certificate.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    :raises SSLError:
        If the fingerprint length matches no supported digest, or the
        computed digest of *cert* does not match *fingerprint*.
    """
    import hashlib

    # Maps the length of a digest (in bytes) to a possible hash function
    # producing this digest.  md5/sha1 are kept for backwards
    # compatibility; the SHA-2 family is included so modern pins
    # (e.g. SHA-256, the common choice today) are accepted as well.
    hashfunc_map = {
        16: md5,
        20: sha1,
        28: hashlib.sha224,
        32: hashlib.sha256,
        48: hashlib.sha384,
        64: hashlib.sha512,
    }

    fingerprint = fingerprint.replace(':', '').lower()
    digest_length, odd = divmod(len(fingerprint), 2)

    if odd or digest_length not in hashfunc_map:
        raise SSLError('Fingerprint is of invalid length.')

    # We need encode() here for py32; works on py2 and p33.
    fingerprint_bytes = unhexlify(fingerprint.encode())

    hashfunc = hashfunc_map[digest_length]

    cert_digest = hashfunc(cert).digest()

    if not cert_digest == fingerprint_bytes:
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(hexlify(fingerprint_bytes),
                               hexlify(cert_digest)))
def resolve_cert_reqs(candidate):
    """
    Resolves the argument to a numeric constant, which can be passed to
    the wrap_socket function/method from the ssl module.
    Defaults to :data:`ssl.CERT_NONE`.
    If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbrevation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.
    If it's neither `None` nor a string we assume it is already the numeric
    constant which can directly be passed to wrap_socket.
    """
    if candidate is None:
        return CERT_NONE
    if not isinstance(candidate, str):
        # Assume it is already the numeric constant.
        return candidate
    # Try the exact attribute name first, then the CERT_-prefixed form.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    Like resolve_cert_reqs, but for ``ssl.PROTOCOL_*`` constants; defaults
    to :data:`ssl.PROTOCOL_SSLv23`.
    """
    if candidate is None:
        return PROTOCOL_SSLv23
    if not isinstance(candidate, str):
        # Assume it is already the numeric constant.
        return candidate
    # Try the exact attribute name first, then the PROTOCOL_-prefixed form.
    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def create_urllib3_context(ssl_version=None, cert_reqs=ssl.CERT_REQUIRED,
options=None, ciphers=None):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
context.set_ciphers(ciphers or _DEFAULT_CIPHERS)
context.verify_mode = cert_reqs
if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
context.check_hostname = (context.verify_mode == ssl.CERT_REQUIRED)
return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None):
    """
    All arguments except for server_hostname and ssl_context have the same
    meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    """
    if ssl_context is None:
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)
    else:
        context = ssl_context

    if ca_certs:
        try:
            context.load_verify_locations(ca_certs)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            # Py33 raises FileNotFoundError, which subclasses OSError; only
            # a missing CA file maps to SSLError -- everything else
            # propagates unchanged.
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise

    if certfile:
        context.load_cert_chain(certfile, keyfile)

    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
        return context.wrap_socket(sock, server_hostname=server_hostname)
    return context.wrap_socket(sock)
| apache-2.0 |
virneo/nupic | tests/unit/nupic/frameworks/opf/opf_metrics_test.py | 7 | 28753 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import numpy as np
import unittest2 as unittest
from nupic.frameworks.opf.metrics import getModule, MetricSpec, MetricMulti
class OPFMetricsTest(unittest.TestCase):
DELTA = 0.01
VERBOSITY = 0
def testRMSE(self):
rmse = getModule(MetricSpec("rmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
rmse.addInstance(gt[i], p[i])
target = 6.71
self.assertTrue(abs(rmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testNRMSE(self):
nrmse = getModule(MetricSpec("nrmse", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
nrmse.addInstance(gt[i], p[i])
target = 3.5856858280031814
self.assertAlmostEqual(nrmse.getMetric()["value"], target)
def testWindowedRMSE(self):
wrmse = getModule(MetricSpec("rmse", None, None,
{"verbosity": OPFMetricsTest.VERBOSITY, "window":3}))
gt = [9, 4, 4, 100, 44]
p = [0, 13, 4, 6, 7]
for gv, pv in zip(gt, p):
wrmse.addInstance(gv, pv)
target = 58.324
self.assertTrue (abs(wrmse.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testAAE(self):
aae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
aae.addInstance(gt[i], p[i])
target = 6.0
self.assertTrue(abs(aae.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testTrivialAAE(self):
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(100)]
p = [i for i in range(100)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTrivialAccuracy(self):
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAAE (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(100)]
p = [str(i) for i in range(100)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuract(self):
"""Trivial AAE metric test"""
trivialaae = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"aae"}))
gt = [i/4+1 for i in range(1000)]
p = [i for i in range(1000)]
for i in xrange(len(gt)):
trivialaae.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialaae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testWindowedTrivialAccuracy(self):
"""Trivial Accuracy metric test"""
trivialaccuracy = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"acc"}))
gt = [str(i/4+1) for i in range(1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialaccuracy.addInstance(gt[i], p[i])
target = .75
self.assertTrue(abs(trivialaccuracy.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedTrivialAverageError (self):
"""Trivial Average Error metric test"""
trivialAveErr = getModule(MetricSpec("trivial", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,"errorMetric":"avg_err"}))
gt = [str(i/4+1) for i in range(500, 1000)]
p = [str(i) for i in range(1000)]
for i in xrange(len(gt)):
trivialAveErr.addInstance(gt[i], p[i])
target = .25
self.assertTrue(abs(trivialAveErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMultistepAAE(self):
"""Multistep AAE metric test"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": 3}))
# Make each ground truth 1 greater than the prediction
gt = [i+1 for i in range(100)]
p = [{3: {i: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 1
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepAAEMultipleSteps(self):
"""Multistep AAE metric test, predicting 2 different step sizes"""
msp = getModule(MetricSpec("multiStep", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps": [3,6]}))
# Make each 3 step prediction +1 over ground truth and each 6 step
# prediction +0.5 over ground truth
gt = [i for i in range(100)]
p = [{3: {i+1: .7, 5: 0.3},
6: {i+0.5: .7, 5: 0.3}} for i in range(100)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
target = 0.75 # average of +1 error and 0.5 error
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbability(self):
"""Multistep with probabilities metric test"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"aae",
"steps":3}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100
target = 283.35
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMultistepProbabilityMultipleSteps(self):
"""Multistep with probabilities metric test, predicting 2 different step
sizes"""
msp = getModule(MetricSpec("multiStepProbability", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100,
"errorMetric":"aae", "steps": [1,3]}))
gt = [5 for i in range(1000)]
p = [{3: {i: .3, 5: .7},
1: {5: 1.0}} for i in range(1000)]
for i in xrange(len(gt)):
msp.addInstance(gt[i], p[i])
#(((999-5)(1000-5)/2-(899-5)(900-5)/2)*.3/100) / 2
# / 2 because the 1-step prediction is 100% accurate
target = 283.35/2
self.assertTrue(abs(msp.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testMovingMeanAbsoluteError(self):
"""Moving mean Average Absolute Error metric test"""
movingMeanAAE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"aae"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanAAE.addInstance(gt[i], p[i])
res.append(movingMeanAAE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanAAE.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingMeanRMSE(self):
"""Moving mean RMSE metric test"""
movingMeanRMSE = getModule(MetricSpec("moving_mean", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mean_window":3,
"errorMetric":"rmse"}))
gt = [i for i in range(890)]
gt.extend([2*i for i in range(110)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingMeanRMSE.addInstance(gt[i], p[i])
res.append(movingMeanRMSE.getMetric()["value"])
self.assertTrue(max(res[1:890]) == 2.0)
self.assertTrue(min(res[891:])>=4.0)
target = 4.0
self.assertTrue(abs(movingMeanRMSE.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testMovingModeAverageError(self):
"""Moving mode Average Error metric test"""
movingModeAvgErr = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"avg_err"}))
#Should initially assymptote to .5
#Then after 900 should go to 1.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeAvgErr.addInstance(gt[i], p[i])
res.append(movingModeAvgErr.getMetric()["value"])
#Make sure that there is no point where the average error is >.5
self.assertTrue(max(res[1:890]) == .5)
#Make sure that after the statistics switch the error goes to 1.0
self.assertTrue(min(res[891:])>=.5)
#Make sure that the statistics change is still noticeable while it is
#in the window
self.assertTrue(res[998]<1.0)
target = 1.0
self.assertTrue(abs(movingModeAvgErr.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testMovingModeAccuracy(self):
"""Moving mode Accuracy metric test"""
movingModeACC = getModule(MetricSpec("moving_mode", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "mode_window":3,
"errorMetric":"acc"}))
#Should initially asymptote to .5
#Then after 900 should go to 0.0 as the predictions will always be offset
gt = [i/4 for i in range(900)]
gt.extend([2*i/4 for i in range(100)])
p = [i for i in range(1000)]
res = []
for i in xrange(len(gt)):
movingModeACC.addInstance(gt[i], p[i])
res.append(movingModeACC.getMetric()["value"])
#Make sure that there is no point where the average acc is <.5
self.assertTrue(min(res[1:899]) == .5)
#Make sure that after the statistics switch the acc goes to 0.0
self.assertTrue(max(res[900:])<=.5)
#Make sure that the statistics change is still noticeable while it
#is in the window
self.assertTrue(res[998]>0.0)
target = 0.0
self.assertTrue(abs(movingModeACC.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalars(self):
"""Two gram scalars test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, \
"window":100, "predictionField":"test",
"errorMetric":"acc"}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testTwoGramScalarsStepsGreaterOne(self):
"""Two gram scalars test with step size other than 1"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY,\
"window":100, "predictionField":"test",
"errorMetric":"acc", "steps": 2}))
# Sequences of 0,1,2,3,4,0,1,2,3,4,...
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
gt = [i%5 for i in range(1000)]
res = []
for i in xrange(len(gt)):
if i == 20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
# Feed in next groundTruth
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = 1.0
self.assertTrue(abs(oneGram.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA)
def testTwoGramStrings(self):
"""One gram string test"""
oneGram = getModule(MetricSpec("two_gram", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":100, "errorMetric":"acc",
"predictionField":"test"}))
# Sequences of "0", "1", "2", "3", "4", "0", "1", ...
gt = [str(i%5) for i in range(1000)]
encodings = [np.zeros(10) for i in range(5)]
for i in range(len(encodings)):
encoding = encodings[i]
encoding[i] = 1
# Make every 5th element random
newElem = 100
for i in range(5, 1000, 5):
gt[i] = str(newElem)
newElem += 20
res = []
for i in xrange(len(gt)):
if i==20:
# Make sure we don"t barf with missing values
oneGram.addInstance(np.zeros(10), prediction=None,
record={"test":None})
else:
oneGram.addInstance(encodings[i%5], prediction=None,
record={"test":gt[i]})
res.append(oneGram.getMetric()["value"])
target = .8
self.assertTrue(abs(oneGram.getMetric()["value"]-target)\
< OPFMetricsTest.DELTA)
def testWindowedAAE(self):
"""Windowed AAE"""
waae = getModule(MetricSpec("aae", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY, "window":1}))
gt = [9, 4, 5, 6]
p = [0, 13, 8, 3]
for i in xrange(len(gt)):
waae.addInstance(gt[i], p[i])
target = 3.0
self.assertTrue( abs(waae.getMetric()["value"]-target) \
< OPFMetricsTest.DELTA, "Got %s" %waae.getMetric())
def testAccuracy(self):
"""Accuracy"""
acc = getModule(MetricSpec("acc", None, None,
{"verbosity" : OPFMetricsTest.VERBOSITY}))
gt = [0, 1, 2, 3, 4, 5]
p = [0, 1, 2, 4, 5, 6]
for i in xrange(len(gt)):
acc.addInstance(gt[i], p[i])
target = 0.5
self.assertTrue(abs(acc.getMetric()["value"]-target) < OPFMetricsTest.DELTA)
def testWindowedAccuracy(self):
    """Accuracy restricted to a two-sample window."""
    metric = getModule(MetricSpec("acc", None, None,
        {"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
    groundTruth = [0, 1, 2, 3, 4, 5]
    predictions = [0, 1, 2, 4, 5, 6]
    for actual, predicted in zip(groundTruth, predictions):
        metric.addInstance(actual, predicted)
    # Both pairs inside the trailing window mismatch, so accuracy is zero.
    expected = 0.0
    self.assertTrue(abs(metric.getMetric()["value"] - expected)
                    < OPFMetricsTest.DELTA)
def testAverageError(self):
    """Average error (fraction of mismatches) over the full history."""
    metric = getModule(MetricSpec("avg_err", None, None,
        {"verbosity" : OPFMetricsTest.VERBOSITY}))
    groundTruth = [1, 1, 2, 3, 4, 5]
    predictions = [0, 1, 2, 4, 5, 6]
    for actual, predicted in zip(groundTruth, predictions):
        metric.addInstance(actual, predicted)
    # 4 of the 6 pairs disagree.
    expected = (2.0/3.0)
    self.assertTrue(abs(metric.getMetric()["value"] - expected)
                    < OPFMetricsTest.DELTA)
def testWindowedAverageError(self):
    """Average error restricted to a two-sample window."""
    metric = getModule(MetricSpec("avg_err", None, None,
        {"verbosity" : OPFMetricsTest.VERBOSITY, "window":2}))
    groundTruth = [0, 1, 2, 3, 4, 5]
    predictions = [0, 1, 2, 4, 5, 6]
    for actual, predicted in zip(groundTruth, predictions):
        metric.addInstance(actual, predicted)
    # Both pairs inside the trailing window are wrong.
    expected = 1.0
    self.assertTrue(abs(metric.getMetric()["value"] - expected)
                    < OPFMetricsTest.DELTA)
def testLongWindowRMSE(self):
    """RMSE with a window larger than the whole history."""
    metric = getModule(MetricSpec("rmse", None, None,
        {"verbosity" : OPFMetricsTest.VERBOSITY, "window":100}))
    groundTruth = [9, 4, 5, 6]
    predictions = [0, 13, 8, 3]
    for actual, predicted in zip(groundTruth, predictions):
        metric.addInstance(actual, predicted)
    # sqrt(mean of squared errors 81, 81, 9, 9) ~= 6.71.
    expected = 6.71
    self.assertTrue(abs(metric.getMetric()["value"] - expected)
                    < OPFMetricsTest.DELTA)
def testCustomErrorMetric(self):
    """Custom error metric: the aggregate value returned by addInstance,
    the `tools` helpers (getPrediction, getGroundTruth, getFieldValue,
    getBufferLen) in windowed and unwindowed modes, and constructor
    edge cases (zero-sized windows must be rejected).
    """
    # Simple per-sample absolute error, averaged over errorWindow=3.
    # NOTE(review): the dataset extraction destroyed whitespace inside the
    # embedded source strings; indentation below is reconstructed.
    customFunc = """def getError(pred,ground,tools):
  return abs(pred-ground)"""
    customEM = getModule(MetricSpec("custom_error_metric", None, None,
        {"customFuncSource":customFunc, "errorWindow":3}))
    gt = [9, 4, 5, 6]
    p = [0, 13, 8, 3]
    for i in xrange(len(gt)):
        aggErr = customEM.addInstance(gt[i], p[i])
    target = 5.0
    delta = 0.001
    # insure that addInstance returns the aggregate error - other
    # uber metrics depend on this behavior.
    self.assertEqual(aggErr, customEM.getMetric()["value"])
    self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
    # Same average computed by hand inside the custom function using the
    # buffered history instead of errorWindow.
    customFunc = """def getError(pred,ground,tools):
  sum = 0
  for i in range(min(3,tools.getBufferLen())):
    sum+=abs(tools.getPrediction(i)-tools.getGroundTruth(i))
  return sum/3"""
    customEM = getModule(MetricSpec("custom_error_metric", None, None,
        {"customFuncSource":customFunc}))
    gt = [9, 4, 5, 6]
    p = [0, 13, 8, 3]
    for i in xrange(len(gt)):
        customEM.addInstance(gt[i], p[i])
    target = 5.0
    delta = 0.001
    self.assertTrue(abs(customEM.getMetric()["value"]-target) < delta)
    # Test custom error metric helper functions
    # Test getPrediction
    # Not-Windowed
    storeWindow=4
    failed = False
    for lookBack in range(3):
        customFunc = """def getError(pred,ground,tools):
  return tools.getPrediction(%d)""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack:
                # Looking back past the start of the buffer must raise.
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue( not failed,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue( customEM.getMetric()["value"] == p[i-lookBack])
    #Windowed
    for lookBack in range(5):
        customFunc = """def getError(pred,ground,tools):
  return tools.getPrediction(%d)""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc,"storeWindow":storeWindow}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if lookBack>=storeWindow-1:
                # NOTE(review): no-op branch -- looks like leftover debug
                # scaffolding; confirm it can be deleted.
                pass
            if i < lookBack or lookBack>=storeWindow:
                # Either too early, or looking back beyond the store window.
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue (not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue (customEM.getMetric()["value"] == p[i-lookBack])
    #Test getGroundTruth
    #Not-Windowed
    for lookBack in range(3):
        customFunc = """def getError(pred,ground,tools):
  return tools.getGroundTruth(%d)""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack:
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue( not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue (customEM.getMetric()["value"] == gt[i-lookBack])
    #Windowed
    for lookBack in range(5):
        customFunc = """def getError(pred,ground,tools):
  return tools.getGroundTruth(%d)""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc,"storeWindow":storeWindow}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack or lookBack>=storeWindow:
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue( not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue( customEM.getMetric()["value"] == gt[i-lookBack])
    #Test getFieldValue
    #Not-Windowed Scalar
    for lookBack in range(3):
        customFunc = """def getError(pred,ground,tools):
  return tools.getFieldValue(%d,"test1")""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack:
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue( not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
    #Windowed Scalar
    for lookBack in range(3):
        customFunc = """def getError(pred,ground,tools):
  return tools.getFieldValue(%d,"test1")""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc,"storeWindow":storeWindow}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack or lookBack>=storeWindow:
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue (not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue( customEM.getMetric()["value"] == t1[i-lookBack])
    #Not-Windowed category
    # NOTE(review): these two "category" sections read "test1" (a scalar
    # field), not "test2" (the string field) -- possibly a copy/paste slip
    # in the original test; behavior preserved here.
    for lookBack in range(3):
        customFunc = """def getError(pred,ground,tools):
  return tools.getFieldValue(%d,"test1")""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack:
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue( not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
    #Windowed category
    for lookBack in range(3):
        customFunc = """def getError(pred,ground,tools):
  return tools.getFieldValue(%d,"test1")""" % lookBack
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc,"storeWindow":storeWindow}))
        gt = [i for i in range(100)]
        p = [2*i for i in range(100)]
        t1 = [3*i for i in range(100)]
        t2 = [str(4*i) for i in range(100)]
        for i in xrange(len(gt)):
            curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
            if i < lookBack or lookBack>=storeWindow:
                try:
                    customEM.addInstance(gt[i], p[i], curRecord)
                    failed = True
                except:
                    self.assertTrue (not failed ,
                        "An exception should have been generated, but wasn't")
            else:
                customEM.addInstance(gt[i], p[i], curRecord)
                self.assertTrue (customEM.getMetric()["value"] == t1[i-lookBack])
    #Test getBufferLen
    #Not-Windowed
    customFunc = """def getError(pred,ground,tools):
  return tools.getBufferLen()"""
    customEM = getModule(MetricSpec("custom_error_metric", None, None,
        {"customFuncSource":customFunc}))
    gt = [i for i in range(100)]
    p = [2*i for i in range(100)]
    t1 = [3*i for i in range(100)]
    t2 = [str(4*i) for i in range(100)]
    for i in xrange(len(gt)):
        curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
        customEM.addInstance(gt[i], p[i], curRecord)
        # Unwindowed buffer grows without bound.
        self.assertTrue (customEM.getMetric()["value"] == i+1)
    #Windowed
    customFunc = """def getError(pred,ground,tools):
  return tools.getBufferLen()"""
    customEM = getModule(MetricSpec("custom_error_metric", None, None,
        {"customFuncSource":customFunc,"storeWindow":storeWindow}))
    gt = [i for i in range(100)]
    p = [2*i for i in range(100)]
    t1 = [3*i for i in range(100)]
    t2 = [str(4*i) for i in range(100)]
    for i in xrange(len(gt)):
        curRecord = {"pred":p[i], "ground":gt[i], "test1":t1[i], "test2":t2[i]}
        customEM.addInstance(gt[i], p[i], curRecord)
        # Windowed buffer caps at storeWindow (4).
        self.assertTrue (customEM.getMetric()["value"] == min(i+1, 4))
    #Test initialization edge cases
    try:
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc,"errorWindow":0}))
        self.assertTrue (False , "error Window of 0 should fail self.assertTrue")
    except:
        pass
    try:
        customEM = getModule(MetricSpec("custom_error_metric", None, None,
            {"customFuncSource":customFunc,"storeWindow":0}))
        self.assertTrue (False , "error Window of 0 should fail self.assertTrue")
    except:
        pass
def testMultiMetric(self):
    """MetricMulti must equal the hand-computed weighted sum of its parts."""
    ms1 = MetricSpec(field='a', metric='trivial', inferenceElement='prediction', params={'errorMetric': 'aae', 'window': 1000, 'steps': 1})
    ms2 = MetricSpec(metric='trivial', inferenceElement='prediction', field='a', params={'window': 10, 'steps': 1, 'errorMetric': 'rmse'})
    metric1000 = getModule(ms1)
    metric10 = getModule(ms2)
    # create multi metric
    multi = MetricMulti(weights=[0.2, 0.8], metrics=[metric10, metric1000])
    multi.verbosity = 1
    # create reference metrics (must be diff from metrics above used in
    # MultiMetric, as they keep history)
    metric1000ref = getModule(ms1)
    metric10ref = getModule(ms2)
    gt = range(500, 1000)
    p = range(500)
    for i in xrange(len(gt)):
        v10 = metric10ref.addInstance(gt[i], p[i])
        v1000 = metric1000ref.addInstance(gt[i], p[i])
        # Until both sub-metrics report a value, the combined value is None.
        if v10 is None or v1000 is None:
            check = None
        else:
            check = 0.2*float(v10) + 0.8*float(v1000)
        metricValue = multi.addInstance(gt[i], p[i])
        self.assertEqual(check, metricValue, "iter i= %s gt=%s pred=%s multi=%s sub1=%s sub2=%s" % (i, gt[i], p[i], metricValue, v10, v1000))
if __name__ == "__main__":
    # Run the whole test case from the command line.
    unittest.main()
| agpl-3.0 |
bartosh/zipline | tests/test_benchmark.py | 5 | 8023 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from pandas.util.testing import assert_series_equal
from zipline.data.data_portal import DataPortal
from zipline.errors import (
BenchmarkAssetNotAvailableTooEarly,
BenchmarkAssetNotAvailableTooLate,
InvalidBenchmarkAsset)
from zipline.sources.benchmark_source import BenchmarkSource
from zipline.testing import (
MockDailyBarReader,
create_minute_bar_data,
tmp_bcolz_equity_minute_bar_reader,
)
from zipline.testing.fixtures import (
WithDataPortal,
WithSimParams,
WithTradingCalendars,
ZiplineTestCase,
)
class TestBenchmark(WithDataPortal, WithSimParams, WithTradingCalendars,
                    ZiplineTestCase):
    """Exercises BenchmarkSource against a small synthetic equity universe.

    Sids 1, 2 and 4 trade across the whole simulation range; sid 3 only
    trades for part of it, and sid 4 carries a stock dividend.
    """

    START_DATE = pd.Timestamp('2006-01-03', tz='utc')
    END_DATE = pd.Timestamp('2006-12-29', tz='utc')

    @classmethod
    def make_equity_info(cls):
        """Define the four test equities used by the fixtures."""
        return pd.DataFrame.from_dict(
            {
                1: {
                    'symbol': 'A',
                    'start_date': cls.START_DATE,
                    'end_date': cls.END_DATE + pd.Timedelta(days=1),
                    "exchange": "TEST",
                },
                2: {
                    'symbol': 'B',
                    'start_date': cls.START_DATE,
                    'end_date': cls.END_DATE + pd.Timedelta(days=1),
                    "exchange": "TEST",
                },
                3: {
                    # Deliberately starts late and ends early, so using it
                    # as a benchmark outside that range must raise.
                    'symbol': 'C',
                    'start_date': pd.Timestamp('2006-05-26', tz='utc'),
                    'end_date': pd.Timestamp('2006-08-09', tz='utc'),
                    "exchange": "TEST",
                },
                4: {
                    'symbol': 'D',
                    'start_date': cls.START_DATE,
                    'end_date': cls.END_DATE + pd.Timedelta(days=1),
                    "exchange": "TEST",
                },
            },
            orient='index',
        )

    @classmethod
    def make_adjustment_writer_equity_daily_bar_reader(cls):
        return MockDailyBarReader()

    @classmethod
    def make_stock_dividends_data(cls):
        """Attach a single stock dividend to sid 4 (paid in sid 5 shares)."""
        declared_date = cls.sim_params.sessions[45]
        ex_date = cls.sim_params.sessions[50]
        record_date = pay_date = cls.sim_params.sessions[55]
        return pd.DataFrame({
            'sid': np.array([4], dtype=np.uint32),
            'payment_sid': np.array([5], dtype=np.uint32),
            'ratio': np.array([2], dtype=np.float64),
            'declared_date': np.array([declared_date], dtype='datetime64[ns]'),
            'ex_date': np.array([ex_date], dtype='datetime64[ns]'),
            'record_date': np.array([record_date], dtype='datetime64[ns]'),
            'pay_date': np.array([pay_date], dtype='datetime64[ns]'),
        })

    def test_normal(self):
        """Benchmark returns must match a manual pct_change of close prices."""
        days_to_use = self.sim_params.sessions[1:]
        source = BenchmarkSource(
            self.env.asset_finder.retrieve_asset(1),
            self.trading_calendar,
            days_to_use,
            self.data_portal
        )
        # should be the equivalent of getting the price history, then doing
        # a pct_change on it
        manually_calculated = self.data_portal.get_history_window(
            [1],
            days_to_use[-1],
            len(days_to_use),
            "1d",
            "close",
            "daily",
        )[1].pct_change()
        # compare all the fields except the first one, for which we don't have
        # data in manually_calculated
        for idx, day in enumerate(days_to_use[1:]):
            self.assertEqual(
                source.get_value(day),
                manually_calculated[idx + 1]
            )
        # compare a slice of the data
        assert_series_equal(
            source.get_range(days_to_use[1], days_to_use[10]),
            manually_calculated[1:11]
        )

    def test_asset_not_trading(self):
        """Using a benchmark outside its trading range raises either error."""
        benchmark = self.env.asset_finder.retrieve_asset(3)
        benchmark_start = benchmark.start_date
        benchmark_end = benchmark.end_date
        # Sessions begin before sid 3 starts trading.
        with self.assertRaises(BenchmarkAssetNotAvailableTooEarly) as exc:
            BenchmarkSource(
                benchmark,
                self.trading_calendar,
                self.sim_params.sessions[1:],
                self.data_portal
            )
        self.assertEqual(
            'Equity(3 [C]) does not exist on %s. It started trading on %s.' %
            (self.sim_params.sessions[1], benchmark_start),
            exc.exception.message
        )
        # Sessions extend past sid 3's end date.
        with self.assertRaises(BenchmarkAssetNotAvailableTooLate) as exc2:
            BenchmarkSource(
                benchmark,
                self.trading_calendar,
                self.sim_params.sessions[120:],
                self.data_portal
            )
        self.assertEqual(
            'Equity(3 [C]) does not exist on %s. It stopped trading on %s.' %
            (self.sim_params.sessions[-1], benchmark_end),
            exc2.exception.message
        )

    def test_asset_IPOed_same_day(self):
        """First-day benchmark value falls back to 0.0 from daily data."""
        # gotta get some minute data up in here.
        # add sid 4 for a couple of days
        minutes = self.trading_calendar.minutes_for_sessions_in_range(
            self.sim_params.sessions[0],
            self.sim_params.sessions[5]
        )
        tmp_reader = tmp_bcolz_equity_minute_bar_reader(
            self.trading_calendar,
            self.trading_calendar.all_sessions,
            create_minute_bar_data(minutes, [2]),
        )
        with tmp_reader as reader:
            data_portal = DataPortal(
                self.env.asset_finder, self.trading_calendar,
                first_trading_day=reader.first_trading_day,
                equity_minute_reader=reader,
                equity_daily_reader=self.bcolz_equity_daily_bar_reader,
                adjustment_reader=self.adjustment_reader,
            )
            source = BenchmarkSource(
                self.env.asset_finder.retrieve_asset(2),
                self.trading_calendar,
                self.sim_params.sessions,
                data_portal
            )
            days_to_use = self.sim_params.sessions
            # first value should be 0.0, coming from daily data
            self.assertAlmostEquals(0.0, source.get_value(days_to_use[0]))
            manually_calculated = data_portal.get_history_window(
                [2], days_to_use[-1],
                len(days_to_use),
                "1d",
                "close",
                "daily",
            )[2].pct_change()
            for idx, day in enumerate(days_to_use[1:]):
                self.assertEqual(
                    source.get_value(day),
                    manually_calculated[idx + 1]
                )

    def test_no_stock_dividends_allowed(self):
        # try to use sid(4) as benchmark, should blow up due to the presence
        # of a stock dividend
        with self.assertRaises(InvalidBenchmarkAsset) as exc:
            BenchmarkSource(
                self.env.asset_finder.retrieve_asset(4),
                self.trading_calendar,
                self.sim_params.sessions,
                self.data_portal
            )
        self.assertEqual("Equity(4 [D]) cannot be used as the benchmark "
                         "because it has a stock dividend on 2006-03-16 "
                         "00:00:00. Choose another asset to use as the "
                         "benchmark.",
                         exc.exception.message)
| apache-2.0 |
chris-chris/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 19 | 17210 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Mixture(distribution.Distribution):
  """Mixture distribution.

  The `Mixture` object implements batched mixture distributions.
  The mixture model is defined by a `Categorical` distribution (the mixture)
  and a python list of `Distribution` objects.

  Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
  `entropy_lower_bound`.
  """

  def __init__(self,
               cat,
               components,
               validate_args=False,
               allow_nan_stats=True,
               name="Mixture"):
    """Initialize a Mixture distribution.

    A `Mixture` is defined by a `Categorical` (`cat`, representing the
    mixture probabilities) and a list of `Distribution` objects
    all having matching dtype, batch shape, event shape, and continuity
    properties (the components).

    The `num_classes` of `cat` must be possible to infer at graph construction
    time and match `len(components)`.

    Args:
      cat: A `Categorical` distribution instance, representing the
        probabilities of `distributions`.
      components: A list or tuple of `Distribution` instances.
        Each instance must have the same type, be defined on the same domain,
        and have matching `event_shape` and `batch_shape`.
      validate_args: Python `bool`, default `False`. If `True`, raise a
        runtime error if batch or event ranks are inconsistent between cat
        and any of the distributions. This is only checked if the ranks
        cannot be determined statically at graph construction time.
      allow_nan_stats: Boolean, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading
        to undefined statistics will return NaN for this statistic.
      name: A name for this distribution (optional).

    Raises:
      TypeError: If cat is not a `Categorical`, or `components` is not
        a list or tuple, or the elements of `components` are not
        instances of `Distribution`, or do not have matching `dtype`.
      ValueError: If `components` is an empty list or tuple, or its
        elements do not have a statically known event rank.
        If `cat.num_classes` cannot be inferred at graph creation time,
        or the constant value of `cat.num_classes` is not equal to
        `len(components)`, or all `components` and `cat` do not have
        matching static batch shapes, or all components do not
        have matching static event shapes.
    """
    parameters = locals()
    if not isinstance(cat, categorical.Categorical):
      raise TypeError("cat must be a Categorical distribution, but saw: %s" %
                      cat)
    if not components:
      raise ValueError("components must be a non-empty list or tuple")
    if not isinstance(components, (list, tuple)):
      raise TypeError("components must be a list or tuple, but saw: %s" %
                      components)
    if not all(isinstance(c, distribution.Distribution) for c in components):
      raise TypeError(
          "all entries in components must be Distribution instances"
          " but saw: %s" % components)
    # All components (and cat) must agree on dtype and on static batch/event
    # shapes; merge_with raises on any statically-known mismatch.
    dtype = components[0].dtype
    if not all(d.dtype == dtype for d in components):
      raise TypeError("All components must have the same dtype, but saw "
                      "dtypes: %s" % [(d.name, d.dtype) for d in components])
    static_event_shape = components[0].event_shape
    static_batch_shape = cat.batch_shape
    for d in components:
      static_event_shape = static_event_shape.merge_with(d.event_shape)
      static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
    if static_event_shape.ndims is None:
      raise ValueError(
          "Expected to know rank(event_shape) from components, but "
          "none of the components provide a static number of ndims")
    # Ensure that all batch and event ndims are consistent.
    with ops.name_scope(name, values=[cat.logits]):
      num_components = cat.event_size
      static_num_components = tensor_util.constant_value(num_components)
      if static_num_components is None:
        raise ValueError(
            "Could not infer number of classes from cat and unable "
            "to compare this value to the number of components passed in.")
      # Possibly convert from numpy 0-D array.
      static_num_components = int(static_num_components)
      if static_num_components != len(components):
        raise ValueError("cat.num_classes != len(components): %d vs. %d" %
                         (static_num_components, len(components)))
      cat_batch_shape = cat.batch_shape_tensor()
      cat_batch_rank = array_ops.size(cat_batch_shape)
      if validate_args:
        # Runtime checks, threaded through control_dependencies in every
        # public op via self._assertions.
        batch_shapes = [d.batch_shape_tensor() for d in components]
        batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
        check_message = ("components[%d] batch shape must match cat "
                         "batch shape")
        self._assertions = [
            check_ops.assert_equal(
                cat_batch_rank, batch_ranks[di], message=check_message % di)
            for di in range(len(components))
        ]
        self._assertions += [
            check_ops.assert_equal(
                cat_batch_shape, batch_shapes[di], message=check_message % di)
            for di in range(len(components))
        ]
      else:
        self._assertions = []
      self._cat = cat
      self._components = list(components)
      self._num_components = static_num_components
      self._static_event_shape = static_event_shape
      self._static_batch_shape = static_batch_shape
    # We let the Mixture distribution access _graph_parents since its arguably
    # more like a baseclass.
    graph_parents = self._cat._graph_parents  # pylint: disable=protected-access
    for c in self._components:
      graph_parents += c._graph_parents  # pylint: disable=protected-access
    super(Mixture, self).__init__(
        dtype=dtype,
        reparameterization_type=distribution.NOT_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=graph_parents,
        name=name)

  @property
  def cat(self):
    """The mixing `Categorical` distribution."""
    return self._cat

  @property
  def components(self):
    """The list of component `Distribution`s."""
    return self._components

  @property
  def num_components(self):
    """Statically-known number of components (Python int)."""
    return self._num_components

  def _batch_shape_tensor(self):
    return self._cat.batch_shape_tensor()

  def _batch_shape(self):
    return self._static_batch_shape

  def _event_shape_tensor(self):
    # Components were verified at construction to share an event shape.
    return self._components[0].event_shape_tensor()

  def _event_shape(self):
    return self._static_event_shape

  def _mean(self):
    """Mixture mean: sum_i p_i * mean_i, with cat probs broadcast over events."""
    with ops.control_dependencies(self._assertions):
      distribution_means = [d.mean() for d in self.components]
      cat_probs = self._cat_probs(log_probs=False)
      # This was checked to not be None at construction time.
      static_event_rank = self.event_shape.ndims
      # Expand the rank of x up to static_event_rank times so that
      # broadcasting works correctly.
      def expand(x):
        expanded_x = x
        for _ in range(static_event_rank):
          expanded_x = array_ops.expand_dims(expanded_x, -1)
        return expanded_x
      cat_probs = [expand(c_p) for c_p in cat_probs]
      partial_means = [
          c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
      ]
      # These should all be the same shape by virtue of matching
      # batch_shape and event_shape.
      return math_ops.add_n(partial_means)

  def _log_prob(self, x):
    """log prob(x) = logsumexp_i(log p_i + log prob_i(x))."""
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_probs = [d.log_prob(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_probs = [
          cat_lp + d_lp
          for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
      ]
      concat_log_probs = array_ops.stack(final_log_probs, 0)
      log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
      return log_sum_exp

  def _log_cdf(self, x):
    """log cdf(x) = logsumexp_i(log p_i + log cdf_i(x))."""
    with ops.control_dependencies(self._assertions):
      x = ops.convert_to_tensor(x, name="x")
      distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
      cat_log_probs = self._cat_probs(log_probs=True)
      final_log_cdfs = [
          cat_lp + d_lcdf
          for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
      ]
      concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
      mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
      return mixture_log_cdf

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _sample_n(self, n, seed=None):
    """Sample by partitioning on cat draws and stitching component samples."""
    with ops.control_dependencies(self._assertions):
      n = ops.convert_to_tensor(n, name="n")
      static_n = tensor_util.constant_value(n)
      n = int(static_n) if static_n is not None else n
      cat_samples = self.cat.sample(n, seed=seed)
      # Prefer static shapes/sizes when available; fall back to dynamic ops.
      static_samples_shape = cat_samples.get_shape()
      if static_samples_shape.is_fully_defined():
        samples_shape = static_samples_shape.as_list()
        samples_size = static_samples_shape.num_elements()
      else:
        samples_shape = array_ops.shape(cat_samples)
        samples_size = array_ops.size(cat_samples)
      static_batch_shape = self.batch_shape
      if static_batch_shape.is_fully_defined():
        batch_shape = static_batch_shape.as_list()
        batch_size = static_batch_shape.num_elements()
      else:
        batch_shape = self.batch_shape_tensor()
        batch_size = math_ops.reduce_prod(batch_shape)
      static_event_shape = self.event_shape
      if static_event_shape.is_fully_defined():
        event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
      else:
        event_shape = self.event_shape_tensor()

      # Get indices into the raw cat sampling tensor. We will
      # need these to stitch sample values back out after sampling
      # within the component partitions.
      samples_raw_indices = array_ops.reshape(
          math_ops.range(0, samples_size), samples_shape)

      # Partition the raw indices so that we can use
      # dynamic_stitch later to reconstruct the samples from the
      # known partitions.
      partitioned_samples_indices = data_flow_ops.dynamic_partition(
          data=samples_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)

      # Copy the batch indices n times, as we will need to know
      # these to pull out the appropriate rows within the
      # component partitions.
      batch_raw_indices = array_ops.reshape(
          array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)

      # Explanation of the dynamic partitioning below:
      #   batch indices are i.e., [0, 1, 0, 1, 0, 1]
      # Suppose partitions are:
      #     [1 1 0 0 1 1]
      # After partitioning, batch indices are cut as:
      #     [batch_indices[x] for x in 2, 3]
      #     [batch_indices[x] for x in 0, 1, 4, 5]
      # i.e.
      #     [1 1] and [0 0 0 0]
      # Now we sample n=2 from part 0 and n=4 from part 1.
      # For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
      # and for part 1 we want samples from batch entries 0, 0, 0, 0
      #   (samples 0, 1, 2, 3).
      partitioned_batch_indices = data_flow_ops.dynamic_partition(
          data=batch_raw_indices,
          partitions=cat_samples,
          num_partitions=self.num_components)
      samples_class = [None for _ in range(self.num_components)]

      for c in range(self.num_components):
        n_class = array_ops.size(partitioned_samples_indices[c])
        seed = distribution_util.gen_new_seed(seed, "mixture")
        samples_class_c = self.components[c].sample(n_class, seed=seed)

        # Pull out the correct batch entries from each index.
        # To do this, we may have to flatten the batch shape.

        # For sample s, batch element b of component c, we get the
        # partitioned batch indices from
        # partitioned_batch_indices[c]; and shift each element by
        # the sample index. The final lookup can be thought of as
        # a matrix gather along locations (s, b) in
        # samples_class_c where the n_class rows correspond to
        # samples within this component and the batch_size columns
        # correspond to batch elements within the component.
        #
        # Thus the lookup index is
        #   lookup[c, i] = batch_size * s[i] + b[c, i]
        # for i = 0 ... n_class[c] - 1.
        lookup_partitioned_batch_indices = (
            batch_size * math_ops.range(n_class) +
            partitioned_batch_indices[c])
        samples_class_c = array_ops.reshape(
            samples_class_c,
            array_ops.concat([[n_class * batch_size], event_shape], 0))
        samples_class_c = array_ops.gather(
            samples_class_c, lookup_partitioned_batch_indices,
            name="samples_class_c_gather")
        samples_class[c] = samples_class_c

      # Stitch back together the samples across the components.
      lhs_flat_ret = data_flow_ops.dynamic_stitch(
          indices=partitioned_samples_indices, data=samples_class)
      # Reshape back to proper sample, batch, and event shape.
      ret = array_ops.reshape(lhs_flat_ret,
                              array_ops.concat([samples_shape,
                                                self.event_shape_tensor()], 0))
      ret.set_shape(
          tensor_shape.TensorShape(static_samples_shape).concatenate(
              self.event_shape))
      return ret

  def entropy_lower_bound(self, name="entropy_lower_bound"):
    r"""A lower bound on the entropy of this mixture model.

    The bound below is not always very tight, and its usefulness depends
    on the mixture probabilities and the components in use.

    A lower bound is useful for ELBO when the `Mixture` is the variational
    distribution:

    \\(
    \log p(x) >= ELBO = \int q(z) \log p(x, z) dz + H[q]
    \\)

    where \\( p \\) is the prior distribution, \\( q \\) is the variational,
    and \\( H[q] \\) is the entropy of \\( q \\). If there is a lower bound
    \\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
    place of \\( H[q] \\).

    For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
    \\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
    simple lower bound is:

    \\(
    \begin{align}
    H[q] & = - \int q(z) \log q(z) dz \\\
       & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\
       & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\
       & = \sum_i c_i H[q_i]
    \end{align}
    \\)

    This is the term we calculate below for \\( G[q] \\).

    Args:
      name: A name for this operation (optional).

    Returns:
      A lower bound on the Mixture's entropy.
    """
    with self._name_scope(name, values=[self.cat.logits]):
      with ops.control_dependencies(self._assertions):
        distribution_entropies = [d.entropy() for d in self.components]
        cat_probs = self._cat_probs(log_probs=False)
        partial_entropies = [
            c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
        ]
        # These are all the same shape by virtue of matching batch_shape
        return math_ops.add_n(partial_entropies)

  def _cat_probs(self, log_probs):
    """Get a list of num_components batchwise probabilities."""
    which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
    cat_probs = which_softmax(self.cat.logits)
    # Unstack along the component axis so callers can zip with components.
    cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
    return cat_probs
| apache-2.0 |
bentilly/heroes | heroes/trophies/controllers.py | 1 | 1666 | """Trophies-related controllers.
"""
from flask import Blueprint, render_template, redirect, request
from google.appengine.ext import ndb
from .models import Trophy
trophy_bp = Blueprint('trophy', __name__)
@trophy_bp.route('/new/')
@trophy_bp.route('/new/<sport_key>/', methods=['GET', 'POST'])
def new_trophy_page(sport_key=None):
    """Display the 'New Trophy' page and create a new trophy in the db.

    When ``sport_key`` is given, the new trophy is parented to that sport
    and the sport is added to the breadcrumb/template context.
    """
    data = {'object_title': 'Trophy'}
    if sport_key is not None:
        sport_key = ndb.Key(urlsafe=sport_key)
        sport = sport_key.get()
        data['breadcrumb'] = [sport]
        data['sport_object'] = sport
    # display page.
    if request.method == 'POST':
        # store data.
        entry_data = request.form.to_dict()
        # Consistency fix: use the same ``is not None`` check as above
        # instead of relying on key truthiness.
        if sport_key is not None:
            entry_data['parent'] = sport_key
        trophy = Trophy.create_new_revision(**entry_data)
        # Expose the created trophy to the template, matching what
        # update_trophy does (the variable was previously unused).
        data['trophy_object'] = trophy
    return render_template('/admin/trophy.html', **data)
@trophy_bp.route('/<key>')
def read_trophie(key=None):
    """Display a single trophy (not implemented yet).

    BUG FIX: the route captures ``<key>`` and Flask passes it to the view,
    but the original function accepted no arguments, so every request to
    this URL raised TypeError.  ``key`` now has a default so direct calls
    keep working too.
    """
    pass
@trophy_bp.route('/update/<uid>/', methods=['GET', 'POST'])
def update_trophy(uid):
    """Render the trophy edit form; on POST, store a new revision."""
    # Work from the newest revision of the trophy being edited.
    trophy = Trophy.get_latest_revision(uid)
    data = {
        'object_title': 'Trophy',
        'breadcrumb': [trophy.key.parent().get()],
        'trophy_object': trophy,
    }
    if request.method == 'POST':
        form_values = request.form.to_dict()
        form_values['uid'] = uid
        form_values['parent'] = trophy.key.parent()
        trophy = Trophy.create_new_revision(**form_values)
        data['trophy_object'] = trophy
    return render_template('/admin/trophy.html', **data)
@trophy_bp.route('/<key>')
def delete_trophie(key=None):
    """Delete a trophy (not implemented yet).

    BUG FIX: the route captures ``<key>`` but the original function accepted
    no arguments, so a matching request raised TypeError.

    NOTE(review): this rule is identical to read_trophie's ``'/<key>'`` rule,
    so only one of the two views can ever be dispatched; this one probably
    wants a distinct rule such as ``'/delete/<key>/'`` -- confirm intent
    before changing the URL.
    """
    pass
| apache-2.0 |
gef756/statsmodels | statsmodels/sandbox/contrast_old.py | 34 | 4686 | import copy
import numpy as np
from numpy.linalg import pinv
from statsmodels.sandbox import utils_old as utils
class ContrastResults(object):
    """
    Container for the outcome of a single contrast of coefficients in a
    parametric model.  It has no behaviour beyond storage and display:
    an F contrast stores ``F``, ``df_denom`` and ``df_num``; a T contrast
    stores ``t``, ``sd``, ``effect`` and ``df_denom``.
    """

    def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,
                 df_num=None):
        if F is None:
            # T contrast: estimate, its standard deviation and statistic.
            self.t = t
            self.sd = sd
            self.effect = effect
            self.df_denom = df_denom
        else:
            # F contrast: statistic plus both degrees of freedom.
            self.F = F
            self.df_denom = df_denom
            self.df_num = df_num

    def __array__(self):
        # The array value is whichever statistic was stored.
        if hasattr(self, "F"):
            return self.F
        return self.t

    def __str__(self):
        if hasattr(self, 'F'):
            return '<F contrast: F=%s, df_denom=%d, df_num=%d>' % \
                (repr(self.F), self.df_denom, self.df_num)
        return '<T contrast: effect=%s, sd=%s, t=%s, df_denom=%d>' % \
            (repr(self.effect), repr(self.sd), repr(self.t), self.df_denom)
class Contrast(object):
    """
    This class is used to construct contrast matrices in regression models.

    They are specified by a (term, formula) pair.

    The term, T, is a linear combination of columns of the design
    matrix D=formula(). The matrix attribute is
    a contrast matrix C so that

    colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))

    where pinv(D) is the generalized inverse of D. Further, the matrix

    Tnew = dot(C, D)

    is full rank. The rank attribute is the rank of

    dot(D, dot(pinv(D), T))

    In a regression model, the contrast tests that E(dot(Tnew, Y)) = 0
    for each column of Tnew.
    """

    def __init__(self, term, formula, name=''):
        self.term = term
        self.formula = formula
        # BUG FIX: was ``if name is '':`` -- identity comparison with a
        # string literal is implementation-dependent (and a SyntaxWarning
        # on modern CPython); use equality.
        if name == '':
            self.name = str(term)
        else:
            self.name = name

    def __str__(self):
        return '<contrast:%s>' % \
            repr({'term':str(self.term), 'formula':str(self.formula)})

    def compute_matrix(self, *args, **kw):
        """
        Construct a contrast matrix C so that

        colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))

        where pinv(D) is the generalized inverse of D=self.D=self.formula().

        If the design, self.D is already set,
        then evaldesign can be set to False.
        """
        t = copy.copy(self.term)
        t.namespace = self.formula.namespace
        # Evaluate the term against the design and make it a column matrix.
        T = np.transpose(np.array(t(*args, **kw)))
        if T.ndim == 1:
            T.shape = (T.shape[0], 1)
        self.T = utils.clean0(T)
        self.D = self.formula.design(*args, **kw)
        self._matrix = contrastfromcols(self.T, self.D)
        try:
            self.rank = self.matrix.shape[1]
        except Exception:
            # BUG FIX: was a bare ``except:`` (which also swallowed
            # SystemExit/KeyboardInterrupt); a squeezed 1-D matrix has no
            # second axis, in which case the contrast has rank 1.
            self.rank = 1

    def _get_matrix(self):
        """
        This will fail if the formula needs arguments to construct
        the design.
        """
        if not hasattr(self, "_matrix"):
            self.compute_matrix()
        return self._matrix
    matrix = property(_get_matrix)
def contrastfromcols(L, D, pseudo=None):
    """
    From an n x p design matrix D and a matrix L, tries
    to determine a p x q contrast matrix C which
    determines a contrast of full rank, i.e. the
    n x q matrix

    dot(transpose(C), pinv(D))

    is full rank.

    L must satisfy either L.shape[0] == n or L.shape[1] == p.

    If L.shape[0] == n, then L is thought of as representing
    columns in the column space of D.

    If L.shape[1] == p, then L is thought of as what is known
    as a contrast matrix. In this case, this function returns an estimable
    contrast corresponding to the dot(D, L.T)

    Note that this always produces a meaningful contrast, not always
    with the intended properties because q is always non-zero unless
    L is identically 0. That is, it produces a contrast that spans
    the column space of L (after projection onto the column space of D).

    Parameters
    ----------
    L : array_like
        Either columns of the design (shape (n, q)) or a contrast
        matrix (shape (q, p)).
    D : array_like
        The n x p design matrix.
    pseudo : array_like, optional
        A precomputed pseudo-inverse of D; computed via pinv(D) if omitted.
    """
    L = np.asarray(L)
    D = np.asarray(D)

    n, p = D.shape

    if L.shape[0] != n and L.shape[1] != p:
        raise ValueError('shape of L and D mismatched')

    if pseudo is None:
        pseudo = pinv(D)

    if L.shape[0] == n:
        # L lives in the column space of D: map it back to coefficient space.
        C = np.dot(pseudo, L).T
    else:
        # L is a contrast matrix: project it through D onto its column space.
        C = L
        C = np.dot(pseudo, np.dot(D, C.T)).T

    Lp = np.dot(D, C.T)

    if len(Lp.shape) == 1:
        Lp.shape = (n, 1)

    # If the projected contrast is rank deficient, keep only a full-rank
    # spanning set and re-derive C from it.
    if utils.rank(Lp) != Lp.shape[1]:
        Lp = utils.fullrank(Lp)
        C = np.dot(pseudo, Lp).T

    return np.squeeze(C)
| bsd-3-clause |
adityacs/ansible | lib/ansible/modules/cloud/univention/udm_dns_zone.py | 21 | 7623 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: udm_dns_zone
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage dns zones on a univention corporate server
description:
- "This module allows to manage dns zones on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the dns zone is present or not.
type:
required: true
choices: [ forward_zone, reverse_zone ]
description:
- Define if the zone is a forward or reverse DNS zone.
zone:
required: true
description:
- DNS zone name, e.g. C(example.com).
nameserver:
required: false
description:
- List of appropriate name servers. Required if C(state=present).
interfaces:
required: false
description:
- List of interface IP addresses, on which the server should
response this zone. Required if C(state=present).
refresh:
required: false
default: 3600
description:
- Interval before the zone should be refreshed.
retry:
required: false
default: 1800
description:
- Interval that should elapse before a failed refresh should be retried.
expire:
required: false
default: 604800
description:
- Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
ttl:
required: false
default: 600
description:
- Minimum TTL field that should be exported with any RR from this zone.
contact:
required: false
default: ''
description:
- Contact person in the SOA record.
mx:
required: false
default: []
description:
- List of MX servers. (Must declared as A or AAAA records).
'''
EXAMPLES = '''
# Create a DNS zone on a UCS
- udm_dns_zone:
zone: example.com
type: forward_zone
nameserver:
- ucs.example.com
interfaces:
- 192.0.2.1
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def convert_time(time):
    """Convert a non-negative number of seconds into the biggest whole unit.

    Returns a ``(value, unit_name)`` tuple as expected by the UMC API,
    e.g. ``convert_time(3600) == ('1', 'hours')``.

    Raises:
        ValueError: if ``time`` is negative.  (The original fell off the
            end of the loop and silently returned ``None`` in that case,
            which would later break the zone-object update.)
    """
    units = [
        (24 * 60 * 60, 'days'),
        (60 * 60, 'hours'),
        (60, 'minutes'),
        (1, 'seconds'),
    ]
    if time < 0:
        raise ValueError('time must be non-negative, got {}'.format(time))
    if time == 0:
        return ('0', 'seconds')
    for unit_size, unit_name in units:
        if time >= unit_size:
            return ('{}'.format(time // unit_size), unit_name)
def main():
    """Ansible entry point: ensure the described DNS zone is present/absent.

    Looks the zone up via LDAP, then creates or edits it through the UMC
    API for ``state=present``, or removes it for ``state=absent``.
    Honours check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            type=dict(required=True,
                      type='str'),
            zone=dict(required=True,
                      aliases=['name'],
                      type='str'),
            nameserver=dict(default=[],
                            type='list'),
            interfaces=dict(default=[],
                            type='list'),
            refresh=dict(default=3600,
                         type='int'),
            retry=dict(default=1800,
                       type='int'),
            expire=dict(default=604800,
                        type='int'),
            ttl=dict(default=600,
                     type='int'),
            contact=dict(default='',
                         type='str'),
            mx=dict(default=[],
                    type='list'),
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
        required_if=([
            ('state', 'present', ['nameserver', 'interfaces'])
        ])
    )
    # Renamed from ``type`` so the builtin is not shadowed.
    zone_type = module.params['type']
    zone = module.params['zone']
    nameserver = module.params['nameserver']
    interfaces = module.params['interfaces']
    refresh = module.params['refresh']
    retry = module.params['retry']
    expire = module.params['expire']
    ttl = module.params['ttl']
    contact = module.params['contact']
    mx = module.params['mx']
    state = module.params['state']
    changed = False
    # BUG FIX: ``diff`` was only assigned on the successful ``state=present``
    # path, so ``module.exit_json(diff=diff, ...)`` raised NameError for
    # ``state=absent`` (and for an absent zone).  Initialise it up front.
    diff = None

    obj = list(ldap_search(
        '(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
        attr=['dNSZone']
    ))

    exists = bool(len(obj))
    container = 'cn=dns,{}'.format(base_dn())
    dn = 'zoneName={},{}'.format(zone, container)

    if contact == '':
        contact = 'root@{}.'.format(zone)

    if state == 'present':
        try:
            if not exists:
                obj = umc_module_for_add('dns/{}'.format(zone_type), container)
            else:
                obj = umc_module_for_edit('dns/{}'.format(zone_type), dn)
            obj['zone'] = zone
            obj['nameserver'] = nameserver
            obj['a'] = interfaces
            obj['refresh'] = convert_time(refresh)
            obj['retry'] = convert_time(retry)
            obj['expire'] = convert_time(expire)
            obj['ttl'] = convert_time(ttl)
            obj['contact'] = contact
            obj['mx'] = mx
            diff = obj.diff()
            if exists:
                # Only report a change when some attribute actually differs.
                for k in obj.keys():
                    if obj.hasChanged(k):
                        changed = True
            else:
                changed = True
            if not module.check_mode:
                if not exists:
                    obj.create()
                elif changed:
                    obj.modify()
        except Exception as e:
            module.fail_json(
                msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
            )

    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('dns/{}'.format(zone_type), dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except Exception as e:
            module.fail_json(
                msg='Removing dns zone {} failed: {}'.format(zone, e)
            )

    module.exit_json(
        changed=changed,
        diff=diff,
        zone=zone
    )
if __name__ == '__main__':
main()
| gpl-3.0 |
transientskp/tkp | tests/test_steps/test_varmetric.py | 1 | 1675 | import unittest
import logging
import tkp.db.model
from tkp.testutil.alchemy import gen_band, gen_dataset, gen_skyregion,\
gen_lightcurve
from tkp.testutil.decorators import database_disabled
import tkp.db
from tkp.steps.varmetric import execute_store_varmetric
# Show INFO-level output for the test run, but silence SQLAlchemy's
# very chatty per-statement engine logging.
logging.basicConfig(level=logging.INFO)
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
class TestApi(unittest.TestCase):
    """Integration tests for the varmetric pipeline step.

    The whole class is skipped when database support is disabled in the
    configuration.
    """

    @classmethod
    def setUpClass(cls):
        # Can't use a regular skip here, due to a Nose bug:
        # https://github.com/nose-devs/nose/issues/946
        if database_disabled():
            raise unittest.SkipTest("Database functionality disabled "
                                    "in configuration.")
        cls.db = tkp.db.Database()
        cls.db.connect()

    def setUp(self):
        # Build a minimal dataset -> band/skyregion -> lightcurve fixture
        # and commit it so the step under test can read it back.
        self.session = self.db.Session()
        self.dataset = gen_dataset('test varmetric step')
        band = gen_band(dataset=self.dataset, central=150**6)
        skyregion = gen_skyregion(self.dataset)
        lightcurve = gen_lightcurve(band, self.dataset, skyregion)
        self.session.add_all(lightcurve)
        self.session.flush()
        self.session.commit()

    def test_execute_store_varmetric(self):
        # The step should run cleanly against the fixture dataset.
        session = self.db.Session()
        execute_store_varmetric(session=session, dataset_id=self.dataset.id)
        self.session.flush()

    def test_execute_store_varmetric_twice(self):
        # Running the step a second time must not raise.
        session = self.db.Session()
        execute_store_varmetric(session=session, dataset_id=self.dataset.id)
        self.session.flush()
        execute_store_varmetric(session=session, dataset_id=self.dataset.id)
        self.session.flush()
| bsd-2-clause |
romansalin/django-seo2 | tests/settings.py | 3 | 2490 | """
Django settings for tests project.
"""
from __future__ import unicode_literals
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=)c(th7-3@w*n9mf9_b+2qg685lc6qgfars@yu1g516xu5&is)'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = DEBUG

# Application definition

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.redirects',
    'django.contrib.admin',
    'django.contrib.flatpages',
    'djangoseo',
    'userapp',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or
    # "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    'django.core.context_processors.request',
)

ROOT_URLCONF = 'tests.urls'

# WSGI_APPLICATION = 'tests.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'

SITE_ID = 1

# Dummy cache backend so the tests never touch a real cache.
CACHE_BACKEND = 'dummy://'
# Enable when testing cache
# CACHE_BACKEND = "locmem://?timeout=30&max_entries=400"

# Apps whose models receive SEO metadata (consumed by djangoseo).
SEO_MODELS = ('userapp',)

TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| mit |
mgaitan/scipy | scipy/sparse/bsr.py | 66 | 20937 | """Compressed Block Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats
from .sputils import isshape, getdtype, to_native, upcast, get_index_dtype
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_pass1,
bsr_matmat_pass2, bsr_transpose, bsr_sort_indices)
class bsr_matrix(_cs_matrix, _minmax_mixin):
"""Block Sparse Row matrix
This can be instantiated in several ways:
bsr_matrix(D, [blocksize=(R,C)])
where D is a dense matrix or 2-D ndarray.
bsr_matrix(S, [blocksize=(R,C)])
with another sparse matrix S (equivalent to S.tobsr())
bsr_matrix((M, N), [blocksize=(R,C), dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
bsr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard BSR representation where the block column
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding block values are stored in
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
supplied, the matrix dimensions are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
Data array of the matrix
indices
BSR format index array
indptr
BSR format index pointer array
blocksize
Block size of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
**Summary of BSR format**
The Block Compressed Row (BSR) format is very similar to the Compressed
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
sub matrices like the last example below. Block matrices often arise in
vector-valued finite element discretizations. In such cases, BSR is
considerably more efficient than CSR and CSC for many sparse arithmetic
operations.
**Blocksize**
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
That is, R and C must satisfy the relationship ``M % R = 0`` and
``N % C = 0``.
If no blocksize is specified, a simple heuristic is applied to determine
an appropriate blocksize.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3 ,4, 5, 6])
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
array([[1, 1, 0, 0, 2, 2],
[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 0, 3, 3],
[4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6]])
"""
def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
_data_matrix.__init__(self)
if isspmatrix(arg1):
if isspmatrix_bsr(arg1) and copy:
arg1 = arg1.copy()
else:
arg1 = arg1.tobsr(blocksize=blocksize)
self._set_self(arg1)
elif isinstance(arg1,tuple):
if isshape(arg1):
# it's a tuple of matrix dimensions (M,N)
self.shape = arg1
M,N = self.shape
# process blocksize
if blocksize is None:
blocksize = (1,1)
else:
if not isshape(blocksize):
raise ValueError('invalid blocksize=%s' % blocksize)
blocksize = tuple(blocksize)
self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
R,C = blocksize
if (M % R) != 0 or (N % C) != 0:
raise ValueError('shape must be multiple of blocksize')
idx_dtype = get_index_dtype(maxval=N//C)
self.indices = np.zeros(0, dtype=idx_dtype)
self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
elif len(arg1) == 2:
# (data,(row,col)) format
from .coo import coo_matrix
self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize))
elif len(arg1) == 3:
# (data,indices,indptr) format
(data, indices, indptr) = arg1
idx_dtype = get_index_dtype((indices, indptr), check_contents=True)
self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
else:
raise ValueError('unrecognized bsr_matrix constructor usage')
else:
# must be dense
try:
arg1 = np.asarray(arg1)
except:
raise ValueError("unrecognized form for"
" %s_matrix constructor" % self.format)
from .coo import coo_matrix
arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
self._set_self(arg1)
if shape is not None:
self.shape = shape # spmatrix will check for errors
else:
if self.shape is None:
# shape not already set, try to infer dimensions
try:
M = len(self.indptr) - 1
N = self.indices.max() + 1
except:
raise ValueError('unable to infer matrix dimensions')
else:
R,C = self.blocksize
self.shape = (M*R,N*C)
if self.shape is None:
if shape is None:
# TODO infer shape here
raise ValueError('need to infer shape')
else:
self.shape = shape
if dtype is not None:
self.data = self.data.astype(dtype)
self.check_format(full_check=False)
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        *Parameters*:
            full_check:
                True - rigorous check, O(N) operations : default
                False - basic check, O(1) operations
        """
        M,N = self.shape
        R,C = self.blocksize

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                    % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                    % self.indices.dtype.name)

        # Re-cast both index arrays to a common integer dtype and put the
        # data array in native byte order.
        idx_dtype = get_index_dtype((self.indices, self.indptr))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

        # check array shapes
        if self.indices.ndim != 1 or self.indptr.ndim != 1:
            raise ValueError("indices, and indptr should be 1-D")
        if self.data.ndim != 3:
            raise ValueError("data should be 3-D")

        # check index pointer
        if (len(self.indptr) != M//R + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                                (len(self.indptr), M//R + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                                "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N//C:
                    raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
                if self.indices.min() < 0:
                    raise ValueError("column index values must be >= 0")
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                        "non-decreasing sequence")

        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices(check_first=False)

    def _get_blocksize(self):
        # Block shape (R, C) is implied by the trailing dims of the data array.
        return self.data.shape[1:]
    blocksize = property(fget=_get_blocksize)

    def getnnz(self):
        # Each stored block contributes R*C nonzeros; indptr[-1] counts blocks.
        R,C = self.blocksize
        return int(self.indptr[-1] * R * C)
    nnz = property(fget=getnnz)

    def __repr__(self):
        nnz = self.getnnz()
        format = self.getformat()
        return "<%dx%d sparse matrix of type '%s'\n" \
               "\twith %d stored elements (blocksize = %dx%d) in %s format>" % \
               (self.shape + (self.dtype.type, nnz) + self.blocksize +
                 (_formats[format][1],))

    def diagonal(self):
        """Returns the main diagonal of the matrix
        """
        M,N = self.shape
        R,C = self.blocksize
        y = np.empty(min(M,N), dtype=upcast(self.dtype))
        _sparsetools.bsr_diagonal(M//R, N//C, R, C,
                                  self.indptr, self.indices,
                                  np.ravel(self.data), y)
        return y
    ##########################
    # NotImplemented methods #
    ##########################

    def getdata(self,ind):
        # Element-wise data access is not supported for the BSR format.
        raise NotImplementedError

    def __getitem__(self,key):
        # Indexing is not supported for BSR; convert to CSR/CSC first.
        raise NotImplementedError

    def __setitem__(self,key,val):
        # Item assignment is not supported for the BSR format.
        raise NotImplementedError

    ######################
    # Arithmetic methods #
    ######################

    def matvec(self, other):
        # Kept for backward compatibility; delegates to __mul__.
        return self * other

    def matmat(self, other):
        # Kept for backward compatibility; delegates to __mul__.
        return self * other

    def _mul_vector(self, other):
        """Sparse matrix times dense vector via the C bsr_matvec kernel."""
        M,N = self.shape
        R,C = self.blocksize

        result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))

        bsr_matvec(M//R, N//C, R, C,
            self.indptr, self.indices, self.data.ravel(),
            other, result)

        return result

    def _mul_multivector(self,other):
        """Sparse matrix times a dense matrix of column vectors."""
        R,C = self.blocksize
        M,N = self.shape
        n_vecs = other.shape[1]  # number of column vectors

        result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))

        bsr_matvecs(M//R, N//C, n_vecs, R, C,
                self.indptr, self.indices, self.data.ravel(),
                other.ravel(), result.ravel())

        return result
    def _mul_sparse_matrix(self, other):
        """Sparse*sparse product using the two-pass block SMMP kernels."""
        M, K1 = self.shape
        K2, N = other.shape

        R,n = self.blocksize

        # convert to this format
        if isspmatrix_bsr(other):
            C = other.blocksize[1]
        else:
            C = 1

        from .csr import isspmatrix_csr

        if isspmatrix_csr(other) and n == 1:
            other = other.tobsr(blocksize=(n,C), copy=False)  # lightweight conversion
        else:
            other = other.tobsr(blocksize=(n,C))

        # Pass 1: count the number of blocks per row of the result so the
        # output arrays can be sized exactly.
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=(M//R)*(N//C))
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)

        csr_matmat_pass1(M//R, N//C,
                         self.indptr.astype(idx_dtype),
                         self.indices.astype(idx_dtype),
                         other.indptr.astype(idx_dtype),
                         other.indices.astype(idx_dtype),
                         indptr)

        bnnz = indptr[-1]

        # Pass 2: fill in the block column indices and block values.
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=bnnz)
        indptr = indptr.astype(idx_dtype)
        indices = np.empty(bnnz, dtype=idx_dtype)
        data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))

        bsr_matmat_pass2(M//R, N//C, R, C, n,
                         self.indptr.astype(idx_dtype),
                         self.indices.astype(idx_dtype),
                         np.ravel(self.data),
                         other.indptr.astype(idx_dtype),
                         other.indices.astype(idx_dtype),
                         np.ravel(other.data),
                         indptr,
                         indices,
                         data)

        data = data.reshape(-1,R,C)

        # TODO eliminate zeros

        return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
    ######################
    # Conversion methods #
    ######################

    def tobsr(self,blocksize=None,copy=False):
        # A different blocksize requires a round-trip through CSR.
        if blocksize not in [None, self.blocksize]:
            return self.tocsr().tobsr(blocksize=blocksize)
        if copy:
            return self.copy()
        else:
            return self

    def tocsr(self):
        return self.tocoo(copy=False).tocsr()

    # TODO make this more efficient
    def tocsc(self):
        return self.tocoo(copy=False).tocsc()

    def tocoo(self,copy=True):
        """Convert this matrix to COOrdinate format.

        When copy=False the data array will be shared between
        this matrix and the resultant coo_matrix.
        """
        M,N = self.shape
        R,C = self.blocksize

        indptr_diff = np.diff(self.indptr)
        if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
            # Check for potential overflow
            indptr_diff_limited = indptr_diff.astype(np.intp)
            if np.any(indptr_diff_limited != indptr_diff):
                raise ValueError("Matrix too big to convert")
            indptr_diff = indptr_diff_limited

        # Expand every stored block into its R*C (row, col) coordinates.
        row = (R * np.arange(M//R)).repeat(indptr_diff)
        row = row.repeat(R*C).reshape(-1,R,C)
        row += np.tile(np.arange(R).reshape(-1,1), (1,C))
        row = row.reshape(-1)

        col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
        col += np.tile(np.arange(C), (R,1))
        col = col.reshape(-1)

        data = self.data.reshape(-1)

        if copy:
            data = data.copy()

        from .coo import coo_matrix
        return coo_matrix((data,(row,col)), shape=self.shape)

    def transpose(self):
        """Return the transpose as a bsr_matrix with transposed blocks."""
        R,C = self.blocksize
        M,N = self.shape
        NBLK = self.nnz//(R*C)

        if self.nnz == 0:
            return bsr_matrix((N,M), blocksize=(C,R),
                              dtype=self.dtype)

        indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
        indices = np.empty(NBLK, dtype=self.indices.dtype)
        data = np.empty((NBLK,C,R), dtype=self.data.dtype)

        bsr_transpose(M//R, N//C, R, C,
                      self.indptr, self.indices, self.data.ravel(),
                      indptr, indices, data.ravel())

        return bsr_matrix((data,indices,indptr), shape=(N,M))
    ##############################################################
    # methods that examine or modify the internal data structure #
    ##############################################################

    def eliminate_zeros(self):
        """Remove blocks that contain only zeros (modifies self in place)."""
        R,C = self.blocksize
        M,N = self.shape

        mask = (self.data != 0).reshape(-1,R*C).sum(axis=1)  # nonzero blocks

        nonzero_blocks = mask.nonzero()[0]

        if len(nonzero_blocks) == 0:
            return  # nothing to do

        self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]

        from .csr import csr_matrix

        # modifies self.indptr and self.indices *in place*
        # since CSR constructor may end up in making copies (in case
        # our index arrays are invalid in some way), play it safe
        proxy = csr_matrix((mask,self.indices,self.indptr),shape=(M//R,N//C))
        proxy.indices = self.indices
        proxy.indptr = self.indptr
        proxy.eliminate_zeros()

        self.prune()

    def sum_duplicates(self):
        # Not implemented for the BSR format.
        raise NotImplementedError

    def sort_indices(self):
        """Sort the indices of this matrix *in place*
        """
        if self.has_sorted_indices:
            return

        R,C = self.blocksize
        M,N = self.shape

        bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())

        self.has_sorted_indices = True

    def prune(self):
        """ Remove empty space after all non-zero elements.
        """
        R,C = self.blocksize
        M,N = self.shape

        if len(self.indptr) != M//R + 1:
            raise ValueError("index pointer has invalid length")

        bnnz = self.indptr[-1]

        if len(self.indices) < bnnz:
            raise ValueError("indices array has too few elements")
        if len(self.data) < bnnz:
            raise ValueError("data array has too few elements")

        # Truncate any slack capacity beyond the last stored block.
        self.data = self.data[:bnnz]
        self.indices = self.indices[:bnnz]
# utility functions
    def _binopt(self, other, op, in_shape=None, out_shape=None):
        """Apply the binary operation fn to two sparse matrices."""
        # Ideally we'd take the GCDs of the blocksize dimensions
        # and explode self and other to match.
        other = self.__class__(other, blocksize=self.blocksize)

        # e.g. bsr_plus_bsr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)

        R,C = self.blocksize

        # Worst case: no blocks overlap, so the result may hold both operands.
        max_bnnz = len(self.data) + len(other.data)
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=max_bnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(max_bnnz, dtype=idx_dtype)

        # Comparison ops produce boolean data; everything else upcasts.
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(R*C*max_bnnz, dtype=np.bool_)
        else:
            data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))

        fn(self.shape[0]//R, self.shape[1]//C, R, C,
           self.indptr.astype(idx_dtype),
           self.indices.astype(idx_dtype),
           np.ravel(self.data),
           other.indptr.astype(idx_dtype),
           other.indices.astype(idx_dtype),
           np.ravel(other.data),
           indptr,
           indices,
           data)

        actual_bnnz = indptr[-1]
        indices = indices[:actual_bnnz]
        data = data[:R*C*actual_bnnz]

        # Copy when the over-allocation was substantial, to release memory.
        if actual_bnnz < max_bnnz/2:
            indices = indices.copy()
            data = data.copy()

        data = data.reshape(-1,R,C)

        return self.__class__((data, indices, indptr), shape=self.shape)

    # needed by _data_matrix
    def _with_data(self,data,copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data.  By default the structure arrays
        (i.e. .indptr and .indices) are copied.
        """
        if copy:
            return self.__class__((data,self.indices.copy(),self.indptr.copy()),
                                   shape=self.shape,dtype=data.dtype)
        else:
            return self.__class__((data,self.indices,self.indptr),
                                   shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
# # to remove redudancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
    """Return True if ``x`` is a ``bsr_matrix`` instance."""
    return isinstance(x, bsr_matrix)
| bsd-3-clause |
yueyueyang/inf1340_2015_asst1 | test_exercise2.py | 16 | 2575 | #!/usr/bin/env python
""" Module to test exercise2.py """
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
import pytest
import mock
from exercise2 import name_that_shape
def test_accepted_inputs(capsys):
    """
    Inputs that are in the range 3-10 inclusive print the shape name.

    Refactored from eight copy-pasted stanzas into a data-driven loop:
    each (input, expected-shape) pair is exercised identically.
    """
    cases = [
        ("3", "triangle"),
        ("4", "quadrilateral"),
        ("5", "pentagon"),
        ("6", "hexagon"),
        ("7", "heptagon"),
        ("8", "octagon"),
        ("9", "nonagon"),
        ("10", "decagon"),
    ]
    for sides, shape in cases:
        # name_that_shape() reads its input via raw_input, so patch the
        # builtin to feed it the simulated user entry
        with mock.patch("__builtin__.raw_input", return_value=sides):
            name_that_shape()
        out, err = capsys.readouterr()
        assert out == shape + "\n"
def test_rejected_inputs(capsys):
    """
    Inputs outside the 3-10 range (including non-numeric ones) print Error.

    Refactored from repeated stanzas into a loop over the rejected values.
    """
    for bad_value in ("1", "2", "11", "-3", "string"):
        with mock.patch("__builtin__.raw_input", return_value=bad_value):
            name_that_shape()
        out, err = capsys.readouterr()
        assert out == "Error\n"
ntoll/yotta | yotta/lib/component.py | 2 | 27734 | # Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import json
import os
import logging
import os.path as path
from collections import OrderedDict
import subprocess
# access, , get components, internal
import access
import access_common
# pool, , shared thread pool, internal
from pool import pool
# version, , represent versions and specifications, internal
import version
# vcs, , represent version controlled directories, internal
import vcs
# fsutils, , misc filesystem utils, internal
import fsutils
# Pack, , common parts of Components/Targets, internal
import pack
# Target, , represent an installed target, internal
import target
# sourceparse, , parse version source urls, internal
import sourceparse
# !!! FIXME: should components lock their description file while they exist?
# If not there are race conditions where the description file is modified by
# another process (or in the worst case replaced by a symlink) after it has
# been opened and before it is re-written
# Constants
Modules_Folder = 'yotta_modules'
Targets_Folder = 'yotta_targets'
Component_Description_File = 'module.json'
Component_Description_File_Fallback = 'package.json'
Registry_Namespace = 'modules'
Schema_File = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema', 'module.json')
logger = logging.getLogger('components')
VVVERBOSE_DEBUG = logging.DEBUG - 8
# API
class Component(pack.Pack):
def __init__(self, path, installed_linked=False, latest_suitable_version=None, test_dependency=False):
    ''' How to use a Component:
    Initialise it with the directory into which the component has been
    downloaded, (or with a symlink that points to a directory
    containing the component)
    Check that 'if component:' is true, which indicates that the
    download is indeed a valid component.
    Check that component.getVersion() returns the version you think
    you've downloaded.
    Use component.getDependencySpecs() to get the names of the
    dependencies of the component, or component.getDependencies() to
    get Component objects (which may not be valid unless the
    dependencies have been installed) for each of the dependencies.
    '''
    logger.log(VVVERBOSE_DEBUG, "Component: " + path + ' installed_linked=' + str(installed_linked))
    warn_deprecated_filename = False
    # Prefer module.json; fall back to the legacy package.json name only
    # when module.json is absent but package.json exists.
    if (not os.path.exists(os.path.join(path, Component_Description_File))) and \
       os.path.exists(os.path.join(path, Component_Description_File_Fallback)):
        warn_deprecated_filename = True
        description_filename = Component_Description_File_Fallback
    else:
        description_filename = Component_Description_File
    # pack.Pack does the actual loading/validation of the description file
    super(Component, self).__init__(
        path,
        description_filename = description_filename,
        installed_linked = installed_linked,
        schema_filename = Schema_File,
        latest_suitable_version = latest_suitable_version
    )
    # warn only after super().__init__ so getName() is available
    if warn_deprecated_filename:
        logger.warning(
            "Component %s uses deprecated %s file, use %s instead." % (
                self.getName(),
                Component_Description_File_Fallback,
                Component_Description_File
            )
        )
    # installed_dependencies: set True once a dependency-resolution pass ran
    # dependencies_failed: set True if any dependency could not be satisfied
    self.installed_dependencies = False
    self.dependencies_failed = False
    self.is_test_dependency = test_dependency
def getDependencySpecs(self, target=None):
    ''' Returns [DependencySpec]
    These are returned in the order that they are listed in the
    component description file: this is so that dependency resolution
    proceeds in a predictable way.
    '''
    deps = []
    # plain dependencies first (third DependencySpec arg False == not a test dep)
    deps += [pack.DependencySpec(x[0], x[1], False) for x in self.description.get('dependencies', {}).items()]
    # then target-conditional dependencies, for every config section that
    # applies to the given target
    target_deps = self.description.get('targetDependencies', {})
    if target is not None:
        for conf_key, target_conf_deps in target_deps.items():
            if target.getConfigValue(conf_key) or conf_key in target.getSimilarTo_Deprecated():
                logger.debug(
                    'Adding target-dependent dependency specs for target config %s to component %s' %
                    (conf_key, self.getName())
                )
                deps += [pack.DependencySpec(x[0], x[1], False) for x in target_conf_deps.items()]
    # test dependencies last (third arg True), unconditional then target-conditional
    deps += [pack.DependencySpec(x[0], x[1], True) for x in self.description.get('testDependencies', {}).items()]
    target_deps = self.description.get('testTargetDependencies', {})
    if target is not None:
        for conf_key, target_conf_deps in target_deps.items():
            if target.getConfigValue(conf_key) or conf_key in target.getSimilarTo_Deprecated():
                logger.debug(
                    'Adding test-target-dependent dependency specs for target config %s to component %s' %
                    (conf_key, self.getName())
                )
                deps += [pack.DependencySpec(x[0], x[1], True) for x in target_conf_deps.items()]
    # remove duplicates (use the first occurrence) - so a real dependency
    # takes precedence over a later test dependency of the same name
    seen = set()
    r = []
    for dep in deps:
        if not dep.name in seen:
            r.append(dep)
            seen.add(dep.name)
    return r
def hasDependency(self, name, target=None):
    ''' Check if this module has any dependencies with the specified name
    in its dependencies list, or in target dependencies for the
    specified target
    '''
    # unconditional dependencies:
    if name in self.description.get('dependencies', {}):
        return True
    if target is None:
        return False
    # target-conditional dependencies, for each config section that
    # applies to this target:
    for conf_key, conf_deps in self.description.get('targetDependencies', {}).items():
        applies = (target.getConfigValue(conf_key) or
                   conf_key in target.getSimilarTo_Deprecated())
        if applies and name in conf_deps:
            return True
    return False
def getDependencies(self,
        available_components = None,
        search_dirs = None,
        target = None,
        available_only = False,
        test = False,
        warnings = True
    ):
    ''' Returns {component_name:component}

    Non-recursive: resolves only this component's direct dependencies,
    using already-installed components (it never downloads anything).
    If available_only is True, dependencies that could not be found are
    omitted from the result instead of appearing as falsy placeholders.
    '''
    if search_dirs is None:
        search_dirs = [self.modulesPath()]
    available_components = self.ensureOrderedDict(available_components)
    components, errors = self.__getDependenciesWithProvider(
        available_components = available_components,
        search_dirs = search_dirs,
        target = target,
        update_installed = False,
        provider = self.provideInstalled,
        test = test
    )
    if warnings:
        for error in errors:
            logger.warning(error)
    if available_only:
        # drop entries whose Component is missing/invalid (falsy)
        components = OrderedDict((k, v) for k, v in components.items() if v)
    return components
def __getDependenciesWithProvider(self,
        available_components = None,
        search_dirs = None,
        target = None,
        update_installed = False,
        provider = None,
        test = False
    ):
    ''' Get installed components using "provider" to find (and possibly
    install) components.
    See documentation for __getDependenciesRecursiveWithProvider
    returns (components, errors)
    '''
    errors = []
    modules_path = self.modulesPath()
    def satisfyDep(dspec):
        # Returns a Component for the spec, or None if the provider
        # raised; errors are accumulated in the enclosing scope.
        try:
            r = provider(
                dspec,
                available_components,
                search_dirs,
                modules_path,
                update_installed,
                self.getName()
            )
            # a component was found, but at a version that doesn't satisfy
            # the spec: mark it errored rather than discarding it
            if r and not sourceparse.parseSourceURL(dspec.version_req).semanticSpecMatches(r.getVersion()):
                logger.debug('%s does not meet specification %s required by %s' % (r.getName(), dspec.version_req, self.getName()))
                r.setError('does not meet specification %s required by %s' % (dspec.version_req, self.getName()))
            return r
        except access_common.Unavailable as e:
            errors.append(e)
            self.dependencies_failed = True
        except vcs.VCSError as e:
            errors.append(e)
            self.dependencies_failed = True
    specs = self.getDependencySpecs(target=target)
    if not test:
        # not installing test dependencies: drop the test-only specs
        specs = [x for x in specs if not x.is_test_dependency]
    #dependencies = pool.map(
    dependencies = map(
        satisfyDep, specs
    )
    self.installed_dependencies = True
    # stable order is important! (key falls back to the spec name when the
    # provider returned nothing for a dependency)
    return (OrderedDict([((d and d.getName()) or specs[i].name, d) for i, d in enumerate(dependencies)]), errors)
def __getDependenciesRecursiveWithProvider(self,
        available_components = None,
        search_dirs = None,
        target = None,
        traverse_links = False,
        update_installed = False,
        provider = None,
        test = False,
        _processed = None
    ):
    ''' Get installed components using "provider" to find (and possibly
    install) components.
    This function is called with different provider functions in order
    to retrieve a list of all of the dependencies, or install all
    dependencies.
    Returns
    =======
    (components, errors)
    components: dictionary of name:Component
    errors: sequence of errors
    Parameters
    ==========
    available_components:
        None (default) or a dictionary of name:component. This is
        searched before searching directories or fetching remote
        components
    search_dirs:
        None (default), or sequence of directories to search for
        already installed, (but not yet loaded) components. Used so
        that manually installed or linked components higher up the
        dependency tree are found by their users lower down.
        These directories are searched in order, and finally the
        current directory is checked.
    target:
        None (default), or a Target object. If specified the target
        name and it's similarTo list will be used in resolving
        dependencies. If None, then only target-independent
        dependencies will be installed
    traverse_links:
        False (default) or True: whether to recurse into linked
        dependencies. You normally want to set this to "True" when
        getting a list of dependencies, and False when installing
        them (unless the user has explicitly asked dependencies to
        be installed in linked components).
    provider: None (default) or function:
        provider(
            dependency_spec,
            available_components,
            search_dirs,
            working_directory,
            update_if_installed
        )
    test:
        True, False, 'toplevel': should test-only dependencies be
        included (yes, no, or only at this level, not recursively)
    '''
    def recursionFilter(c):
        # decide whether a resolved component's own dependencies should
        # be processed on this pass
        if not c:
            logger.debug('do not recurse into failed component')
            # don't recurse into failed components
            return False
        if c.getName() in _processed:
            logger.debug('do not recurse into already processed component: %s' % c)
            return False
        if c.installedLinked() and not traverse_links:
            return False
        return True
    available_components = self.ensureOrderedDict(available_components)
    if search_dirs is None:
        search_dirs = []
    if _processed is None:
        _processed = set()
    assert(test in [True, False, 'toplevel'])
    # NOTE(review): this appends to the caller's search_dirs list in place,
    # so deeper modules' paths accumulate as the recursion descends - this
    # appears intentional (see search_dirs docs above)
    search_dirs.append(self.modulesPath())
    logger.debug('process %s\nsearch dirs:%s' % (self.getName(), search_dirs))
    if self.isTestDependency():
        logger.debug("won't provide test dependencies recursively for test dependency %s", self.getName())
        test = False
    components, errors = self.__getDependenciesWithProvider(
        available_components = available_components,
        search_dirs = search_dirs,
        update_installed = update_installed,
        target = target,
        provider = provider,
        test = test
    )
    _processed.add(self.getName())
    if errors:
        errors = ['Failed to satisfy dependencies of %s:' % self.path] + errors
    need_recursion = filter(recursionFilter, components.values())
    available_components.update(components)
    logger.debug('processed %s\nneed recursion: %s\navailable:%s\nsearch dirs:%s' % (self.getName(), need_recursion, available_components, search_dirs))
    # 'toplevel' means: include test deps here, but not below this level
    if test == 'toplevel':
        test = False
    # NB: can't perform this step in parallel, since the available
    # components list must be updated in order
    for c in need_recursion:
        dep_components, dep_errors = c.__getDependenciesRecursiveWithProvider(
            available_components = available_components,
            search_dirs = search_dirs,
            target = target,
            traverse_links = traverse_links,
            update_installed = update_installed,
            provider = provider,
            test = test,
            _processed = _processed
        )
        available_components.update(dep_components)
        components.update(dep_components)
        errors += dep_errors
    return (components, errors)
def provideInstalled(self,
dspec,
available_components,
search_dirs,
working_directory,
update_if_installed,
dep_of_name
):
r = access.satisfyFromAvailable(dspec.name, available_components)
if r:
if r.isTestDependency() and not dspec.is_test_dependency:
logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
r.setTestDependency(False)
return r
r = access.satisfyVersionFromSearchPaths(dspec.name, dspec.version_req, search_dirs, update_if_installed)
if r:
r.setTestDependency(dspec.is_test_dependency)
return r
# return a module initialised to the path where we would have
# installed this module, so that it's possible to use
# getDependenciesRecursive to find a list of failed dependencies,
# as well as just available ones
# note that this Component object may still be valid (usable to
# attempt a build), if a different version was previously installed
# on disk at this location (which means we need to check if the
# existing version is linked)
default_path = os.path.join(self.modulesPath(), dspec.name)
r = Component(
default_path,
test_dependency = dspec.is_test_dependency,
installed_linked = fsutils.isLink(default_path)
)
return r
def getDependenciesRecursive(self,
        available_components = None,
        processed = None,
        search_dirs = None,
        target = None,
        available_only = False,
        test = False
    ):
    ''' Get available and already installed components, don't check for
    remotely available components. See also
    satisfyDependenciesRecursive()
    Returns {component_name:component}
    '''
    # NOTE(review): the `processed` parameter is accepted but not forwarded;
    # recursion tracking is handled internally via _processed - confirm
    # whether any caller relies on passing it.
    components, errors = self.__getDependenciesRecursiveWithProvider(
        available_components = available_components,
        search_dirs = search_dirs,
        target = target,
        traverse_links = True,
        update_installed = False,
        provider = self.provideInstalled,
        test = test
    )
    for error in errors:
        logger.error(error)
    if available_only:
        # drop dependencies that could not be satisfied (falsy values)
        components = OrderedDict((k, v) for k, v in components.items() if v)
    return components
def modulesPath(self):
    # directory into which this component's dependencies are installed
    return os.path.join(self.path, Modules_Folder)
def targetsPath(self):
    # directory into which build targets are installed for this component
    return os.path.join(self.path, Targets_Folder)
def satisfyDependenciesRecursive(
        self,
        available_components = None,
        search_dirs = None,
        update_installed = False,
        traverse_links = False,
        target = None,
        test = False
    ):
    ''' Retrieve and install all the dependencies of this component and its
        dependencies, recursively, or satisfy them from a collection of
        available_components or from disk.

        Returns
        =======
        (components, errors)
        components: dictionary of name:Component
        errors: sequence of errors

        Parameters
        ==========
        available_components:
            None (default) or a dictionary of name:component. This is
            searched before searching directories or fetching remote
            components
        search_dirs:
            None (default), or sequence of directories to search for
            already installed, (but not yet loaded) components. Used so
            that manually installed or linked components higher up the
            dependency tree are found by their users lower down.
            These directories are searched in order, and finally the
            current directory is checked.
        update_installed:
            False (default), or True: whether to check the available
            versions of installed components, and update if a newer
            version is available.
        traverse_links:
            False (default) or True: whether to recurse into linked
            dependencies when updating/installing.
        target:
            None (default), or a Target object. If specified the target
            name and it's similarTo list will be used in resolving
            dependencies. If None, then only target-independent
            dependencies will be installed
        test:
            True, False, or 'toplevel': should test-only dependencies be
            installed? (yes, no, or only for this module, not its
            dependencies).
    '''
    def provider(
        dspec,
        available_components,
        search_dirs,
        working_directory,
        update_if_installed,
        dep_of_name=None
    ):
        # 1) already loaded this run?
        r = access.satisfyFromAvailable(dspec.name, available_components)
        if r:
            if r.isTestDependency() and not dspec.is_test_dependency:
                logger.debug('test dependency subsequently occurred as real dependency: %s', r.getName())
                r.setTestDependency(False)
            return r
        # 2) already installed somewhere on the search path?
        r = access.satisfyVersionFromSearchPaths(dspec.name, dspec.version_req, search_dirs, update_if_installed)
        if r:
            r.setTestDependency(dspec.is_test_dependency)
            return r
        # before resorting to install this module, check if we have an
        # existing linked module (which wasn't picked up because it didn't
        # match the version specification) - if we do, then we shouldn't
        # try to install, but should return that anyway:
        default_path = os.path.join(self.modulesPath(), dspec.name)
        if fsutils.isLink(default_path):
            r = Component(
                default_path,
                test_dependency = dspec.is_test_dependency,
                installed_linked = fsutils.isLink(default_path)
            )
            if r:
                assert(r.installedLinked())
                return r
            else:
                logger.error('linked module %s is invalid: %s', dspec.name, r.getError())
                return r
        # 3) fetch and install the module
        r = access.satisfyVersionByInstalling(dspec.name, dspec.version_req, self.modulesPath())
        if not r:
            # fix: this previously referenced an undefined local `name`,
            # raising NameError whenever an install failed; the dependency
            # name lives on the spec object
            logger.error('could not install %s' % dspec.name)
        if r is not None:
            r.setTestDependency(dspec.is_test_dependency)
        return r
    return self.__getDependenciesRecursiveWithProvider(
        available_components = available_components,
        search_dirs = search_dirs,
        target = target,
        traverse_links = traverse_links,
        update_installed = update_installed,
        provider = provider,
        test = test
    )
def satisfyTarget(self, target_name_and_version, update_installed=False):
    ''' Ensure that the specified target name (and optionally version,
    github ref or URL) is installed in the targets directory of the
    current component

    Returns whatever target.getDerivedTarget returns for the resolved
    target hierarchy.
    '''
    # applications may carry their own config overriding the target's
    application_dir = None
    if self.isApplication():
        application_dir = self.path
    return target.getDerivedTarget(
        target_name_and_version,
        self.targetsPath(),
        application_dir = application_dir,
        update_installed = update_installed
    )
def installedDependencies(self):
    ''' Return true if satisfyDependencies has been called.
    Note that this is slightly different to when all of the
    dependencies are actually satisfied, but can be used as if it means
    that.
    '''
    # flag is set in __getDependenciesWithProvider after each resolution pass
    return self.installed_dependencies
def isApplication(self):
    ''' Return true if this module is an application instead of a reusable
    library '''
    # a module is an application iff it declares at least one binary;
    # truthiness of the dict replaces the old bool(len(...)) idiom
    return bool(self.getBinaries())
def getBinaries(self):
    ''' Return a dictionary of binaries to compile: {"dirname":"exename"},
    this is used when automatically generating CMakeLists

    Returns an empty dict when the module declares no binaries
    (i.e. it is a library).
    '''
    # the module.json syntax is a subset of the package.json syntax: a
    # single string that defines the source directory to use to build an
    # executable with the same name as the component. This may be extended
    # to include the rest of the npm syntax in future (map of source-dir to
    # exe name).
    if 'bin' in self.description:
        return {self.description['bin']: self.getName()}
    else:
        return {}
def licenses(self):
    ''' Return a list of licenses that apply to this module. (Strings,
    which may be SPDX identifiers)
    '''
    # EAFP: a single 'license' entry wins; otherwise fall back to the
    # (plural) 'licenses' list of {'type': ...} objects
    try:
        return [self.description['license']]
    except KeyError:
        return [entry['type'] for entry in self.description['licenses']]
def getExtraIncludes(self):
    ''' Some components must export whole directories full of headers into
    the search path. This is really really bad, and they shouldn't do
    it, but support is provided as a concession to compatibility.
    '''
    # missing key yields an empty list, same as the explicit branch would
    paths = self.description.get('extraIncludes', [])
    return [os.path.normpath(p) for p in paths]
def getExtraSysIncludes(self):
    ''' Some components (e.g. libc) must export directories of header files
    into the system include search path. They do this by adding a
    'extraSysIncludes' : [ array of directories ] field in their
    package description. This function returns the list of directories
    (or an empty list), if it doesn't exist.
    '''
    paths = self.description.get('extraSysIncludes', [])
    return [os.path.normpath(p) for p in paths]
def getRegistryNamespace(self):
    # components are published under the 'modules' namespace in the registry
    return Registry_Namespace
def setTestDependency(self, status):
    # mark/unmark this component as being pulled in only by testDependencies
    self.is_test_dependency = status
def isTestDependency(self):
    # True if this component was (so far) only required as a test dependency
    return self.is_test_dependency
def __saveSpecForComponent(self, component):
    ''' Return the semver range spec to save for "component" based on its
    current version: '*' for tip, '~x.y.z' for 0.x versions, '^x.y.z'
    otherwise.
    '''
    # local renamed from "version" so it no longer shadows the imported
    # `version` module
    ver = component.getVersion()
    if ver.isTip():
        spec = '*'
    elif ver.major() == 0:
        # for 0.x.x versions, when we save a dependency we don't use ^0.x.x
        # as that would peg to the exact version - instead we use ~ to peg
        # to the same minor version
        spec = '~' + str(ver)
    else:
        spec = '^' + str(ver)
    return spec
def saveDependency(self, component, spec=None):
    ''' Record "component" in this module's dependencies section, using
    "spec" as the version specification (derived from the component's
    current version when not given). Returns the spec that was saved.
    '''
    # create the dependencies section on first use, keeping existing order
    deps = self.description.setdefault('dependencies', OrderedDict())
    if spec is None:
        spec = self.__saveSpecForComponent(component)
    deps[component.getName()] = spec
    return spec
def removeDependency(self, component):
    ''' Remove a dependency from this module's description. Returns True
    on success, False if no such dependency was listed.

    Note: despite the parameter name, "component" is the dependency's
    name (a string), not a Component object.
    '''
    if not component in self.description.get('dependencies', {}):
        logger.error('%s is not listed as a dependency', component)
        return False
    del self.description['dependencies'][component]
    return True
def getTestFilterCommand(self):
    ''' return the test-output filtering command (array of strings) that
    this module defines, if any, otherwise None. '''
    # chained .get() collapses the two-level membership test; missing
    # 'scripts' or 'testReporter' both yield None, as before
    return self.description.get('scripts', {}).get('testReporter')
| apache-2.0 |
yousafsyed/casperjs | bin/Lib/test/test_email/test_headerregistry.py | 72 | 56910 | import datetime
import textwrap
import unittest
from email import errors
from email import policy
from email.message import Message
from test.test_email import TestEmailBase, parameterize
from email import headerregistry
from email.headerregistry import Address, Group
DITTO = object()
class TestHeaderRegistry(TestEmailBase):
    """Tests for the HeaderRegistry factory: default class selection,
    case-insensitive lookup, and class-override hooks."""

    def test_arbitrary_name_unstructured(self):
        # unknown header names fall back to UnstructuredHeader
        factory = headerregistry.HeaderRegistry()
        h = factory('foobar', 'test')
        self.assertIsInstance(h, headerregistry.BaseHeader)
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)

    def test_name_case_ignored(self):
        factory = headerregistry.HeaderRegistry()
        # Whitebox check that test is valid
        self.assertNotIn('Subject', factory.registry)
        h = factory('Subject', 'test')
        self.assertIsInstance(h, headerregistry.BaseHeader)
        self.assertIsInstance(h, headerregistry.UniqueUnstructuredHeader)

    # minimal stand-in base class accepted by the registry's base_class hook
    class FooBase:
        def __init__(self, *args, **kw):
            pass

    def test_override_default_base_class(self):
        factory = headerregistry.HeaderRegistry(base_class=self.FooBase)
        h = factory('foobar', 'test')
        self.assertIsInstance(h, self.FooBase)
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)

    # minimal stand-in default class; reuses the stock parse implementation
    class FooDefault:
        parse = headerregistry.UnstructuredHeader.parse

    def test_override_default_class(self):
        factory = headerregistry.HeaderRegistry(default_class=self.FooDefault)
        h = factory('foobar', 'test')
        self.assertIsInstance(h, headerregistry.BaseHeader)
        self.assertIsInstance(h, self.FooDefault)

    def test_override_default_class_only_overrides_default(self):
        # registered names (e.g. 'subject') still get their mapped class
        factory = headerregistry.HeaderRegistry(default_class=self.FooDefault)
        h = factory('subject', 'test')
        self.assertIsInstance(h, headerregistry.BaseHeader)
        self.assertIsInstance(h, headerregistry.UniqueUnstructuredHeader)

    def test_dont_use_default_map(self):
        factory = headerregistry.HeaderRegistry(use_default_map=False)
        h = factory('subject', 'test')
        self.assertIsInstance(h, headerregistry.BaseHeader)
        self.assertIsInstance(h, headerregistry.UnstructuredHeader)

    def test_map_to_type(self):
        factory = headerregistry.HeaderRegistry()
        h1 = factory('foobar', 'test')
        factory.map_to_type('foobar', headerregistry.UniqueUnstructuredHeader)
        h2 = factory('foobar', 'test')
        # h1 was created before the mapping, h2 after
        self.assertIsInstance(h1, headerregistry.BaseHeader)
        self.assertIsInstance(h1, headerregistry.UnstructuredHeader)
        self.assertIsInstance(h2, headerregistry.BaseHeader)
        self.assertIsInstance(h2, headerregistry.UniqueUnstructuredHeader)
class TestHeaderBase(TestEmailBase):
    """Shared base for header tests: provides a factory and a helper to
    construct header objects."""
    factory = headerregistry.HeaderRegistry()

    def make_header(self, name, value):
        return self.factory(name, value)
class TestBaseHeaderFeatures(TestHeaderBase):
    """Behaviour common to all BaseHeader instances: str-ness, read-only
    attributes, and the defects tuple."""

    def test_str(self):
        h = self.make_header('subject', 'this is a test')
        self.assertIsInstance(h, str)
        self.assertEqual(h, 'this is a test')
        self.assertEqual(str(h), 'this is a test')

    def test_substr(self):
        h = self.make_header('subject', 'this is a test')
        self.assertEqual(h[5:7], 'is')

    def test_has_name(self):
        h = self.make_header('subject', 'this is a test')
        self.assertEqual(h.name, 'subject')

    def _test_attr_ro(self, attr):
        # helper: assert that `attr` cannot be rebound on a header
        h = self.make_header('subject', 'this is a test')
        with self.assertRaises(AttributeError):
            setattr(h, attr, 'foo')

    def test_name_read_only(self):
        self._test_attr_ro('name')

    def test_defects_read_only(self):
        self._test_attr_ro('defects')

    def test_defects_is_tuple(self):
        h = self.make_header('subject', 'this is a test')
        self.assertEqual(len(h.defects), 0)
        self.assertIsInstance(h.defects, tuple)
        # Make sure it is still true when there are defects.
        h = self.make_header('date', '')
        self.assertEqual(len(h.defects), 1)
        self.assertIsInstance(h.defects, tuple)

    # XXX: FIXME
    #def test_CR_in_value(self):
    #    # XXX: this also re-raises the issue of embedded headers,
    #    # need test and solution for that.
    #    value = '\r'.join(['this is', ' a test'])
    #    h = self.make_header('subject', value)
    #    self.assertEqual(h, value)
    #    self.assertDefectsEqual(h.defects, [errors.ObsoleteHeaderDefect])
@parameterize
class TestUnstructuredHeader(TestHeaderBase):
    # Parameterized via the `string_params` table below: each entry maps
    # a test-name suffix to the positional args of string_as_value.
    def string_as_value(self,
                        source,
                        decoded,
                        *args):
        # optional args: [0] expected defects, [1] expected folded source
        l = len(args)
        defects = args[0] if l>0 else []
        header = 'Subject:' + (' ' if source else '')
        folded = header + (args[1] if l>1 else source) + '\n'
        h = self.make_header('Subject', source)
        self.assertEqual(h, decoded)
        self.assertDefectsEqual(h.defects, defects)
        self.assertEqual(h.fold(policy=policy.default), folded)

    string_params = {
        'rfc2047_simple_quopri': (
            '=?utf-8?q?this_is_a_test?=',
            'this is a test',
            [],
            'this is a test'),
        'rfc2047_gb2312_base64': (
            '=?gb2312?b?1eLKx9bQzsSy4srUo6E=?=',
            '\u8fd9\u662f\u4e2d\u6587\u6d4b\u8bd5\uff01',
            [],
            '=?utf-8?b?6L+Z5piv5Lit5paH5rWL6K+V77yB?='),
        'rfc2047_simple_nonascii_quopri': (
            '=?utf-8?q?=C3=89ric?=',
            'Éric'),
        'rfc2047_quopri_with_regular_text': (
            'The =?utf-8?q?=C3=89ric=2C?= Himself',
            'The Éric, Himself'),
    }
@parameterize
class TestDateHeader(TestHeaderBase):
    """Date and Resent-Date header parsing, datetime round-tripping, and
    defect reporting."""

    # canonical fixture: one RFC 5322 date string and its datetime equivalent
    datestring = 'Sun, 23 Sep 2001 20:10:55 -0700'
    utcoffset = datetime.timedelta(hours=-7)
    tz = datetime.timezone(utcoffset)
    dt = datetime.datetime(2001, 9, 23, 20, 10, 55, tzinfo=tz)

    def test_parse_date(self):
        h = self.make_header('date', self.datestring)
        self.assertEqual(h, self.datestring)
        self.assertEqual(h.datetime, self.dt)
        self.assertEqual(h.datetime.utcoffset(), self.utcoffset)
        self.assertEqual(h.defects, ())

    def test_set_from_datetime(self):
        # constructing from a datetime serializes back to the RFC string
        h = self.make_header('date', self.dt)
        self.assertEqual(h, self.datestring)
        self.assertEqual(h.datetime, self.dt)
        self.assertEqual(h.defects, ())

    def test_date_header_properties(self):
        h = self.make_header('date', self.datestring)
        self.assertIsInstance(h, headerregistry.UniqueDateHeader)
        self.assertEqual(h.max_count, 1)
        self.assertEqual(h.defects, ())

    def test_resent_date_header_properties(self):
        # resent-date may appear multiple times, unlike date
        h = self.make_header('resent-date', self.datestring)
        self.assertIsInstance(h, headerregistry.DateHeader)
        self.assertEqual(h.max_count, None)
        self.assertEqual(h.defects, ())

    def test_no_value_is_defect(self):
        h = self.make_header('date', '')
        self.assertEqual(len(h.defects), 1)
        self.assertIsInstance(h.defects[0], errors.HeaderMissingRequiredValue)

    def test_datetime_read_only(self):
        h = self.make_header('date', self.datestring)
        with self.assertRaises(AttributeError):
            h.datetime = 'foo'

    def test_set_date_header_from_datetime(self):
        m = Message(policy=policy.default)
        m['Date'] = self.dt
        self.assertEqual(m['Date'], self.datestring)
        self.assertEqual(m['Date'].datetime, self.dt)
@parameterize
class TestContentTypeHeader(TestHeaderBase):
    """Parameterized tests for Content-Type header parsing and folding."""

    def content_type_as_value(self,
                              source,
                              content_type,
                              maintype,
                              subtype,
                              *args):
        """Parse *source* as a Content-Type header and check its attributes.

        Optional positional *args* are, in order: the expected params
        dict, the expected defect list, the expected decoded value
        (DITTO means "same as source"), and the expected folded form.
        """
        l = len(args)
        parmdict = args[0] if l>0 else {}
        defects = args[1] if l>1 else []
        decoded = args[2] if l>2 and args[2] is not DITTO else source
        # '+' binds tighter than the conditional, so this is
        # 'Content-Type: ' when source is non-empty, else ''.
        header = 'Content-Type:' + ' ' if source else ''
        folded = args[3] if l>3 else header + source + '\n'
        h = self.make_header('Content-Type', source)
        self.assertEqual(h.content_type, content_type)
        self.assertEqual(h.maintype, maintype)
        self.assertEqual(h.subtype, subtype)
        self.assertEqual(h.params, parmdict)
        self.assertDefectsEqual(h.defects, defects)
        self.assertEqual(h, decoded)
        self.assertEqual(h.fold(policy=policy.default), folded)

    # Test vectors: each value is (source, content_type, maintype,
    # subtype[, params[, defects[, decoded[, folded]]]]).
    content_type_params = {
        # Examples from RFC 2045.
        'RFC_2045_1': (
            'text/plain; charset=us-ascii (Plain text)',
            'text/plain',
            'text',
            'plain',
            {'charset': 'us-ascii'},
            [],
            'text/plain; charset="us-ascii"'),
        'RFC_2045_2': (
            'text/plain; charset=us-ascii',
            'text/plain',
            'text',
            'plain',
            {'charset': 'us-ascii'},
            [],
            'text/plain; charset="us-ascii"'),
        'RFC_2045_3': (
            'text/plain; charset="us-ascii"',
            'text/plain',
            'text',
            'plain',
            {'charset': 'us-ascii'}),
        # RFC 2045 5.2 says syntactically invalid values are to be treated as
        # text/plain.
        'no_subtype_in_content_type': (
            'text/',
            'text/plain',
            'text',
            'plain',
            {},
            [errors.InvalidHeaderDefect]),
        'no_slash_in_content_type': (
            'foo',
            'text/plain',
            'text',
            'plain',
            {},
            [errors.InvalidHeaderDefect]),
        'junk_text_in_content_type': (
            '<crazy "stuff">',
            'text/plain',
            'text',
            'plain',
            {},
            [errors.InvalidHeaderDefect]),
        'too_many_slashes_in_content_type': (
            'image/jpeg/foo',
            'text/plain',
            'text',
            'plain',
            {},
            [errors.InvalidHeaderDefect]),
        # But unknown names are OK.  We could make non-IANA names a defect, but
        # by not doing so we make ourselves future proof.  The fact that they
        # are unknown will be detectable by the fact that they don't appear in
        # the mime_registry...and the application is free to extend that list
        # to handle them even if the core library doesn't.
        'unknown_content_type': (
            'bad/names',
            'bad/names',
            'bad',
            'names'),
        # The content type is case insensitive, and CFWS is ignored.
        'mixed_case_content_type': (
            'ImAge/JPeg',
            'image/jpeg',
            'image',
            'jpeg'),
        'spaces_in_content_type': (
            ' text / plain ',
            'text/plain',
            'text',
            'plain'),
        'cfws_in_content_type': (
            '(foo) text (bar)/(baz)plain(stuff)',
            'text/plain',
            'text',
            'plain'),
        # test some parameters (more tests could be added for parameters
        # associated with other content types, but since parameter parsing is
        # generic they would be redundant for the current implementation).
        'charset_param': (
            'text/plain; charset="utf-8"',
            'text/plain',
            'text',
            'plain',
            {'charset': 'utf-8'}),
        'capitalized_charset': (
            'text/plain; charset="US-ASCII"',
            'text/plain',
            'text',
            'plain',
            {'charset': 'US-ASCII'}),
        'unknown_charset': (
            'text/plain; charset="fOo"',
            'text/plain',
            'text',
            'plain',
            {'charset': 'fOo'}),
        'capitalized_charset_param_name_and_comment': (
            'text/plain; (interjection) Charset="utf-8"',
            'text/plain',
            'text',
            'plain',
            {'charset': 'utf-8'},
            [],
            # Should the parameter name be lowercased here?
            'text/plain; Charset="utf-8"'),
        # Since this is pretty much the ur-mimeheader, we'll put all the tests
        # that exercise the parameter parsing and formatting here.
        #
        # XXX: question: is minimal quoting preferred?
        'unquoted_param_value': (
            'text/plain; title=foo',
            'text/plain',
            'text',
            'plain',
            {'title': 'foo'},
            [],
            'text/plain; title="foo"'),
        'param_value_with_tspecials': (
            'text/plain; title="(bar)foo blue"',
            'text/plain',
            'text',
            'plain',
            {'title': '(bar)foo blue'}),
        'param_with_extra_quoted_whitespace': (
            'text/plain; title=" a loong way \t home "',
            'text/plain',
            'text',
            'plain',
            {'title': ' a loong way \t home '}),
        'bad_params': (
            'blarg; baz; boo',
            'text/plain',
            'text',
            'plain',
            {'baz': '', 'boo': ''},
            [errors.InvalidHeaderDefect]*3),
        'spaces_around_param_equals': (
            'Multipart/mixed; boundary = "CPIMSSMTPC06p5f3tG"',
            'multipart/mixed',
            'multipart',
            'mixed',
            {'boundary': 'CPIMSSMTPC06p5f3tG'},
            [],
            'Multipart/mixed; boundary="CPIMSSMTPC06p5f3tG"'),
        'spaces_around_semis': (
            ('image/jpeg; name="wibble.JPG" ; x-mac-type="4A504547" ; '
             'x-mac-creator="474B4F4E"'),
            'image/jpeg',
            'image',
            'jpeg',
            {'name': 'wibble.JPG',
             'x-mac-type': '4A504547',
             'x-mac-creator': '474B4F4E'},
            [],
            ('image/jpeg; name="wibble.JPG"; x-mac-type="4A504547"; '
             'x-mac-creator="474B4F4E"'),
            # XXX: it could be that we will eventually prefer to fold starting
            # from the decoded value, in which case these spaces and similar
            # spaces in other tests will be wrong.
            ('Content-Type: image/jpeg; name="wibble.JPG" ; '
             'x-mac-type="4A504547" ;\n'
             ' x-mac-creator="474B4F4E"\n'),
            ),
        'semis_inside_quotes': (
            'image/jpeg; name="Jim&&Jill"',
            'image/jpeg',
            'image',
            'jpeg',
            {'name': 'Jim&&Jill'}),
        'single_quotes_inside_quotes': (
            'image/jpeg; name="Jim \'Bob\' Jill"',
            'image/jpeg',
            'image',
            'jpeg',
            {'name': "Jim 'Bob' Jill"}),
        'double_quotes_inside_quotes': (
            r'image/jpeg; name="Jim \"Bob\" Jill"',
            'image/jpeg',
            'image',
            'jpeg',
            {'name': 'Jim "Bob" Jill'},
            [],
            r'image/jpeg; name="Jim \"Bob\" Jill"'),
        # XXX: This test works except for the refolding of the header.  I'll
        # deal with that bug when I deal with the other folding bugs.
        #'non_ascii_in_params': (
        #    ('foo\xa7/bar; b\xa7r=two; '
        #     'baz=thr\xa7e'.encode('latin-1').decode('us-ascii',
        #                                             'surrogateescape')),
        #    'foo\uFFFD/bar',
        #    'foo\uFFFD',
        #    'bar',
        #    {'b\uFFFDr': 'two', 'baz': 'thr\uFFFDe'},
        #    [errors.UndecodableBytesDefect]*3,
        #    'foo�/bar; b�r="two"; baz="thr�e"',
        #    ),
        # RFC 2231 parameter tests.
        'rfc2231_segmented_normal_values': (
            'image/jpeg; name*0="abc"; name*1=".html"',
            'image/jpeg',
            'image',
            'jpeg',
            {'name': "abc.html"},
            [],
            'image/jpeg; name="abc.html"'),
        'quotes_inside_rfc2231_value': (
            r'image/jpeg; bar*0="baz\"foobar"; bar*1="\"baz"',
            'image/jpeg',
            'image',
            'jpeg',
            {'bar': 'baz"foobar"baz'},
            [],
            r'image/jpeg; bar="baz\"foobar\"baz"'),
        # XXX: This test works except for the refolding of the header.  I'll
        # deal with that bug when I deal with the other folding bugs.
        #'non_ascii_rfc2231_value': (
        #    ('text/plain; charset=us-ascii; '
        #     "title*=us-ascii'en'This%20is%20"
        #     'not%20f\xa7n').encode('latin-1').decode('us-ascii',
        #                                              'surrogateescape'),
        #    'text/plain',
        #    'text',
        #    'plain',
        #    {'charset': 'us-ascii', 'title': 'This is not f\uFFFDn'},
        #    [errors.UndecodableBytesDefect],
        #    'text/plain; charset="us-ascii"; title="This is not f�n"'),
        'rfc2231_encoded_charset': (
            'text/plain; charset*=ansi-x3.4-1968\'\'us-ascii',
            'text/plain',
            'text',
            'plain',
            {'charset': 'us-ascii'},
            [],
            'text/plain; charset="us-ascii"'),
        # This follows the RFC: no double quotes around encoded values.
        'rfc2231_encoded_no_double_quotes': (
            ("text/plain;"
             "\tname*0*=''This%20is%20;"
             "\tname*1*=%2A%2A%2Afun%2A%2A%2A%20;"
             '\tname*2="is it not.pdf"'),
            'text/plain',
            'text',
            'plain',
            {'name': 'This is ***fun*** is it not.pdf'},
            [],
            'text/plain; name="This is ***fun*** is it not.pdf"',
            ('Content-Type: text/plain;\tname*0*=\'\'This%20is%20;\n'
             '\tname*1*=%2A%2A%2Afun%2A%2A%2A%20;\tname*2="is it not.pdf"\n'),
            ),
        # Make sure we also handle it if there are spurious double quotes.
        'rfc2231_encoded_with_double_quotes': (
            ("text/plain;"
             '\tname*0*="us-ascii\'\'This%20is%20even%20more%20";'
             '\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";'
             '\tname*2="is it not.pdf"'),
            'text/plain',
            'text',
            'plain',
            {'name': 'This is even more ***fun*** is it not.pdf'},
            [errors.InvalidHeaderDefect]*2,
            'text/plain; name="This is even more ***fun*** is it not.pdf"',
            ('Content-Type: text/plain;\t'
             'name*0*="us-ascii\'\'This%20is%20even%20more%20";\n'
             '\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it not.pdf"\n'),
            ),
        'rfc2231_single_quote_inside_double_quotes': (
            ('text/plain; charset=us-ascii;'
             '\ttitle*0*="us-ascii\'en\'This%20is%20really%20";'
             '\ttitle*1*="%2A%2A%2Afun%2A%2A%2A%20";'
             '\ttitle*2="isn\'t it!"'),
            'text/plain',
            'text',
            'plain',
            {'charset': 'us-ascii', 'title': "This is really ***fun*** isn't it!"},
            [errors.InvalidHeaderDefect]*2,
            ('text/plain; charset="us-ascii"; '
             'title="This is really ***fun*** isn\'t it!"'),
            ('Content-Type: text/plain; charset=us-ascii;\n'
             '\ttitle*0*="us-ascii\'en\'This%20is%20really%20";\n'
             '\ttitle*1*="%2A%2A%2Afun%2A%2A%2A%20";\ttitle*2="isn\'t it!"\n'),
            ),
        'rfc2231_single_quote_in_value_with_charset_and_lang': (
            ('application/x-foo;'
             "\tname*0*=\"us-ascii'en-us'Frank's\"; name*1*=\" Document\""),
            'application/x-foo',
            'application',
            'x-foo',
            {'name': "Frank's Document"},
            [errors.InvalidHeaderDefect]*2,
            'application/x-foo; name="Frank\'s Document"',
            ('Content-Type: application/x-foo;\t'
             'name*0*="us-ascii\'en-us\'Frank\'s";\n'
             ' name*1*=" Document"\n'),
            ),
        'rfc2231_single_quote_in_non_encoded_value': (
            ('application/x-foo;'
             "\tname*0=\"us-ascii'en-us'Frank's\"; name*1=\" Document\""),
            'application/x-foo',
            'application',
            'x-foo',
            {'name': "us-ascii'en-us'Frank's Document"},
            [],
            'application/x-foo; name="us-ascii\'en-us\'Frank\'s Document"',
            ('Content-Type: application/x-foo;\t'
             'name*0="us-ascii\'en-us\'Frank\'s";\n'
             ' name*1=" Document"\n'),
            ),
        'rfc2231_no_language_or_charset': (
            'text/plain; NAME*0*=english_is_the_default.html',
            'text/plain',
            'text',
            'plain',
            {'name': 'english_is_the_default.html'},
            [errors.InvalidHeaderDefect],
            'text/plain; NAME="english_is_the_default.html"'),
        'rfc2231_encoded_no_charset': (
            ("text/plain;"
             '\tname*0*="\'\'This%20is%20even%20more%20";'
             '\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";'
             '\tname*2="is it.pdf"'),
            'text/plain',
            'text',
            'plain',
            {'name': 'This is even more ***fun*** is it.pdf'},
            [errors.InvalidHeaderDefect]*2,
            'text/plain; name="This is even more ***fun*** is it.pdf"',
            ('Content-Type: text/plain;\t'
             'name*0*="\'\'This%20is%20even%20more%20";\n'
             '\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it.pdf"\n'),
            ),
        # XXX: see below...the first name line here should be *0 not *0*.
        'rfc2231_partly_encoded': (
            ("text/plain;"
             '\tname*0*="\'\'This%20is%20even%20more%20";'
             '\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";'
             '\tname*2="is it.pdf"'),
            'text/plain',
            'text',
            'plain',
            {'name': 'This is even more ***fun*** is it.pdf'},
            [errors.InvalidHeaderDefect]*2,
            'text/plain; name="This is even more ***fun*** is it.pdf"',
            ('Content-Type: text/plain;\t'
             'name*0*="\'\'This%20is%20even%20more%20";\n'
             '\tname*1*="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it.pdf"\n'),
            ),
        'rfc2231_partly_encoded_2': (
            ("text/plain;"
             '\tname*0*="\'\'This%20is%20even%20more%20";'
             '\tname*1="%2A%2A%2Afun%2A%2A%2A%20";'
             '\tname*2="is it.pdf"'),
            'text/plain',
            'text',
            'plain',
            {'name': 'This is even more %2A%2A%2Afun%2A%2A%2A%20is it.pdf'},
            [errors.InvalidHeaderDefect],
            'text/plain; name="This is even more %2A%2A%2Afun%2A%2A%2A%20is it.pdf"',
            ('Content-Type: text/plain;\t'
             'name*0*="\'\'This%20is%20even%20more%20";\n'
             '\tname*1="%2A%2A%2Afun%2A%2A%2A%20";\tname*2="is it.pdf"\n'),
            ),
        'rfc2231_unknown_charset_treated_as_ascii': (
            "text/plain; name*0*=bogus'xx'ascii_is_the_default",
            'text/plain',
            'text',
            'plain',
            {'name': 'ascii_is_the_default'},
            [],
            'text/plain; name="ascii_is_the_default"'),
        'rfc2231_bad_character_in_charset_parameter_value': (
            "text/plain; charset*=ascii''utf-8%F1%F2%F3",
            'text/plain',
            'text',
            'plain',
            {'charset': 'utf-8\uFFFD\uFFFD\uFFFD'},
            [errors.UndecodableBytesDefect],
            'text/plain; charset="utf-8\uFFFD\uFFFD\uFFFD"'),
        'rfc2231_utf_8_in_supposedly_ascii_charset_parameter_value': (
            "text/plain; charset*=ascii''utf-8%E2%80%9D",
            'text/plain',
            'text',
            'plain',
            {'charset': 'utf-8”'},
            [errors.UndecodableBytesDefect],
            'text/plain; charset="utf-8”"',
            ),
        # XXX: if the above were *re*folded, it would get tagged as utf-8
        # instead of ascii in the param, since it now contains non-ASCII.
        'rfc2231_encoded_then_unencoded_segments': (
            ('application/x-foo;'
             '\tname*0*="us-ascii\'en-us\'My";'
             '\tname*1=" Document";'
             '\tname*2=" For You"'),
            'application/x-foo',
            'application',
            'x-foo',
            {'name': 'My Document For You'},
            [errors.InvalidHeaderDefect],
            'application/x-foo; name="My Document For You"',
            ('Content-Type: application/x-foo;\t'
             'name*0*="us-ascii\'en-us\'My";\n'
             '\tname*1=" Document";\tname*2=" For You"\n'),
            ),
        # My reading of the RFC is that this is an invalid header.  The RFC
        # says that if charset and language information is given, the first
        # segment *must* be encoded.
        'rfc2231_unencoded_then_encoded_segments': (
            ('application/x-foo;'
             '\tname*0=us-ascii\'en-us\'My;'
             '\tname*1*=" Document";'
             '\tname*2*=" For You"'),
            'application/x-foo',
            'application',
            'x-foo',
            {'name': 'My Document For You'},
            [errors.InvalidHeaderDefect]*3,
            'application/x-foo; name="My Document For You"',
            ("Content-Type: application/x-foo;\tname*0=us-ascii'en-us'My;\t"
             # XXX: the newline is in the wrong place, come back and fix
             # this when the rest of tests pass.
             'name*1*=" Document"\n;'
             '\tname*2*=" For You"\n'),
            ),
        # XXX: I would say this one should default to ascii/en for the
        # "encoded" segment, since the first segment is not encoded and is
        # in double quotes, making the value a valid non-encoded string.  The
        # old parser decodes this just like the previous case, which may be the
        # better Postel rule, but could equally result in borking headers that
        # intentionally have quoted quotes in them.  We could get this 98%
        # right if we treat it as a quoted string *unless* it matches the
        # charset'lang'value pattern exactly *and* there is at least one
        # encoded segment.  Implementing that algorithm will require some
        # refactoring, so I haven't done it (yet).
        'rfc2231_qouted_unencoded_then_encoded_segments': (
            ('application/x-foo;'
             '\tname*0="us-ascii\'en-us\'My";'
             '\tname*1*=" Document";'
             '\tname*2*=" For You"'),
            'application/x-foo',
            'application',
            'x-foo',
            {'name': "us-ascii'en-us'My Document For You"},
            [errors.InvalidHeaderDefect]*2,
            'application/x-foo; name="us-ascii\'en-us\'My Document For You"',
            ('Content-Type: application/x-foo;\t'
             'name*0="us-ascii\'en-us\'My";\n'
             '\tname*1*=" Document";\tname*2*=" For You"\n'),
            ),
        }
@parameterize
class TestContentTransferEncoding(TestHeaderBase):
    """Parameterized tests for Content-Transfer-Encoding parsing."""

    def cte_as_value(self, source, cte, *args):
        """Parse *source* and check the resulting cte attribute.

        Optional trailing *args*, in order: expected defects, expected
        decoded value (DITTO means "same as source"), expected folded
        form.
        """
        defects = args[0] if len(args) > 0 else []
        if len(args) > 1 and args[1] is not DITTO:
            decoded = args[1]
        else:
            decoded = source
        if len(args) > 2:
            folded = args[2]
        else:
            # No space after the colon when the value is empty.
            prefix = 'Content-Transfer-Encoding: ' if source else ''
            folded = prefix + source + '\n'
        header = self.make_header('Content-Transfer-Encoding', source)
        self.assertEqual(header.cte, cte)
        self.assertDefectsEqual(header.defects, defects)
        self.assertEqual(header, decoded)
        self.assertEqual(header.fold(policy=policy.default), folded)

    cte_params = {
        'RFC_2183_1': (
            'base64',
            'base64',),
        'no_value': (
            '',
            '7bit',
            [errors.HeaderMissingRequiredValue],
            '',
            'Content-Transfer-Encoding:\n',
            ),
        'junk_after_cte': (
            '7bit and a bunch more',
            '7bit',
            [errors.InvalidHeaderDefect]),
        }
@parameterize
class TestContentDisposition(TestHeaderBase):
    """Parameterized tests for Content-Disposition parsing and folding."""

    def content_disp_as_value(self, source, content_disposition, *args):
        """Parse *source* and check disposition, params, defects, folding.

        Optional trailing *args*, in order: expected params dict,
        expected defects, expected decoded value (DITTO means "same as
        source"), expected folded form.
        """
        params = args[0] if len(args) > 0 else {}
        defects = args[1] if len(args) > 1 else []
        if len(args) > 2 and args[2] is not DITTO:
            decoded = args[2]
        else:
            decoded = source
        if len(args) > 3:
            folded = args[3]
        else:
            # No space after the colon when the value is empty.
            prefix = 'Content-Disposition: ' if source else ''
            folded = prefix + source + '\n'
        header = self.make_header('Content-Disposition', source)
        self.assertEqual(header.content_disposition, content_disposition)
        self.assertEqual(header.params, params)
        self.assertDefectsEqual(header.defects, defects)
        self.assertEqual(header, decoded)
        self.assertEqual(header.fold(policy=policy.default), folded)

    content_disp_params = {
        # Examples from RFC 2183.
        'RFC_2183_1': (
            'inline',
            'inline',),
        'RFC_2183_2': (
            ('attachment; filename=genome.jpeg;'
             ' modification-date="Wed, 12 Feb 1997 16:29:51 -0500";'),
            'attachment',
            {'filename': 'genome.jpeg',
             'modification-date': 'Wed, 12 Feb 1997 16:29:51 -0500'},
            [],
            ('attachment; filename="genome.jpeg"; '
             'modification-date="Wed, 12 Feb 1997 16:29:51 -0500"'),
            ('Content-Disposition: attachment; filename=genome.jpeg;\n'
             ' modification-date="Wed, 12 Feb 1997 16:29:51 -0500";\n'),
            ),
        'no_value': (
            '',
            None,
            {},
            [errors.HeaderMissingRequiredValue],
            '',
            'Content-Disposition:\n'),
        'invalid_value': (
            'ab./k',
            'ab.',
            {},
            [errors.InvalidHeaderDefect]),
        'invalid_value_with_params': (
            'ab./k; filename="foo"',
            'ab.',
            {'filename': 'foo'},
            [errors.InvalidHeaderDefect]),
        }
@parameterize
class TestMIMEVersionHeader(TestHeaderBase):
    """Parameterized tests for MIME-Version header parsing."""

    def version_string_as_MIME_Version(self,
                                       source,
                                       decoded,
                                       version,
                                       major,
                                       minor,
                                       defects):
        """Parse *source* and verify version, major, minor, and folding."""
        header = self.make_header('MIME-Version', source)
        self.assertEqual(header, decoded)
        self.assertEqual(header.version, version)
        self.assertEqual(header.major, major)
        self.assertEqual(header.minor, minor)
        self.assertDefectsEqual(header.defects, defects)
        # Non-empty values fold with a single space after the colon;
        # an empty value folds to just the bare header name.
        if source:
            expected_fold = 'MIME-Version: ' + source + '\n'
        else:
            expected_fold = 'MIME-Version:\n'
        self.assertEqual(header.fold(policy=policy.default), expected_fold)

    version_string_params = {
        # Examples from the RFC.
        'RFC_2045_1': (
            '1.0',
            '1.0',
            '1.0',
            1,
            0,
            []),
        'RFC_2045_2': (
            '1.0 (produced by MetaSend Vx.x)',
            '1.0 (produced by MetaSend Vx.x)',
            '1.0',
            1,
            0,
            []),
        'RFC_2045_3': (
            '(produced by MetaSend Vx.x) 1.0',
            '(produced by MetaSend Vx.x) 1.0',
            '1.0',
            1,
            0,
            []),
        'RFC_2045_4': (
            '1.(produced by MetaSend Vx.x)0',
            '1.(produced by MetaSend Vx.x)0',
            '1.0',
            1,
            0,
            []),
        # Other valid values.
        '1_1': (
            '1.1',
            '1.1',
            '1.1',
            1,
            1,
            []),
        '2_1': (
            '2.1',
            '2.1',
            '2.1',
            2,
            1,
            []),
        'whitespace': (
            '1 .0',
            '1 .0',
            '1.0',
            1,
            0,
            []),
        'leading_trailing_whitespace_ignored': (
            '  1.0  ',
            '  1.0  ',
            '1.0',
            1,
            0,
            []),
        # Recoverable invalid values.  We can recover here only because we
        # already have a valid value by the time we encounter the garbage.
        # Anywhere else, and we don't know where the garbage ends.
        'non_comment_garbage_after': (
            '1.0 <abc>',
            '1.0 <abc>',
            '1.0',
            1,
            0,
            [errors.InvalidHeaderDefect]),
        # Unrecoverable invalid values.  We *could* apply more heuristics to
        # get something out of the first two, but doing so is not worth the
        # effort.
        'non_comment_garbage_before': (
            '<abc> 1.0',
            '<abc> 1.0',
            None,
            None,
            None,
            [errors.InvalidHeaderDefect]),
        'non_comment_garbage_inside': (
            '1.<abc>0',
            '1.<abc>0',
            None,
            None,
            None,
            [errors.InvalidHeaderDefect]),
        'two_periods': (
            '1..0',
            '1..0',
            None,
            None,
            None,
            [errors.InvalidHeaderDefect]),
        '2_x': (
            '2.x',
            '2.x',
            None,  # This could be 2, but it seems safer to make it None.
            None,
            None,
            [errors.InvalidHeaderDefect]),
        'foo': (
            'foo',
            'foo',
            None,
            None,
            None,
            [errors.InvalidHeaderDefect]),
        'missing': (
            '',
            '',
            None,
            None,
            None,
            [errors.HeaderMissingRequiredValue]),
        }
@parameterize
class TestAddressHeader(TestHeaderBase):
    """Parameterized tests for address-valued headers (To, Sender, ...)."""

    # Each value is (source, defects, decoded, display_name, addr_spec,
    # username, domain, comment); the same vectors are run both as a bare
    # address (example_as_address) and wrapped in a group (example_as_group).
    example_params = {
        'empty':
            ('<>',
             [errors.InvalidHeaderDefect],
             '<>',
             '',
             '<>',
             '',
             '',
             None),
        'address_only':
            ('zippy@pinhead.com',
             [],
             'zippy@pinhead.com',
             '',
             'zippy@pinhead.com',
             'zippy',
             'pinhead.com',
             None),
        'name_and_address':
            ('Zaphrod Beblebrux <zippy@pinhead.com>',
             [],
             'Zaphrod Beblebrux <zippy@pinhead.com>',
             'Zaphrod Beblebrux',
             'zippy@pinhead.com',
             'zippy',
             'pinhead.com',
             None),
        'quoted_local_part':
            ('Zaphrod Beblebrux <"foo bar"@pinhead.com>',
             [],
             'Zaphrod Beblebrux <"foo bar"@pinhead.com>',
             'Zaphrod Beblebrux',
             '"foo bar"@pinhead.com',
             'foo bar',
             'pinhead.com',
             None),
        'quoted_parens_in_name':
            (r'"A \(Special\) Person" <person@dom.ain>',
             [],
             '"A (Special) Person" <person@dom.ain>',
             'A (Special) Person',
             'person@dom.ain',
             'person',
             'dom.ain',
             None),
        'quoted_backslashes_in_name':
            (r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>',
             [],
             r'"Arthur \\Backslash\\ Foobar" <person@dom.ain>',
             r'Arthur \Backslash\ Foobar',
             'person@dom.ain',
             'person',
             'dom.ain',
             None),
        'name_with_dot':
            ('John X. Doe <jxd@example.com>',
             [errors.ObsoleteHeaderDefect],
             '"John X. Doe" <jxd@example.com>',
             'John X. Doe',
             'jxd@example.com',
             'jxd',
             'example.com',
             None),
        'quoted_strings_in_local_part':
            ('""example" example"@example.com',
             [errors.InvalidHeaderDefect]*3,
             '"example example"@example.com',
             '',
             '"example example"@example.com',
             'example example',
             'example.com',
             None),
        'escaped_quoted_strings_in_local_part':
            (r'"\"example\" example"@example.com',
             [],
             r'"\"example\" example"@example.com',
             '',
             r'"\"example\" example"@example.com',
             r'"example" example',
             'example.com',
             None),
        'escaped_escapes_in_local_part':
            (r'"\\"example\\" example"@example.com',
             [errors.InvalidHeaderDefect]*5,
             r'"\\example\\\\ example"@example.com',
             '',
             r'"\\example\\\\ example"@example.com',
             r'\example\\ example',
             'example.com',
             None),
        'spaces_in_unquoted_local_part_collapsed':
            ('merwok  wok  @example.com',
             [errors.InvalidHeaderDefect]*2,
             '"merwok wok"@example.com',
             '',
             '"merwok wok"@example.com',
             'merwok wok',
             'example.com',
             None),
        'spaces_around_dots_in_local_part_removed':
            ('merwok. wok .  wok@example.com',
             [errors.ObsoleteHeaderDefect],
             'merwok.wok.wok@example.com',
             '',
             'merwok.wok.wok@example.com',
             'merwok.wok.wok',
             'example.com',
             None),
        'rfc2047_atom_is_decoded':
            ('=?utf-8?q?=C3=89ric?= <foo@example.com>',
             [],
             'Éric <foo@example.com>',
             'Éric',
             'foo@example.com',
             'foo',
             'example.com',
             None),
        'rfc2047_atom_in_phrase_is_decoded':
            ('The =?utf-8?q?=C3=89ric=2C?= Himself <foo@example.com>',
             [],
             '"The Éric, Himself" <foo@example.com>',
             'The Éric, Himself',
             'foo@example.com',
             'foo',
             'example.com',
             None),
        'rfc2047_atom_in_quoted_string_is_decoded':
            ('"=?utf-8?q?=C3=89ric?=" <foo@example.com>',
             [errors.InvalidHeaderDefect],
             'Éric <foo@example.com>',
             'Éric',
             'foo@example.com',
             'foo',
             'example.com',
             None),
        }
    # XXX: Need many more examples, and in particular some with names in
    # trailing comments, which aren't currently handled.  comments in
    # general are not handled yet.
    def example_as_address(self, source, defects, decoded, display_name,
                           addr_spec, username, domain, comment):
        """Parse *source* as a single ('sender') address and check all
        the Address attributes against the expected values."""
        h = self.make_header('sender', source)
        self.assertEqual(h, decoded)
        self.assertDefectsEqual(h.defects, defects)
        a = h.address
        self.assertEqual(str(a), decoded)
        # A single address still shows up as one single-address group.
        self.assertEqual(len(h.groups), 1)
        self.assertEqual([a], list(h.groups[0].addresses))
        self.assertEqual([a], list(h.addresses))
        self.assertEqual(a.display_name, display_name)
        self.assertEqual(a.addr_spec, addr_spec)
        self.assertEqual(a.username, username)
        self.assertEqual(a.domain, domain)
        # XXX: we have no comment support yet.
        #self.assertEqual(a.comment, comment)
    def example_as_group(self, source, defects, decoded, display_name,
                         addr_spec, username, domain, comment):
        """Run the same vector with the address wrapped in a 'foo:' group
        on a 'to' header, and check the group exposes the same address."""
        source = 'foo: {};'.format(source)
        gdecoded = 'foo: {};'.format(decoded) if decoded else 'foo:;'
        h = self.make_header('to', source)
        self.assertEqual(h, gdecoded)
        self.assertDefectsEqual(h.defects, defects)
        self.assertEqual(h.groups[0].addresses, h.addresses)
        self.assertEqual(len(h.groups), 1)
        self.assertEqual(len(h.addresses), 1)
        a = h.addresses[0]
        self.assertEqual(str(a), decoded)
        self.assertEqual(a.display_name, display_name)
        self.assertEqual(a.addr_spec, addr_spec)
        self.assertEqual(a.username, username)
        self.assertEqual(a.domain, domain)
    def test_simple_address_list(self):
        # Three comma-separated addresses: each becomes its own
        # single-address group, in order.
        value = ('Fred <dinsdale@python.org>, foo@example.com, '
                    '"Harry W. Hastings" <hasty@example.com>')
        h = self.make_header('to', value)
        self.assertEqual(h, value)
        self.assertEqual(len(h.groups), 3)
        self.assertEqual(len(h.addresses), 3)
        for i in range(3):
            self.assertEqual(h.groups[i].addresses[0], h.addresses[i])
        self.assertEqual(str(h.addresses[0]), 'Fred <dinsdale@python.org>')
        self.assertEqual(str(h.addresses[1]), 'foo@example.com')
        self.assertEqual(str(h.addresses[2]),
                            '"Harry W. Hastings" <hasty@example.com>')
        self.assertEqual(h.addresses[2].display_name,
                            'Harry W. Hastings')
    def test_complex_address_list(self):
        # Build one 'to' header mixing two named groups and all the
        # example_params vectors; examples[4:6] land inside the
        # 'A "list"' group, the rest appear as bare addresses.
        examples = list(self.example_params.values())
        source = ('dummy list:;, another: (empty);,' +
                     ', '.join([x[0] for x in examples[:4]]) + ', ' +
                     r'"A \"list\"": ' +
                     ', '.join([x[0] for x in examples[4:6]]) + ';,' +
                     ', '.join([x[0] for x in examples[6:]])
                 )
        # XXX: the fact that (empty) disappears here is a potential API design
        # bug.  We don't currently have a way to preserve comments.
        expected = ('dummy list:;, another:;, ' +
                     ', '.join([x[2] for x in examples[:4]]) + ', ' +
                     r'"A \"list\"": ' +
                     ', '.join([x[2] for x in examples[4:6]]) + ';, ' +
                     ', '.join([x[2] for x in examples[6:]])
                 )
        h = self.make_header('to', source)
        self.assertEqual(h.split(','), expected.split(','))
        self.assertEqual(h, expected)
        self.assertEqual(len(h.groups), 7 + len(examples) - 6)
        self.assertEqual(h.groups[0].display_name, 'dummy list')
        self.assertEqual(h.groups[1].display_name, 'another')
        self.assertEqual(h.groups[6].display_name, 'A "list"')
        self.assertEqual(len(h.addresses), len(examples))
        for i in range(4):
            self.assertIsNone(h.groups[i+2].display_name)
            self.assertEqual(str(h.groups[i+2].addresses[0]), examples[i][2])
        for i in range(7, 7 + len(examples) - 6):
            self.assertIsNone(h.groups[i].display_name)
            self.assertEqual(str(h.groups[i].addresses[0]), examples[i-1][2])
        for i in range(len(examples)):
            self.assertEqual(str(h.addresses[i]), examples[i][2])
            self.assertEqual(h.addresses[i].addr_spec, examples[i][4])
    def test_address_read_only(self):
        # The parsed address attribute cannot be reassigned.
        h = self.make_header('sender', 'abc@xyz.com')
        with self.assertRaises(AttributeError):
            h.address = 'foo'
    def test_addresses_read_only(self):
        h = self.make_header('sender', 'abc@xyz.com')
        with self.assertRaises(AttributeError):
            h.addresses = 'foo'
    def test_groups_read_only(self):
        h = self.make_header('sender', 'abc@xyz.com')
        with self.assertRaises(AttributeError):
            h.groups = 'foo'
    def test_addresses_types(self):
        # addresses is a tuple of Address objects.
        source = 'me <who@example.com>'
        h = self.make_header('to', source)
        self.assertIsInstance(h.addresses, tuple)
        self.assertIsInstance(h.addresses[0], Address)
    def test_groups_types(self):
        # groups is a tuple of Group objects.
        source = 'me <who@example.com>'
        h = self.make_header('to', source)
        self.assertIsInstance(h.groups, tuple)
        self.assertIsInstance(h.groups[0], Group)
    def test_set_from_Address(self):
        # Headers can be built directly from Address objects.
        h = self.make_header('to', Address('me', 'foo', 'example.com'))
        self.assertEqual(h, 'me <foo@example.com>')
    def test_set_from_Address_list(self):
        h = self.make_header('to', [Address('me', 'foo', 'example.com'),
                                    Address('you', 'bar', 'example.com')])
        self.assertEqual(h, 'me <foo@example.com>, you <bar@example.com>')
    def test_set_from_Address_and_Group_list(self):
        # Mixed lists of Addresses and Groups are rendered in order.
        h = self.make_header('to', [Address('me', 'foo', 'example.com'),
                                    Group('bing', [Address('fiz', 'z', 'b.com'),
                                                   Address('zif', 'f', 'c.com')]),
                                    Address('you', 'bar', 'example.com')])
        self.assertEqual(h, 'me <foo@example.com>, bing: fiz <z@b.com>, '
                            'zif <f@c.com>;, you <bar@example.com>')
        self.assertEqual(h.fold(policy=policy.default.clone(max_line_length=40)),
                        'to: me <foo@example.com>,\n'
                        ' bing: fiz <z@b.com>, zif <f@c.com>;,\n'
                        ' you <bar@example.com>\n')
    def test_set_from_Group_list(self):
        h = self.make_header('to', [Group('bing', [Address('fiz', 'z', 'b.com'),
                                                   Address('zif', 'f', 'c.com')])])
        self.assertEqual(h, 'bing: fiz <z@b.com>, zif <f@c.com>;')
class TestAddressAndGroup(TestEmailBase):
    """Tests for the Address and Group API objects themselves."""

    def _test_attr_ro(self, obj, attr):
        # Helper: verify that *attr* on *obj* cannot be reassigned.
        with self.assertRaises(AttributeError):
            setattr(obj, attr, 'foo')

    def test_address_display_name_ro(self):
        self._test_attr_ro(Address('foo', 'bar', 'baz'), 'display_name')

    def test_address_username_ro(self):
        self._test_attr_ro(Address('foo', 'bar', 'baz'), 'username')

    def test_address_domain_ro(self):
        self._test_attr_ro(Address('foo', 'bar', 'baz'), 'domain')

    def test_group_display_name_ro(self):
        self._test_attr_ro(Group('foo'), 'display_name')

    def test_group_addresses_ro(self):
        self._test_attr_ro(Group('foo'), 'addresses')

    def test_address_from_username_domain(self):
        addr = Address('foo', 'bar', 'baz')
        self.assertEqual(addr.display_name, 'foo')
        self.assertEqual(addr.username, 'bar')
        self.assertEqual(addr.domain, 'baz')
        self.assertEqual(addr.addr_spec, 'bar@baz')
        self.assertEqual(str(addr), 'foo <bar@baz>')

    def test_address_from_addr_spec(self):
        # addr_spec is split into username and domain on construction.
        addr = Address('foo', addr_spec='bar@baz')
        self.assertEqual(addr.display_name, 'foo')
        self.assertEqual(addr.username, 'bar')
        self.assertEqual(addr.domain, 'baz')
        self.assertEqual(addr.addr_spec, 'bar@baz')
        self.assertEqual(str(addr), 'foo <bar@baz>')

    def test_address_with_no_display_name(self):
        addr = Address(addr_spec='bar@baz')
        self.assertEqual(addr.display_name, '')
        self.assertEqual(addr.username, 'bar')
        self.assertEqual(addr.domain, 'baz')
        self.assertEqual(addr.addr_spec, 'bar@baz')
        self.assertEqual(str(addr), 'bar@baz')

    def test_null_address(self):
        # A completely empty Address renders as the null address '<>'.
        addr = Address()
        self.assertEqual(addr.display_name, '')
        self.assertEqual(addr.username, '')
        self.assertEqual(addr.domain, '')
        self.assertEqual(addr.addr_spec, '<>')
        self.assertEqual(str(addr), '<>')

    def test_domain_only(self):
        # This isn't really a valid address.
        addr = Address(domain='buzz')
        self.assertEqual(addr.display_name, '')
        self.assertEqual(addr.username, '')
        self.assertEqual(addr.domain, 'buzz')
        self.assertEqual(addr.addr_spec, '@buzz')
        self.assertEqual(str(addr), '@buzz')

    def test_username_only(self):
        # This isn't really a valid address.
        addr = Address(username='buzz')
        self.assertEqual(addr.display_name, '')
        self.assertEqual(addr.username, 'buzz')
        self.assertEqual(addr.domain, '')
        self.assertEqual(addr.addr_spec, 'buzz')
        self.assertEqual(str(addr), 'buzz')

    def test_display_name_only(self):
        addr = Address('buzz')
        self.assertEqual(addr.display_name, 'buzz')
        self.assertEqual(addr.username, '')
        self.assertEqual(addr.domain, '')
        self.assertEqual(addr.addr_spec, '<>')
        self.assertEqual(str(addr), 'buzz <>')

    def test_quoting(self):
        # Ideally we'd check every special individually, but I'm not up for
        # writing that many tests.
        addr = Address('Sara J.', 'bad name', 'example.com')
        self.assertEqual(addr.display_name, 'Sara J.')
        self.assertEqual(addr.username, 'bad name')
        self.assertEqual(addr.domain, 'example.com')
        self.assertEqual(addr.addr_spec, '"bad name"@example.com')
        self.assertEqual(str(addr), '"Sara J." <"bad name"@example.com>')

    def test_il8n(self):
        # Non-ASCII is accepted in display name, domain, and (via the
        # username/domain constructor) the local part.
        addr = Address('Éric', 'wok', 'exàmple.com')
        self.assertEqual(addr.display_name, 'Éric')
        self.assertEqual(addr.username, 'wok')
        self.assertEqual(addr.domain, 'exàmple.com')
        self.assertEqual(addr.addr_spec, 'wok@exàmple.com')
        self.assertEqual(str(addr), 'Éric <wok@exàmple.com>')

    # XXX: there is an API design issue that needs to be solved here.
    #def test_non_ascii_username_raises(self):
    #    with self.assertRaises(ValueError):
    #        Address('foo', 'wők', 'example.com')

    def test_non_ascii_username_in_addr_spec_raises(self):
        with self.assertRaises(ValueError):
            Address('foo', addr_spec='wők@example.com')

    def test_address_addr_spec_and_username_raises(self):
        # addr_spec is mutually exclusive with username/domain.
        with self.assertRaises(TypeError):
            Address('foo', username='bing', addr_spec='bar@baz')

    def test_address_addr_spec_and_domain_raises(self):
        with self.assertRaises(TypeError):
            Address('foo', domain='bing', addr_spec='bar@baz')

    def test_address_addr_spec_and_username_and_domain_raises(self):
        with self.assertRaises(TypeError):
            Address('foo', username='bong', domain='bing', addr_spec='bar@baz')

    def test_space_in_addr_spec_username_raises(self):
        with self.assertRaises(ValueError):
            Address('foo', addr_spec="bad name@example.com")

    def test_bad_addr_sepc_raises(self):
        with self.assertRaises(ValueError):
            Address('foo', addr_spec="name@ex[]ample.com")

    def test_empty_group(self):
        grp = Group('foo')
        self.assertEqual(grp.display_name, 'foo')
        self.assertEqual(grp.addresses, ())
        self.assertEqual(str(grp), 'foo:;')

    def test_empty_group_list(self):
        grp = Group('foo', addresses=[])
        self.assertEqual(grp.display_name, 'foo')
        self.assertEqual(grp.addresses, ())
        self.assertEqual(str(grp), 'foo:;')

    def test_null_group(self):
        grp = Group()
        self.assertIsNone(grp.display_name)
        self.assertEqual(grp.addresses, ())
        self.assertEqual(str(grp), 'None:;')

    def test_group_with_addresses(self):
        members = [Address('b', 'b', 'c'), Address('a', 'b', 'c')]
        grp = Group('foo', members)
        self.assertEqual(grp.display_name, 'foo')
        self.assertEqual(grp.addresses, tuple(members))
        self.assertEqual(str(grp), 'foo: b <b@c>, a <b@c>;')

    def test_group_with_addresses_no_display_name(self):
        members = [Address('b', 'b', 'c'), Address('a', 'b', 'c')]
        grp = Group(addresses=members)
        self.assertIsNone(grp.display_name)
        self.assertEqual(grp.addresses, tuple(members))
        self.assertEqual(str(grp), 'None: b <b@c>, a <b@c>;')

    def test_group_with_one_address_no_display_name(self):
        # A nameless group holding one address stringifies as just the
        # address.
        members = [Address('b', 'b', 'c')]
        grp = Group(addresses=members)
        self.assertIsNone(grp.display_name)
        self.assertEqual(grp.addresses, tuple(members))
        self.assertEqual(str(grp), 'b <b@c>')

    def test_display_name_quoting(self):
        # A dot forces the display name into a quoted string.
        grp = Group('foo.bar')
        self.assertEqual(grp.display_name, 'foo.bar')
        self.assertEqual(grp.addresses, ())
        self.assertEqual(str(grp), '"foo.bar":;')

    def test_display_name_blanks_not_quoted(self):
        # Plain spaces alone do not require quoting.
        grp = Group('foo bar')
        self.assertEqual(grp.display_name, 'foo bar')
        self.assertEqual(grp.addresses, ())
        self.assertEqual(str(grp), 'foo bar:;')

    def test_set_message_header_from_address(self):
        addr = Address('foo', 'bar', 'example.com')
        msg = Message(policy=policy.default)
        msg['To'] = addr
        self.assertEqual(msg['to'], 'foo <bar@example.com>')
        self.assertEqual(msg['to'].addresses, (addr,))

    def test_set_message_header_from_group(self):
        grp = Group('foo bar')
        msg = Message(policy=policy.default)
        msg['To'] = grp
        self.assertEqual(msg['to'], 'foo bar:;')
        self.assertEqual(msg['to'].addresses, grp.addresses)
class TestFolding(TestHeaderBase):
    """Tests for header folding: BaseHeader.fold() wraps long header values
    onto continuation lines according to the active policy's
    max_line_length.  self.make_header comes from TestHeaderBase."""

    def test_short_unstructured(self):
        # Fits on one line: emitted unchanged, newline-terminated.
        h = self.make_header('subject', 'this is a test')
        self.assertEqual(h.fold(policy=policy.default),
                         'subject: this is a test\n')

    def test_long_unstructured(self):
        # Folded at a space boundary; the continuation line starts with a
        # single leading space (RFC 5322 folding whitespace).
        h = self.make_header('Subject', 'This is a long header '
            'line that will need to be folded into two lines '
            'and will demonstrate basic folding')
        self.assertEqual(h.fold(policy=policy.default),
                        'Subject: This is a long header line that will '
                            'need to be folded into two lines\n'
                        ' and will demonstrate basic folding\n')

    def test_unstructured_short_max_line_length(self):
        # A tiny max_line_length forces folding even for a short value.
        h = self.make_header('Subject', 'this is a short header '
                             'that will be folded anyway')
        self.assertEqual(
            h.fold(policy=policy.default.clone(max_line_length=20)),
            textwrap.dedent("""\
                Subject: this is a
                 short header that
                 will be folded
                 anyway
                """))

    def test_fold_unstructured_single_word(self):
        h = self.make_header('Subject', 'test')
        self.assertEqual(h.fold(policy=policy.default), 'Subject: test\n')

    def test_fold_unstructured_short(self):
        h = self.make_header('Subject', 'test test test')
        self.assertEqual(h.fold(policy=policy.default),
                         'Subject: test test test\n')

    def test_fold_unstructured_with_overlong_word(self):
        # A single token longer than max_line_length is never split.
        h = self.make_header('Subject', 'thisisaverylonglineconsistingofa'
            'singlewordthatwontfit')
        self.assertEqual(
            h.fold(policy=policy.default.clone(max_line_length=20)),
            'Subject: thisisaverylonglineconsistingofasinglewordthatwontfit\n')

    def test_fold_unstructured_with_two_overlong_words(self):
        # Each overlong token gets its own line; neither is split.
        h = self.make_header('Subject', 'thisisaverylonglineconsistingofa'
            'singlewordthatwontfit plusanotherverylongwordthatwontfit')
        self.assertEqual(
            h.fold(policy=policy.default.clone(max_line_length=20)),
            'Subject: thisisaverylonglineconsistingofasinglewordthatwontfit\n'
                ' plusanotherverylongwordthatwontfit\n')

    def test_fold_unstructured_with_slightly_long_word(self):
        # The word fits on a continuation line by itself, so the fold
        # happens immediately after the header name.
        h = self.make_header('Subject', 'thislongwordislessthanmaxlinelen')
        self.assertEqual(
            h.fold(policy=policy.default.clone(max_line_length=35)),
            'Subject:\n thislongwordislessthanmaxlinelen\n')

    def test_fold_unstructured_with_commas(self):
        # The old wrapper would fold this at the commas.
        h = self.make_header('Subject', "This header is intended to "
            "demonstrate, in a fairly succinct way, that we now do "
            "not give a , special treatment in unstructured headers.")
        self.assertEqual(
            h.fold(policy=policy.default.clone(max_line_length=60)),
            textwrap.dedent("""\
                Subject: This header is intended to demonstrate, in a fairly
                 succinct way, that we now do not give a , special treatment
                 in unstructured headers.
                """))

    def test_fold_address_list(self):
        # Structured (address) headers fold between addresses.
        h = self.make_header('To', '"Theodore H. Perfect" <yes@man.com>, '
            '"My address is very long because my name is long" <foo@bar.com>, '
            '"Only A. Friend" <no@yes.com>')
        self.assertEqual(h.fold(policy=policy.default), textwrap.dedent("""\
            To: "Theodore H. Perfect" <yes@man.com>,
             "My address is very long because my name is long" <foo@bar.com>,
             "Only A. Friend" <no@yes.com>
            """))

    def test_fold_date_header(self):
        # Folding re-serializes the parsed date: the day is zero-padded.
        h = self.make_header('Date', 'Sat, 2 Feb 2002 17:00:06 -0800')
        self.assertEqual(h.fold(policy=policy.default),
                         'Date: Sat, 02 Feb 2002 17:00:06 -0800\n')
if __name__ == '__main__':
unittest.main()
| mit |
mvaled/OpenUpgrade | addons/account_payment/wizard/account_payment_order.py | 338 | 5906 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp.tools.translate import _
class payment_order_create(osv.osv_memory):
    """
    Create a payment object with lines corresponding to the account move line
    to pay according to the date and the mode provided by the user.

    Hypothesis:
    - Small number of non-reconciled move line, payment mode and bank account type,
    - Big number of partner and bank account.

    If a type is given, unsuitable account Entry lines are ignored.
    """

    _name = 'payment.order.create'
    _description = 'payment.order.create'
    _columns = {
        'duedate': fields.date('Due Date', required=True),
        'entries': fields.many2many('account.move.line', 'line_pay_rel', 'pay_id', 'line_id', 'Entries')
    }
    _defaults = {
        # Default the due-date filter to today.
        'duedate': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Restrict the 'entries' widget to the candidate move lines found
        by search_entries(), which are passed in context['line_ids']."""
        if not context:
            context = {}
        res = super(payment_order_create, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
        if context and 'line_ids' in context:
            doc = etree.XML(res['arch'])
            # Constrain the many2many domain to the pre-computed lines.
            nodes = doc.xpath("//field[@name='entries']")
            for node in nodes:
                node.set('domain', '[("id", "in", ' + str(context['line_ids']) + ')]')
            res['arch'] = etree.tostring(doc)
        return res

    def create_payment(self, cr, uid, ids, context=None):
        """Populate the payment order (context['active_id']) with one
        payment.line per selected account.move.line, then close the wizard."""
        order_obj = self.pool.get('payment.order')
        line_obj = self.pool.get('account.move.line')
        payment_obj = self.pool.get('payment.line')
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        line_ids = [entry.id for entry in data.entries]
        if not line_ids:
            # Nothing selected: just close the wizard.
            return {'type': 'ir.actions.act_window_close'}

        payment = order_obj.browse(cr, uid, context['active_id'], context=context)
        # No payment-mode type filter here; line2bank picks each line's bank.
        t = None
        line2bank = line_obj.line2bank(cr, uid, line_ids, t, context)

        ## Finally populate the current payment with new lines:
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            # Default to an immediate payment (no scheduled date).  This also
            # fixes the old behavior where an unexpected `date_prefered`
            # value left `date_to_pay` undefined (NameError) or silently
            # reused the previous line's date.
            date_to_pay = False
            if payment.date_prefered == 'due':
                # Pay at the line's maturity date.
                date_to_pay = line.date_maturity
            elif payment.date_prefered == 'fixed':
                # Pay at the date fixed on the payment order.
                date_to_pay = payment.date_scheduled
            payment_obj.create(cr, uid, {
                'move_line_id': line.id,
                'amount_currency': line.amount_residual_currency,
                'bank_id': line2bank.get(line.id),
                'order_id': payment.id,
                'partner_id': line.partner_id and line.partner_id.id or False,
                'communication': line.ref or '/',
                'state': line.invoice and line.invoice.reference_type != 'none' and 'structured' or 'normal',
                'date': date_to_pay,
                # Invoice currency, else journal currency, else company currency.
                'currency': (line.invoice and line.invoice.currency_id.id) or line.journal_id.currency.id or line.journal_id.company_id.currency_id.id,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def search_entries(self, cr, uid, ids, context=None):
        """Find payable, unreconciled move lines due before the wizard's
        duedate and reopen the wizard with them preselected."""
        line_obj = self.pool.get('account.move.line')
        mod_obj = self.pool.get('ir.model.data')
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        search_due_date = data.duedate

        # Search for move line to pay:
        domain = [('reconcile_id', '=', False), ('account_id.type', '=', 'payable'), ('credit', '>', 0), ('account_id.reconcile', '=', True)]
        # Keep lines already due or with no maturity date at all.
        domain = domain + ['|', ('date_maturity', '<=', search_due_date), ('date_maturity', '=', False)]
        line_ids = line_obj.search(cr, uid, domain, context=context)
        context = dict(context, line_ids=line_ids)
        model_data_ids = mod_obj.search(cr, uid, [('model', '=', 'ir.ui.view'), ('name', '=', 'view_create_payment_order_lines')], context=context)
        resource_id = mod_obj.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {'name': _('Entry Lines'),
                'context': context,
                'view_type': 'form',
                'view_mode': 'form',
                'res_model': 'payment.order.create',
                'views': [(resource_id, 'form')],
                'type': 'ir.actions.act_window',
                'target': 'new',
                }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
swinter2011/login-page-update | markdown/treeprocessors.py | 17 | 12853 | from __future__ import unicode_literals
from __future__ import absolute_import
from . import util
from . import odict
from . import inlinepatterns
def build_treeprocessors(md_instance, **kwargs):
    """ Build the default treeprocessors for Markdown. """
    # Registration order matters: inline patterns run before prettifying.
    processors = odict.OrderedDict()
    for name, cls in (("inline", InlineProcessor),
                      ("prettify", PrettifyTreeprocessor)):
        processors[name] = cls(md_instance)
    return processors
def isString(s):
    """Return True for plain strings; AtomicString is deliberately excluded
    so already-processed text is not matched again."""
    if isinstance(s, util.AtomicString):
        return False
    return isinstance(s, util.string_type)
class Treeprocessor(util.Processor):
    """Base class for tree processors.

    Treeprocessors run on the ElementTree object before serialization.
    Each subclass implements a ``run`` method that receives the root of an
    ElementTree and either mutates it in place or returns a replacement
    tree.  Treeprocessors must extend markdown.Treeprocessor.
    """

    def run(self, root):
        """Process the tree rooted at *root*.

        Subclasses may mutate *root* and return ``None``, or return a new
        ElementTree object that replaces the existing root.
        """
        return None
class InlineProcessor(Treeprocessor):
    """
    A Treeprocessor that traverses a tree, applying inline patterns.

    Works in two phases: pattern application replaces matched text with
    numbered placeholder strings (the generated Elements are parked in
    ``self.stashed_nodes``), then the placeholders are resolved back into
    Elements and attached to the tree.
    """

    def __init__(self, md):
        self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
        self.__placeholder_suffix = util.ETX
        # prefix + 4-digit id + suffix
        self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
                                      + len(self.__placeholder_suffix)
        self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
        self.markdown = md

    def __makePlaceholder(self, type):
        """ Generate a placeholder """
        # NOTE(review): `type` is unused; the id is simply the next index
        # into stashed_nodes, zero-padded to four digits.
        id = "%04d" % len(self.stashed_nodes)
        hash = util.INLINE_PLACEHOLDER % id
        return hash, id

    def __findPlaceholder(self, data, index):
        """
        Extract id from data string, start from index

        Keyword arguments:

        * data: string
        * index: index, from which we start search

        Returns: placeholder id and string index, after the found placeholder.

        """
        m = self.__placeholder_re.search(data, index)
        if m:
            return m.group(1), m.end()
        else:
            # No placeholder found: advance one char so callers make progress.
            return None, index + 1

    def __stashNode(self, node, type):
        """ Add node to stash """
        placeholder, id = self.__makePlaceholder(type)
        self.stashed_nodes[id] = node
        return placeholder

    def __handleInline(self, data, patternIndex=0):
        """
        Process string with inline patterns and replace it
        with placeholders

        Keyword arguments:

        * data: A line of Markdown text
        * patternIndex: The index of the inlinePattern to start with

        Returns: String with placeholders.

        """
        # AtomicStrings are already final text and must not be re-processed.
        if not isinstance(data, util.AtomicString):
            startIndex = 0
            # A successful match keeps the same pattern (it may match again
            # further right); only a miss advances to the next pattern.
            while patternIndex < len(self.markdown.inlinePatterns):
                data, matched, startIndex = self.__applyPattern(
                    self.markdown.inlinePatterns.value_for_index(patternIndex),
                    data, patternIndex, startIndex)
                if not matched:
                    patternIndex += 1
        return data

    def __processElementText(self, node, subnode, isText=True):
        """
        Process placeholders in Element.text or Element.tail
        of Elements popped from self.stashed_nodes.

        Keywords arguments:

        * node: parent node
        * subnode: processing node
        * isText: bool variable, True - it's text, False - it's tail

        Returns: None

        """
        if isText:
            text = subnode.text
            subnode.text = None
        else:
            text = subnode.tail
            subnode.tail = None

        childResult = self.__processPlaceholders(text, subnode)

        # Tail text belongs to the parent: reinsert the results after the
        # subnode's position; text stays at the front of the subnode itself.
        if not isText and node is not subnode:
            pos = list(node).index(subnode)
            node.remove(subnode)
        else:
            pos = 0

        # Reversed so repeated insert() at a fixed position preserves order.
        childResult.reverse()
        for newChild in childResult:
            node.insert(pos, newChild)

    def __processPlaceholders(self, data, parent):
        """
        Process string with placeholders and generate ElementTree tree.

        Keyword arguments:

        * data: string with placeholders instead of ElementTree elements.
        * parent: Element, which contains processing inline data

        Returns: list with ElementTree elements with applied inline patterns.

        """
        def linkText(text):
            # Attach plain text either to the tail of the last generated
            # element or to the parent's text when nothing was emitted yet.
            if text:
                if result:
                    if result[-1].tail:
                        result[-1].tail += text
                    else:
                        result[-1].tail = text
                else:
                    if parent.text:
                        parent.text += text
                    else:
                        parent.text = text

        result = []
        # NOTE(review): 'strartIndex' is a long-standing typo for
        # 'startIndex'; kept as-is (doc-only change).
        strartIndex = 0
        while data:
            index = data.find(self.__placeholder_prefix, strartIndex)
            if index != -1:
                id, phEndIndex = self.__findPlaceholder(data, index)

                if id in self.stashed_nodes:
                    node = self.stashed_nodes.get(id)

                    if index > 0:
                        # Literal text before the placeholder.
                        text = data[strartIndex:index]
                        linkText(text)

                    if not isString(node):  # it's Element
                        # Recurse into text/tail of the stashed element,
                        # which may themselves contain placeholders.
                        for child in [node] + list(node):
                            if child.tail:
                                if child.tail.strip():
                                    self.__processElementText(node, child, False)
                            if child.text:
                                if child.text.strip():
                                    self.__processElementText(child, child)
                    else:  # it's just a string
                        linkText(node)
                        strartIndex = phEndIndex
                        continue

                    strartIndex = phEndIndex
                    result.append(node)

                else:  # wrong placeholder
                    # Prefix looked like a placeholder but had no stashed id:
                    # emit it literally and keep scanning.
                    end = index + len(self.__placeholder_prefix)
                    linkText(data[strartIndex:end])
                    strartIndex = end
            else:
                # No more placeholders: flush the remaining literal text.
                text = data[strartIndex:]
                if isinstance(data, util.AtomicString):
                    # We don't want to lose the AtomicString
                    text = util.AtomicString(text)
                linkText(text)
                data = ""

        return result

    def __applyPattern(self, pattern, data, patternIndex, startIndex=0):
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to stashed_nodes.

        Keyword arguments:

        * data: the text to be processed
        * pattern: the pattern to be checked
        * patternIndex: index of current pattern
        * startIndex: string index, from which we start searching

        Returns: String with placeholders instead of ElementTree elements.

        """
        match = pattern.getCompiledRegExp().match(data[startIndex:])
        leftData = data[:startIndex]

        if not match:
            return data, False, 0

        node = pattern.handleMatch(match)

        if node is None:
            # Pattern matched but produced nothing: resume scanning just
            # past the start of the match.
            return data, True, len(leftData)+match.span(len(match.groups()))[0]

        if not isString(node):
            if not isinstance(node.text, util.AtomicString):
                # We need to process current node too
                for child in [node] + list(node):
                    if not isString(node):
                        if child.text:
                            # text: start from the *next* pattern to avoid
                            # re-matching the pattern that produced it.
                            child.text = self.__handleInline(child.text,
                                                            patternIndex + 1)
                        if child.tail:
                            child.tail = self.__handleInline(child.tail,
                                                            patternIndex)

        placeholder = self.__stashNode(node, pattern.type())

        # Splice the placeholder between the pattern's surrounding groups.
        return "%s%s%s%s" % (leftData,
                             match.group(1),
                             placeholder, match.groups()[-1]), True, 0

    def run(self, tree):
        """Apply inline patterns to a parsed Markdown tree.

        Iterate over ElementTree, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree. If you don't
        want to process your data with inline paterns, instead of normal string,
        use subclass AtomicString:

            node.text = markdown.AtomicString("This will not be processed.")

        Arguments:

        * tree: ElementTree object, representing Markdown tree.

        Returns: ElementTree object with applied inline patterns.

        """
        self.stashed_nodes = {}

        # Explicit stack instead of recursion; children produced by
        # placeholder expansion are pushed for processing too.
        stack = [tree]

        while stack:
            currElement = stack.pop()
            insertQueue = []
            for child in currElement:
                if child.text and not isinstance(child.text, util.AtomicString):
                    text = child.text
                    child.text = None
                    lst = self.__processPlaceholders(self.__handleInline(
                                                    text), child)
                    stack += lst
                    insertQueue.append((child, lst))
                if child.tail:
                    # Tails are processed through a dummy element; whatever
                    # text lands on the dummy becomes the new tail, and the
                    # generated elements are inserted after `child`.
                    tail = self.__handleInline(child.tail)
                    dumby = util.etree.Element('d')
                    tailResult = self.__processPlaceholders(tail, dumby)
                    if dumby.text:
                        child.tail = dumby.text
                    else:
                        child.tail = None
                    pos = list(currElement).index(child) + 1
                    tailResult.reverse()
                    for newChild in tailResult:
                        currElement.insert(pos, newChild)
                if len(child):
                    stack.append(child)

            for element, lst in insertQueue:
                if self.markdown.enable_attributes:
                    if element.text and isString(element.text):
                        element.text = \
                            inlinepatterns.handleAttributes(element.text,
                                                            element)
                i = 0
                for newChild in lst:
                    if self.markdown.enable_attributes:
                        # Processing attributes
                        if newChild.tail and isString(newChild.tail):
                            newChild.tail = \
                                inlinepatterns.handleAttributes(newChild.tail,
                                                                element)
                        if newChild.text and isString(newChild.text):
                            newChild.text = \
                                inlinepatterns.handleAttributes(newChild.text,
                                                                newChild)
                    element.insert(i, newChild)
                    i += 1
        return tree
class PrettifyTreeprocessor(Treeprocessor):
    """ Add linebreaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add linebreaks to ElementTree children. """
        i = "\n"
        if util.isBlockLevel(elem.tag) and elem.tag not in ['code', 'pre']:
            if (not elem.text or not elem.text.strip()) \
                    and len(elem) and util.isBlockLevel(elem[0].tag):
                elem.text = i
            for e in elem:
                if util.isBlockLevel(e.tag):
                    self._prettifyETree(e)
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        if not elem.tail or not elem.tail.strip():
            elem.tail = i

    def run(self, root):
        """ Add linebreaks to ElementTree root object. """
        self._prettifyETree(root)
        # Do <br />'s separately as they are often in the middle of
        # inline content and missed by _prettifyETree.
        # `iter()` replaces the deprecated `getiterator()` (removed in
        # Python 3.9); both exist on ElementTree since Python 2.7.
        brs = root.iter('br')
        for br in brs:
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n%s' % br.tail
        # Clean up extra empty lines at end of code blocks.
        pres = root.iter('pre')
        for pre in pres:
            if len(pre) and pre[0].tag == 'code':
                # Guard against empty code blocks, where .text is None and
                # the old code raised AttributeError.
                pre[0].text = (pre[0].text or '').rstrip() + '\n'
| apache-2.0 |
redhat-openstack/django | django/middleware/common.py | 108 | 7314 | import hashlib
import logging
import re
import warnings
from django.conf import settings
from django.core.mail import mail_managers
from django.core import urlresolvers
from django import http
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils import six
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed by
              appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """

    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    logger.warning('Forbidden (User agent): %s', request.path,
                        extra={
                            'status_code': 403,
                            'request': request
                        }
                    )
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        # old_url/new_url are [host, path] pairs; a difference at the end
        # of this section triggers a permanent redirect.
        old_url = [host, request.path]
        new_url = old_url[:]

        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]

        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            # Only redirect when the slashless path does NOT resolve but
            # the slashed one does.
            if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
                    urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
                new_url[1] = new_url[1] + '/'
                # Redirecting a POST would silently drop the form data, so
                # fail loudly in development instead.
                if settings.DEBUG and request.method == 'POST':
                    raise RuntimeError((""
                    "You called this URL via POST, but the URL doesn't end "
                    "in a slash and you have APPEND_SLASH set. Django can't "
                    "redirect to the slash URL while maintaining POST data. "
                    "Change your form to point to %s%s (note the trailing "
                    "slash), or set APPEND_SLASH=False in your Django "
                    "settings.") % (new_url[0], new_url[1]))

        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0]:
            newurl = "%s://%s%s" % (
                'https' if request.is_secure() else 'http',
                new_url[0], urlquote(new_url[1]))
        else:
            newurl = urlquote(new_url[1])
        if request.META.get('QUERY_STRING', ''):
            if six.PY3:
                newurl += '?' + request.META['QUERY_STRING']
            else:
                # `query_string` is a bytestring. Appending it to the unicode
                # string `newurl` will fail if it isn't ASCII-only. This isn't
                # allowed; only broken software generates such query strings.
                # Better drop the invalid query string than crash (#15152).
                try:
                    newurl += '?' + request.META['QUERY_STRING'].decode()
                except UnicodeDecodeError:
                    pass
        return http.HttpResponsePermanentRedirect(newurl)

    def process_response(self, request, response):
        """
        Calculate the ETag, if needed.
        """
        # Legacy broken-link email support; superseded by
        # BrokenLinkEmailsMiddleware (deprecation path).
        if settings.SEND_BROKEN_LINK_EMAILS:
            warnings.warn("SEND_BROKEN_LINK_EMAILS is deprecated. "
                          "Use BrokenLinkEmailsMiddleware instead.",
                          PendingDeprecationWarning, stacklevel=2)
            BrokenLinkEmailsMiddleware().process_response(request, response)

        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                etag = response['ETag']
            elif response.streaming:
                # A streaming response has no materialized content to hash.
                etag = None
            else:
                etag = '"%s"' % hashlib.md5(response.content).hexdigest()
            if etag is not None:
                if (200 <= response.status_code < 300
                        and request.META.get('HTTP_IF_NONE_MATCH') == etag):
                    # Client already has this version; keep cookies from the
                    # original response on the 304.
                    cookies = response.cookies
                    response = http.HttpResponseNotModified()
                    response.cookies = cookies
                else:
                    response['ETag'] = etag

        return response
class BrokenLinkEmailsMiddleware(object):
    """Email the site managers about 404s with a referrer (broken links)."""

    def process_response(self, request, response):
        """
        Send broken link emails for relevant 404 NOT FOUND responses.
        """
        # Only in production; DEBUG shows the 404 page directly.
        if response.status_code == 404 and not settings.DEBUG:
            domain = request.get_host()
            path = request.get_full_path()
            # Referer may contain arbitrary bytes; replace undecodable ones.
            referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')

            if not self.is_ignorable_request(request, path, domain, referer):
                ua = request.META.get('HTTP_USER_AGENT', '<none>')
                ip = request.META.get('REMOTE_ADDR', '<none>')
                mail_managers(
                    "Broken %slink on %s" % (
                        ('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
                        domain
                    ),
                    "Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
                    "IP address: %s\n" % (referer, path, ua, ip),
                    fail_silently=True)
        return response

    def is_internal_request(self, domain, referer):
        """
        Returns True if the referring URL is the same domain as the current request.
        """
        # Different subdomains are treated as different domains.
        return bool(re.match("^https?://%s/" % re.escape(domain), referer))

    def is_ignorable_request(self, request, uri, domain, referer):
        """
        Returns True if the given request *shouldn't* notify the site managers.
        """
        # '?' in referer is identified as search engine source
        if (not referer or
                (not self.is_internal_request(domain, referer) and '?' in referer)):
            return True
        # Explicitly ignored URL patterns (e.g. favicon.ico probes).
        return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| bsd-3-clause |
jjs0sbw/CSPLN | apps/scaffolding/mac/web2py/web2py.app/Contents/Resources/gluon/debug.py | 22 | 5538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
| limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Debugger support classes
------------------------
"""
import logging
import pdb
import Queue
import sys
logger = logging.getLogger("web2py")
class Pipe(Queue.Queue):
    """File-like, queue-backed pipe used to feed pdb's stdin and to collect
    its stdout across threads.

    write()/flush() form the writer side; read()/readline() the reader side.
    A ``None`` item marks the end of a complete message, and the
    Queue.join()/task_done() pair makes flush() block until the reader has
    consumed everything written so far.
    """
    def __init__(self, name, mode='r', *args, **kwargs):
        # `mode` is accepted for file-object compatibility but ignored.
        self.__name = name
        Queue.Queue.__init__(self, *args, **kwargs)
    def write(self, data):
        logger.debug("debug %s writing %s" % (self.__name, data))
        self.put(data)
    def flush(self):
        # mark checkpoint (complete message)
        logger.debug("debug %s flushing..." % self.__name)
        self.put(None)
        # wait until it is processed
        self.join()
        logger.debug("debug %s flush done" % self.__name)
    def read(self, count=None, timeout=None):
        # `count` is accepted for file-object compatibility but ignored:
        # items are returned whole, one per call.
        logger.debug("debug %s reading..." % (self.__name, ))
        data = self.get(block=True, timeout=timeout)
        # signal that we are ready
        self.task_done()
        logger.debug("debug %s read %s" % (self.__name, data))
        return data
    def readline(self):
        logger.debug("debug %s readline..." % (self.__name, ))
        return self.read()
# Module-level pdb wired to the two pipes: commands are written to pipe_in
# and pdb's responses are drained from pipe_out by communicate().
pipe_in = Pipe('in')
pipe_out = Pipe('out')
debugger = pdb.Pdb(completekey=None, stdin=pipe_in, stdout=pipe_out,)
def set_trace():
    """Breakpoint shortcut (pdb-style): start tracing in the caller."""
    logger.info("DEBUG: set_trace!")
    # Frame 1 is our caller, i.e. the code that wants the breakpoint.
    debugger.set_trace(sys._getframe(1))
def stop_trace():
    """Stop waiting for the debugger (registered atexit).

    Pushes a final message plus the ``None`` end-marker so communicate()
    cannot block forever once the main thread has finished.
    """
    logger.info("DEBUG: stop_trace!")
    for token in ("debug finished!", None):
        pipe_out.write(token)
def communicate(command=None):
    """Send *command* to the debugger (if given) and wait for the reply.

    The reply is everything written to pipe_out up to the ``None``
    end-of-message marker, joined into a single string.
    """
    if command is not None:
        logger.info("DEBUG: sending command %s" % command)
        pipe_in.write(command)
    # iter(callable, sentinel): keep reading until the None marker arrives.
    result = list(iter(pipe_out.read, None))
    logger.info("DEBUG: result %s" % repr(result))
    return ''.join(result)
# New debugger implementation using qdb and a web UI
import gluon.contrib.qdb as qdb
from threading import RLock
interact_lock = RLock()  # guards WebDebugger's interaction state
run_lock = RLock()  # serializes draining of the debugger pipe
def check_interaction(fn):
    """Decorator to clean and prevent interaction when not available.

    The wrapped debugger command only runs while an interaction is active
    (``self.filename`` is set); the interaction state is cleared first,
    all under ``interact_lock``.
    """
    def check_fn(self, *args, **kwargs):
        with interact_lock:
            if self.filename:
                self.clear_interaction()
                return fn(self, *args, **kwargs)
    return check_fn
class WebDebugger(qdb.Frontend):
    """Qdb web2py interface.

    Frontend that records the current interaction state (file, line,
    context, last exception) so the web UI can poll it; access to that
    state is serialized through interact_lock.
    """
    def __init__(self, pipe, completekey='tab', stdin=None, stdout=None):
        # completekey/stdin/stdout are accepted for pdb-style signature
        # compatibility but are not used by the qdb frontend.
        qdb.Frontend.__init__(self, pipe)
        self.clear_interaction()
    def clear_interaction(self):
        # Forget the current breakpoint location and pending exception.
        self.filename = None
        self.lineno = None
        self.exception_info = None
        self.context = None
    # redefine Frontend methods:
    def run(self):
        # Drain every pending backend message; run_lock keeps concurrent
        # pollers from interleaving reads on the pipe.
        run_lock.acquire()
        try:
            while self.pipe.poll():
                qdb.Frontend.run(self)
        finally:
            run_lock.release()
    def interaction(self, filename, lineno, line, **context):
        # store current status
        interact_lock.acquire()
        try:
            self.filename = filename
            self.lineno = lineno
            self.context = context
        finally:
            interact_lock.release()
    def exception(self, title, extype, exvalue, trace, request):
        # Remember the last uncaught exception for display in the UI.
        self.exception_info = {'title': title,
                               'extype': extype, 'exvalue': exvalue,
                               'trace': trace, 'request': request}
    @check_interaction
    def do_continue(self):
        qdb.Frontend.do_continue(self)
    @check_interaction
    def do_step(self):
        qdb.Frontend.do_step(self)
    @check_interaction
    def do_return(self):
        qdb.Frontend.do_return(self)
    @check_interaction
    def do_next(self):
        qdb.Frontend.do_next(self)
    @check_interaction
    def do_quit(self):
        qdb.Frontend.do_quit(self)
    def do_exec(self, statement):
        interact_lock.acquire()
        try:
            # check to see if we're inside interaction
            if self.filename:
                # avoid spurious interaction notifications:
                self.set_burst(2)
                # execute the statement in the remote debugger:
                return qdb.Frontend.do_exec(self, statement)
        finally:
            interact_lock.release()
# create the connection between threads:
# two queues form a bidirectional pipe between the web frontend and the
# qdb backend running the debugged code.
parent_queue, child_queue = Queue.Queue(), Queue.Queue()
front_conn = qdb.QueuePipe("parent", parent_queue, child_queue)
child_conn = qdb.QueuePipe("child", child_queue, parent_queue)
web_debugger = WebDebugger(front_conn)  # frontend
qdb_debugger = qdb.Qdb(pipe=child_conn, redirect_stdio=False, skip=None)  # backend
dbg = qdb_debugger
# enable getting context (stack, globals/locals) at interaction
qdb_debugger.set_params(dict(call_stack=True, environment=True))
import gluon.main
gluon.main.global_settings.debugging = True
| gpl-3.0 |
adlius/osf.io | osf/migrations/0147_repoint_preprint_pagecounters.py | 11 | 1854 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-12-06 15:44
from __future__ import unicode_literals
from bulk_update.helper import bulk_update
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
from tqdm import tqdm
def noop(*args, **kwargs):
    """Reverse migration step that intentionally does nothing.

    Repointing the PageCounter ids is not undone on rollback.
    """
    # No brakes on the NPD train
    return None
def rekey_pagecounters(state, schema):
    """Repoint download PageCounter ids from node guids to preprint guids.

    PageCounter rows for preprint downloads were historically keyed as
    ``download:<node_guid>:<file_id>``; this rewrites them to use the
    preprint's own guid instead.  Raises if a preprint has no guid.
    """
    AbstractNode = state.get_model('osf', 'AbstractNode')
    Guid = state.get_model('osf', 'Guid')
    Preprint = state.get_model('osf', 'Preprint')
    PageCounter = state.get_model('osf', 'PageCounter')
    nct = ContentType.objects.get_for_model(AbstractNode).id
    pct = ContentType.objects.get_for_model(Preprint).id

    preprints = Preprint.objects.select_related('node').exclude(primary_file_id__isnull=True).exclude(node_id__isnull=True)
    progress_bar = tqdm(total=preprints.count() or 1)
    batch = []
    for preprint in preprints:
        node_id = Guid.objects.get(content_type=nct, object_id=preprint.node_id)._id
        file_id = preprint.primary_file._id
        if node_id and file_id:
            preprint_id = Guid.objects.filter(content_type=pct, object_id=preprint.id).values_list('_id', flat=True).first()
            if not preprint_id:
                # Was `assert False`, which is silently stripped under
                # `python -O`; fail loudly and informatively instead.
                raise RuntimeError(
                    'Preprint {} has no guid; cannot repoint its '
                    'PageCounters'.format(preprint.id))
            for page_counter in PageCounter.objects.filter(_id__startswith='download:{}:{}'.format(node_id, file_id)):
                page_counter._id = page_counter._id.replace(node_id, preprint_id)
                batch.append(page_counter)
        # tqdm.update() takes an *increment*, not an absolute position; the
        # old code passed the 1-based loop index, over-counting quadratically.
        progress_bar.update(1)
    bulk_update(batch, update_fields=['_id'], batch_size=10000)
    progress_bar.close()
class Migration(migrations.Migration):
    # Repoints download PageCounter records from node guids to preprint
    # guids.  The reverse operation is a no-op: the rekey is not undone.

    dependencies = [
        ('osf', '0146_merge_20181119_2236'),
    ]

    operations = [
        migrations.RunPython(rekey_pagecounters, noop)
    ]
| apache-2.0 |
m-urban/beets | test/test_embedart.py | 9 | 7151 | # This file is part of beets.
# Copyright 2015, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os.path
import shutil
from mock import patch
import tempfile
from test import _common
from test._common import unittest
from test.helper import TestHelper
from beets.mediafile import MediaFile
from beets import config, logging, ui
from beets.util import syspath
from beets.util.artresizer import ArtResizer
from beets import art
def require_artresizer_compare(test):
    """Decorator: skip *test* when ArtResizer cannot compare images
    (e.g. ImageMagick is unavailable)."""
    import functools

    # functools.wraps preserves __name__ *and* __doc__/__module__ etc.;
    # the old manual `wrapper.__name__ = test.__name__` lost everything
    # but the name, which broke test descriptions in verbose runs.
    @functools.wraps(test)
    def wrapper(*args, **kwargs):
        if not ArtResizer.shared.can_compare:
            raise unittest.SkipTest()
        else:
            return test(*args, **kwargs)
    return wrapper
class EmbedartCliTest(_common.TestCase, TestHelper):
small_artpath = os.path.join(_common.RSRC, 'image-2x3.jpg')
abbey_artpath = os.path.join(_common.RSRC, 'abbey.jpg')
abbey_similarpath = os.path.join(_common.RSRC, 'abbey-similar.jpg')
abbey_differentpath = os.path.join(_common.RSRC, 'abbey-different.jpg')
    def setUp(self):
        # Build the temporary beets environment, then activate the plugin
        # under test for each case.
        self.setup_beets()  # Converter is threaded
        self.load_plugins('embedart')
def _setup_data(self, artpath=None):
if not artpath:
artpath = self.small_artpath
with open(syspath(artpath)) as f:
self.image_data = f.read()
    def tearDown(self):
        # Mirror setUp in reverse order: plugin first, then the library.
        self.unload_plugins()
        self.teardown_beets()
def test_embed_art_from_file(self):
self._setup_data()
album = self.add_album_fixture()
item = album.items()[0]
self.run_command('embedart', '-f', self.small_artpath)
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.images[0].data, self.image_data)
def test_embed_art_from_album(self):
self._setup_data()
album = self.add_album_fixture()
item = album.items()[0]
album.artpath = self.small_artpath
album.store()
self.run_command('embedart')
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.images[0].data, self.image_data)
def test_embed_art_remove_art_file(self):
self._setup_data()
album = self.add_album_fixture()
logging.getLogger('beets.embedart').setLevel(logging.DEBUG)
handle, tmp_path = tempfile.mkstemp()
os.write(handle, self.image_data)
os.close(handle)
album.artpath = tmp_path
album.store()
config['embedart']['remove_art_file'] = True
self.run_command('embedart')
if os.path.isfile(tmp_path):
os.remove(tmp_path)
self.fail('Artwork file {0} was not deleted'.format(tmp_path))
def test_art_file_missing(self):
self.add_album_fixture()
logging.getLogger('beets.embedart').setLevel(logging.DEBUG)
with self.assertRaises(ui.UserError):
self.run_command('embedart', '-f', '/doesnotexist')
def test_embed_non_image_file(self):
album = self.add_album_fixture()
logging.getLogger('beets.embedart').setLevel(logging.DEBUG)
handle, tmp_path = tempfile.mkstemp()
os.write(handle, 'I am not an image.')
os.close(handle)
try:
self.run_command('embedart', '-f', tmp_path)
finally:
os.remove(tmp_path)
mediafile = MediaFile(syspath(album.items()[0].path))
self.assertFalse(mediafile.images) # No image added.
@require_artresizer_compare
def test_reject_different_art(self):
self._setup_data(self.abbey_artpath)
album = self.add_album_fixture()
item = album.items()[0]
self.run_command('embedart', '-f', self.abbey_artpath)
config['embedart']['compare_threshold'] = 20
self.run_command('embedart', '-f', self.abbey_differentpath)
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.images[0].data, self.image_data,
'Image written is not {0}'.format(
self.abbey_artpath))
@require_artresizer_compare
def test_accept_similar_art(self):
self._setup_data(self.abbey_similarpath)
album = self.add_album_fixture()
item = album.items()[0]
self.run_command('embedart', '-f', self.abbey_artpath)
config['embedart']['compare_threshold'] = 20
self.run_command('embedart', '-f', self.abbey_similarpath)
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.images[0].data, self.image_data,
'Image written is not {0}'.format(
self.abbey_similarpath))
def test_non_ascii_album_path(self):
resource_path = os.path.join(_common.RSRC, 'image.mp3').encode('utf8')
album = self.add_album_fixture()
trackpath = album.items()[0].path
albumpath = album.path
shutil.copy(syspath(resource_path), syspath(trackpath))
self.run_command('extractart', '-n', 'extracted')
self.assertExists(syspath(os.path.join(albumpath, b'extracted.png')))
@patch('beets.art.subprocess')
@patch('beets.art.extract')
class ArtSimilarityTest(unittest.TestCase):
    """Verify how ``art.check_art_similarity`` interprets the exit code
    and output of the ImageMagick ``compare`` process."""

    def test_imagemagick_response(self, mock_extract, mock_subprocess):
        mock_extract.return_value = True
        popen = mock_subprocess.Popen.return_value
        logger = logging.getLogger('beets.embedart')

        def similarity(threshold):
            return art.check_art_similarity(logger, None, None, threshold)

        # Clean run: a score of 10 passes a threshold of 20 but not 5.
        popen.returncode = 0
        popen.communicate.return_value = "10", "tagada"
        self.assertTrue(similarity(20))
        self.assertFalse(similarity(5))

        # Exit status 1 with the score on stderr is still usable.
        popen.returncode = 1
        popen.communicate.return_value = "tagada", "10"
        self.assertTrue(similarity(20))
        self.assertFalse(similarity(5))

        # Any larger failure yields no verdict at all.
        popen.returncode = 2
        self.assertIsNone(similarity(20))

        # Unparseable ImageMagick output also yields no verdict.
        popen.returncode = 0
        popen.communicate.return_value = "foo", "bar"
        self.assertIsNone(similarity(20))
        popen.returncode = 1
        self.assertIsNone(similarity(20))
def suite():
    """Build the test suite for this module (used by the test runner)."""
    return unittest.TestLoader().loadTestsFromName(__name__)


# BUGFIX: the guard compared __name__ with b'__main__', which is always
# False on Python 3 (str never equals bytes), so the module could not be
# run directly.  A plain string literal works on both Python 2 (where
# unicode_literals makes it unicode, equal to the str __name__) and 3.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| mit |
maroux/django-extensions | django_extensions/db/fields/encrypted.py | 28 | 6056 | import sys
import warnings
import six
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
try:
from keyczar import keyczar
except ImportError:
raise ImportError('Using an encrypted field requires the Keyczar module. '
'You can obtain Keyczar from http://www.keyczar.org/.')
class EncryptionWarning(RuntimeWarning):
    """Warning issued when a value must be truncated before encryption."""
class BaseEncryptedField(models.Field):
    """Base class for model fields whose values are stored encrypted.

    Values are encrypted with Keyczar before hitting the database and
    (when a decrypting key set is configured) decrypted again in
    ``to_python``.  Encrypted payloads are marked with ``prefix`` so an
    already-encrypted value is never encrypted twice.
    """
    prefix = 'enc_str:::'

    def __init__(self, *args, **kwargs):
        if not hasattr(settings, 'ENCRYPTED_FIELD_KEYS_DIR'):
            raise ImproperlyConfigured('You must set the settings.ENCRYPTED_FIELD_KEYS_DIR '
                                       'setting to your Keyczar keys directory.')
        crypt_class = self.get_crypt_class()
        self.crypt = crypt_class.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)

        # Encrypted size is larger than unencrypted, so the column gets
        # the encrypted size while the original is kept for truncation.
        self.unencrypted_length = max_length = kwargs.get('max_length', None)
        if max_length:
            kwargs['max_length'] = self.calculate_crypt_max_length(max_length)

        super(BaseEncryptedField, self).__init__(*args, **kwargs)

    def calculate_crypt_max_length(self, unencrypted_length):
        """Return the column size needed to hold an encrypted value of
        ``unencrypted_length`` characters (prefix included)."""
        # TODO: Re-examine if this logic will actually make a large-enough
        # max-length for unicode strings that have non-ascii characters in them.
        # For PostGreSQL we might as well always use textfield since there is little
        # difference (except for length checking) between varchar and text in PG.
        return len(self.prefix) + len(self.crypt.Encrypt('x' * unencrypted_length))

    def get_crypt_class(self):
        """
        Get the Keyczar class to use.

        The class can be customized with the ENCRYPTED_FIELD_MODE setting. By default,
        this setting is DECRYPT_AND_ENCRYPT. Set this to ENCRYPT to disable decryption.
        This is necessary if you are only providing public keys to Keyczar.

        Returns:
            keyczar.Encrypter if ENCRYPTED_FIELD_MODE is ENCRYPT.
            keyczar.Crypter if ENCRYPTED_FIELD_MODE is DECRYPT_AND_ENCRYPT.

        Override this method to customize the type of Keyczar class returned.
        """
        crypt_type = getattr(settings, 'ENCRYPTED_FIELD_MODE', 'DECRYPT_AND_ENCRYPT')
        if crypt_type == 'ENCRYPT':
            crypt_class_name = 'Encrypter'
        elif crypt_type == 'DECRYPT_AND_ENCRYPT':
            crypt_class_name = 'Crypter'
        else:
            raise ImproperlyConfigured(
                'ENCRYPTED_FIELD_MODE must be either DECRYPT_AND_ENCRYPT '
                'or ENCRYPT, not %s.' % crypt_type)
        return getattr(keyczar, crypt_class_name)

    def to_python(self, value):
        """Decrypt ``value`` if it carries the encryption prefix and a
        decrypting crypt class is available; otherwise return it as-is."""
        if isinstance(self.crypt.primary_key, keyczar.keys.RsaPublicKey):
            # A public key can only encrypt, never decrypt.
            retval = value
        elif value and (value.startswith(self.prefix)):
            if hasattr(self.crypt, 'Decrypt'):
                retval = self.crypt.Decrypt(value[len(self.prefix):])
                if sys.version_info < (3,):
                    if retval:
                        retval = retval.decode('utf-8')
            else:
                retval = value
        else:
            retval = value
        return retval

    def get_db_prep_value(self, value, connection, prepared=False):
        if value and not value.startswith(self.prefix):
            # We need to encode a unicode string into a byte string first:
            # keyczar expects a bytestring, not a unicode string.
            if sys.version_info < (3,):
                # BUGFIX: ``six`` has no ``types`` attribute; the original
                # ``type(value) == six.types.UnicodeType`` raised
                # AttributeError for every unicode value on Python 2.
                if isinstance(value, six.text_type):
                    value = value.encode('utf-8')
            # Truncated encrypted content is unreadable,
            # so truncate before encryption.
            max_length = self.unencrypted_length
            if max_length and len(value) > max_length:
                warnings.warn("Truncating field %s from %d to %d bytes" % (
                    self.name, len(value), max_length), EncryptionWarning
                )
                value = value[:max_length]

            value = self.prefix + self.crypt.Encrypt(value)
        return value

    def deconstruct(self):
        name, path, args, kwargs = super(BaseEncryptedField, self).deconstruct()
        # Report the caller-supplied (unencrypted) max_length so migrations
        # round-trip through __init__ without growing the column each time.
        kwargs['max_length'] = self.unencrypted_length
        return name, path, args, kwargs
class EncryptedTextField(six.with_metaclass(models.SubfieldBase,
                                            BaseEncryptedField)):
    """Encrypted variant of a Django ``TextField``."""

    def get_internal_type(self):
        return 'TextField'

    def formfield(self, **kwargs):
        options = {'widget': forms.Textarea}
        options.update(kwargs)
        return super(EncryptedTextField, self).formfield(**options)

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        # South only needs to see the underlying (plain) field type, so
        # introspect the actual field and report it as a TextField.
        from south.modelsinspector import introspector
        args, kwargs = introspector(self)
        return ("django.db.models.fields.TextField", args, kwargs)
class EncryptedCharField(six.with_metaclass(models.SubfieldBase,
                                            BaseEncryptedField)):
    """Encrypted variant of a Django ``CharField``."""
    # NOTE: the redundant __init__ override that only delegated to
    # super() was removed; behavior is unchanged.

    def get_internal_type(self):
        return "CharField"

    def formfield(self, **kwargs):
        # self.max_length is the *encrypted* length computed by
        # BaseEncryptedField.__init__; truncation to the unencrypted
        # length happens in get_db_prep_value.
        defaults = {'max_length': self.max_length}
        defaults.update(kwargs)
        return super(EncryptedCharField, self).formfield(**defaults)

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        # We'll just introspect the _actual_ field.
        from south.modelsinspector import introspector
        field_class = "django.db.models.fields.CharField"
        args, kwargs = introspector(self)
        # That's our definition!
        return (field_class, args, kwargs)
| mit |
hungdt/scf_dmft | dmft.py | 1 | 7186 | #!/usr/bin/env python
import h5py, time, sys, os;
import re
import init, solver, system_dependence as system;
from numpy import *;
from average_green import averageGreen;
from functions import *;
from share_fun import *;
#
# INIT: for the details of variables, see return of init.py
#
# read input arguments
parms, np, parms_file = getOptions(sys.argv[1:]);
# create or open database (HDF5)
h5file = h5py.File(parms['DATA_FILE'], 'a');
vars_dict = init.initialize(h5file, parms);
# NOTE(review): injects every entry of vars_dict into the module namespace;
# the bare names used below (h5, wn, corr_id, mixer, mixer_SE, extra, Nd,
# DENSITY, N_LAYERS, NCOR, ...) are presumably defined here -- verify
# against init.initialize().
for k, v in vars_dict.iteritems(): exec('%s=v'%k);

#
# DMFT, DMFT, DMFT !!!
#
set_printoptions(suppress=True, precision=4, linewidth=150);
for k, v in parms.iteritems(): print k + " = " + str(v) + ";";
print "\n\n"

# MEASURE < 0: plain self-consistency loop; 0: switch to a final measuring
# run once converged; > 0: the current iteration is the measuring run.
parms['MEASURE'] = int(val_def(parms, 'MEASURE', -1));
if 'OBSERVABLES' in parms and parms['MEASURE'] < 0: parms['MEASURE'] = 0;
if int(val_def(parms, 'ADJUST_DELTA_ONCE', 1)) > 0: Nd = -1; print "Only adjust double counting at the beginning";

# Resume from the last finished iteration recorded in the HDF5 file.
it = h5["iter"][0];
SelfEnergy_out = h5['SelfEnergy/%d'%it][:];
mu_out = mu_in = float(parms['MU']);
delta_out = delta_in = float(parms['DELTA']);

while True:
    # start a new iteration
    MaxIter = int(val_def(parms, "MAX_ITER", 20));
    if it >= MaxIter and parms['MEASURE'] < 0: break;
    it += 1;

    if parms['MEASURE'] > 0:
        # Override settings with their FINAL_* counterparts for the
        # measuring run.
        print 'Final loop for measuring'
        for k, v in parms.iteritems():
            if re.match('^FINAL_', k):
                key_to_change = re.split('^FINAL_', k)[1]
                parms[key_to_change] = v
                print ' %s=%s'%(key_to_change, str(v))
                if key_to_change == 'CUTOFF_FREQ':
                    parms['N_CUTOFF'] = int(round((float(parms['BETA'])/pi*float(v) - 1)/2.));
                    print ' %s=%s'%('N_CUTOFF', str(parms['N_CUTOFF']))

    time_spent = time.time();
    print "ITERATION: %d\n"%it;

    # average over k-space for Gavg then produce Weiss field
    # use Gauss quadrature with C++ extension code

    # mixing self energy
    tmp = mixer_SE.do_task(r_[SelfEnergy_out.real.flatten(), SelfEnergy_out.imag.flatten()]).reshape(2, -1);
    SelfEnergy_in = (tmp[0] + 1j*tmp[1]).reshape(SelfEnergy_out.shape);
    mu_orig = mu_in; delta_orig = delta_in;

    if str(it) not in h5['WeissField']:
        print "Tedious part: averaging Green function";
        extra.update(system.getInfoForDensityCalculation(h5, it-1));
        print 'Density correction = \n', extra['correction'][:, :NCOR];
        Gavg, Gavg0, delta_out, mu_out, VCoulomb = averageGreen(delta_in, mu_in, 1j*wn, SelfEnergy_in, parms, Nd, DENSITY, int(val_def(parms, 'NO_TUNEUP', 0)) == 0, extra);
        delta_in, mu_in = mixer.do_task(r_[delta_out, mu_out]);
        if mu_in != mu_out or delta_in != delta_out:
            # The mixer changed mu/delta: redo the average so the Green
            # function is consistent with the mixed values.
            print 'There is mixing, average Green function once more';
            extra['G_asymp_coefs'][:N_LAYERS*int(parms['FLAVORS'])] -= delta_out - delta_in;
            extra['G_asymp_coefs'] -= mu_out - mu_in;
            Gavg, Gavg0, delta_in, mu_in, VCoulomb = averageGreen(delta_in, mu_in, 1j*wn, SelfEnergy_in, parms, Nd, DENSITY, False, extra);
        parms["DELTA"] = delta_in;
        parms["MU"] = mu_in; # just want MU shown in log_density corresponding to nf
        nf = getDensityFromGmat(Gavg0, float(parms['BETA']), extra);
        nf = c_[nf[:,:NCOR], sum(nf[:,NCOR:], 1)];
        log_density(h5, it-1, parms, nf);
        parms["DELTA"] = delta_in;
        parms["MU"] = mu_in;
        h5['parms/%d/DELTA'%it][...] = str(delta_in);
        h5['parms/%d/MU'%it][...] = str(mu_in);
        # Weiss field from the Dyson equation on the correlated block.
        aWeiss = 1./Gavg[:, :, corr_id] + SelfEnergy_in;
        save_data(h5, it, ['avgGreen', 'WeissField', 'StaticCoulomb'], [Gavg0, aWeiss, VCoulomb]);
        NthOrder = 3;
        dispersion_avg = system.getAvgDispersion(parms, NthOrder, extra);
        h5['SolverData/AvgDispersion'][:] = dispersion_avg;
    else:
        # This iteration was already averaged in a previous run: reload it.
        Gavg0 = h5['avgGreen/%d'%it][:];
        aWeiss = h5['WeissField/%d'%it][:];
        VCoulomb = h5['StaticCoulomb/%d'%it][:];
    time_spent = r_[time_spent, time.time()];

    # run the solver here and get Gimp
    # need: path for data file, iteration number, layer index
    if str(it) not in h5['SelfEnergy']:
        print "Tedious part: Running impurity solver %d times"%N_LAYERS;
        dispersion_avg = h5['SolverData/AvgDispersion'][:];
        nf = getDensity(h5, it-1);
        # Close the HDF5 file while the external solver works on it.
        h5file.close(); del h5;
        tmph5filename = solver.run_solver(dispersion_avg, nf, 1j*wn, it, parms, aWeiss, np, VCoulomb);
        if tmph5filename is None: print >> sys.stderr, "Something wrong while running the solver"; break;
        h5file = h5py.File(parms['DATA_FILE'], 'a');
        h5 = h5file[parms['ID']];
        Gimp, SelfEnergy_out = solver.solver_post_process(parms, aWeiss, h5, tmph5filename);
        if SelfEnergy_out is None: break;
        save_data(h5, it, ['ImpurityGreen', 'SelfEnergy'], [Gimp, SelfEnergy_out]);
    else: SelfEnergy_out = h5['SelfEnergy/%d'%it][:];
    time_spent = r_[time_spent, time.time()];

    # finish the iteration
    time_spent = array(diff(time_spent), dtype = int);
    log_data(h5, 'log_time', it, r_[time_spent, sum(time_spent)], data_type = int);

    # check if needs to adjust parms
    new_parms = parms_file;
    if os.path.isfile(new_parms):
        # Re-read the parameter file so runs can be steered while running.
        parms_new = readParameters(new_parms);
        print 'Check for updating parameters';
        updated = False;
        for k, v in parms_new.iteritems():
            if k not in parms or str(parms[k]) != str(v):
                print k, ' = ', v
                parms[k] = v
                updated = True;
                if k == 'MU':
                    mu_in = float(parms_new['MU'])
                    print ' chemical potential is forced to be %s'%parms_new['MU']
        if not updated: print 'no new parameters.';
    save_parms(h5, it+1, parms);
    h5["iter"][...] = it; # this is the mark that iteration 'it' is done
    print "Time for iteration %d: %d, %d, %d\n"%(it, time_spent[0], time_spent[1], sum(time_spent));

    # check stop condition
    # generate criterion for convergence: DOS at Fermi level
    # DOS_in = getFermiDOS(Gavg0, float(parms['BETA']));
    # Gavg0 = averageGreen(delta_in, mu_in, 1j*wn, SelfEnergy_out, parms, Nd, DENSITY, False, extra)[1];
    # DOS_out = getFermiDOS(Gavg0, float(parms['BETA']));
    # DOS_in = c_[DOS_in[:,:NCOR:N_LAYERS], sum(DOS_in[:, NCOR:], 1)/N_LAYERS];
    # DOS_out = c_[DOS_out[:,:NCOR:N_LAYERS], sum(DOS_out[:, NCOR:], 1)/N_LAYERS];
    print 'End iteration %d\n\n'%it;
    if check_convergence(h5, it, r_[mu_orig, delta_orig, SelfEnergy_in.flatten()], r_[mu_in, delta_in, SelfEnergy_out.flatten()],
                         abstol = float(val_def(parms, 'TOLERANCE', 0.001*int(parms['SPINS']))), mixing = mixer.get_mixing_value(), Ntime=3):
        print 'CONVERGE!'
        if parms['MEASURE'] == 0: parms['MEASURE'] = 1;
        else: break;
    # check if it goes beyond max iter, measure and stop
    elif parms['MEASURE'] > 0: break;
    if it >= MaxIter:
        if parms['MEASURE'] < 0: break;
        else: parms['MEASURE'] = 1;

# the end
h5file.close();
| mit |
edcast-inc/edx-platform-edcast | lms/djangoapps/edxnotes/views.py | 72 | 3809 | """
Views related to EdxNotes.
"""
import json
import logging
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.conf import settings
from django.core.urlresolvers import reverse
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from courseware.courses import get_course_with_access
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from util.json_request import JsonResponse, JsonResponseBadRequest
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.helpers import (
get_notes,
get_id_token,
is_feature_enabled,
search,
get_course_position,
)
log = logging.getLogger(__name__)
@login_required
def edxnotes(request, course_id):
    """
    Displays the EdxNotes page.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)

    if not is_feature_enabled(course):
        raise Http404

    try:
        notes = get_notes(request.user, course)
    except EdxNotesServiceUnavailable:
        raise Http404

    # When the student has no notes yet, point them at their last known
    # position in the course instead.
    position = None
    if not notes:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2
        )
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course_key, course=course
        )
        position = get_course_position(course_module) or None

    context = {
        "course": course,
        "search_endpoint": reverse("search_notes", kwargs={"course_id": course_id}),
        "notes": notes,
        "debug": json.dumps(settings.DEBUG),
        'position': position,
    }
    return render_to_response("edxnotes/edxnotes.html", context)
@login_required
def search_notes(request, course_id):
    """
    Handles search requests.

    Requires a ``text`` GET parameter; returns the raw search results,
    or a JSON error payload when the notes service fails.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)

    if not is_feature_enabled(course):
        raise Http404

    if "text" not in request.GET:
        return HttpResponseBadRequest()

    query_string = request.GET["text"]
    try:
        search_results = search(request.user, course, query_string)
    except (EdxNotesParseError, EdxNotesServiceUnavailable) as err:
        # BUGFIX: BaseException.message is deprecated and removed in
        # Python 3; str(err) yields the same text on both versions.
        return JsonResponseBadRequest({"error": str(err)}, status=500)
    return HttpResponse(search_results)
# pylint: disable=unused-argument
@login_required
def get_token(request, course_id):
    """
    Get JWT ID-Token, in case you need new one.
    """
    token = get_id_token(request.user)
    return HttpResponse(token, content_type='text/plain')
@login_required
def edxnotes_visibility(request, course_id):
    """
    Handle ajax call from "Show notes" checkbox.
    """
    course_key = CourseKey.from_string(course_id)
    course = get_course_with_access(request.user, "load", course_key)
    field_data_cache = FieldDataCache([course], course_key, request.user)
    course_module = get_module_for_descriptor(
        request.user, request, course, field_data_cache, course_key, course=course
    )

    if not is_feature_enabled(course):
        raise Http404

    try:
        # ValueError: body is not valid JSON; KeyError: no visibility field.
        payload = json.loads(request.body)
        course_module.edxnotes_visibility = payload["visibility"]
        course_module.save()
        return JsonResponse(status=200)
    except (ValueError, KeyError):
        log.warning(
            "Could not decode request body as JSON and find a boolean visibility field: '%s'", request.body
        )
        return JsonResponseBadRequest()
| agpl-3.0 |
yavuzovski/playground | python/django/RESTTest/.venv/lib/python3.4/site-packages/django/contrib/redirects/models.py | 115 | 1077 | from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Redirect(models.Model):
    """A per-site mapping from an old URL path to its replacement."""
    # Redirects are scoped to a site so the same path can redirect
    # differently on different sites (see unique_together below).
    site = models.ForeignKey(Site, models.CASCADE, verbose_name=_('site'))
    old_path = models.CharField(
        _('redirect from'),
        max_length=200,
        db_index=True,  # indexed: redirects are looked up by this path
        help_text=_("This should be an absolute path, excluding the domain name. Example: '/events/search/'."),
    )
    new_path = models.CharField(
        _('redirect to'),
        max_length=200,
        # blank target is allowed -- presumably handled specially by the
        # consuming middleware (not visible from this model).
        blank=True,
        help_text=_("This can be either an absolute path (as above) or a full URL starting with 'http://'."),
    )

    class Meta:
        verbose_name = _('redirect')
        verbose_name_plural = _('redirects')
        db_table = 'django_redirect'
        unique_together = (('site', 'old_path'),)
        ordering = ('old_path',)

    def __str__(self):
        return "%s ---> %s" % (self.old_path, self.new_path)
| gpl-3.0 |
MeetMe/selenium | py/test/selenium/webdriver/common/rendered_webelement_tests.py | 63 | 3233 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.webdriver.common.by import By
class RenderedWebElementTests(unittest.TestCase):
    """Tests for computed CSS styles and on-screen geometry of elements."""

    @pytest.mark.ignore_chrome
    def testShouldPickUpStyleOfAnElement(self):
        self._loadPage("javascriptPage")

        parent = self.driver.find_element(by=By.ID, value="green-parent")
        self.assertEqual("rgba(0, 128, 0, 1)",
                         parent.value_of_css_property("background-color"))

        item = self.driver.find_element(by=By.ID, value="red-item")
        self.assertEqual("rgba(255, 0, 0, 1)",
                         item.value_of_css_property("background-color"))

    @pytest.mark.ignore_chrome
    def testShouldAllowInheritedStylesToBeUsed(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs has an issue with getting the right value for background-color")
        self._loadPage("javascriptPage")

        item = self.driver.find_element(by=By.ID, value="green-item")
        self.assertEqual("transparent",
                         item.value_of_css_property("background-color"))

    def testShouldCorrectlyIdentifyThatAnElementHasWidth(self):
        self._loadPage("xhtmlTest")

        size = self.driver.find_element(by=By.ID, value="linkId").size
        self.assertTrue(size["width"] > 0, "Width expected to be greater than 0")
        self.assertTrue(size["height"] > 0, "Height expected to be greater than 0")

    def testShouldBeAbleToDetermineTheRectOfAnElement(self):
        if self.driver.capabilities['browserName'] == 'phantomjs':
            pytest.xfail("phantomjs driver does not support rect command")
        self._loadPage("xhtmlTest")

        rect = self.driver.find_element(By.ID, "username").rect
        self.assertTrue(rect["x"] > 0, "Element should not be in the top left")
        self.assertTrue(rect["y"] > 0, "Element should not be in the top left")
        self.assertTrue(rect["width"] > 0, "Width expected to be greater than 0")
        self.assertTrue(rect["height"] > 0, "Height expected to be greater than 0")

    # -- helpers ---------------------------------------------------------

    def _pageURL(self, name):
        return self.webserver.where_is(name + '.html')

    def _loadSimplePage(self):
        self._loadPage("simpleTest")

    def _loadPage(self, name):
        self.driver.get(self._pageURL(name))
| apache-2.0 |
javilonas/NCam | cross/android-toolchain/lib/python2.7/distutils/spawn.py | 83 | 7790 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
__revision__ = "$Id$"
import sys
import os
from distutils.errors import DistutilsPlatformError, DistutilsExecError
from distutils import log
def spawn(cmd, search_path=1, verbose=0, dry_run=0):
    """Run another program, specified as a command list 'cmd', in a new process.

    'cmd' is just the argument list for the new process, ie.
    cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
    There is no way to run a program with a name different from that of its
    executable.

    If 'search_path' is true (the default), the system's executable
    search path will be used to find the program; otherwise, cmd[0]
    must be the exact path to the executable.  If 'dry_run' is true,
    the command will not actually be run.

    'verbose' is accepted for backward compatibility but unused here.

    Raise DistutilsExecError if running the program fails in any way; just
    return on success.
    """
    # Dispatch on the host OS; each helper knows the platform-specific
    # way to create and wait for the child process.
    if os.name == 'posix':
        _spawn_posix(cmd, search_path, dry_run=dry_run)
    elif os.name == 'nt':
        _spawn_nt(cmd, search_path, dry_run=dry_run)
    elif os.name == 'os2':
        _spawn_os2(cmd, search_path, dry_run=dry_run)
    else:
        raise DistutilsPlatformError, \
              "don't know how to spawn programs on platform '%s'" % os.name
def _nt_quote_args(args):
"""Quote command-line arguments for DOS/Windows conventions.
Just wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i, arg in enumerate(args):
if ' ' in arg:
args[i] = '"%s"' % arg
return args
def _spawn_nt(cmd, search_path=1, verbose=0, dry_run=0):
    # Spawn 'cmd' on Windows; 'verbose' is accepted but unused.
    executable = cmd[0]
    cmd = _nt_quote_args(cmd)
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(' '.join([executable] + cmd[1:]))
    if not dry_run:
        # spawn for NT requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_os2(cmd, search_path=1, verbose=0, dry_run=0):
    # Spawn 'cmd' on OS/2 (EMX); 'verbose' is accepted but unused.
    executable = cmd[0]
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(' '.join([executable] + cmd[1:]))
    if not dry_run:
        # spawnv for OS/2 EMX requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            log.debug("command '%s' failed with exit status %d" % (cmd[0], rc))
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
if sys.platform == 'darwin':
    from distutils import sysconfig
    # Cached MACOSX_DEPLOYMENT_TARGET (string and split-int form) from
    # the interpreter's build configuration; populated lazily by
    # _spawn_posix on first use.
    _cfg_target = None
    _cfg_target_split = None
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
    # Fork and exec 'cmd', waiting for the child to finish.
    # 'verbose' is accepted for backward compatibility but unused.
    log.info(' '.join(cmd))
    if dry_run:
        return
    exec_fn = search_path and os.execvp or os.execv
    exec_args = [cmd[0], cmd]
    if sys.platform == 'darwin':
        global _cfg_target, _cfg_target_split
        if _cfg_target is None:
            _cfg_target = sysconfig.get_config_var(
                  'MACOSX_DEPLOYMENT_TARGET') or ''
            if _cfg_target:
                _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
        if _cfg_target:
            # ensure that the deployment target of build process is not less
            # than that used when the interpreter was built. This ensures
            # extension modules are built with correct compatibility values
            cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
            if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
                my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
                          'now "%s" but "%s" during configure'
                          % (cur_target, _cfg_target))
                raise DistutilsPlatformError(my_msg)
            env = dict(os.environ,
                       MACOSX_DEPLOYMENT_TARGET=cur_target)
            # Switch to the env-passing exec variants so the (possibly
            # adjusted) deployment target reaches the child.
            exec_fn = search_path and os.execvpe or os.execve
            exec_args.append(env)
    pid = os.fork()

    if pid == 0:  # in the child
        try:
            exec_fn(*exec_args)
        except OSError, e:
            sys.stderr.write("unable to execute %s: %s\n" %
                             (cmd[0], e.strerror))
            os._exit(1)
        # exec never returns on success, so reaching here means it failed
        # without raising OSError.
        sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
        os._exit(1)
    else:  # in the parent
        # Loop until the child either exits or is terminated by a signal
        # (ie. keep waiting if it's merely stopped)
        while 1:
            try:
                pid, status = os.waitpid(pid, 0)
            except OSError, exc:
                import errno
                # Retry when the wait was interrupted by a signal.
                if exc.errno == errno.EINTR:
                    continue
                raise DistutilsExecError, \
                      "command '%s' failed: %s" % (cmd[0], exc[-1])
            if os.WIFSIGNALED(status):
                raise DistutilsExecError, \
                      "command '%s' terminated by signal %d" % \
                      (cmd[0], os.WTERMSIG(status))
            elif os.WIFEXITED(status):
                exit_status = os.WEXITSTATUS(status)
                if exit_status == 0:
                    return   # hey, it succeeded!
                else:
                    raise DistutilsExecError, \
                          "command '%s' failed with exit status %d" % \
                          (cmd[0], exit_status)
            elif os.WIFSTOPPED(status):
                continue
            else:
                raise DistutilsExecError, \
                      "unknown error executing '%s': termination status %d" % \
                      (cmd[0], status)
def find_executable(executable, path=None):
    """Tries to find 'executable' in the directories listed in 'path'.

    'path' is a string listing directories separated by 'os.pathsep';
    it defaults to the PATH environment variable (or os.defpath when
    PATH is not set at all).  Returns the complete filename or None if
    not found.
    """
    if path is None:
        # BUGFIX: os.environ['PATH'] raised KeyError when PATH is unset;
        # fall back to the platform default search path instead.
        path = os.environ.get('PATH', os.defpath)
    paths = path.split(os.pathsep)

    # Windows and OS/2 executables carry a .exe suffix; append it when
    # the caller left it off.  (The unused 'base' local is gone.)
    ext = os.path.splitext(executable)[1]
    if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
        executable = executable + '.exe'

    if os.path.isfile(executable):
        # Absolute or relative path that already points at a real file.
        return executable

    for p in paths:
        f = os.path.join(p, executable)
        if os.path.isfile(f):
            # the file exists, we have a shot at spawn working
            return f
    return None
| gpl-3.0 |
Arzie/deluge | deluge/ui/gtkui/path_chooser.py | 2 | 7192 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Bro <bro.development@gmail.com>
#
# This file is part of Deluge and is licensed under GNU General Public License 3.0, or later, with
# the additional special exception to link portions of this program with the OpenSSL library.
# See LICENSE for more details.
#
import logging
import deluge.component as component
from deluge.ui.client import client
from deluge.ui.gtkui.path_combo_chooser import PathChooserComboBox
log = logging.getLogger(__name__)
def singleton(cls):
    """Decorator that makes ``cls`` yield a single shared instance.

    The decorated name becomes a zero-argument factory: the first call
    creates the instance, every later call returns the cached one.
    """
    _instances = {}

    def get_instance():
        try:
            return _instances[cls]
        except KeyError:
            _instances[cls] = cls()
            return _instances[cls]

    return get_instance
@singleton
class PathChoosersHandler(component.Component):
def __init__(self, paths_config_key=None):
# self.chooser_name = "PathChooser_%d" % (len(PathChooser.path_choosers) +1)
component.Component.__init__(self, "PathChoosersHandler")
self.path_choosers = []
self.paths_list_keys = []
self.config_properties = {}
self.started = False
self.config_keys_to_funcs_mapping = {
"path_chooser_show_chooser_button_on_localhost": "filechooser_button_visible",
"path_chooser_show_path_entry": "path_entry_visible",
"path_chooser_auto_complete_enabled": "auto_complete_enabled",
"path_chooser_show_folder_name": "show_folder_name_on_button",
"path_chooser_accelerator_string": "accelerator_string",
"path_chooser_show_hidden_files": "show_hidden_files",
"path_chooser_max_popup_rows": "max_popup_rows",
}
def start(self):
self.started = True
self.update_config_from_core()
def stop(self):
self.started = False
def update_config_from_core(self):
def _on_config_values(config):
self.config_properties.update(config)
for chooser in self.path_choosers:
chooser.set_config(config)
keys = self.config_keys_to_funcs_mapping.keys()
keys += self.paths_list_keys
client.core.get_config_values(keys).addCallback(_on_config_values)
def register_chooser(self, chooser):
chooser.config_key_funcs = {}
for key in self.config_keys_to_funcs_mapping:
chooser.config_key_funcs[key] = [None, None]
chooser.config_key_funcs[key][0] = getattr(chooser, "get_%s" % self.config_keys_to_funcs_mapping[key])
chooser.config_key_funcs[key][1] = getattr(chooser, "set_%s" % self.config_keys_to_funcs_mapping[key])
self.path_choosers.append(chooser)
if chooser.paths_config_key not in self.paths_list_keys:
self.paths_list_keys.append(chooser.paths_config_key)
if self.started:
self.update_config_from_core()
else:
chooser.set_config(self.config_properties)
def set_value_for_path_choosers(self, value, key):
    """Apply *value* for config *key* to every registered chooser and
    persist it in the core config.
    """
    for chooser in self.path_choosers:
        # Index 1 holds the setter function for this key.
        chooser.config_key_funcs[key][1](value)
    # Save to core.
    # BUG FIX: the original used ``key is not "..."`` — an identity
    # comparison with a string literal, which is implementation-defined
    # (and a SyntaxWarning on modern Python). '!=' is the correct test.
    if key != "path_chooser_max_popup_rows":
        client.core.set_config({key: value})
    else:
        # Since the max rows value can be changed fast with a spinbutton, we
        # delay saving to core until the value hasn't been changed in 1 second.
        self.max_rows_value_set = value

        def update(value_):
            # The value hasn't been changed in one second, so save to core.
            if self.max_rows_value_set == value_:
                client.core.set_config({"path_chooser_max_popup_rows": value})
        from twisted.internet import reactor
        reactor.callLater(1, update, value)
def on_list_values_changed(self, values, key, caller):
    """Persist an updated saved-paths list and fan it out.

    Saves the new list under *key* in the core config, then pushes the
    values to every chooser that displays that key's paths.
    """
    # Save to core first.
    client.core.set_config({key: values})
    # Update every chooser bound to this paths key.
    for chooser in self.path_choosers:
        if chooser.paths_config_key != key:
            continue
        chooser.set_values(values)
def get_config_keys(self):
    """Return all config keys this handler manages, as a list.

    Combines the per-widget option keys with the saved-paths list keys.
    """
    # list() guards against dict views (Python 3), where the previous
    # ``keys += list`` pattern would raise TypeError.
    return list(self.config_keys_to_funcs_mapping) + self.paths_list_keys
class PathChooser(PathChooserComboBox):
    """A PathChooserComboBox wired into the shared PathChoosersHandler.

    Every UI toggle emitted by the widget is forwarded to the handler,
    which applies the setting to all registered choosers and saves it in
    the core config.  Path auto-completion is delegated to the core.
    """

    def __init__(self, paths_config_key=None):
        # Core config key holding this chooser's list of saved paths
        # (e.g. download locations); may be None for unsaved choosers.
        self.paths_config_key = paths_config_key
        PathChooserComboBox.__init__(self)
        # NOTE(review): PathChoosersHandler appears to be a shared/
        # singleton handler — confirm its constructor semantics.
        self.chooser_handler = PathChoosersHandler()
        self.chooser_handler.register_chooser(self)
        self.set_auto_completer_func(self.on_completion)
        # Forward every widget signal to the shared handler.
        self.connect("list-values-changed", self.on_list_values_changed_event)
        self.connect("auto-complete-enabled-toggled", self.on_auto_complete_enabled_toggled)
        self.connect("show-filechooser-toggled", self.on_show_filechooser_toggled)
        self.connect("show-folder-name-on-button", self.on_show_folder_on_button_toggled)
        self.connect("show-path-entry-toggled", self.on_show_path_entry_toggled)
        self.connect("accelerator-set", self.on_accelerator_set)
        self.connect("max-rows-changed", self.on_max_rows_changed)
        self.connect("show-hidden-files-toggled", self.on_show_hidden_files_toggled)

    # Each handler below broadcasts the new value for its config key to
    # all registered choosers (and persists it) via the shared handler.

    def on_auto_complete_enabled_toggled(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_auto_complete_enabled")

    def on_show_filechooser_toggled(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_show_chooser_button_on_localhost")

    def on_show_folder_on_button_toggled(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_show_folder_name")

    def on_show_path_entry_toggled(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_show_path_entry")

    def on_accelerator_set(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_accelerator_string")

    def on_show_hidden_files_toggled(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_show_hidden_files")

    def on_max_rows_changed(self, widget, value):
        self.chooser_handler.set_value_for_path_choosers(value, "path_chooser_max_popup_rows")

    def on_list_values_changed_event(self, widget, values):
        self.chooser_handler.on_list_values_changed(values, self.paths_config_key, self)

    def set_config(self, config):
        """Apply a dict of core config values to this widget."""
        self.config = config
        for key in self.config_key_funcs:
            if key in config:
                try:
                    # Index 1 is the setter for this config key.
                    self.config_key_funcs[key][1](config[key])
                except TypeError as ex:
                    log.warn("TypeError: %s", ex)
        # Set the saved paths
        if self.paths_config_key and self.paths_config_key in config:
            self.set_values(config[self.paths_config_key])

    def on_completion(self, args):
        """Auto-completer callback: ask core for matching paths (async)."""
        def on_paths_cb(args):
            self.complete(args)
        d = client.core.get_completion_paths(args)
        d.addCallback(on_paths_cb)
| gpl-3.0 |
ppries/tensorflow | tensorflow/python/util/example_parser_configuration.py | 30 | 4715 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Extract parse_example op configuration to a proto."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.example import example_parser_configuration_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def extract_example_parser_configuration(parse_example_op, sess):
  """Returns an ExampleParserConfig proto.

  Args:
    parse_example_op: A ParseExample `Operation`.
    sess: A tf.Session needed to obtain some configuration values.

  Returns:
    A ExampleParserConfig proto.

  Raises:
    ValueError: If attributes are inconsistent.
  """
  config = example_parser_configuration_pb2.ExampleParserConfiguration()

  num_sparse = parse_example_op.get_attr("Nsparse")
  num_dense = parse_example_op.get_attr("Ndense")
  total_features = num_dense + num_sparse

  sparse_types = parse_example_op.get_attr("sparse_types")
  dense_types = parse_example_op.get_attr("Tdense")
  dense_shapes = parse_example_op.get_attr("dense_shapes")

  if len(sparse_types) != num_sparse:
    raise ValueError("len(sparse_types) attribute does not match "
                     "Nsparse attribute (%d vs %d)" %
                     (len(sparse_types), num_sparse))
  if len(dense_types) != num_dense:
    raise ValueError("len(dense_types) attribute does not match "
                     "Ndense attribute (%d vs %d)" %
                     (len(dense_types), num_dense))
  if len(dense_shapes) != num_dense:
    raise ValueError("len(dense_shapes) attribute does not match "
                     "Ndense attribute (%d vs %d)" %
                     (len(dense_shapes), num_dense))

  # Skip over the serialized input, and the names input.
  fetch_list = parse_example_op.inputs[2:]

  # Fetch total_features key names and num_dense default values.
  # BUG FIX: both messages below were missing the closing parenthesis and
  # a separating space before the value list ("num_dense(%d vs %d").
  if len(fetch_list) != (total_features + num_dense):
    raise ValueError("len(fetch_list) does not match total features + "
                     "num_dense (%d vs %d)" %
                     (len(fetch_list), total_features + num_dense))

  fetched = sess.run(fetch_list)

  if len(fetched) != len(fetch_list):
    raise ValueError("len(fetched) does not match len(fetch_list) "
                     "(%d vs %d)" % (len(fetched), len(fetch_list)))

  # Input-tensor layout of fetch_list: sparse key names, then dense key
  # names, then dense default values.
  sparse_keys_start = 0
  dense_keys_start = sparse_keys_start + num_sparse
  dense_def_start = dense_keys_start + num_dense

  # Output tensor indices: sparse indices, sparse values, sparse shapes,
  # then dense values.
  sparse_indices_start = 0
  sparse_values_start = num_sparse
  sparse_shapes_start = sparse_values_start + num_sparse
  dense_values_start = sparse_shapes_start + num_sparse

  # Dense features.
  for i in range(num_dense):
    key = fetched[dense_keys_start + i]
    feature_config = config.feature_map[key]
    fixed_config = feature_config.fixed_len_feature
    # Convert the default value numpy array fetched from the session run
    # into a TensorProto.
    fixed_config.default_value.CopyFrom(
        tensor_util.make_tensor_proto(fetched[dense_def_start + i]))
    # Convert the shape from the attributes into a TensorShapeProto.
    fixed_config.shape.CopyFrom(
        tensor_shape.TensorShape(dense_shapes[i]).as_proto())
    fixed_config.dtype = dense_types[i]
    # Get the output tensor name.
    fixed_config.values_output_tensor_name = parse_example_op.outputs[
        dense_values_start + i].name

  # Sparse features.
  for i in range(num_sparse):
    key = fetched[sparse_keys_start + i]
    feature_config = config.feature_map[key]
    var_len_feature = feature_config.var_len_feature
    var_len_feature.dtype = sparse_types[i]
    var_len_feature.indices_output_tensor_name = parse_example_op.outputs[
        sparse_indices_start + i].name
    var_len_feature.values_output_tensor_name = parse_example_op.outputs[
        sparse_values_start + i].name
    var_len_feature.shapes_output_tensor_name = parse_example_op.outputs[
        sparse_shapes_start + i].name

  return config
| apache-2.0 |
pabelanger/stackalytics | tests/unit/test_record_processor.py | 2 | 32242 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import testtools
from stackalytics.processor import record_processor
from stackalytics.processor import runtime_storage
from stackalytics.processor import utils
# Release cycles served by the fake runtime storage.  'Zoo' ends far in
# the future so recent test records always map to a known release.
RELEASES = [
    {
        'release_name': 'prehistory',
        'end_date': utils.date_to_timestamp('2011-Apr-21')
    },
    {
        'release_name': 'Diablo',
        'end_date': utils.date_to_timestamp('2011-Sep-08')
    },
    {
        'release_name': 'Zoo',
        'end_date': utils.date_to_timestamp('2035-Sep-08')
    },
]

# Default repo list served by the fake runtime storage.
REPOS = [
    {
        "branches": ["master"],
        "module": "stackalytics",
        "project_type": "stackforge",
        "uri": "git://github.com/stackforge/stackalytics.git"
    }
]
class TestRecordProcessor(testtools.TestCase):
    """Unit tests for stackalytics' RecordProcessor.

    All Launchpad lookups are patched out in setUp; runtime storage is a
    mock built by make_runtime_storage().  NOTE(review): this file
    targets Python 2 (assertRecordsMatch uses dict.iteritems).
    """

    def setUp(self):
        super(TestRecordProcessor, self).setUp()
        # Patch the JSON fetcher and both Launchpad profile lookups so no
        # test touches the network; lookups return None unless a test
        # installs a side effect via make_record_processor().
        self.read_json_from_uri_patch = mock.patch(
            'stackalytics.processor.utils.read_json_from_uri')
        self.read_launchpad = self.read_json_from_uri_patch.start()
        self.lp_profile_by_launchpad_id_patch = mock.patch(
            'stackalytics.processor.launchpad_utils.'
            'lp_profile_by_launchpad_id')
        self.lp_profile_by_launchpad_id = (
            self.lp_profile_by_launchpad_id_patch.start())
        self.lp_profile_by_launchpad_id.return_value = None
        self.lp_profile_by_email_patch = mock.patch(
            'stackalytics.processor.launchpad_utils.lp_profile_by_email')
        self.lp_profile_by_email = (
            self.lp_profile_by_email_patch.start())
        self.lp_profile_by_email.return_value = None

    def tearDown(self):
        super(TestRecordProcessor, self).tearDown()
        self.read_json_from_uri_patch.stop()
        self.lp_profile_by_launchpad_id_patch.stop()
        self.lp_profile_by_email_patch.stop()

    # get_company_by_email

    def test_get_company_by_email_mapped(self):
        record_processor_inst = self.make_record_processor(
            companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}]
        )
        email = 'jdoe@ibm.com'
        res = record_processor_inst._get_company_by_email(email)
        self.assertEquals('IBM', res)

    def test_get_company_by_email_with_long_suffix_mapped(self):
        record_processor_inst = self.make_record_processor(
            companies=[{'company_name': 'NEC', 'domains': ['nec.co.jp']}]
        )
        email = 'man@mxw.nes.nec.co.jp'
        res = record_processor_inst._get_company_by_email(email)
        self.assertEquals('NEC', res)

    def test_get_company_by_email_with_long_suffix_mapped_2(self):
        record_processor_inst = self.make_record_processor(
            companies=[{'company_name': 'NEC',
                        'domains': ['nec.co.jp', 'nec.com']}]
        )
        email = 'man@mxw.nes.nec.com'
        res = record_processor_inst._get_company_by_email(email)
        self.assertEquals('NEC', res)

    def test_get_company_by_email_not_mapped(self):
        record_processor_inst = self.make_record_processor()
        email = 'foo@boo.com'
        res = record_processor_inst._get_company_by_email(email)
        self.assertEquals(None, res)

    # get_lp_info

    def test_get_lp_info_invalid_email(self):
        self.read_launchpad.return_value = None
        record_processor_inst = self.make_record_processor(users=[])
        self.assertEquals((None, None),
                          record_processor_inst._get_lp_info('error.root'))

    # commit processing

    def test_process_commit_existing_user(self):
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': '*independent',
                         'end_date': 1234567890},
                        {'company_name': 'NEC',
                         'end_date': 0},
                    ]
                }
            ])
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@gmail.com',
                             author_name='John Doe')))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_process_commit_existing_user_old_job(self):
        # A commit dated before the NEC affiliation started should be
        # attributed to the earlier (*independent) company.
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': '*independent',
                         'end_date': 1234567890},
                        {'company_name': 'NEC',
                         'end_date': 0},
                    ]
                }
            ])
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@gmail.com',
                             author_name='John Doe',
                             date=1000000000)))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': '*independent',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_process_commit_existing_user_new_email_known_company(self):
        # User is known to LP, his email is new to us, and maps to other
        # company. Should return other company instead of those mentioned
        # in user profile
        record_processor_inst = self.make_record_processor(
            users=[
                {'user_id': 'john_doe',
                 'launchpad_id': 'john_doe',
                 'user_name': 'John Doe',
                 'emails': ['johndoe@nec.co.jp'],
                 'companies': [{'company_name': 'NEC', 'end_date': 0}]}
            ],
            companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}],
            lp_info={'johndoe@ibm.com':
                     {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@ibm.com',
                             author_name='John Doe')))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@ibm.com',
            'author_name': 'John Doe',
            'company_name': 'IBM',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)
        self.assertIn('johndoe@ibm.com', utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe')['emails'])

    def test_process_commit_existing_user_new_email_unknown_company(self):
        # User is known to LP, but his email is new to us. Should match
        # the user and return company from user profile
        record_processor_inst = self.make_record_processor(
            users=[
                {'user_id': 'john_doe',
                 'launchpad_id': 'john_doe',
                 'user_name': 'John Doe',
                 'emails': ['johndoe@nec.co.jp'],
                 'companies': [{'company_name': 'NEC', 'end_date': 0}]}
            ],
            companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}],
            lp_info={'johndoe@gmail.com':
                     {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@gmail.com',
                             author_name='John Doe')))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)
        self.assertIn('johndoe@gmail.com', utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe')['emails'])

    def test_process_commit_existing_user_new_email_known_company_update(self):
        record_processor_inst = self.make_record_processor(
            users=[
                {'user_id': 'john_doe',
                 'launchpad_id': 'john_doe',
                 'user_name': 'John Doe',
                 'emails': ['johndoe@gmail.com'],
                 'companies': [{'company_name': '*independent',
                                'end_date': 0}]}
            ],
            companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}],
            lp_info={'johndoe@ibm.com':
                     {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@ibm.com',
                             author_name='John Doe')))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@ibm.com',
            'author_name': 'John Doe',
            'company_name': 'IBM',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)
        user = utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe')
        self.assertIn('johndoe@gmail.com', user['emails'])
        # 'message' (not 'msg') is the correct keyword for
        # testtools.TestCase.assertEqual.
        self.assertEquals('IBM', user['companies'][0]['company_name'],
                          message='User affiliation should be updated')

    def test_process_commit_new_user(self):
        # User is known to LP, but new to us
        # Should add new user and set company depending on email
        record_processor_inst = self.make_record_processor(
            companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}],
            lp_info={'johndoe@ibm.com':
                     {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@ibm.com',
                             author_name='John Doe')))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@ibm.com',
            'author_name': 'John Doe',
            'company_name': 'IBM',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)
        user = utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe')
        self.assertIn('johndoe@ibm.com', user['emails'])
        self.assertEquals('IBM', user['companies'][0]['company_name'])

    def test_process_commit_new_user_unknown_to_lb(self):
        # User is new to us and not known to LP
        # Should set user name and empty LPid
        record_processor_inst = self.make_record_processor(
            companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}])
        processed_commit = list(record_processor_inst.process(
            generate_commits(author_email='johndoe@ibm.com',
                             author_name='John Doe')))[0]
        expected_commit = {
            'launchpad_id': None,
            'author_email': 'johndoe@ibm.com',
            'author_name': 'John Doe',
            'company_name': 'IBM',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)
        user = utils.load_user(
            record_processor_inst.runtime_storage_inst, 'johndoe@ibm.com')
        self.assertIn('johndoe@ibm.com', user['emails'])
        self.assertEquals('IBM', user['companies'][0]['company_name'])
        self.assertEquals(None, user['launchpad_id'])

    # process records complex scenarios

    def test_process_blueprint_one_draft_spawned_lp_doesnt_know_user(self):
        # In: blueprint record
        #     LP doesn't know user
        # Out: blueprint-draft record
        #      new user profile created
        record_processor_inst = self.make_record_processor()
        processed_records = list(record_processor_inst.process([
            {'record_type': 'bp',
             'id': 'mod:blueprint',
             'self_link': 'http://launchpad.net/blueprint',
             'owner': 'john_doe',
             'date_created': 1234567890}
        ]))
        self.assertRecordsMatch(
            {'record_type': 'bpd',
             'launchpad_id': 'john_doe',
             'author_name': 'john_doe',
             'company_name': '*independent'},
            processed_records[0])
        user = utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe')
        self.assertEquals({
            'user_id': 'john_doe',
            'launchpad_id': 'john_doe',
            'user_name': 'john_doe',
            'emails': [],
            'companies': [{'company_name': '*independent', 'end_date': 0}]
        }, user)

    def test_process_blueprint_one_draft_spawned_lp_knows_user(self):
        # In: blueprint record
        #     LP knows user
        # Out: blueprint-draft record
        #      new user profile created, name is taken from LP profile
        record_processor_inst = self.make_record_processor(
            lp_user_name={
                'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_records = list(record_processor_inst.process([
            {'record_type': 'bp',
             'id': 'mod:blueprint',
             'self_link': 'http://launchpad.net/blueprint',
             'owner': 'john_doe',
             'date_created': 1234567890}
        ]))
        self.assertRecordsMatch(
            {'record_type': 'bpd',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'company_name': '*independent'},
            processed_records[0])
        user = utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe')
        self.assertEquals({
            'user_id': 'john_doe',
            'launchpad_id': 'john_doe',
            'user_name': 'John Doe',
            'emails': [],
            'companies': [{'company_name': '*independent', 'end_date': 0}]
        }, user)

    def test_process_blueprint_then_review(self):
        record_processor_inst = self.make_record_processor(
            lp_user_name={
                'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_records = list(record_processor_inst.process([
            {'record_type': 'bp',
             'id': 'mod:blueprint',
             'self_link': 'http://launchpad.net/blueprint',
             'owner': 'john_doe',
             'date_created': 1234567890},
            {'record_type': 'review',
             'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e',
             'subject': 'Fix AttributeError in Keypair._add_details()',
             'owner': {'name': 'John Doe',
                       'email': 'john_doe@gmail.com',
                       'username': 'john_doe'},
             'createdOn': 1379404951,
             'module': 'nova'}
        ]))
        self.assertRecordsMatch(
            {'record_type': 'bpd',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'company_name': '*independent'},
            processed_records[0])
        self.assertRecordsMatch(
            {'record_type': 'review',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'author_email': 'john_doe@gmail.com',
             'company_name': '*independent'},
            processed_records[1])
        # The same profile must be reachable by user id and by email.
        user = {'user_id': 'john_doe',
                'launchpad_id': 'john_doe',
                'user_name': 'John Doe',
                'emails': ['john_doe@gmail.com'],
                'companies': [{'company_name': '*independent', 'end_date': 0}]}
        self.assertEquals(user, utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe'))
        self.assertEquals(user, utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe@gmail.com'))

    def test_process_blueprint_then_commit(self):
        record_processor_inst = self.make_record_processor(
            lp_user_name={
                'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}},
            lp_info={'john_doe@gmail.com':
                     {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_records = list(record_processor_inst.process([
            {'record_type': 'bp',
             'id': 'mod:blueprint',
             'self_link': 'http://launchpad.net/blueprint',
             'owner': 'john_doe',
             'date_created': 1234567890},
            {'record_type': 'commit',
             'commit_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1',
             'author_name': 'John Doe',
             'author_email': 'john_doe@gmail.com',
             'date': 1234567890,
             'lines_added': 25,
             'lines_deleted': 9,
             'release_name': 'havana'}
        ]))
        self.assertRecordsMatch(
            {'record_type': 'bpd',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'company_name': '*independent'},
            processed_records[0])
        self.assertRecordsMatch(
            {'record_type': 'commit',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'author_email': 'john_doe@gmail.com',
             'company_name': '*independent'},
            processed_records[1])
        user = {'user_id': 'john_doe',
                'launchpad_id': 'john_doe',
                'user_name': 'John Doe',
                'emails': ['john_doe@gmail.com'],
                'companies': [{'company_name': '*independent', 'end_date': 0}]}
        self.assertEquals(user, utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe'))
        self.assertEquals(user, utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe@gmail.com'))

    def test_process_review_then_blueprint(self):
        record_processor_inst = self.make_record_processor(
            lp_user_name={
                'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}})
        processed_records = list(record_processor_inst.process([
            {'record_type': 'review',
             'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e',
             'subject': 'Fix AttributeError in Keypair._add_details()',
             'owner': {'name': 'John Doe',
                       'email': 'john_doe@gmail.com',
                       'username': 'john_doe'},
             'createdOn': 1379404951,
             'module': 'nova'},
            {'record_type': 'bp',
             'id': 'mod:blueprint',
             'self_link': 'http://launchpad.net/blueprint',
             'owner': 'john_doe',
             'date_created': 1234567890}
        ]))
        self.assertRecordsMatch(
            {'record_type': 'review',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'author_email': 'john_doe@gmail.com',
             'company_name': '*independent'},
            processed_records[0])
        self.assertRecordsMatch(
            {'record_type': 'bpd',
             'launchpad_id': 'john_doe',
             'author_name': 'John Doe',
             'company_name': '*independent'},
            processed_records[1])
        user = {'user_id': 'john_doe',
                'launchpad_id': 'john_doe',
                'user_name': 'John Doe',
                'emails': ['john_doe@gmail.com'],
                'companies': [{'company_name': '*independent', 'end_date': 0}]}
        self.assertEquals(user, utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe'))
        self.assertEquals(user, utils.load_user(
            record_processor_inst.runtime_storage_inst, 'john_doe@gmail.com'))

    # update records

    def _generate_record_commit(self):
        # A fully-populated commit record as it would look after processing.
        yield {'commit_id': u'0afdc64bfd041b03943ceda7849c4443940b6053',
               'lines_added': 9,
               'module': u'stackalytics',
               'record_type': 'commit',
               'message': u'Closes bug 1212953\n\nChange-Id: '
                          u'I33f0f37b6460dc494abf2520dc109c9893ace9e6\n',
               'subject': u'Fixed affiliation of Edgar and Sumit',
               'loc': 10,
               'user_id': u'john_doe',
               'primary_key': u'0afdc64bfd041b03943ceda7849c4443940b6053',
               'author_email': u'jdoe@super.no',
               'company_name': u'SuperCompany',
               'record_id': 6,
               'lines_deleted': 1,
               'week': 2275,
               'blueprint_id': None,
               'bug_id': u'1212953',
               'files_changed': 1,
               'author_name': u'John Doe',
               'date': 1376737923,
               'launchpad_id': u'john_doe',
               'branches': set([u'master']),
               'change_id': u'I33f0f37b6460dc494abf2520dc109c9893ace9e6',
               'release': u'havana'}

    def test_update_record_no_changes(self):
        commit_generator = self._generate_record_commit()
        release_index = {'0afdc64bfd041b03943ceda7849c4443940b6053': 'havana'}
        record_processor_inst = self.make_record_processor(
            users=[],
            companies=[{'company_name': 'SuperCompany',
                        'domains': ['super.no']}])
        updated = list(record_processor_inst.update(commit_generator,
                                                    release_index))
        self.assertEquals(0, len(updated))

    # mail processing

    def test_process_mail(self):
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': 'NEC', 'end_date': 0},
                    ]
                }
            ],
            repos=[{"module": "stackalytics"}]
        )
        processed_commit = list(record_processor_inst.process(
            generate_emails(
                author_email='johndoe@gmail.com',
                author_name='John Doe',
                subject='[openstack-dev] [Stackalytics] Configuration files')
        ))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
            'module': 'stackalytics',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_process_mail_guessed(self):
        # The module is guessed from bracketed tags in the subject.
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': 'NEC', 'end_date': 0},
                    ]
                }
            ],
            repos=[{'module': 'nova'}, {'module': 'neutron'}]
        )
        processed_commit = list(record_processor_inst.process(
            generate_emails(
                author_email='johndoe@gmail.com',
                author_name='John Doe',
                subject='[openstack-dev] [Neutron] [Nova] Integration issue')
        ))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
            'module': 'neutron',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_process_mail_guessed_module_in_body_override(self):
        # A module tag in the subject overrides the module in the record.
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': 'NEC', 'end_date': 0},
                    ]
                }
            ],
            repos=[{'module': 'nova'}, {'module': 'neutron'}]
        )
        processed_commit = list(record_processor_inst.process(
            generate_emails(
                author_email='johndoe@gmail.com',
                author_name='John Doe',
                module='nova',
                subject='[openstack-dev] [neutron] Comments/questions on the')
        ))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
            'module': 'neutron',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_process_mail_guessed_module_in_body(self):
        # No tag in the subject: the module supplied with the record wins.
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': 'NEC', 'end_date': 0},
                    ]
                }
            ],
            repos=[{'module': 'nova'}, {'module': 'neutron'}]
        )
        processed_commit = list(record_processor_inst.process(
            generate_emails(
                author_email='johndoe@gmail.com',
                author_name='John Doe',
                module='nova',
                subject='[openstack-dev] Comments/questions on the')
        ))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
            'module': 'nova',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_process_mail_unmatched(self):
        # No tag and no module supplied: module falls back to 'unknown'.
        record_processor_inst = self.make_record_processor(
            users=[
                {
                    'user_id': 'john_doe',
                    'launchpad_id': 'john_doe',
                    'user_name': 'John Doe',
                    'emails': ['johndoe@gmail.com', 'johndoe@nec.co.jp'],
                    'companies': [
                        {'company_name': 'NEC', 'end_date': 0},
                    ]
                }
            ],
            repos=[{'module': 'nova'}, {'module': 'neutron'}]
        )
        processed_commit = list(record_processor_inst.process(
            generate_emails(
                author_email='johndoe@gmail.com',
                author_name='John Doe',
                subject='[openstack-dev] Comments/questions on the')
        ))[0]
        expected_commit = {
            'launchpad_id': 'john_doe',
            'author_email': 'johndoe@gmail.com',
            'author_name': 'John Doe',
            'company_name': 'NEC',
            'module': 'unknown',
        }
        self.assertRecordsMatch(expected_commit, processed_commit)

    def test_get_modules(self):
        record_processor_inst = self.make_record_processor()
        with mock.patch('stackalytics.processor.utils.load_repos') as patch:
            patch.return_value = [{'module': 'nova'},
                                  {'module': 'python-novaclient'},
                                  {'module': 'neutron'}]
            modules = record_processor_inst._get_modules()
            # NOTE(review): 'python-novaclient' is expected to be filtered
            # out by _get_modules — confirmed only by this assertion.
            self.assertEqual(set(['nova', 'neutron']), set(modules))

    def assertRecordsMatch(self, expected, actual):
        # Partial match: only the keys present in 'expected' are checked.
        # iteritems is Python-2-only, consistent with the rest of the file.
        for key, value in expected.iteritems():
            self.assertEquals(value, actual[key],
                              'Values for key %s do not match' % key)

    # Helpers

    def make_record_processor(self, users=None, companies=None, releases=None,
                              repos=None, lp_info=None, lp_user_name=None):
        # Builds a RecordProcessor over a mocked runtime storage; optional
        # lp_info / lp_user_name dicts drive the patched Launchpad lookups.
        rp = record_processor.RecordProcessor(make_runtime_storage(
            users=users, companies=companies, releases=releases, repos=repos))
        if lp_info is not None:
            self.lp_profile_by_email.side_effect = (
                lambda x: lp_info.get(x))
        if lp_user_name is not None:
            self.lp_profile_by_launchpad_id.side_effect = (
                lambda x: lp_user_name.get(x))
        return rp
def generate_commits(author_name='John Doe', author_email='johndoe@gmail.com',
                     date=1999999999):
    """Yield a single synthetic commit record for feeding the processor."""
    yield dict(record_type='commit',
               commit_id='de7e8f297c193fb310f22815334a54b9c76a0be1',
               author_name=author_name,
               author_email=author_email,
               date=date,
               lines_added=25,
               lines_deleted=9,
               release_name='havana')
def generate_emails(author_name='John Doe', author_email='johndoe@gmail.com',
                    date=1999999999, subject='[openstack-dev]', module=None):
    """Yield a single synthetic email record for feeding the processor."""
    yield dict(record_type='email',
               message_id='de7e8f297c193fb310f22815334a54b9c76a0be1',
               author_name=author_name,
               author_email=author_email,
               date=date,
               subject=subject,
               module=module,
               body='lorem ipsum')
def make_runtime_storage(users=None, companies=None, releases=None,
                         repos=None):
    """Build a mocked RuntimeStorage backed by an in-memory dict.

    Known keys ('companies', 'users', 'releases', 'repos') are served
    from the given arguments (or module defaults); anything else goes
    through the plain dict cache.  User profiles are pre-seeded under
    'user:<id>', 'user:<launchpad_id>' and 'user:<email>' keys.
    """
    cache = {}

    def get_by_key(key):
        if key == 'companies':
            return _make_companies(companies or [
                {"company_name": "*independent", "domains": [""]},
            ])
        if key == 'users':
            return _make_users(users or [])
        if key == 'releases':
            return releases or RELEASES
        if key == 'repos':
            return repos or REPOS
        return cache.get(key)

    def set_by_key(key, value):
        cache[key] = value

    storage = mock.Mock(runtime_storage.RuntimeStorage)
    storage.get_by_key = mock.Mock(side_effect=get_by_key)
    storage.set_by_key = mock.Mock(side_effect=set_by_key)

    for user in (users or []):
        set_by_key('user:%s' % user['user_id'], user)
        if user.get('launchpad_id'):
            set_by_key('user:%s' % user['launchpad_id'], user)
        for email in user.get('emails') or []:
            set_by_key('user:%s' % email, user)
    return storage
def _make_users(users):
users_index = {}
for user in users:
if 'user_id' in user:
users_index[user['user_id']] = user
if 'launchpad_id' in user:
users_index[user['launchpad_id']] = user
for email in user['emails']:
users_index[email] = user
return users_index
def _make_companies(companies):
domains_index = {}
for company in companies:
for domain in company['domains']:
domains_index[domain] = company['company_name']
return domains_index
| apache-2.0 |
TimYi/django | django/contrib/gis/maps/google/gmap.py | 526 | 9223 | from __future__ import unicode_literals
from django.conf import settings
from django.contrib.gis.maps.google.overlays import (
GMarker, GPolygon, GPolyline,
)
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
class GoogleMapException(Exception):
    """Raised for Google Maps configuration errors (e.g. a missing API key)."""
    pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
# '%s' is filled with the API version; the API key is appended by callers.
GOOGLE_MAPS_URL = 'http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
    "A class for generating Google Maps JavaScript."

    # String constants
    onunload = mark_safe('onunload="GUnload()"')  # Cleans up after Google Maps
    vml_css = mark_safe('v\:* {behavior:url(#default#VML);}')  # CSS for IE VML
    xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"')  # XML Namespace (for IE VML).

    def __init__(self, key=None, api_url=None, version=None,
                 center=None, zoom=None, dom_id='map',
                 kml_urls=None, polylines=None, polygons=None, markers=None,
                 template='gis/google/google-map.js',
                 js_module='geodjango',
                 extra_context=None):
        # NOTE: `kml_urls` and `extra_context` formerly used mutable default
        # arguments ([] and {}), which are shared between every instance
        # constructed with the defaults; `None` sentinels give each instance
        # its own fresh container while remaining backward-compatible.

        # The Google Maps API Key defined in the settings will be used
        # if not passed in as a parameter. The use of an API key is
        # _required_.
        if not key:
            try:
                self.key = settings.GOOGLE_MAPS_API_KEY
            except AttributeError:
                raise GoogleMapException(
                    'Google Maps API Key not found (try adding '
                    'GOOGLE_MAPS_API_KEY to your settings).'
                )
        else:
            self.key = key

        # Getting the Google Maps API version, defaults to using the latest ("2.x"),
        # this is not necessarily the most stable.
        if not version:
            self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
        else:
            self.version = version

        # Can specify the API URL in the `api_url` keyword.
        if not api_url:
            self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
        else:
            self.api_url = api_url

        # Setting the DOM id of the map, the load function, the JavaScript
        # template, and the KML URLs array.
        self.dom_id = dom_id
        self.extra_context = {} if extra_context is None else extra_context
        self.js_module = js_module
        self.template = template
        self.kml_urls = [] if kml_urls is None else kml_urls

        # Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
        # Raw values (e.g. geometries) are wrapped in the matching overlay
        # class; already-constructed overlays are used as-is.
        overlay_info = [[GMarker, markers, 'markers'],
                        [GPolygon, polygons, 'polygons'],
                        [GPolyline, polylines, 'polylines']]

        for overlay_class, overlay_list, varname in overlay_info:
            setattr(self, varname, [])
            if overlay_list:
                for overlay in overlay_list:
                    if isinstance(overlay, overlay_class):
                        getattr(self, varname).append(overlay)
                    else:
                        getattr(self, varname).append(overlay_class(overlay))

        # If GMarker, GPolygons, and/or GPolylines are used the zoom will be
        # automatically calculated via the Google Maps API. If both a zoom
        # level and a center coordinate are provided with polygons/polylines,
        # no automatic determination will occur.
        self.calc_zoom = False
        if self.polygons or self.polylines or self.markers:
            if center is None or zoom is None:
                self.calc_zoom = True

        # Defaults for the zoom level and center coordinates if the zoom
        # is not automatically calculated.
        if zoom is None:
            zoom = 4
        self.zoom = zoom
        if center is None:
            center = (0, 0)
        self.center = center

    def render(self):
        """
        Generates the JavaScript necessary for displaying this Google Map.
        """
        params = {'calc_zoom': self.calc_zoom,
                  'center': self.center,
                  'dom_id': self.dom_id,
                  'js_module': self.js_module,
                  'kml_urls': self.kml_urls,
                  'zoom': self.zoom,
                  'polygons': self.polygons,
                  'polylines': self.polylines,
                  'icons': self.icons,
                  'markers': self.markers,
                  }
        # Caller-supplied context wins over the computed parameters.
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def body(self):
        "Returns HTML body tag for loading and unloading Google Maps javascript."
        return format_html('<body {} {}>', self.onload, self.onunload)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        return format_html('onload="{}.{}_load()"', self.js_module, self.dom_id)

    @property
    def api_script(self):
        "Returns the <script> tag for the Google Maps API javascript."
        return format_html('<script src="{}{}" type="text/javascript"></script>',
                           self.api_url, self.key)

    @property
    def js(self):
        "Returns only the generated Google Maps JavaScript (no <script> tags)."
        return self.render()

    @property
    def scripts(self):
        "Returns all <script></script> tags required with Google Maps JavaScript."
        return format_html('{}\n  <script type="text/javascript">\n//<![CDATA[\n{}//]]>\n  </script>',
                           self.api_script, mark_safe(self.js))

    @property
    def style(self):
        "Returns additional CSS styling needed for Google Maps on IE."
        return format_html('<style type="text/css">{}</style>', self.vml_css)

    @property
    def xhtml(self):
        "Returns XHTML information needed for IE VML overlays."
        return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {}>', self.xmlns)

    @property
    def icons(self):
        "Returns a sequence of GIcon objects in this map."
        return set(marker.icon for marker in self.markers if marker.icon)
class GoogleMapSet(GoogleMap):

    def __init__(self, *args, **kwargs):
        """
        A class for generating sets of Google Maps that will be shown on the
        same page together.

        Example:
            gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
            gmapset = GoogleMapSet( [ gmap1, gmap2] )
        """
        # Use `google-multi.js` by default instead of `google-single.js`,
        # unless the caller supplied an explicit template.
        template = kwargs.pop('template', 'gis/google/google-multi.js')

        # Template used to generate the GMap load JavaScript for each
        # individual map in the set.
        self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')

        # Initialize via GoogleMap.__init__, then restore the multi-map
        # template obtained above (the parent sets its own default).
        super(GoogleMapSet, self).__init__(**kwargs)
        self.template = template

        # A tuple/list as the first positional argument is taken to be the
        # sequence of maps; otherwise the positional arguments themselves
        # are the maps.
        first = args[0]
        self.maps = first if isinstance(first, (tuple, list)) else args

        # One generated DOM id per map in the set.
        self.dom_ids = ['map%d' % i for i in range(len(self.maps))]

    def load_map_js(self):
        """
        Returns JavaScript containing all of the loading routines for each
        map in this set.
        """
        pieces = []
        for dom_id, gmap in zip(self.dom_ids, self.maps):
            # Temporarily point each map at the per-map template and its
            # generated DOM id so only the loading JavaScript (and not the
            # header variables) is rendered; restore the originals after.
            saved_template, saved_dom_id = gmap.template, gmap.dom_id
            gmap.template = self.map_template
            gmap.dom_id = dom_id
            pieces.append(gmap.js)
            gmap.template, gmap.dom_id = saved_template, saved_dom_id
        return mark_safe(''.join(pieces))

    def render(self):
        """
        Generates the JavaScript for the collection of Google Maps in
        this set.
        """
        params = dict(
            js_module=self.js_module,
            dom_ids=self.dom_ids,
            load_map_js=self.load_map_js(),
            icons=self.icons,
        )
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        # Uses the `load` function defined in `google-multi.js`, which
        # invokes the load routine of each individual map in the set.
        return mark_safe('onload="%s.load()"' % self.js_module)

    @property
    def icons(self):
        "Returns a sequence of all icons in each map of the set."
        collected = set()
        for gmap in self.maps:
            collected |= gmap.icons
        return collected
| bsd-3-clause |
KarlTDebiec/md_format_converter | AmberTrajOutput.py | 2 | 2950 | # -*- coding: utf-8 -*-
# md_format_converter.AmberTrajOutput.py
#
# Copyright (C) 2012-2016 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Manages addition of Amber output information to segments.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
from .TrajOutput import TrajOutput
################################### CLASSES ###################################
class AmberTrajOutput(TrajOutput):
    """
    Manages addition of Amber output information to segments.
    """

    def receive_segment(self, **kwargs):
        """
        Receives a trajectory segment and sends to each target.

        Arguments:
          kwargs (dict): Additional keyword arguments
        """
        import os

        while True:
            segment = yield
            crd_path = "{0}/{1:04d}/{1:04d}{2}.crd".format(self.outpath,
              int(segment.number), self.suffix)
            # Schedule crd output only when the file is absent or
            # regeneration is forced.
            if self.force or not os.path.isfile(crd_path):
                segment.outputs.append(
                  dict(
                    filename  = crd_path,
                    format    = "crdbox",
                    selection = self.selection))
            for target in self.targets:
                target.send(segment)

    @staticmethod
    def add_subparser(level1_subparser, level2_subparsers, level3_classes):
        """
        Adds subparser for this input format to nascent parser.

        Arguments:
          level1_subparser (Subparser): Level 1 subparser to which level
            2 subparser will be added
          level2_subparsers (Subparsers): Nascent collection of level 2
            subparsers to which level 2 subparser will be added
          level3_classes (list): Classes for which level 3 subparsers
            will be added

        Returns:
          (*Subparser*, *Subparsers*): New level 2 subparser and
          associated collection of level 3 subparsers
        """
        amber_parser = level2_subparsers.add_parser(
          name  = "amber",
          usage = "convert.py {0} amber".format(level1_subparser.name),
          help  = "Amber crd text output")
        setattr(amber_parser, "name", "amber")
        converter_subparsers = amber_parser.add_subparsers(
          title = "Converter")
        for converter_class in level3_classes:
            converter_parser = converter_class.add_subparser(
              level1_subparser, amber_parser, converter_subparsers)
            arg_groups = {group.title: group
              for group in converter_parser._action_groups}
            AmberTrajOutput.add_shared_args(converter_parser)
            converter_parser.set_defaults(output_coroutine=AmberTrajOutput)
        return amber_parser, converter_subparsers
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.