repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
lipis/hurry-app | main/lib/werkzeug/wrappers.py | 298 | 76131 | # -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the response object will be
decoded into an unicode object if possible and if it makes sense.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri, url_join
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, \
ClosingIterator, get_input_stream, get_content_length
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange, iter_multi_items
from werkzeug._internal import _get_environ
from werkzeug._compat import to_bytes, string_types, text_type, \
integer_types, wsgi_decoding_dance, wsgi_get_bytes, \
to_unicode, to_native, BytesIO
def _run_wsgi_app(*args):
    """This function replaces itself to ensure that the test module is not
    imported unless required.  DO NOT USE!
    """
    # Lazy-import trick: the first call rebinds the module-level name to
    # the real implementation from werkzeug.test, so every subsequent
    # call goes straight there without re-importing.
    global _run_wsgi_app
    from werkzeug.test import run_wsgi_app as _run_wsgi_app
    return _run_wsgi_app(*args)
def _warn_if_string(iterable):
    """Warn when a response iterable is actually a plain string.

    Iterating a string hands it to the WSGI server one character at a
    time, which is almost never what the caller intended.
    """
    if not isinstance(iterable, string_types):
        return
    from warnings import warn
    warn(Warning('response iterable was set to a string. This appears '
                 'to work but means that the server will send the '
                 'data to the client char, by char. This is almost '
                 'never intended behavior, use response.data to assign '
                 'strings to the response object.'), stacklevel=2)
def _assert_not_shallow(request):
if request.shallow:
raise RuntimeError('A shallow request tried to consume '
'form data. If you really want to do '
'that, set `shallow` to False.')
def _iter_encoded(iterable, charset):
    """Yield every item of *iterable* as bytes.

    Text items are encoded with *charset*; byte items pass through
    unchanged.
    """
    for chunk in iterable:
        is_text = isinstance(chunk, text_type)
        yield chunk.encode(charset) if is_text else chunk
class BaseRequest(object):
    """Very basic request object. This does not implement advanced stuff like
    entity tag parsing or cache controls. The request object is created with
    the WSGI environment as first argument and will add itself to the WSGI
    environment as ``'werkzeug.request'`` unless it's created with
    `populate_request` set to False.

    There are a couple of mixins available that add additional functionality
    to the request object, there is also a class called `Request` which
    subclasses `BaseRequest` and all the important mixins.

    It's a good idea to create a custom subclass of the :class:`BaseRequest`
    and add missing functionality either via mixins or direct implementation.
    Here an example for such subclasses::

        from werkzeug.wrappers import BaseRequest, ETagRequestMixin

        class Request(BaseRequest, ETagRequestMixin):
            pass

    Request objects are **read only**. As of 0.5 modifications are not
    allowed in any place. Unlike the lower level parsing functions the
    request object will use immutable objects everywhere possible.

    Per default the request object will assume all the text data is `utf-8`
    encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
    details about customizing the behavior.

    Per default the request object will be added to the WSGI
    environment as `werkzeug.request` to support the debugging system.
    If you don't want that, set `populate_request` to `False`.

    If `shallow` is `True` the environment is initialized as shallow
    object around the environ. Every operation that would modify the
    environ in any way (such as consuming form data) raises an exception
    unless the `shallow` attribute is explicitly set to `False`. This
    is useful for middlewares where you don't want to consume the form
    data by accident. A shallow request is not populated to the WSGI
    environment.

    .. versionchanged:: 0.5
       read-only mode was enforced by using immutables classes for all
       data.
    """

    #: the charset for the request, defaults to utf-8
    charset = 'utf-8'

    #: the error handling procedure for errors, defaults to 'replace'
    encoding_errors = 'replace'

    #: the maximum content length. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: parsing fails because more than the specified value is transmitted
    #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :ref:`dealing-with-request-data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_content_length = None

    #: the maximum form field size. This is forwarded to the form data
    #: parsing function (:func:`parse_form_data`). When set and the
    #: :attr:`form` or :attr:`files` attribute is accessed and the
    #: data in memory for post data is longer than the specified value a
    #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
    #:
    #: Have a look at :ref:`dealing-with-request-data` for more details.
    #:
    #: .. versionadded:: 0.5
    max_form_memory_size = None

    #: the class to use for `args` and `form`. The default is an
    #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
    #: multiple values per key. alternatively it makes sense to use an
    #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
    #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
    #: which is the fastest but only remembers the last key. It is also
    #: possible to use mutable structures, but this is not recommended.
    #:
    #: .. versionadded:: 0.6
    parameter_storage_class = ImmutableMultiDict

    #: the type to be used for list values from the incoming WSGI environment.
    #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
    #: (for example for :attr:`access_list`).
    #:
    #: .. versionadded:: 0.6
    list_storage_class = ImmutableList

    #: the type to be used for dict values from the incoming WSGI environment.
    #: By default an
    #: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
    #: (for example for :attr:`cookies`).
    #:
    #: .. versionadded:: 0.6
    dict_storage_class = ImmutableTypeConversionDict

    #: The form data parser that should be used. Can be replaced to customize
    #: the form data parsing.
    form_data_parser_class = FormDataParser

    #: Optionally a list of hosts that is trusted by this request. By default
    #: all hosts are trusted which means that whatever the client sends the
    #: host is will be accepted. This is the recommended setup as a webserver
    #: should manually be set up to not route invalid hosts to the application.
    #:
    #: .. versionadded:: 0.9
    trusted_hosts = None

    #: Indicates whether the data descriptor should be allowed to read and
    #: buffer up the input stream. By default it's enabled.
    #:
    #: .. versionadded:: 0.9
    disable_data_descriptor = False
def __init__(self, environ, populate_request=True, shallow=False):
self.environ = environ
if populate_request and not shallow:
self.environ['werkzeug.request'] = self
self.shallow = shallow
def __repr__(self):
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append("'%s'" % self.url)
args.append('[%s]' % self.method)
except Exception:
args.append('(invalid WSGI environ)')
return '<%s %s>' % (
self.__class__.__name__,
' '.join(args)
)
    @property
    def url_charset(self):
        """The charset that is assumed for URLs. Defaults to the value
        of :attr:`charset`.

        .. versionadded:: 0.6
        """
        # Subclasses may override this to decode query strings with a
        # different charset than the request body charset.
        return self.charset
@classmethod
def from_values(cls, *args, **kwargs):
"""Create a new request object based on the values provided. If
environ is given missing values are filled from there. This method is
useful for small scripts when you need to simulate a request from an URL.
Do not use this method for unittesting, there is a full featured client
object (:class:`Client`) that allows to create multipart requests,
support for cookies etc.
This accepts the same options as the
:class:`~werkzeug.test.EnvironBuilder`.
.. versionchanged:: 0.5
This method now accepts the same arguments as
:class:`~werkzeug.test.EnvironBuilder`. Because of this the
`environ` parameter is now called `environ_overrides`.
:return: request object
"""
from werkzeug.test import EnvironBuilder
charset = kwargs.pop('charset', cls.charset)
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_request(cls)
finally:
builder.close()
    @classmethod
    def application(cls, f):
        """Decorate a function as responder that accepts the request as first
        argument. This works like the :func:`responder` decorator but the
        function is passed the request object as first argument and the
        request object will be closed automatically::

            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')

        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request. The return value is then called with the latest
        #: two arguments. This makes it possible to use this decorator for
        #: both methods and standalone WSGI functions.
        def application(*args):
            # The last two positional arguments are always
            # (environ, start_response); anything before them (e.g. a
            # bound method's ``self``) is forwarded to ``f`` untouched.
            request = cls(args[-2])
            with request:
                # ``f(..., request)`` returns a response object, which is
                # itself a WSGI application invoked with
                # (environ, start_response).
                return f(*args[:-2] + (request,))(*args[-2:])
        return update_wrapper(application, f)
    def _get_file_stream(self, total_content_length, content_type, filename=None,
                         content_length=None):
        """Called to get a stream for the file upload.

        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.

        The default implementation returns a temporary file if the total
        content length is higher than 500KB. Because many browsers do not
        provide a content length for the files only the total content
        length matters.

        :param total_content_length: the total content length of all the
                                     data in the request combined. This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file. May be `None`.
        :param content_length: the length of this file. This value is usually
                               not provided because webbrowsers do not provide
                               this value.
        """
        # Forwarded positionally -- the argument order must match
        # default_stream_factory's signature; verify against
        # werkzeug.formparser if changing.
        return default_stream_factory(total_content_length, content_type,
                                      filename, content_length)

    @property
    def want_form_data_parsed(self):
        """Returns True if the request method carries content. As of
        Werkzeug 0.9 this will be the case if a content type is transmitted.

        .. versionadded:: 0.8
        """
        # A missing or empty Content-Type header means there is no body
        # worth handing to the form parser.
        return bool(self.environ.get('CONTENT_TYPE'))

    def make_form_data_parser(self):
        """Creates the form data parser. Instantiates the
        :attr:`form_data_parser_class` with some parameters.

        .. versionadded:: 0.8
        """
        # All limits and storage classes come from class attributes so
        # subclasses can tune parsing declaratively.
        return self.form_data_parser_class(self._get_file_stream,
                                           self.charset,
                                           self.encoding_errors,
                                           self.max_form_memory_size,
                                           self.max_content_length,
                                           self.parameter_storage_class)
    def _load_form_data(self):
        """Method used internally to retrieve submitted data. After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data. As a matter of fact the input
        stream will be empty afterwards. You can also call this method to
        force the parsing of the form data.

        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if 'form' in self.__dict__:
            return
        # Shallow requests must not consume the body.
        _assert_not_shallow(self)
        if self.want_form_data_parsed:
            content_type = self.environ.get('CONTENT_TYPE', '')
            content_length = get_content_length(self.environ)
            mimetype, options = parse_options_header(content_type)
            parser = self.make_form_data_parser()
            # Returns a (stream, form, files) triple.
            data = parser.parse(self._get_stream_for_parsing(),
                                mimetype, content_length, options)
        else:
            # Nothing to parse: keep the stream and expose empty
            # form/files containers.
            data = (self.stream, self.parameter_storage_class(),
                    self.parameter_storage_class())

        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d['stream'], d['form'], d['files'] = data

    def _get_stream_for_parsing(self):
        """This is the same as accessing :attr:`stream` with the difference
        that if it finds cached data from calling :meth:`get_data` first it
        will create a new stream out of the cached data.

        .. versionadded:: 0.9.3
        """
        # If get_data() already drained and cached the body, parse from
        # a fresh in-memory copy instead of the exhausted stream.
        cached_data = getattr(self, '_cached_data', None)
        if cached_data is not None:
            return BytesIO(cached_data)
        return self.stream
def close(self):
"""Closes associated resources of this request object. This
closes all file handles explicitly. You can also use the request
object in a with statement with will automatically close it.
.. versionadded:: 0.9
"""
files = self.__dict__.get('files')
for key, value in iter_multi_items(files or ()):
value.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
self.close()
    @cached_property
    def stream(self):
        """The stream to read incoming data from. Unlike :attr:`input_stream`
        this stream is properly guarded that you can't accidentally read past
        the length of the input. Werkzeug will internally always refer to
        this stream to read data which makes it possible to wrap this
        object with a stream that does filtering.

        .. versionchanged:: 0.9
           This stream is now always available but might be consumed by the
           form parser later on. Previously the stream was only set if no
           parsing happened.
        """
        # Shallow requests must not consume the body.
        _assert_not_shallow(self)
        # get_input_stream wraps wsgi.input so reads cannot run past the
        # declared content length.
        return get_input_stream(self.environ)

    input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
        'In general it\'s a bad idea to use this one because you can easily '
        'read past the boundary. Use the :attr:`stream` instead.')
    @cached_property
    def args(self):
        """The parsed URL parameters. By default an
        :class:`~werkzeug.datastructures.ImmutableMultiDict`
        is returned from this function. This can be changed by setting
        :attr:`parameter_storage_class` to a different type. This might
        be necessary if the order of the form data is important.
        """
        # The query string is decoded with the URL charset, which may
        # differ from the body charset (see :attr:`url_charset`).
        return url_decode(wsgi_get_bytes(self.environ.get('QUERY_STRING', '')),
                          self.url_charset, errors=self.encoding_errors,
                          cls=self.parameter_storage_class)

    @cached_property
    def data(self):
        """The raw request body, read and cached via :meth:`get_data`."""
        if self.disable_data_descriptor:
            raise AttributeError('data descriptor is disabled')
        # XXX: this should eventually be deprecated.

        # We trigger form data parsing first which means that the descriptor
        # will not cache the data that would otherwise be .form or .files
        # data. This restores the behavior that was there in Werkzeug
        # before 0.9. New code should use :meth:`get_data` explicitly as
        # this will make behavior explicit.
        return self.get_data(parse_form_data=True)
    def get_data(self, cache=True, as_text=False, parse_form_data=False):
        """This reads the buffered incoming data from the client into one
        bytestring. By default this is cached but that behavior can be
        changed by setting `cache` to `False`.

        Usually it's a bad idea to call this method without checking the
        content length first as a client could send dozens of megabytes or more
        to cause memory problems on the server.

        Note that if the form data was already parsed this method will not
        return anything as form data parsing does not cache the data like
        this method does. To implicitly invoke form data parsing function
        set `parse_form_data` to `True`. When this is done the return value
        of this method will be an empty string if the form parser handles
        the data. This generally is not necessary as if the whole data is
        cached (which is the default) the form parser will use the cached
        data to parse the form data. Please be generally aware of checking
        the content length first in any case before calling this method
        to avoid exhausting server memory.

        If `as_text` is set to `True` the return value will be a decoded
        unicode string.

        .. versionadded:: 0.9
        """
        rv = getattr(self, '_cached_data', None)
        if rv is None:
            if parse_form_data:
                # Let the form parser consume the stream first; whatever
                # remains (possibly nothing) is read below.
                self._load_form_data()
            rv = self.stream.read()
            if cache:
                self._cached_data = rv
        if as_text:
            rv = rv.decode(self.charset, self.encoding_errors)
        return rv
@cached_property
def form(self):
"""The form parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
self._load_form_data()
return self.form
@cached_property
def values(self):
"""Combined multi dict for :attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args)
@cached_property
def files(self):
""":class:`~werkzeug.datastructures.MultiDict` object containing
all uploaded files. Each key in :attr:`files` is the name from the
``<input type="file" name="">``. Each value in :attr:`files` is a
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
Note that :attr:`files` will only contain data if the request method was
POST, PUT or PATCH and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. It will be empty otherwise.
See the :class:`~werkzeug.datastructures.MultiDict` /
:class:`~werkzeug.datastructures.FileStorage` documentation for
more details about the used data structure.
"""
self._load_form_data()
return self.files
@cached_property
def cookies(self):
"""Read only access to the retrieved cookie values as dictionary."""
return parse_cookie(self.environ, self.charset,
self.encoding_errors,
cls=self.dict_storage_class)
@cached_property
def headers(self):
"""The headers from the WSGI environ as immutable
:class:`~werkzeug.datastructures.EnvironHeaders`.
"""
return EnvironHeaders(self.environ)
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will always include a leading slash,
even if the URL root is accessed.
"""
raw_path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return '/' + raw_path.lstrip('/')
@cached_property
def full_path(self):
"""Requested path as unicode, including the query string."""
return self.path + u'?' + to_unicode(self.query_string, self.url_charset)
@cached_property
def script_root(self):
"""The root path of the script without the trailing slash."""
raw_path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return raw_path.rstrip('/')
    @cached_property
    def url(self):
        """The reconstructed current URL"""
        return get_current_url(self.environ,
                               trusted_hosts=self.trusted_hosts)

    @cached_property
    def base_url(self):
        """Like :attr:`url` but without the querystring"""
        return get_current_url(self.environ, strip_querystring=True,
                               trusted_hosts=self.trusted_hosts)

    @cached_property
    def url_root(self):
        """The full URL root (with hostname), this is the application root."""
        # NOTE(review): the second positional argument is presumably the
        # ``root_only`` flag of get_current_url -- verify against
        # werkzeug.wsgi before changing.
        return get_current_url(self.environ, True,
                               trusted_hosts=self.trusted_hosts)

    @cached_property
    def host_url(self):
        """Just the host with scheme."""
        return get_current_url(self.environ, host_only=True,
                               trusted_hosts=self.trusted_hosts)

    @cached_property
    def host(self):
        """Just the host including the port if available."""
        return get_host(self.environ, trusted_hosts=self.trusted_hosts)

    query_string = environ_property('QUERY_STRING', '', read_only=True,
        load_func=wsgi_get_bytes, doc=
        '''The URL parameters as raw bytestring.''')

    method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
        '''The transmission method. (For example ``'GET'`` or ``'POST'``).''')

    @cached_property
    def access_route(self):
        """If a forwarded header exists this is a list of all ip addresses
        from the client ip to the last proxy server.
        """
        # X-Forwarded-For is a comma separated list; strip whitespace
        # around each entry.
        if 'HTTP_X_FORWARDED_FOR' in self.environ:
            addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
            return self.list_storage_class([x.strip() for x in addr])
        elif 'REMOTE_ADDR' in self.environ:
            return self.list_storage_class([self.environ['REMOTE_ADDR']])
        return self.list_storage_class()

    @property
    def remote_addr(self):
        """The remote address of the client."""
        return self.environ.get('REMOTE_ADDR')

    remote_user = environ_property('REMOTE_USER', doc='''
        If the server supports user authentication, and the script is
        protected, this attribute contains the username the user has
        authenticated as.''')

    scheme = environ_property('wsgi.url_scheme', doc='''
        URL scheme (http or https).

        .. versionadded:: 0.7''')

    is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
                      .lower() == 'xmlhttprequest', doc='''
        True if the request was triggered via a JavaScript XMLHttpRequest.
        This only works with libraries that support the `X-Requested-With`
        header and set it to "XMLHttpRequest". Libraries that do that are
        prototype, jQuery and Mochikit and probably some more.''')

    is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
                         doc='`True` if the request is secure.')

    is_multithread = environ_property('wsgi.multithread', doc='''
        boolean that is `True` if the application is served by
        a multithreaded WSGI server.''')

    is_multiprocess = environ_property('wsgi.multiprocess', doc='''
        boolean that is `True` if the application is served by
        a WSGI server that spawns multiple processes.''')

    is_run_once = environ_property('wsgi.run_once', doc='''
        boolean that is `True` if the application will be executed only
        once in a process lifetime. This is the case for CGI for example,
        but it's not guaranteed that the exeuction only happens one time.''')
class BaseResponse(object):
    """Base response class. The most important fact about a response object
    is that it's a regular WSGI application. It's initialized with a couple
    of response parameters (headers, body, status code etc.) and will start a
    valid WSGI response when called with the environ and start response
    callable.

    Because it's a WSGI application itself processing usually ends before the
    actual response is sent to the server. This helps debugging systems
    because they can catch all the exceptions before responses are started.

    Here a small example WSGI application that takes advantage of the
    response objects::

        from werkzeug.wrappers import BaseResponse as Response

        def index():
            return Response('Index page')

        def application(environ, start_response):
            path = environ.get('PATH_INFO') or '/'
            if path == '/':
                response = index()
            else:
                response = Response('Not Found', status=404)
            return response(environ, start_response)

    Like :class:`BaseRequest` which object is lacking a lot of functionality
    implemented in mixins. This gives you a better control about the actual
    API of your response objects, so you can create subclasses and add custom
    functionality. A full featured response object is available as
    :class:`Response` which implements a couple of useful mixins.

    To enforce a new type of already existing responses you can use the
    :meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post process them with a
    know interface.

    Per default the request object will assume all the text data is `utf-8`
    encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
    details about customizing the behavior.

    Response can be any kind of iterable or string. If it's a string it's
    considered being an iterable with one item which is the string passed.
    Headers can be a list of tuples or a
    :class:`~werkzeug.datastructures.Headers` object.

    Special note for `mimetype` and `content_type`: For most mime types
    `mimetype` and `content_type` work the same, the difference affects
    only 'text' mimetypes. If the mimetype passed with `mimetype` is a
    mimetype starting with `text/`, the charset parameter of the response
    object is appended to it. In contrast the `content_type` parameter is
    always added as header unmodified.

    .. versionchanged:: 0.5
       the `direct_passthrough` parameter was added.

    :param response: a string or response iterable.
    :param status: a string with a status or an integer with the status code.
    :param headers: a list of headers or a
                    :class:`~werkzeug.datastructures.Headers` object.
    :param mimetype: the mimetype for the request. See notice above.
    :param content_type: the content type for the request. See notice above.
    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
                               called before iteration which makes it
                               possible to pass special iterators though
                               unchanged (see :func:`wrap_file` for more
                               details.)
    """

    #: the charset of the response.
    charset = 'utf-8'

    #: the default status if none is provided.
    default_status = 200

    #: the default mimetype if none is provided.
    default_mimetype = 'text/plain'

    #: if set to `False` accessing properties on the response object will
    #: not try to consume the response iterator and convert it into a list.
    #:
    #: .. versionadded:: 0.6.2
    #:
    #: That attribute was previously called `implicit_seqence_conversion`.
    #: (Notice the typo). If you did use this feature, you have to adapt
    #: your code to the name change.
    implicit_sequence_conversion = True

    #: Should this response object correct the location header to be RFC
    #: conformant? This is true by default.
    #:
    #: .. versionadded:: 0.8
    autocorrect_location_header = True

    #: Should this response object automatically set the content-length
    #: header if possible? This is true by default.
    #:
    #: .. versionadded:: 0.8
    automatically_set_content_length = True
    def __init__(self, response=None, status=None, headers=None,
                 mimetype=None, content_type=None, direct_passthrough=False):
        """Initialize the response; see the class docstring for parameters."""
        # Normalize headers into a Headers instance, reusing the object
        # when one was passed in directly.
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)
        # ``content_type`` wins over ``mimetype``; a bare mimetype gets
        # the charset appended by get_content_type() where appropriate.
        if content_type is None:
            if mimetype is None and 'content-type' not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                mimetype = get_content_type(mimetype, self.charset)
            content_type = mimetype
        if content_type is not None:
            self.headers['Content-Type'] = content_type
        if status is None:
            status = self.default_status
        # Integers are routed through ``status_code``, strings through
        # ``status`` -- each property keeps the other in sync.
        if isinstance(status, integer_types):
            self.status_code = status
        else:
            self.status = status
        self.direct_passthrough = direct_passthrough
        self._on_close = []

        # we set the response after the headers so that if a class changes
        # the charset attribute, the data is set in the correct charset.
        if response is None:
            self.response = []
        elif isinstance(response, (text_type, bytes, bytearray)):
            self.set_data(response)
        else:
            self.response = response
def call_on_close(self, func):
"""Adds a function to the internal list of functions that should
be called as part of closing down the response. Since 0.7 this
function also returns the function that was passed so that this
can be used as a decorator.
.. versionadded:: 0.6
"""
self._on_close.append(func)
return func
def __repr__(self):
if self.is_sequence:
body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
else:
body_info = self.is_streamed and 'streamed' or 'likely-streamed'
return '<%s %s [%s]>' % (
self.__class__.__name__,
body_info,
self.status
)
    @classmethod
    def force_type(cls, response, environ=None):
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`BaseResponse` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`BaseResponse` object, even
        if you are using a custom subclass.

        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::

            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)

            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)

        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.

        Keep in mind that this will modify response objects in place if
        possible!

        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, BaseResponse):
            if environ is None:
                raise TypeError('cannot convert WSGI application into '
                                'response objects without an environ')
            # Run the WSGI app and build a base response from its
            # (app_iter, status, headers) output.
            response = BaseResponse(*_run_wsgi_app(response, environ))
        # In-place class swap -- cheap, but mutates the given object.
        response.__class__ = cls
        return response
@classmethod
def from_app(cls, app, environ, buffered=False):
"""Create a new response object from an application output. This
works best if you pass it an application that returns a generator all
the time. Sometimes applications may use the `write()` callable
returned by the `start_response` function. This tries to resolve such
edge cases automatically. But if you don't get the expected output
you should set `buffered` to `True` which enforces buffering.
:param app: the WSGI application to execute.
:param environ: the WSGI environment to execute against.
:param buffered: set to `True` to enforce buffering.
:return: a response object.
"""
return cls(*_run_wsgi_app(app, environ, buffered))
def _get_status_code(self):
return self._status_code
def _set_status_code(self, code):
self._status_code = code
try:
self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
except KeyError:
self._status = '%d UNKNOWN' % code
status_code = property(_get_status_code, _set_status_code,
doc='The HTTP Status code as number')
del _get_status_code, _set_status_code
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = to_native(value)
try:
self._status_code = int(self._status.split(None, 1)[0])
except ValueError:
self._status_code = 0
self._status = '0 %s' % self._status
status = property(_get_status, _set_status, doc='The HTTP Status code')
del _get_status, _set_status
def get_data(self, as_text=False):
"""The string representation of the request body. Whenever you call
this property the request iterable is encoded and flattened. This
can lead to unwanted behavior if you stream big data.
This behavior can be disabled by setting
:attr:`implicit_sequence_conversion` to `False`.
If `as_text` is set to `True` the return value will be a decoded
unicode string.
.. versionadded:: 0.9
"""
self._ensure_sequence()
rv = b''.join(self.iter_encoded())
if as_text:
rv = rv.decode(self.charset)
return rv
def set_data(self, value):
"""Sets a new string as response. The value set must either by a
unicode or bytestring. If a unicode string is set it's encoded
automatically to the charset of the response (utf-8 by default).
.. versionadded:: 0.9
"""
# if an unicode string is set, it's encoded directly so that we
# can set the content length
if isinstance(value, text_type):
value = value.encode(self.charset)
else:
value = bytes(value)
self.response = [value]
if self.automatically_set_content_length:
self.headers['Content-Length'] = str(len(value))
data = property(get_data, set_data, doc='''
A descriptor that calls :meth:`get_data` and :meth:`set_data`. This
should not be used and will eventually get deprecated.
''')
def calculate_content_length(self):
    """Return the body length in bytes, or `None` when it cannot be
    determined without consuming a streamed response.
    """
    try:
        self._ensure_sequence()
    except RuntimeError:
        # The iterable may not be buffered (passthrough mode or
        # implicit conversion disabled) -- length is unknown.
        return None
    total = 0
    for chunk in self.response:
        total += len(chunk)
    return total
def _ensure_sequence(self, mutable=False):
    """Buffer the response iterable into a sequence if necessary.

    :param mutable: when true, additionally guarantee the sequence is a
                    plain Python list so it can be modified in place.
    :raises RuntimeError: if buffering is not allowed for this response.

    .. versionadded:: 0.6
    """
    if not self.is_sequence:
        if self.direct_passthrough:
            raise RuntimeError('Attempted implicit sequence conversion '
                               'but the response object is in direct '
                               'passthrough mode.')
        if not self.implicit_sequence_conversion:
            raise RuntimeError('The response object required the iterable '
                               'to be a sequence, but the implicit '
                               'conversion was disabled. Call '
                               'make_sequence() yourself.')
        self.make_sequence()
        return
    # Already a sequence; promote tuples to a mutable list on demand.
    if mutable and not isinstance(self.response, list):
        self.response = list(self.response)
def make_sequence(self):
    """Encode and buffer the response iterator into a list.

    This happens automatically when a sequence is required unless
    `implicit_sequence_conversion` is disabled, in which case callers
    must invoke it themselves.

    .. versionadded:: 0.6
    """
    if self.is_sequence:
        return
    # Grab the iterable's close() *before* replacing it, so the wrapped
    # resource is still released when the response is torn down.
    close = getattr(self.response, 'close', None)
    self.response = list(self.iter_encoded())
    if close is not None:
        self.call_on_close(close)
def iter_encoded(self):
    """Iterate over the response body encoded with the response charset.

    If the response object is invoked as WSGI application the return
    value of this method is used as application iterator unless
    :attr:`direct_passthrough` was activated.
    """
    # Fix: the original assigned ``charset = self.charset`` and never
    # used the local -- the dead assignment has been removed.
    if __debug__:
        _warn_if_string(self.response)
    # Encode in a separate function so that self.response is fetched
    # early. This allows us to wrap the response with the return
    # value from get_app_iter or iter_encoded.
    return _iter_encoded(self.response, self.charset)
def set_cookie(self, key, value='', max_age=None, expires=None,
               path='/', domain=None, secure=None, httponly=False):
    """Set a cookie on the response.

    Mirrors the parameters of the stdlib cookie `Morsel` object but
    also accepts unicode data.

    :param key: the name of the cookie to be set.
    :param value: the value of the cookie.
    :param max_age: lifetime in seconds, or `None` (default) for a
                    session cookie that lasts only as long as the
                    client's browser session.
    :param expires: should be a `datetime` object or UNIX timestamp.
    :param path: limits the cookie to a given path; per default it
                 spans the whole domain.
    :param domain: set for cross-domain cookies.  For example,
                   ``domain=".example.com"`` makes the cookie readable
                   by ``www.example.com``, ``foo.example.com`` etc.;
                   otherwise only the domain that set it can read it.
    """
    header_value = dump_cookie(key, value, max_age, expires, path,
                               domain, secure, httponly, self.charset)
    self.headers.add('Set-Cookie', header_value)
def delete_cookie(self, key, path='/', domain=None):
    """Expire a cookie immediately.  Fails silently if the cookie does
    not exist on the client.

    :param key: the name of the cookie to be deleted.
    :param path: the path the cookie was limited to, if any.
    :param domain: the domain the cookie was limited to, if any.
    """
    # Setting an already-expired cookie makes the client drop it.
    self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
@property
def is_streamed(self):
    """`True` when the response is streamed, i.e. the response iterable
    carries no length information.  This is usually the case when a
    generator is passed to the response object.

    Useful for checking before applying post-filtering that must not
    run on streamed responses.
    """
    try:
        len(self.response)
    except (TypeError, AttributeError):
        # No usable __len__ -- number of iterations is unknown.
        return True
    else:
        return False
@property
def is_sequence(self):
    """`True` when the response iterable is buffered, i.e. stored as a
    list or tuple rather than a lazy iterator.

    .. versionadded:: 0.6
    """
    return isinstance(self.response, (tuple, list))
def close(self):
    """Close the wrapped response if possible and run all registered
    teardown callbacks.  The object can also be used in a ``with``
    statement, which closes it automatically.

    .. versionadded:: 0.9
       Can now be used in a with statement.
    """
    if hasattr(self.response, 'close'):
        self.response.close()
    # Teardown callbacks run after the underlying iterable is closed.
    for callback in self._on_close:
        callback()
def __enter__(self):
    # Context-manager support: ``with response:`` yields the response
    # itself and guarantees :meth:`close` runs on exit.
    return self

def __exit__(self, exc_type, exc_value, tb):
    # Always close, regardless of whether an exception occurred.
    self.close()
def freeze(self):
    """Prepare the response object for pickling: buffer the body and
    set the `Content-Length` header to the length of the body.

    .. versionchanged:: 0.6
       The `Content-Length` header is now set.
    """
    # Flatten the *encoded* iterator explicitly, deliberately bypassing
    # the implicit_sequence_conversion switch.
    chunks = list(self.iter_encoded())
    self.response = chunks
    self.headers['Content-Length'] = str(sum(len(c) for c in chunks))
def get_wsgi_headers(self, environ):
    """This is automatically called right before the response is started
    and returns headers modified for the given environment.  It returns a
    copy of the headers from the response with some modifications applied
    if necessary.

    For example the location header (if present) is joined with the root
    URL of the environment.  Also the content length is automatically set
    to zero here for certain status codes.

    .. versionchanged:: 0.6
       Previously that function was called `fix_headers` and modified
       the response object in place.  Also since 0.6, IRIs in location
       and content-location headers are handled properly.

       Also starting with 0.6, Werkzeug will attempt to set the content
       length if it is able to figure it out on its own.  This is the
       case if all the strings in the response iterable are already
       encoded and the iterable is buffered.

    :param environ: the WSGI environment of the request.
    :return: returns a new :class:`~werkzeug.datastructures.Headers`
             object.
    """
    # Work on a copy: the response's own headers stay untouched.
    headers = Headers(self.headers)
    location = None
    content_location = None
    content_length = None
    status = self.status_code

    # iterate over the headers to find all values in one go.  Because
    # get_wsgi_headers is used each response that gives us a tiny
    # speedup.
    for key, value in headers:
        ikey = key.lower()
        if ikey == u'location':
            location = value
        elif ikey == u'content-location':
            content_location = value
        elif ikey == u'content-length':
            content_length = value

    # make sure the location header is an absolute URL
    if location is not None:
        old_location = location
        if isinstance(location, text_type):
            # IRI -> URI so non-ASCII targets stay valid header values.
            location = iri_to_uri(location)
        if self.autocorrect_location_header:
            current_url = get_current_url(environ, root_only=True)
            if isinstance(current_url, text_type):
                current_url = iri_to_uri(current_url)
            location = url_join(current_url, location)
        # Only write back when something actually changed.
        if location != old_location:
            headers['Location'] = location

    # make sure the content location is a URL
    if content_location is not None and \
       isinstance(content_location, text_type):
        headers['Content-Location'] = iri_to_uri(content_location)

    # remove entity headers and set content length to zero if needed.
    # Also update content_length accordingly so that the automatic
    # content length detection does not trigger in the following
    # code.
    if 100 <= status < 200 or status == 204:
        headers['Content-Length'] = content_length = u'0'
    elif status == 304:
        remove_entity_headers(headers)

    # if we can determine the content length automatically, we
    # should try to do that.  But only if this does not involve
    # flattening the iterator or encoding of unicode strings in
    # the response.  We however should not do that if we have a 304
    # response.
    if self.automatically_set_content_length and \
       self.is_sequence and content_length is None and status != 304:
        try:
            content_length = sum(len(to_bytes(x, 'ascii'))
                                 for x in self.response)
        except UnicodeError:
            # aha, something non-bytestringy in there, too bad, we
            # can't safely figure out the length of the response.
            pass
        else:
            headers['Content-Length'] = str(content_length)

    return headers
def get_app_iter(self, environ):
    """Return the application iterator for the given environ.

    For `HEAD` requests and for status codes where the HTTP
    specification requires an empty body (1xx, 204, 304), an empty
    iterable is returned instead of the response body.

    .. versionadded:: 0.6

    :param environ: the WSGI environment of the request.
    :return: a response iterable.
    """
    status = self.status_code
    bodyless = 100 <= status < 200 or status in (204, 304)
    if environ['REQUEST_METHOD'] == 'HEAD' or bodyless:
        iterable = ()
    elif self.direct_passthrough:
        if __debug__:
            _warn_if_string(self.response)
        # Passthrough responses are handed back untouched, without the
        # closing wrapper.
        return self.response
    else:
        iterable = self.iter_encoded()
    return ClosingIterator(iterable, self.close)
def get_wsgi_response(self, environ):
    """Return the final WSGI response as a tuple: the application
    iterator, the status string and the header list, created for the
    given environment.  For example if the request method in the WSGI
    environment is ``'HEAD'`` the response body will be empty and only
    the headers and status code will be present.

    .. versionadded:: 0.6

    :param environ: the WSGI environment of the request.
    :return: an ``(app_iter, status, headers)`` tuple.
    """
    wsgi_headers = self.get_wsgi_headers(environ)
    return (self.get_app_iter(environ), self.status,
            wsgi_headers.to_wsgi_list())
def __call__(self, environ, start_response):
    """Process this response as WSGI application.

    :param environ: the WSGI environment.
    :param start_response: the response callable provided by the WSGI
                           server.
    :return: an application iterator
    """
    app_iter, status, headers = self.get_wsgi_response(environ)
    start_response(status, headers)
    return app_iter
class AcceptMixin(object):
    """A mixin for classes with an :attr:`~BaseRequest.environ` attribute
    to get all the HTTP accept headers as
    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
    thereof).  Each value is read lazily from the WSGI environ and cached
    via ``cached_property``.
    """

    @cached_property
    def accept_mimetypes(self):
        """List of mimetypes this client supports as
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT'), MIMEAccept)

    @cached_property
    def accept_charsets(self):
        """List of charsets this client supports as
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_CHARSET'),
                                   CharsetAccept)

    @cached_property
    def accept_encodings(self):
        """List of encodings this client accepts.  Encodings in a HTTP term
        are compression encodings such as gzip.  For charsets have a look at
        :attr:`accept_charsets`.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_ENCODING'))

    @cached_property
    def accept_languages(self):
        """List of languages this client accepts as
        :class:`~werkzeug.datastructures.LanguageAccept` object.

        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        return parse_accept_header(self.environ.get('HTTP_ACCEPT_LANGUAGE'),
                                   LanguageAccept)
class ETagRequestMixin(object):
    """Add entity tag and cache descriptors to a request object or object
    with a WSGI environment available as :attr:`~BaseRequest.environ`.
    This not only provides access to etags but also to the cache control
    header.
    """

    @cached_property
    def cache_control(self):
        """A :class:`~werkzeug.datastructures.RequestCacheControl` object
        for the incoming cache control headers.
        """
        cache_control = self.environ.get('HTTP_CACHE_CONTROL')
        return parse_cache_control_header(cache_control, None,
                                          RequestCacheControl)

    @cached_property
    def if_match(self):
        """An object containing all the etags in the `If-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get('HTTP_IF_MATCH'))

    @cached_property
    def if_none_match(self):
        """An object containing all the etags in the `If-None-Match` header.

        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        return parse_etags(self.environ.get('HTTP_IF_NONE_MATCH'))

    @cached_property
    def if_modified_since(self):
        """The parsed `If-Modified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_MODIFIED_SINCE'))

    @cached_property
    def if_unmodified_since(self):
        """The parsed `If-Unmodified-Since` header as datetime object."""
        return parse_date(self.environ.get('HTTP_IF_UNMODIFIED_SINCE'))

    @cached_property
    def if_range(self):
        """The parsed `If-Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.IfRange`
        """
        return parse_if_range_header(self.environ.get('HTTP_IF_RANGE'))

    @cached_property
    def range(self):
        """The parsed `Range` header.

        .. versionadded:: 0.7

        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        return parse_range_header(self.environ.get('HTTP_RANGE'))
class UserAgentMixin(object):
    """Adds a `user_agent` attribute to the request object which contains
    the parsed user agent of the browser that triggered the request as a
    :class:`~werkzeug.useragents.UserAgent` object.
    """

    @cached_property
    def user_agent(self):
        """The current user agent."""
        # Lazy import: werkzeug.useragents is only loaded on first access.
        from werkzeug.useragents import UserAgent
        return UserAgent(self.environ)
class AuthorizationMixin(object):
    """Adds an :attr:`authorization` property that represents the parsed
    value of the `Authorization` header as
    :class:`~werkzeug.datastructures.Authorization` object.
    """

    @cached_property
    def authorization(self):
        """The `Authorization` object in parsed form."""
        header = self.environ.get('HTTP_AUTHORIZATION')
        return parse_authorization_header(header)
class StreamOnlyMixin(object):
    """If mixed in before the request object this will change the behavior
    of it to disable handling of form parsing.  This disables the
    :attr:`files`, :attr:`form` attributes and will just provide a
    :attr:`stream` attribute that however is always available.

    .. versionadded:: 0.9
    """

    # Flags consumed by the request base class: drop the data descriptor
    # and skip form data parsing entirely.
    disable_data_descriptor = True
    want_form_data_parsed = False
class ETagResponseMixin(object):
    """Adds extra functionality to a response object for etag and cache
    handling.  This mixin requires an object with at least a `headers`
    object that implements a dict like interface similar to
    :class:`~werkzeug.datastructures.Headers`.

    If you want the :meth:`freeze` method to automatically add an etag, you
    have to mixin this method before the response base class.  The default
    response class does not do that.
    """

    @property
    def cache_control(self):
        """The Cache-Control general-header field is used to specify
        directives that MUST be obeyed by all caching mechanisms along the
        request/response chain.
        """
        def on_update(cache_control):
            # Write-back hook: mutations on the returned object are
            # mirrored into the headers; an emptied object removes the
            # header entirely.
            if not cache_control and 'cache-control' in self.headers:
                del self.headers['cache-control']
            elif cache_control:
                self.headers['Cache-Control'] = cache_control.to_header()
        return parse_cache_control_header(self.headers.get('cache-control'),
                                          on_update,
                                          ResponseCacheControl)

    def make_conditional(self, request_or_environ):
        """Make the response conditional to the request.  This method works
        best if an etag was defined for the response already.  The `add_etag`
        method can be used to do that.  If called without etag just the date
        header is set.

        This does nothing if the request method in the request or environ is
        anything but GET or HEAD.

        It does not remove the body of the response because that's something
        the :meth:`__call__` function does for us automatically.

        Returns self so that you can do ``return resp.make_conditional(req)``
        but modifies the object in-place.

        :param request_or_environ: a request object or WSGI environment to
                                   be used to make the response conditional
                                   against.
        """
        environ = _get_environ(request_or_environ)
        if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
            # if the date is not in the headers, add it now.  We however
            # will not override an already existing header.  Unfortunately
            # this header will be overriden by many WSGI servers including
            # wsgiref.
            if 'date' not in self.headers:
                self.headers['Date'] = http_date()
            if 'content-length' not in self.headers:
                length = self.calculate_content_length()
                if length is not None:
                    self.headers['Content-Length'] = length
            if not is_resource_modified(environ, self.headers.get('etag'), None,
                                        self.headers.get('last-modified')):
                self.status_code = 304
        return self

    def add_etag(self, overwrite=False, weak=False):
        """Add an etag for the current response if there is none yet."""
        if overwrite or 'etag' not in self.headers:
            self.set_etag(generate_etag(self.get_data()), weak)

    def set_etag(self, etag, weak=False):
        """Set the etag, and override the old one if there was one."""
        self.headers['ETag'] = quote_etag(etag, weak)

    def get_etag(self):
        """Return a tuple in the form ``(etag, is_weak)``.  If there is no
        ETag the return value is ``(None, None)``.
        """
        return unquote_etag(self.headers.get('ETag'))

    def freeze(self, no_etag=False):
        """Call this method if you want to make your response object ready
        for pickling.  This buffers the generator if there is one.  This
        also sets the etag unless `no_etag` is set to `True`.
        """
        if not no_etag:
            self.add_etag()
        super(ETagResponseMixin, self).freeze()

    accept_ranges = header_property('Accept-Ranges', doc='''
        The `Accept-Ranges` header. Even though the name would indicate
        that multiple values are supported, it must be one string token only.
        The values ``'bytes'`` and ``'none'`` are common.
        .. versionadded:: 0.7''')

    def _get_content_range(self):
        def on_update(rng):
            # Write-back hook for the returned ContentRange object.
            if not rng:
                del self.headers['content-range']
            else:
                self.headers['Content-Range'] = rng.to_header()
        rv = parse_content_range_header(self.headers.get('content-range'),
                                        on_update)
        # always provide a content range object to make the descriptor
        # more user friendly.  It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv

    def _set_content_range(self, value):
        if not value:
            del self.headers['content-range']
        elif isinstance(value, string_types):
            self.headers['Content-Range'] = value
        else:
            self.headers['Content-Range'] = value.to_header()

    content_range = property(_get_content_range, _set_content_range, doc='''
        The `Content-Range` header as
        :class:`~werkzeug.datastructures.ContentRange` object. Even if the
        header is not set it wil provide such an object for easier
        manipulation.
        .. versionadded:: 0.7''')
    # Remove the helpers from the class namespace; only the descriptor stays.
    del _get_content_range, _set_content_range
class ResponseStream(object):
    """A write-only, file-like object used by :class:`ResponseStreamMixin`
    to represent the body of the stream.  Every write is appended directly
    to the response object's buffered iterable.
    """

    mode = 'wb+'

    def __init__(self, response):
        self.response = response
        self.closed = False

    def _require_open(self):
        # Mirror the file API: operations on a closed stream must fail.
        if self.closed:
            raise ValueError('I/O operation on closed file')

    def write(self, value):
        """Append *value* to the response body."""
        self._require_open()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)

    def writelines(self, seq):
        """Write every item of *seq* in order."""
        for chunk in seq:
            self.write(chunk)

    def close(self):
        self.closed = True

    def flush(self):
        # Nothing is buffered locally; only validate the stream is open.
        self._require_open()

    def isatty(self):
        self._require_open()
        return False

    @property
    def encoding(self):
        # The stream encodes with whatever charset the response uses.
        return self.response.charset
class ResponseStreamMixin(object):
    """Mixin for :class:`BaseResponse` subclasses.  Classes that inherit
    from this mixin will automatically get a :attr:`stream` property that
    provides a write-only interface to the response iterable.
    """

    @cached_property
    def stream(self):
        """The response iterable as write-only stream."""
        return ResponseStream(self)
class CommonRequestDescriptorsMixin(object):
    """A mixin for :class:`BaseRequest` subclasses.  Request objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.

    .. versionadded:: 0.5
    """

    content_type = environ_property('CONTENT_TYPE', doc='''
        The Content-Type entity-header field indicates the media type of
        the entity-body sent to the recipient or, in the case of the HEAD
        method, the media type that would have been sent had the request
        been a GET.''')

    @cached_property
    def content_length(self):
        """The Content-Length entity-header field indicates the size of the
        entity-body in bytes or, in the case of the HEAD method, the size of
        the entity-body that would have been sent had the request been a
        GET.
        """
        return get_content_length(self.environ)

    content_encoding = environ_property('HTTP_CONTENT_ENCODING', doc='''
        The Content-Encoding entity-header field is used as a modifier to the
        media-type. When present, its value indicates what additional content
        codings have been applied to the entity-body, and thus what decoding
        mechanisms must be applied in order to obtain the media-type
        referenced by the Content-Type header field.
        .. versionadded:: 0.9''')
    content_md5 = environ_property('HTTP_CONTENT_MD5', doc='''
        The Content-MD5 entity-header field, as defined in RFC 1864, is an
        MD5 digest of the entity-body for the purpose of providing an
        end-to-end message integrity check (MIC) of the entity-body. (Note:
        a MIC is good for detecting accidental modification of the
        entity-body in transit, but is not proof against malicious attacks.)
        .. versionadded:: 0.9''')
    referrer = environ_property('HTTP_REFERER', doc='''
        The Referer[sic] request-header field allows the client to specify,
        for the server's benefit, the address (URI) of the resource from which
        the Request-URI was obtained (the "referrer", although the header
        field is misspelled).''')
    date = environ_property('HTTP_DATE', None, parse_date, doc='''
        The Date general-header field represents the date and time at which
        the message was originated, having the same semantics as orig-date
        in RFC 822.''')
    max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
        The Max-Forwards request-header field provides a mechanism with the
        TRACE and OPTIONS methods to limit the number of proxies or gateways
        that can forward the request to the next inbound server.''')

    def _parse_content_type(self):
        # Parse the Content-Type header lazily and cache the result:
        # parse_options_header yields a (mimetype, params) tuple.
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.environ.get('CONTENT_TYPE', ''))

    @property
    def mimetype(self):
        """Like :attr:`content_type` but without parameters (eg, without
        charset, type etc.).  For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0]

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    @cached_property
    def pragma(self):
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain.  All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
class CommonResponseDescriptorsMixin(object):
    """A mixin for :class:`BaseResponse` subclasses.  Response objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    """

    def _get_mimetype(self):
        # Strip parameters (e.g. ``; charset=utf-8``) from the header.
        ct = self.headers.get('content-type')
        if ct:
            return ct.split(';')[0].strip()

    def _set_mimetype(self, value):
        self.headers['Content-Type'] = get_content_type(value, self.charset)

    def _get_mimetype_params(self):
        def on_update(d):
            # Re-serialize the whole header whenever the dict changes.
            self.headers['Content-Type'] = \
                dump_options_header(self.mimetype, d)
        d = parse_options_header(self.headers.get('content-type', ''))[1]
        return CallbackDict(d, on_update)

    mimetype = property(_get_mimetype, _set_mimetype, doc='''
        The mimetype (content type without charset etc.)''')
    mimetype_params = property(_get_mimetype_params, doc='''
        The mimetype parameters as dict. For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        .. versionadded:: 0.5
        ''')
    location = header_property('Location', doc='''
        The Location response-header field is used to redirect the recipient
        to a location other than the Request-URI for completion of the request
        or identification of a new resource.''')
    age = header_property('Age', None, parse_date, http_date, doc='''
        The Age response-header field conveys the sender's estimate of the
        amount of time since the response (or its revalidation) was
        generated at the origin server.
        Age values are non-negative decimal integers, representing time in
        seconds.''')
    content_type = header_property('Content-Type', doc='''
        The Content-Type entity-header field indicates the media type of the
        entity-body sent to the recipient or, in the case of the HEAD method,
        the media type that would have been sent had the request been a GET.
        ''')
    content_length = header_property('Content-Length', None, int, str, doc='''
        The Content-Length entity-header field indicates the size of the
        entity-body, in decimal number of OCTETs, sent to the recipient or,
        in the case of the HEAD method, the size of the entity-body that would
        have been sent had the request been a GET.''')
    content_location = header_property('Content-Location', doc='''
        The Content-Location entity-header field MAY be used to supply the
        resource location for the entity enclosed in the message when that
        entity is accessible from a location separate from the requested
        resource's URI.''')
    content_encoding = header_property('Content-Encoding', doc='''
        The Content-Encoding entity-header field is used as a modifier to the
        media-type. When present, its value indicates what additional content
        codings have been applied to the entity-body, and thus what decoding
        mechanisms must be applied in order to obtain the media-type
        referenced by the Content-Type header field.''')
    content_md5 = header_property('Content-MD5', doc='''
        The Content-MD5 entity-header field, as defined in RFC 1864, is an
        MD5 digest of the entity-body for the purpose of providing an
        end-to-end message integrity check (MIC) of the entity-body. (Note:
        a MIC is good for detecting accidental modification of the
        entity-body in transit, but is not proof against malicious attacks.)
        ''')
    date = header_property('Date', None, parse_date, http_date, doc='''
        The Date general-header field represents the date and time at which
        the message was originated, having the same semantics as orig-date
        in RFC 822.''')
    expires = header_property('Expires', None, parse_date, http_date, doc='''
        The Expires entity-header field gives the date/time after which the
        response is considered stale. A stale cache entry may not normally be
        returned by a cache.''')
    last_modified = header_property('Last-Modified', None, parse_date,
                                    http_date, doc='''
        The Last-Modified entity-header field indicates the date and time at
        which the origin server believes the variant was last modified.''')

    def _get_retry_after(self):
        value = self.headers.get('retry-after')
        if value is None:
            return
        elif value.isdigit():
            # A bare integer means "seconds from now".
            return datetime.utcnow() + timedelta(seconds=int(value))
        return parse_date(value)

    def _set_retry_after(self, value):
        if value is None:
            # Assigning None removes the header.
            if 'retry-after' in self.headers:
                del self.headers['retry-after']
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers['Retry-After'] = value

    retry_after = property(_get_retry_after, _set_retry_after, doc='''
        The Retry-After response-header field can be used with a 503 (Service
        Unavailable) response to indicate how long the service is expected
        to be unavailable to the requesting client.
        Time in seconds until expiration or date.''')

    def _set_property(name, doc=None):
        # Factory for set-valued header descriptors (Vary, Allow, ...).
        def fget(self):
            def on_update(header_set):
                # Mirror changes on the parsed set back into the headers.
                if not header_set and name in self.headers:
                    del self.headers[name]
                elif header_set:
                    self.headers[name] = header_set.to_header()
            return parse_set_header(self.headers.get(name), on_update)
        def fset(self, value):
            if not value:
                del self.headers[name]
            elif isinstance(value, string_types):
                self.headers[name] = value
            else:
                self.headers[name] = dump_header(value)
        return property(fget, fset, doc=doc)

    vary = _set_property('Vary', doc='''
        The Vary field value indicates the set of request-header fields that
        fully determines, while the response is fresh, whether a cache is
        permitted to use the response to reply to a subsequent request
        without revalidation.''')
    content_language = _set_property('Content-Language', doc='''
        The Content-Language entity-header field describes the natural
        language(s) of the intended audience for the enclosed entity. Note
        that this might not be equivalent to all the languages used within
        the entity-body.''')
    allow = _set_property('Allow', doc='''
        The Allow entity-header field lists the set of methods supported
        by the resource identified by the Request-URI. The purpose of this
        field is strictly to inform the recipient of valid methods
        associated with the resource. An Allow header field MUST be
        present in a 405 (Method Not Allowed) response.''')

    # Remove the helpers from the class namespace; only descriptors stay.
    del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
        _set_retry_after
class WWWAuthenticateMixin(object):
    """Adds a :attr:`www_authenticate` property to a response object."""

    @property
    def www_authenticate(self):
        """The `WWW-Authenticate` header in a parsed form."""
        def on_update(www_auth):
            # Write-back hook: clearing the parsed object removes the
            # header, other changes re-serialize it.
            if not www_auth and 'www-authenticate' in self.headers:
                del self.headers['www-authenticate']
            elif www_auth:
                self.headers['WWW-Authenticate'] = www_auth.to_header()
        header = self.headers.get('www-authenticate')
        return parse_www_authenticate_header(header, on_update)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
              UserAgentMixin, AuthorizationMixin,
              CommonRequestDescriptorsMixin):
    """Full featured request object implementing the following mixins:

    - :class:`AcceptMixin` for accept header parsing
    - :class:`ETagRequestMixin` for etag and cache control handling
    - :class:`UserAgentMixin` for user agent introspection
    - :class:`AuthorizationMixin` for http auth handling
    - :class:`CommonRequestDescriptorsMixin` for common headers
    """
class PlainRequest(StreamOnlyMixin, Request):
    """A request object without special form parsing capabilities; only
    the raw :attr:`stream` is available.

    .. versionadded:: 0.9
    """
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
               CommonResponseDescriptorsMixin,
               WWWAuthenticateMixin):
    """Full featured response object implementing the following mixins:

    - :class:`ETagResponseMixin` for etag and cache control handling
    - :class:`ResponseStreamMixin` to add support for the `stream` property
    - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
    - :class:`WWWAuthenticateMixin` for HTTP authentication support
    """
| mit |
Hazelsuko07/17WarmingUp | py3.6/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
# Per-multi-byte-character weight used by UTF8Prober.get_confidence():
# each observed multi-byte sequence halves the remaining doubt.
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
    """Detect UTF-8 by running bytes through a UTF-8 coding state machine.

    Confidence grows with the number of complete multi-byte sequences seen.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(UTF8SMModel)
        self.reset()

    def reset(self):
        """Reset prober and state machine for a fresh byte stream."""
        CharSetProber.reset(self)
        self._mCodingSM.reset()
        self._mNumOfMBChar = 0

    def get_charset_name(self):
        return "utf-8"

    def feed(self, aBuf):
        """Feed a buffer of bytes; return the (possibly updated) state."""
        for byte in aBuf:
            sm_state = self._mCodingSM.next_state(byte)
            if sm_state == constants.eError:
                # Invalid UTF-8 sequence: rule this charset out.
                self._mState = constants.eNotMe
                break
            if sm_state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if sm_state == constants.eStart and \
                    self._mCodingSM.get_current_charlen() >= 2:
                # A complete multi-byte character was just consumed.
                self._mNumOfMBChar += 1
        if self.get_state() == constants.eDetecting and \
                self.get_confidence() > constants.SHORTCUT_THRESHOLD:
            self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        """Confidence rises towards 0.99 as multi-byte chars accumulate."""
        unlike = 0.99
        if self._mNumOfMBChar >= 6:
            return unlike
        # 0.5 ** n is an exact power-of-two scaling, identical to the
        # repeated multiplication the original loop performed.
        return 1.0 - unlike * (ONE_CHAR_PROB ** self._mNumOfMBChar)
| mit |
bdfoster/blumate | blumate/components/media_player/firetv.py | 1 | 5532 | """
Support for functionality to interact with FireTV devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.firetv/
"""
import logging
import requests
from blumate.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_SET, MediaPlayerDevice)
from blumate.const import (
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, STATE_STANDBY,
STATE_UNKNOWN)
# Bitmask of media-player features this platform exposes to Home Assistant.
SUPPORT_FIRETV = SUPPORT_PAUSE | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
    SUPPORT_NEXT_TRACK | SUPPORT_VOLUME_SET
DOMAIN = 'firetv'
# firetv-server REST endpoints: {0} is host[:port], {1} the device id,
# {2} the action id.
DEVICE_LIST_URL = 'http://{0}/devices/list'
DEVICE_STATE_URL = 'http://{0}/devices/state/{1}'
DEVICE_ACTION_URL = 'http://{0}/devices/action/{1}/{2}'
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the FireTV platform.

    Asks the firetv-server for its known devices and registers the
    configured one with Home Assistant when it is present.
    """
    host = config.get('host', 'localhost:5556')
    device_id = config.get('device', 'default')
    try:
        # Bound the request so platform setup cannot hang forever on an
        # unreachable server; Timeout subclasses RequestException, so it
        # is handled below.
        response = requests.get(
            DEVICE_LIST_URL.format(host), timeout=10).json()
        if device_id in response['devices']:
            add_devices([
                FireTVDevice(
                    host,
                    device_id,
                    config.get('name', 'Amazon Fire TV')
                )
            ])
            _LOGGER.info(
                'Device %s accessible and ready for control', device_id)
        else:
            _LOGGER.warning(
                'Device %s is not registered with firetv-server', device_id)
    except requests.exceptions.RequestException:
        _LOGGER.error('Could not connect to firetv-server at %s', host)
    except ValueError:
        # .json() raises ValueError when the server replies with non-JSON.
        _LOGGER.error('Invalid response from firetv-server at %s', host)
class FireTV(object):
    """The firetv-server client.

    Should a native Python 3 ADB module become available, python-firetv can
    support Python 3, it can be added as a dependency, and this class can be
    dispensed of.

    For now, it acts as a client to the firetv-server HTTP server (which must
    be running via Python 2).
    """

    # Seconds before an HTTP request to firetv-server is abandoned.
    # requests.exceptions.Timeout subclasses RequestException, so the
    # existing except clauses below already handle it.
    REQUEST_TIMEOUT = 10

    def __init__(self, host, device_id):
        """Initialize the FireTV server connection parameters."""
        self.host = host
        self.device_id = device_id

    @property
    def state(self):
        """Get the device state. An exception means UNKNOWN state."""
        try:
            response = requests.get(
                DEVICE_STATE_URL.format(
                    self.host,
                    self.device_id
                ),
                timeout=self.REQUEST_TIMEOUT
            ).json()
            return response.get('state', STATE_UNKNOWN)
        except requests.exceptions.RequestException:
            _LOGGER.error(
                'Could not retrieve device state for %s', self.device_id)
            return STATE_UNKNOWN
        except ValueError:
            # Non-JSON reply from the server.
            _LOGGER.error(
                'Invalid state response for %s', self.device_id)
            return STATE_UNKNOWN

    def action(self, action_id):
        """Perform an action on the device."""
        try:
            requests.get(
                DEVICE_ACTION_URL.format(
                    self.host,
                    self.device_id,
                    action_id
                ),
                timeout=self.REQUEST_TIMEOUT
            )
        except requests.exceptions.RequestException:
            _LOGGER.error(
                'Action request for %s was not accepted for device %s',
                action_id, self.device_id)
class FireTVDevice(MediaPlayerDevice):
    """Representation of an Amazon Fire TV device on the network."""

    # pylint: disable=abstract-method
    def __init__(self, host, device, name):
        """Create the firetv-server client and start in an unknown state."""
        self._firetv = FireTV(host, device)
        self._name = name
        self._state = STATE_UNKNOWN

    @property
    def name(self):
        """Name configured for this device."""
        return self._name

    @property
    def should_poll(self):
        """Polling is required to pick up state changes."""
        return True

    @property
    def supported_media_commands(self):
        """Bitmask of media commands supported by the platform."""
        return SUPPORT_FIRETV

    @property
    def state(self):
        """Current player state (one of the STATE_* constants)."""
        return self._state

    def update(self):
        """Poll firetv-server and translate its state string."""
        state_map = {
            'idle': STATE_IDLE,
            'off': STATE_OFF,
            'play': STATE_PLAYING,
            'pause': STATE_PAUSED,
            'standby': STATE_STANDBY,
            'disconnected': STATE_UNKNOWN,
        }
        self._state = state_map.get(self._firetv.state, STATE_UNKNOWN)

    def turn_on(self):
        """Turn on the device."""
        self._firetv.action('turn_on')

    def turn_off(self):
        """Turn off the device."""
        self._firetv.action('turn_off')

    def media_play(self):
        """Send play command."""
        self._firetv.action('media_play')

    def media_pause(self):
        """Send pause command."""
        self._firetv.action('media_pause')

    def media_play_pause(self):
        """Toggle between play and pause."""
        self._firetv.action('media_play_pause')

    def volume_up(self):
        """Raise the volume one step."""
        self._firetv.action('volume_up')

    def volume_down(self):
        """Lower the volume one step."""
        self._firetv.action('volume_down')

    def media_previous_track(self):
        """Send previous track command (results in rewind)."""
        self._firetv.action('media_previous')

    def media_next_track(self):
        """Send next track command (results in fast-forward)."""
        self._firetv.action('media_next')
| mit |
mckerrj/ansible | lib/ansible/playbook/become.py | 63 | 4030 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
# Use the CLI-wide display object when running under the ansible CLI;
# otherwise (imports from tests/libraries) fall back to a private instance.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class Become:
    """Mixin providing the privilege-escalation ("become") keywords and
    backwards compatibility with the legacy sudo/su playbook keywords."""

    # Privilege escalation
    _become = FieldAttribute(isa='bool')
    _become_method = FieldAttribute(isa='string')
    _become_user = FieldAttribute(isa='string')
    _become_flags = FieldAttribute(isa='string')

    def __init__(self):
        return super(Become, self).__init__()

    def _detect_privilege_escalation_conflict(self, ds):
        """Fail out if the user mixes become/sudo/su keywords."""
        has_become = 'become' in ds or 'become_user' in ds
        has_sudo = 'sudo' in ds or 'sudo_user' in ds
        has_su = 'su' in ds or 'su_user' in ds
        if has_become and has_sudo:
            raise AnsibleParserError('The become params ("become", "become_user") and sudo params ("sudo", "sudo_user") cannot be used together')
        if has_become and has_su:
            raise AnsibleParserError('The become params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
        if has_sudo and has_su:
            raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')

    def _preprocess_data_become(self, ds):
        """Preprocess the playbook data for become attributes.

        Called from the Base object's preprocess_data() method, which runs
        whenever any playbook object (plays, tasks, blocks, etc) is created.
        Rewrites the deprecated sudo/su keywords into their become
        equivalents, emitting a deprecation warning when it does so.
        """
        self._detect_privilege_escalation_conflict(ds)
        # Privilege escalation, backwards compatibility for sudo/su
        if 'sudo' in ds or 'sudo_user' in ds:
            ds['become_method'] = 'sudo'
            if 'sudo' in ds:
                ds['become'] = ds.pop('sudo')
            if 'sudo_user' in ds:
                ds['become_user'] = ds.pop('sudo_user')
            display.deprecated("Instead of sudo/sudo_user, use become/become_user and make sure become_method is 'sudo' (default)")
        elif 'su' in ds or 'su_user' in ds:
            ds['become_method'] = 'su'
            if 'su' in ds:
                ds['become'] = ds.pop('su')
            if 'su_user' in ds:
                ds['become_user'] = ds.pop('su_user')
            display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
        return ds

    def set_become_defaults(self, become, become_method, become_user):
        ''' if we are becoming someone else, but some fields are unset,
            make sure they're initialized to the default config values '''
        if not become:
            return
        if become_method is None:
            become_method = C.DEFAULT_BECOME_METHOD
        if become_user is None:
            become_user = C.DEFAULT_BECOME_USER
| gpl-3.0 |
VaneCloud/horizon | openstack_dashboard/dashboards/project/vpn/views.py | 16 | 17540 | # Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.vpn \
import forms as vpn_forms
from openstack_dashboard.dashboards.project.vpn import tables as vpn_tables
from openstack_dashboard.dashboards.project.vpn import tabs as vpn_tabs
from openstack_dashboard.dashboards.project.vpn \
import workflows as vpn_workflows
import re
class IndexView(tabs.TabView):
    """Tabbed index page for the VPN dashboard panel."""

    tab_group_class = vpn_tabs.VPNTabs
    template_name = 'project/vpn/index.html'
    page_title = _("Virtual Private Network")

    def post(self, request, *args, **kwargs):
        """Handle the delete actions posted by the VPN tables.

        The table action name encodes the resource type (e.g.
        ``...deletevpnservice``); row actions additionally encode the
        object id at the end of the action string.
        """
        obj_ids = request.POST.getlist('object_ids')
        action = request.POST['action']
        m = re.search('.delete([a-z]+)', action).group(1)
        if not obj_ids:
            # Row action: the single object id is embedded in the action.
            obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
        # One (delete_fn, success_msg, error_msg) entry per resource type;
        # replaces four nearly identical if/elif branches.
        handlers = {
            'vpnservice': (api.vpn.vpnservice_delete,
                           _('Deleted VPN Service %s'),
                           _('Unable to delete VPN Service: %s')),
            'ikepolicy': (api.vpn.ikepolicy_delete,
                          _('Deleted IKE Policy %s'),
                          _('Unable to delete IKE Policy: %s')),
            'ipsecpolicy': (api.vpn.ipsecpolicy_delete,
                            _('Deleted IPSec Policy %s'),
                            _('Unable to delete IPSec Policy: %s')),
            'ipsecsiteconnection': (api.vpn.ipsecsiteconnection_delete,
                                    _('Deleted IPSec Site Connection %s'),
                                    _('Unable to delete IPSec Site '
                                      'Connection: %s')),
        }
        if m in handlers:
            delete_fn, success_msg, error_msg = handlers[m]
            for obj_id in obj_ids:
                try:
                    delete_fn(request, obj_id)
                    messages.success(request, success_msg % obj_id)
                except Exception as e:
                    exceptions.handle(request, error_msg % e)
        return self.get(request, *args, **kwargs)
class AddVPNServiceView(workflows.WorkflowView):
    """Workflow view for creating a VPN service."""

    workflow_class = vpn_workflows.AddVPNService

    def get_initial(self):
        # No extra seed data beyond what the base workflow view provides.
        return super(AddVPNServiceView, self).get_initial()
class AddIPSecSiteConnectionView(workflows.WorkflowView):
    """Workflow view for creating an IPSec site connection."""

    workflow_class = vpn_workflows.AddIPSecSiteConnection

    def get_initial(self):
        # No extra seed data beyond what the base workflow view provides.
        return super(AddIPSecSiteConnectionView, self).get_initial()
class AddIKEPolicyView(workflows.WorkflowView):
    """Workflow view for creating an IKE policy."""

    workflow_class = vpn_workflows.AddIKEPolicy

    def get_initial(self):
        # No extra seed data beyond what the base workflow view provides.
        return super(AddIKEPolicyView, self).get_initial()
class AddIPSecPolicyView(workflows.WorkflowView):
    """Workflow view for creating an IPSec policy."""

    workflow_class = vpn_workflows.AddIPSecPolicy

    def get_initial(self):
        # No extra seed data beyond what the base workflow view provides.
        return super(AddIPSecPolicyView, self).get_initial()
class IKEPolicyDetailsView(tabs.TabView):
    """Tabbed detail page for a single IKE policy."""

    tab_group_class = vpn_tabs.IKEPolicyDetailsTabs
    template_name = 'project/vpn/details_tabs.html'
    page_title = _("Virtual Private Network")

    @memoized.memoized_method
    def get_data(self):
        """Fetch the IKE policy, redirecting to the index on failure."""
        try:
            return api.vpn.ikepolicy_get(self.request,
                                         self.kwargs['ikepolicy_id'])
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve IKE Policy details.'),
                              redirect=self.get_redirect_url())

    def get_context_data(self, **kwargs):
        """Expose the policy, redirect URL and row actions to the template."""
        context = super(IKEPolicyDetailsView, self).get_context_data(**kwargs)
        ikepolicy = self.get_data()
        row_actions = vpn_tables.IKEPoliciesTable(
            self.request).render_row_actions(ikepolicy)
        context.update(ikepolicy=ikepolicy,
                       url=self.get_redirect_url(),
                       actions=row_actions)
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request, ikepolicy=self.get_data(),
                                    **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy('horizon:project:vpn:index')
class IPSecPolicyDetailsView(tabs.TabView):
    """Tabbed detail page for a single IPSec policy."""

    tab_group_class = vpn_tabs.IPSecPolicyDetailsTabs
    template_name = 'project/vpn/details_tabs.html'

    @memoized.memoized_method
    def get_data(self):
        """Fetch the IPSec policy, redirecting to the index on failure."""
        try:
            return api.vpn.ipsecpolicy_get(self.request,
                                           self.kwargs['ipsecpolicy_id'])
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve IPSec Policy details.'),
                              redirect=self.get_redirect_url())

    def get_context_data(self, **kwargs):
        """Expose the policy, redirect URL and row actions to the template."""
        context = super(IPSecPolicyDetailsView, self).get_context_data(
            **kwargs)
        ipsecpolicy = self.get_data()
        row_actions = vpn_tables.IPSecPoliciesTable(
            self.request).render_row_actions(ipsecpolicy)
        context.update(ipsecpolicy=ipsecpolicy,
                       url=self.get_redirect_url(),
                       actions=row_actions)
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request, ipsecpolicy=self.get_data(),
                                    **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy('horizon:project:vpn:index')
class VPNServiceDetailsView(tabs.TabView):
    """Tabbed detail page for a single VPN service."""

    tab_group_class = vpn_tabs.VPNServiceDetailsTabs
    template_name = 'project/vpn/details_tabs.html'

    @memoized.memoized_method
    def get_data(self):
        """Fetch the VPN service and attach its IPSec site connections."""
        sid = self.kwargs['vpnservice_id']
        try:
            vpnservice = api.vpn.vpnservice_get(self.request, sid)
        except Exception:
            # NOTE(review): assigning `[]` here would break the attribute
            # writes below, but it only works because exceptions.handle()
            # with `redirect` raises and never returns — confirm.
            vpnservice = []
            msg = _('Unable to retrieve VPN Service details.')
            exceptions.handle(self.request, msg,
                              redirect=self.get_redirect_url())
        try:
            connections = api.vpn.ipsecsiteconnection_list(
                self.request, vpnservice_id=sid)
            vpnservice.vpnconnections = connections
        except Exception:
            # Best effort: still render the service when connections fail.
            vpnservice.vpnconnections = []
        return vpnservice

    def get_context_data(self, **kwargs):
        """Expose the service, redirect URL and row actions to the template."""
        context = super(VPNServiceDetailsView, self).get_context_data(**kwargs)
        vpnservice = self.get_data()
        table = vpn_tables.VPNServicesTable(self.request)
        context["vpnservice"] = vpnservice
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(vpnservice)
        return context

    def get_tabs(self, request, *args, **kwargs):
        vpnservice = self.get_data()
        return self.tab_group_class(request, vpnservice=vpnservice, **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy('horizon:project:vpn:index')
class IPSecSiteConnectionDetailsView(tabs.TabView):
    """Tabbed detail page for a single IPSec site connection."""

    tab_group_class = vpn_tabs.IPSecSiteConnectionDetailsTabs
    template_name = 'project/vpn/details_tabs.html'

    @memoized.memoized_method
    def get_data(self):
        """Fetch the connection, redirecting to the index on failure."""
        try:
            return api.vpn.ipsecsiteconnection_get(
                self.request, self.kwargs['ipsecsiteconnection_id'])
        except Exception:
            exceptions.handle(
                self.request,
                _('Unable to retrieve IPSec Site Connection details.'),
                redirect=self.get_redirect_url())

    def get_context_data(self, **kwargs):
        """Expose the connection, redirect URL and row actions."""
        context = super(IPSecSiteConnectionDetailsView, self).get_context_data(
            **kwargs)
        ipsecsiteconnection = self.get_data()
        row_actions = vpn_tables.IPSecSiteConnectionsTable(
            self.request).render_row_actions(ipsecsiteconnection)
        context.update(ipsecsiteconnection=ipsecsiteconnection,
                       url=self.get_redirect_url(),
                       actions=row_actions)
        return context

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request,
                                    ipsecsiteconnection=self.get_data(),
                                    **kwargs)

    @staticmethod
    def get_redirect_url():
        return reverse_lazy('horizon:project:vpn:index')
class UpdateVPNServiceView(forms.ModalFormView):
    """Modal form for editing an existing VPN service."""

    form_class = vpn_forms.UpdateVPNService
    form_id = "update_vpnservice_form"
    modal_header = _("Edit VPN Service")
    template_name = "project/vpn/update_vpnservice.html"
    context_object_name = 'vpnservice'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:vpn:update_vpnservice"
    success_url = reverse_lazy("horizon:project:vpn:index")
    page_title = _("Edit VPN Service")

    def get_context_data(self, **kwargs):
        context = super(UpdateVPNServiceView, self).get_context_data(**kwargs)
        vpnservice_id = self.kwargs['vpnservice_id']
        context["vpnservice_id"] = vpnservice_id
        context['submit_url'] = reverse(self.submit_url,
                                        args=(vpnservice_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the service being edited, redirecting on failure."""
        try:
            return api.vpn.vpnservice_get(self.request,
                                          self.kwargs['vpnservice_id'])
        except Exception as e:
            msg = _('Unable to retrieve VPN Service details. %s') % e
            exceptions.handle(self.request, msg, redirect=self.success_url)

    def get_initial(self):
        vpnservice = self._get_object()
        initial = {'vpnservice_id': vpnservice['id']}
        for key in ('name', 'description', 'admin_state_up'):
            initial[key] = vpnservice[key]
        return initial
class UpdateIKEPolicyView(forms.ModalFormView):
    """Modal form for editing an existing IKE policy."""

    form_class = vpn_forms.UpdateIKEPolicy
    form_id = "update_ikepolicy_form"
    modal_header = _("Edit IKE Policy")
    template_name = "project/vpn/update_ikepolicy.html"
    context_object_name = 'ikepolicy'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:vpn:update_ikepolicy"
    success_url = reverse_lazy("horizon:project:vpn:index")
    page_title = _("Edit IKE Policy")

    def get_context_data(self, **kwargs):
        context = super(UpdateIKEPolicyView, self).get_context_data(**kwargs)
        ikepolicy_id = self.kwargs['ikepolicy_id']
        context["ikepolicy_id"] = ikepolicy_id
        context['submit_url'] = reverse(self.submit_url,
                                        args=(ikepolicy_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the policy being edited, redirecting on failure."""
        try:
            return api.vpn.ikepolicy_get(self.request,
                                         self.kwargs['ikepolicy_id'])
        except Exception as e:
            msg = _('Unable to retrieve IKE Policy details. %s') % e
            exceptions.handle(self.request, msg, redirect=self.success_url)

    def get_initial(self):
        ikepolicy = self._get_object()
        initial = {'ikepolicy_id': ikepolicy['id'],
                   'lifetime_units': ikepolicy['lifetime']['units'],
                   'lifetime_value': ikepolicy['lifetime']['value']}
        for key in ('name', 'description', 'auth_algorithm',
                    'encryption_algorithm', 'ike_version', 'pfs',
                    'phase1_negotiation_mode'):
            initial[key] = ikepolicy[key]
        return initial
class UpdateIPSecPolicyView(forms.ModalFormView):
    """Modal form for editing an existing IPSec policy."""

    form_class = vpn_forms.UpdateIPSecPolicy
    form_id = "update_ipsecpolicy_form"
    modal_header = _("Edit IPSec Policy")
    template_name = "project/vpn/update_ipsecpolicy.html"
    context_object_name = 'ipsecpolicy'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:vpn:update_ipsecpolicy"
    success_url = reverse_lazy("horizon:project:vpn:index")
    page_title = _("Edit IPSec Policy")

    def get_context_data(self, **kwargs):
        context = super(UpdateIPSecPolicyView, self).get_context_data(**kwargs)
        ipsecpolicy_id = self.kwargs['ipsecpolicy_id']
        context["ipsecpolicy_id"] = ipsecpolicy_id
        context['submit_url'] = reverse(self.submit_url,
                                        args=(ipsecpolicy_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the policy being edited, redirecting on failure."""
        try:
            return api.vpn.ipsecpolicy_get(self.request,
                                           self.kwargs['ipsecpolicy_id'])
        except Exception as e:
            msg = _('Unable to retrieve IPSec Policy details. %s') % e
            exceptions.handle(self.request, msg, redirect=self.success_url)

    def get_initial(self):
        ipsecpolicy = self._get_object()
        initial = {'ipsecpolicy_id': ipsecpolicy['id'],
                   'lifetime_units': ipsecpolicy['lifetime']['units'],
                   'lifetime_value': ipsecpolicy['lifetime']['value']}
        for key in ('name', 'description', 'auth_algorithm',
                    'encapsulation_mode', 'encryption_algorithm', 'pfs',
                    'transform_protocol'):
            initial[key] = ipsecpolicy[key]
        return initial
class UpdateIPSecSiteConnectionView(forms.ModalFormView):
    """Modal form for editing an existing IPSec site connection."""

    form_class = vpn_forms.UpdateIPSecSiteConnection
    form_id = "update_ipsecsiteconnection_form"
    modal_header = _("Edit IPSec Site Connection")
    template_name = "project/vpn/update_ipsecsiteconnection.html"
    context_object_name = 'ipsecsiteconnection'
    submit_label = _("Save Changes")
    submit_url = "horizon:project:vpn:update_ipsecsiteconnection"
    success_url = reverse_lazy("horizon:project:vpn:index")
    page_title = _("Edit IPSec Site Connection")

    def get_context_data(self, **kwargs):
        context = super(
            UpdateIPSecSiteConnectionView, self).get_context_data(**kwargs)
        connection_id = self.kwargs['ipsecsiteconnection_id']
        context["ipsecsiteconnection_id"] = connection_id
        context['submit_url'] = reverse(self.submit_url,
                                        args=(connection_id,))
        return context

    @memoized.memoized_method
    def _get_object(self, *args, **kwargs):
        """Fetch the connection being edited, redirecting on failure."""
        try:
            return api.vpn.ipsecsiteconnection_get(
                self.request, self.kwargs['ipsecsiteconnection_id'])
        except Exception as e:
            msg = _('Unable to retrieve IPSec Site Connection details. %s') % e
            exceptions.handle(self.request, msg, redirect=self.success_url)

    def get_initial(self):
        conn = self._get_object()
        # Keys needing transformation or nested access are set explicitly;
        # the rest are copied verbatim.
        initial = {
            'ipsecsiteconnection_id': conn['id'],
            'peer_cidrs': ", ".join(conn['peer_cidrs']),
            'dpd_action': conn['dpd']['action'],
            'dpd_interval': conn['dpd']['interval'],
            'dpd_timeout': conn['dpd']['timeout'],
        }
        for key in ('name', 'description', 'peer_address', 'peer_id',
                    'psk', 'mtu', 'initiator', 'admin_state_up'):
            initial[key] = conn[key]
        return initial
| apache-2.0 |
0x46616c6b/ansible | lib/ansible/utils/module_docs_fragments/sros.py | 40 | 2972 | #
# (c) 2015, Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
    """Shared documentation fragment for SROS network modules."""

    # Standard SROS connection documentation fragment.
    # Fix: the `timeout` option previously declared `require: false`
    # (typo), which is not a valid documentation key; it is `required:`.
    DOCUMENTATION = """
options:
  host:
    description:
      - Specifies the DNS host name or address for connecting to the remote
        device over the specified transport.  The value of host is used as
        the destination address for the transport.
    required: true
  port:
    description:
      - Specifies the port to use when building the connection to the remote
        device.
    required: false
    default: 22
  username:
    description:
      - Configures the username to use to authenticate the connection to
        the remote device.  This value is used to authenticate
        the SSH session. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
    required: false
  password:
    description:
      - Specifies the password to use to authenticate the connection to
        the remote device.   This value is used to authenticate
        the SSH session. If the value is not specified in the task, the
        value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
    required: false
    default: null
  timeout:
    description:
      - Specifies the timeout in seconds for communicating with the network device
        for either connecting or sending commands.  If the timeout is
        exceeded before the operation is completed, the module will error.
    required: false
    default: 10
  ssh_keyfile:
    description:
      - Specifies the SSH key to use to authenticate the connection to
        the remote device.   This value is the path to the
        key used to authenticate the SSH session. If the value is not specified
        in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
        will be used instead.
    required: false
  provider:
    description:
      - Convenience argument that allows connection arguments to be passed as
        a dict object.  These include C(host), C(port), C(username), C(password),
        C(ssh_keyfile), and C(timeout).  All constraints (required, choices,
        etc) must be met either by individual arguments or values in this dict.
    required: false
    default: null
"""
| gpl-3.0 |
teltek/edx-platform | lms/djangoapps/bulk_email/tests/test_err_handling.py | 4 | 17945 | # -*- coding: utf-8 -*-
"""
Unit tests for handling email sending errors
"""
import json
from itertools import cycle
from smtplib import SMTPConnectError, SMTPDataError, SMTPServerDisconnected
import ddt
from celery.states import RETRY, SUCCESS
from django.conf import settings
from django.core.management import call_command
from django.urls import reverse
from django.db import DatabaseError
from mock import Mock, patch
from opaque_keys.edx.locator import CourseLocator
from six import text_type
from bulk_email.models import SEND_TO_MYSELF, BulkEmailFlag, CourseEmail
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from lms.djangoapps.instructor_task.exceptions import DuplicateTaskException
from lms.djangoapps.instructor_task.models import InstructorTask
from lms.djangoapps.instructor_task.subtasks import (
MAX_DATABASE_LOCK_RETRIES,
SubtaskStatus,
check_subtask_is_valid,
initialize_subtask_info,
update_subtask_status
)
from student.tests.factories import AdminFactory, CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class EmailTestException(Exception):
    """Mock exception raised deliberately by email-sending tests."""
@ddt.ddt
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestEmailErrors(ModuleStoreTestCase):
"""
Test that errors from sending email are handled properly.
"""
shard = 1
ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']
def setUp(self):
super(TestEmailErrors, self).setUp()
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
self.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(self.course.id)})
self.send_mail_url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
self.success_content = {
'course_id': text_type(self.course.id),
'success': True,
}
@classmethod
def setUpClass(cls):
super(TestEmailErrors, cls).setUpClass()
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super(TestEmailErrors, cls).tearDownClass()
BulkEmailFlag.objects.all().delete()
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_retry(self, retry, get_conn):
"""
Test that celery handles transient SMTPDataErrors by retrying.
"""
get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded")
test_email = {
'action': 'Send email',
'send_to': '["myself"]',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Test that we retry upon hitting a 4xx error
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPDataError)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.update_subtask_status')
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_fail(self, retry, result, get_conn):
"""
Test that celery handles permanent SMTPDataErrors by failing and not retrying.
"""
# have every fourth email fail due to blacklisting:
get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"),
None, None, None])
# Don't forget to account for the "myself" instructor user
students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK - 1)]
for student in students:
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
test_email = {
'action': 'Send email',
'send_to': '["myself", "staff", "learners"]',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# We shouldn't retry when hitting a 5xx error
self.assertFalse(retry.called)
# Test that after the rejected email, the rest still successfully send
((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args
self.assertEquals(subtask_status.skipped, 0)
expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)
self.assertEquals(subtask_status.failed, expected_fails)
self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_disconn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPServerDisconnected by retrying.
"""
get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting")
test_email = {
'action': 'Send email',
'send_to': '["myself"]',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPServerDisconnected)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_conn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPConnectError by retrying.
"""
get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection")
test_email = {
'action': 'Send email',
'send_to': '["myself"]',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPConnectError)
@patch('bulk_email.tasks.SubtaskStatus.increment')
@patch('bulk_email.tasks.log')
def test_nonexistent_email(self, mock_log, result):
"""
Tests retries when the email doesn't exist
"""
# create an InstructorTask object to pass through
course_id = self.course.id
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": -1}
with self.assertRaises(CourseEmail.DoesNotExist):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name")
((log_str, __, email_id), __) = mock_log.warning.call_args
self.assertTrue(mock_log.warning.called)
self.assertIn('Failed to get CourseEmail with id', log_str)
self.assertEqual(email_id, -1)
self.assertFalse(result.called)
def test_nonexistent_course(self):
"""
Tests exception when the course in the email doesn't exist
"""
course_id = CourseLocator("I", "DONT", "EXIST")
email = CourseEmail(course_id=course_id)
email.save()
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id}
# (?i) is a regex for ignore case
with self.assertRaisesRegexp(ValueError, r"(?i)course not found"):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name")
def test_nonexistent_to_option(self):
"""
Tests exception when the to_option in the email doesn't exist
"""
with self.assertRaisesRegexp(ValueError, 'Course email being sent to unrecognized target: "IDONTEXIST" *'):
email = CourseEmail.create( # pylint: disable=unused-variable
self.course.id,
self.instructor,
["IDONTEXIST"],
"re: subject",
"dummy body goes here"
)
@ddt.data('track', 'cohort')
def test_nonexistent_grouping(self, target_type):
"""
Tests exception when the cohort or course mode doesn't exist
"""
with self.assertRaisesRegexp(ValueError, '.* IDONTEXIST does not exist .*'):
email = CourseEmail.create( # pylint: disable=unused-variable
self.course.id,
self.instructor,
["{}:IDONTEXIST".format(target_type)],
"re: subject",
"dummy body goes here"
)
def test_wrong_course_id_in_task(self):
"""
Tests exception when the course_id in task is not the same as one explicitly passed in.
"""
email = CourseEmail.create(
self.course.id,
self.instructor,
[SEND_TO_MYSELF],
"re: subject",
"dummy body goes here"
)
entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id}
with self.assertRaisesRegexp(ValueError, 'does not match task value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name")
def test_wrong_course_id_in_email(self):
"""
Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
"""
email = CourseEmail.create(
CourseLocator("bogus", "course", "id"),
self.instructor,
[SEND_TO_MYSELF],
"re: subject",
"dummy body goes here"
)
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id}
with self.assertRaisesRegexp(ValueError, 'does not match email value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name")
def test_send_email_undefined_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
subtask_status = SubtaskStatus.create(subtask_id)
email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'):
send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_missing_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
different_subtask_id = "bogus-subtask-id-value"
subtask_status = SubtaskStatus.create(different_subtask_id)
bogus_email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_completed_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_running_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
update_subtask_status(entry_id, subtask_id, subtask_status)
check_subtask_is_valid(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_retried_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
# try running with a clean subtask:
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
# try again, with a retried subtask with lower count:
new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_with_locked_instructor_task(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
subtask_id = "subtask-id-locked-model"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
with patch('lms.djangoapps.instructor_task.subtasks.InstructorTask.save') as mock_task_save:
mock_task_save.side_effect = DatabaseError
with self.assertRaises(DatabaseError):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES)
def test_send_email_undefined_email(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-undefined-email"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
with self.assertRaises(CourseEmail.DoesNotExist):
# we skip the call that updates subtask status, since we've not set up the InstructorTask
# for the subtask, and it's not important to the test.
with patch('bulk_email.tasks.update_subtask_status'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
| agpl-3.0 |
Learningtribes/edx-platform | cms/djangoapps/contentstore/views/tests/test_container_page.py | 158 | 9232 | """
Unit tests for the container page.
"""
import re
import datetime
from pytz import UTC
from mock import patch, Mock
from django.http import Http404
from django.test.client import RequestFactory
from django.utils import http
import contentstore.views.component as views
from contentstore.views.tests.utils import StudioPageTestCase
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory
class ContainerPageTestCase(StudioPageTestCase):
    """
    Unit tests for the container page.
    """

    # Preview view names used when rendering xblocks on the container page.
    container_view = 'container_preview'
    reorderable_child_view = 'reorderable_container_child_preview'

    def setUp(self):
        """
        Build a unit holding an html block and a split_test child container
        (itself holding a vertical with a video), plus four sibling verticals
        covering each released/unreleased x public/private combination.
        """
        super(ContainerPageTestCase, self).setUp()
        self.vertical = self._create_item(self.sequential.location, 'vertical', 'Unit')
        self.html = self._create_item(self.vertical.location, "html", "HTML")
        self.child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
        self.child_vertical = self._create_item(self.child_container.location, 'vertical', 'Child Vertical')
        self.video = self._create_item(self.child_vertical.location, "video", "My Video")
        self.store = modulestore()
        # Release dates safely in the past / future relative to "now".
        past = datetime.datetime(1970, 1, 1, tzinfo=UTC)
        future = datetime.datetime.now(UTC) + datetime.timedelta(days=1)
        self.released_private_vertical = self._create_item(
            parent_location=self.sequential.location, category='vertical', display_name='Released Private Unit',
            start=past)
        self.unreleased_private_vertical = self._create_item(
            parent_location=self.sequential.location, category='vertical', display_name='Unreleased Private Unit',
            start=future)
        self.released_public_vertical = self._create_item(
            parent_location=self.sequential.location, category='vertical', display_name='Released Public Unit',
            start=past)
        self.unreleased_public_vertical = self._create_item(
            parent_location=self.sequential.location, category='vertical', display_name='Unreleased Public Unit',
            start=future)
        # Only the two "public" verticals are actually published.
        self.store.publish(self.unreleased_public_vertical.location, self.user.id)
        self.store.publish(self.released_public_vertical.location, self.user.id)

    def test_container_html(self):
        """
        Verify the section tag and breadcrumb trail rendered for the
        split_test child container.
        """
        self._test_html_content(
            self.child_container,
            expected_section_tag=(
                '<section class="wrapper-xblock level-page is-hidden studio-xblock-wrapper" '
                'data-locator="{0}" data-course-key="{0.course_key}">'.format(self.child_container.location)
            ),
            expected_breadcrumbs=(
                r'<a href="/course/{course}{section_parameters}" class="{classes}">\s*Week 1\s*</a>\s*'
                r'<a href="/course/{course}{subsection_parameters}" class="{classes}">\s*Lesson 1\s*</a>\s*'
                r'<a href="/container/{unit}" class="{classes}">\s*Unit\s*</a>'
            ).format(
                course=re.escape(unicode(self.course.id)),
                unit=re.escape(unicode(self.vertical.location)),
                classes='navigation-item navigation-link navigation-parent',
                section_parameters=re.escape(u'?show={}'.format(http.urlquote(self.chapter.location))),
                subsection_parameters=re.escape(u'?show={}'.format(http.urlquote(self.sequential.location))),
            ),
        )

    def test_container_on_container_html(self):
        """
        Create the scenario of an xblock with children (non-vertical) on the container page.
        This should create a container page that is a child of another container page.
        """
        draft_container = self._create_item(self.child_container.location, "wrapper", "Wrapper")
        self._create_item(draft_container.location, "html", "Child HTML")

        def test_container_html(xblock):
            # Local helper (shadows the sibling test's name only inside this
            # method): the breadcrumbs now include the Split Test level.
            self._test_html_content(
                xblock,
                expected_section_tag=(
                    '<section class="wrapper-xblock level-page is-hidden studio-xblock-wrapper" '
                    'data-locator="{0}" data-course-key="{0.course_key}">'.format(draft_container.location)
                ),
                expected_breadcrumbs=(
                    r'<a href="/course/{course}{section_parameters}" class="{classes}">\s*Week 1\s*</a>\s*'
                    r'<a href="/course/{course}{subsection_parameters}" class="{classes}">\s*Lesson 1\s*</a>\s*'
                    r'<a href="/container/{unit}" class="{classes}">\s*Unit\s*</a>\s*'
                    r'<a href="/container/{split_test}" class="{classes}">\s*Split Test\s*</a>'
                ).format(
                    course=re.escape(unicode(self.course.id)),
                    unit=re.escape(unicode(self.vertical.location)),
                    split_test=re.escape(unicode(self.child_container.location)),
                    classes='navigation-item navigation-link navigation-parent',
                    section_parameters=re.escape(u'?show={}'.format(http.urlquote(self.chapter.location))),
                    subsection_parameters=re.escape(u'?show={}'.format(http.urlquote(self.sequential.location))),
                ),
            )

        # Test the draft version of the container
        test_container_html(draft_container)

        # Now publish the unit and validate again
        self.store.publish(self.vertical.location, self.user.id)
        draft_container = self.store.get_item(draft_container.location)
        test_container_html(draft_container)

    def _test_html_content(self, xblock, expected_section_tag, expected_breadcrumbs):
        """
        Get the HTML for a container page and verify the section tag is correct
        and the breadcrumbs trail is correct.
        """
        html = self.get_page_html(xblock)
        self.assertIn(expected_section_tag, html)
        self.assertRegexpMatches(html, expected_breadcrumbs)

    def test_public_container_preview_html(self):
        """
        Verify that a public xblock's container preview returns the expected HTML.
        """
        published_unit = self.store.publish(self.vertical.location, self.user.id)
        published_child_container = self.store.get_item(self.child_container.location)
        published_child_vertical = self.store.get_item(self.child_vertical.location)
        self.validate_preview_html(published_unit, self.container_view)
        self.validate_preview_html(published_child_container, self.container_view)
        self.validate_preview_html(published_child_vertical, self.reorderable_child_view)

    def test_draft_container_preview_html(self):
        """
        Verify that a draft xblock's container preview returns the expected HTML.
        """
        self.validate_preview_html(self.vertical, self.container_view)
        self.validate_preview_html(self.child_container, self.container_view)
        self.validate_preview_html(self.child_vertical, self.reorderable_child_view)

    def _create_item(self, parent_location, category, display_name, **kwargs):
        """
        creates an item in the module store, without publishing it.
        """
        return ItemFactory.create(
            parent_location=parent_location,
            category=category,
            display_name=display_name,
            publish_item=False,
            user_id=self.user.id,
            **kwargs
        )

    def test_public_child_container_preview_html(self):
        """
        Verify that a public container rendered as a child of the container page returns the expected HTML.
        """
        empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
        published_empty_child_container = self.store.publish(empty_child_container.location, self.user.id)
        self.validate_preview_html(published_empty_child_container, self.reorderable_child_view, can_add=False)

    def test_draft_child_container_preview_html(self):
        """
        Verify that a draft container rendered as a child of the container page returns the expected HTML.
        """
        empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
        self.validate_preview_html(empty_child_container, self.reorderable_child_view, can_add=False)

    @patch('contentstore.views.component.render_to_response', Mock(return_value=Mock(status_code=200, content='')))
    def test_container_page_with_valid_and_invalid_usage_key_string(self):
        """
        Check that invalid 'usage_key_string' raises Http404.
        """
        request = RequestFactory().get('foo')
        request.user = self.user
        # Check for invalid 'usage_key_strings'
        self.assertRaises(
            Http404, views.container_handler,
            request,
            usage_key_string='i4x://InvalidOrg/InvalidCourse/vertical/static/InvalidContent',
        )
        # Check 200 response if 'usage_key_string' is correct
        response = views.container_handler(
            request=request,
            usage_key_string=unicode(self.vertical.location)
        )
        self.assertEqual(response.status_code, 200)
| agpl-3.0 |
awkspace/ansible | test/integration/targets/setup_rpm_repo/files/create-repo.py | 27 | 1369 | #!/usr/bin/env python
import sys
from collections import namedtuple
import rpmfluff
# Description of a package to build: name/version/release/epoch fields plus
# an optional list of weak ("Recommends:") dependencies.
RPM = namedtuple('RPM', ['name', 'version', 'release', 'epoch', 'recommends'])

# Fixture packages for the test repo: several versions/releases of "foo"
# (with and without an epoch), "foo-bar" and "bar" name variants, and a
# pair exercising weak dependencies.
SPECS = [
    RPM('foo', '1.0', '1', None, None),
    RPM('foo', '1.0', '2', '1', None),
    RPM('foo', '1.1', '1', '1', None),
    RPM('foo-bar', '1.0', '1', None, None),
    RPM('foo-bar', '1.1', '1', None, None),
    RPM('bar', '1.0', '1', None, None),
    RPM('bar', '1.1', '1', None, None),
    RPM('foo-with-weak-dep', '1.0', '1', None, ['foo-weak-dep']),
    RPM('foo-weak-dep', '1.0', '1', None, None),
]
def main():
    """Build the fixture RPMs, assemble a yum repo, and print its directory."""
    # Target architecture comes from the first CLI argument; default x86_64.
    arch = sys.argv[1] if len(sys.argv) > 1 else 'x86_64'

    built = []
    for spec in SPECS:
        package = rpmfluff.SimpleRpmBuild(spec.name, spec.version, spec.release, [arch])
        package.epoch = spec.epoch
        if spec.recommends:
            # Skip packages that require weak deps but an older version of RPM is being used
            if not hasattr(rpmfluff, "can_use_rpm_weak_deps") or not rpmfluff.can_use_rpm_weak_deps():
                continue
            for weak_dep in spec.recommends:
                package.add_recommends(weak_dep)
        built.append(package)

    repo = rpmfluff.YumRepoBuild(built)
    repo.make(arch)
    # Remove per-package build scratch dirs once the repo exists.
    for package in built:
        package.clean()
    print(repo.repoDir)
# Script entry point.
if __name__ == "__main__":
    main()
| gpl-3.0 |
timwee/emacs-starter-kit-mr-flip-forked | vendor/rope/rope/refactor/change_signature.py | 59 | 13280 | import copy
import rope.base.exceptions
from rope.base import pyobjects, taskhandle, evaluate, worder, codeanalyze, utils
from rope.base.change import ChangeContents, ChangeSet
from rope.refactor import occurrences, functionutils
class ChangeSignature(object):
    """Refactoring: change the signature of the function at ``offset``
    in ``resource``, updating its definition and all call sites."""

    def __init__(self, project, resource, offset):
        self.pycore = project.pycore
        self.resource = resource
        self.offset = offset
        self._set_name_and_pyname()
        # Only names backed by a PyFunction are supported.
        if self.pyname is None or self.pyname.get_object() is None or \
           not isinstance(self.pyname.get_object(), pyobjects.PyFunction):
            raise rope.base.exceptions.RefactoringError(
                'Change method signature should be performed on functions')

    def _set_name_and_pyname(self):
        """Resolve the name at the offset; a class resolves to its __init__."""
        self.name = worder.get_name_at(self.resource, self.offset)
        this_pymodule = self.pycore.resource_to_pyobject(self.resource)
        self.primary, self.pyname = evaluate.eval_location2(
            this_pymodule, self.offset)
        if self.pyname is None:
            return
        pyobject = self.pyname.get_object()
        # Changing a class's signature means changing its constructor.
        if isinstance(pyobject, pyobjects.PyClass) and \
           '__init__' in pyobject:
            self.pyname = pyobject['__init__']
            self.name = '__init__'
            pyobject = self.pyname.get_object()
        self.others = None
        # For __init__, also remember the class itself so that constructor
        # calls (ClassName(...)) can be rewritten as well.
        if self.name == '__init__' and \
           isinstance(pyobject, pyobjects.PyFunction) and \
           isinstance(pyobject.parent, pyobjects.PyClass):
            pyclass = pyobject.parent
            self.others = (pyclass.get_name(),
                           pyclass.parent[pyclass.get_name()])

    def _change_calls(self, call_changer, in_hierarchy=None, resources=None,
                      handle=taskhandle.NullTaskHandle()):
        """Apply ``call_changer`` across ``resources``; return a ChangeSet."""
        if resources is None:
            resources = self.pycore.get_python_files()
        changes = ChangeSet('Changing signature of <%s>' % self.name)
        job_set = handle.create_jobset('Collecting Changes', len(resources))
        finder = occurrences.create_finder(
            self.pycore, self.name, self.pyname, instance=self.primary,
            in_hierarchy=in_hierarchy and self.is_method())
        if self.others:
            # Also match direct constructor calls of the enclosing class.
            name, pyname = self.others
            constructor_finder = occurrences.create_finder(
                self.pycore, name, pyname, only_calls=True)
            finder = _MultipleFinders([finder, constructor_finder])
        for file in resources:
            job_set.started_job(file.path)
            change_calls = _ChangeCallsInModule(
                self.pycore, finder, file, call_changer)
            changed_file = change_calls.get_changed_module()
            if changed_file is not None:
                changes.add_change(ChangeContents(file, changed_file))
            job_set.finished_job()
        return changes

    def get_args(self):
        """Get function arguments.

        Return a list of ``(name, default)`` tuples for all but star
        and double star arguments.  For arguments that don't have a
        default, `None` will be used.
        """
        return self._definfo().args_with_defaults

    def is_method(self):
        """Return True when the target function is defined inside a class."""
        pyfunction = self.pyname.get_object()
        return isinstance(pyfunction.parent, pyobjects.PyClass)

    @utils.deprecated('Use `ChangeSignature.get_args()` instead')
    def get_definition_info(self):
        return self._definfo()

    def _definfo(self):
        return functionutils.DefinitionInfo.read(self.pyname.get_object())

    @utils.deprecated()
    def normalize(self):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentNormalizer()])
        return self._change_calls(changer)

    @utils.deprecated()
    def remove(self, index):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentRemover(index)])
        return self._change_calls(changer)

    @utils.deprecated()
    def add(self, index, name, default=None, value=None):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentAdder(index, name, default, value)])
        return self._change_calls(changer)

    @utils.deprecated()
    def inline_default(self, index):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentDefaultInliner(index)])
        return self._change_calls(changer)

    @utils.deprecated()
    def reorder(self, new_ordering):
        changer = _FunctionChangers(
            self.pyname.get_object(), self.get_definition_info(),
            [ArgumentReorderer(new_ordering)])
        return self._change_calls(changer)

    def get_changes(self, changers, in_hierarchy=False, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Get changes caused by this refactoring

        `changers` is a list of `_ArgumentChanger`\s.  If `in_hierarchy`
        is `True` the changers are applied to all matching methods in
        the class hierarchy.

        `resources` can be a list of `rope.base.resource.File`\s that
        should be searched for occurrences; if `None` all python files
        in the project are searched.
        """
        function_changer = _FunctionChangers(self.pyname.get_object(),
                                             self._definfo(), changers)
        return self._change_calls(function_changer, in_hierarchy,
                                  resources, task_handle)
class _FunctionChangers(object):
    """Apply a pipeline of `_ArgumentChanger`\s to a function definition
    and to each of its call sites."""

    def __init__(self, pyfunction, definition_info, changers=None):
        self.pyfunction = pyfunction
        self.definition_info = definition_info
        self.changers = changers
        self.changed_definition_infos = self._get_changed_definition_infos()

    def _get_changed_definition_infos(self):
        # Snapshot the definition info after each changer: element 0 is the
        # original, element i the state after the first i changers.  Deep
        # copies keep each stage independent.
        result = []
        definition_info = self.definition_info
        result.append(definition_info)
        for changer in self.changers:
            definition_info = copy.deepcopy(definition_info)
            changer.change_definition_info(definition_info)
            result.append(definition_info)
        return result

    def change_definition(self, call):
        """Return the new definition text (after all changers)."""
        return self.changed_definition_infos[-1].to_string()

    def change_call(self, primary, pyname, call):
        """Return the rewritten text for one call site."""
        call_info = functionutils.CallInfo.read(
            primary, pyname, self.definition_info, call)
        mapping = functionutils.ArgumentMapping(self.definition_info, call_info)
        # Each changer adjusts the mapping relative to the definition state
        # that was current when that changer ran.
        for definition_info, changer in zip(self.changed_definition_infos, self.changers):
            changer.change_argument_mapping(definition_info, mapping)
        return mapping.to_call_info(self.changed_definition_infos[-1]).to_string()
class _ArgumentChanger(object):
def change_definition_info(self, definition_info):
pass
def change_argument_mapping(self, definition_info, argument_mapping):
pass
class ArgumentNormalizer(_ArgumentChanger):
    """Changer that only normalizes calls; inherits the no-op hooks."""
class ArgumentRemover(_ArgumentChanger):
    """Remove the parameter at ``index`` from the definition and drop the
    corresponding argument from call sites.

    Index positions follow the definition: normal parameters first, then the
    ``*args`` parameter (if any), then the ``**kwargs`` parameter (if any).
    """

    def __init__(self, index):
        self.index = index

    def change_definition_info(self, call_info):
        if self.index < len(call_info.args_with_defaults):
            # A normal (possibly defaulted) parameter.
            del call_info.args_with_defaults[self.index]
        elif self.index == len(call_info.args_with_defaults) and \
                call_info.args_arg is not None:
            # The *args parameter sits right after the normal parameters.
            call_info.args_arg = None
        elif (self.index == len(call_info.args_with_defaults) and
              call_info.args_arg is None and call_info.keywords_arg is not None) or \
             (self.index == len(call_info.args_with_defaults) + 1 and
              call_info.args_arg is not None and call_info.keywords_arg is not None):
            # The **kwargs parameter: directly after the normal parameters
            # when there is no *args, one position later when there is.
            call_info.keywords_arg = None

    def change_argument_mapping(self, definition_info, mapping):
        if self.index < len(definition_info.args_with_defaults):
            # BUG FIX: the original looked up ``args_with_defaults[0]``, which
            # is a (name, default) TUPLE and can never be a key of
            # ``param_dict``, so the removed parameter's argument was never
            # dropped from calls.  Index the pair at ``self.index`` and take
            # its name instead.
            name = definition_info.args_with_defaults[self.index][0]
            if name in mapping.param_dict:
                del mapping.param_dict[name]
class ArgumentAdder(_ArgumentChanger):
    """Insert a parameter ``name`` at ``index``, optionally with a default
    value and/or an explicit ``value`` to pass at existing call sites."""

    def __init__(self, index, name, default=None, value=None):
        self.index = index
        self.name = name
        self.default = default
        self.value = value

    def change_definition_info(self, definition_info):
        # Refuse to shadow an existing parameter of the same name.
        if any(pair[0] == self.name for pair in definition_info.args_with_defaults):
            raise rope.base.exceptions.RefactoringError(
                'Adding duplicate parameter: <%s>.' % self.name)
        definition_info.args_with_defaults.insert(self.index,
                                                  (self.name, self.default))

    def change_argument_mapping(self, definition_info, mapping):
        # Only rewrite call sites when an explicit value was requested.
        if self.value is not None:
            mapping.param_dict[self.name] = self.value
class ArgumentDefaultInliner(_ArgumentChanger):
    """Copy the default of the parameter at ``index`` into call sites that
    do not pass that argument explicitly."""

    def __init__(self, index):
        self.index = index
        # When True, the default is also stripped from the definition.
        # Nothing in this class sets it to True; it stays False so the
        # definition is left unchanged.
        self.remove = False

    def change_definition_info(self, definition_info):
        if self.remove:
            definition_info.args_with_defaults[self.index] = \
                (definition_info.args_with_defaults[self.index][0], None)

    def change_argument_mapping(self, definition_info, mapping):
        # Only fill in the default for calls that omitted the argument.
        default = definition_info.args_with_defaults[self.index][1]
        name = definition_info.args_with_defaults[self.index][0]
        if default is not None and name not in mapping.param_dict:
            mapping.param_dict[name] = default
class ArgumentReorderer(_ArgumentChanger):

    def __init__(self, new_order, autodef=None):
        """Construct an `ArgumentReorderer`

        Note that the `new_order` is a list containing the new
        position of parameters; not the position each parameter
        is going to be moved to. (changed in ``0.5m4``)

        For example changing ``f(a, b, c)`` to ``f(c, a, b)``
        requires passing ``[2, 0, 1]`` and *not* ``[1, 2, 0]``.

        The `autodef` (automatic default) argument, forces rope to use
        it as a default if a default is needed after the change.  That
        happens when an argument without default is moved after
        another that has a default value.  Note that `autodef` should
        be a string or `None`; the latter disables adding automatic
        default.
        """
        self.new_order = new_order
        self.autodef = autodef

    def change_definition_info(self, definition_info):
        new_args = list(definition_info.args_with_defaults)
        # Place each selected parameter into its new slot.
        for new_index, index in enumerate(self.new_order):
            new_args[new_index] = definition_info.args_with_defaults[index]
        # After reordering, a parameter without a default may follow one that
        # has a default, which is illegal; patch it with `autodef` if given.
        seen_default = False
        for index, (arg, default) in enumerate(list(new_args)):
            if default is not None:
                seen_default = True
            if seen_default and default is None and self.autodef is not None:
                new_args[index] = (arg, self.autodef)
        definition_info.args_with_defaults = new_args
class _ChangeCallsInModule(object):
    """Rewrite every matching definition and call in a single module."""

    def __init__(self, pycore, occurrence_finder, resource, call_changer):
        self.pycore = pycore
        self.occurrence_finder = occurrence_finder
        self.resource = resource
        self.call_changer = call_changer

    def get_changed_module(self):
        """Return the module's new source, or None when nothing changed."""
        word_finder = worder.Worder(self.source)
        change_collector = codeanalyze.ChangeCollector(self.source)
        for occurrence in self.occurrence_finder.find_occurrences(self.resource):
            # Only call sites and the definition itself get rewritten.
            if not occurrence.is_called() and not occurrence.is_defined():
                continue
            start, end = occurrence.get_primary_range()
            begin_parens, end_parens = word_finder.get_word_parens_range(end - 1)
            if occurrence.is_called():
                primary, pyname = occurrence.get_primary_and_pyname()
                changed_call = self.call_changer.change_call(
                    primary, pyname, self.source[start:end_parens])
            else:
                changed_call = self.call_changer.change_definition(
                    self.source[start:end_parens])
            if changed_call is not None:
                change_collector.add_change(start, end_parens, changed_call)
        return change_collector.get_changed()

    @property
    @utils.saveit
    def pymodule(self):
        # Cached (utils.saveit): parsing the module is expensive.
        return self.pycore.resource_to_pyobject(self.resource)

    @property
    @utils.saveit
    def source(self):
        # Prefer the on-disk resource; fall back to the parsed module source.
        if self.resource is not None:
            return self.resource.read()
        else:
            return self.pymodule.source_code

    @property
    @utils.saveit
    def lines(self):
        return self.pymodule.lines
class _MultipleFinders(object):
def __init__(self, finders):
self.finders = finders
def find_occurrences(self, resource=None, pymodule=None):
all_occurrences = []
for finder in self.finders:
all_occurrences.extend(finder.find_occurrences(resource, pymodule))
all_occurrences.sort(self._cmp_occurrences)
return all_occurrences
def _cmp_occurrences(self, o1, o2):
return cmp(o1.get_primary_range(), o2.get_primary_range())
| gpl-3.0 |
lheadjh/MultimodalDeepLearning | ver_tensorflow/MM_RDN.py | 1 | 8138 | import tensorflow as tf
import time
import numpy as np
import mdl_data
import sys
# Command-line arguments: GPU index (string, used as '/gpu:<GPUNUM>') and the
# dataset path (FILEPATH is consumed later in the script — not visible here).
GPUNUM = sys.argv[1]
FILEPATH = sys.argv[2]

# Network Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 32
display_step = 1

# Image branch sizes: fc7 features through a two-layer MLP.
n_input_img = 4096  # YLI_MED image data input (data shape: 4096, fc7 layer output)
n_hidden_1_img = 1000  # 1st hidden layer width
n_hidden_2_img = 600  # 2nd hidden layer width

# Audio branch sizes: MFCC frames fed to a bidirectional LSTM.
n_input_aud = 100
n_steps_aud = 20  # YLI_MED audio data input (data shape: 2000, mfcc output)
n_hidden_1_aud = 300  # LSTM cell size (per direction)
#n_hidden_2_aud = 600 # 2nd layer num features 600

# Fusion / output head sizes.
n_hidden_1_in = 600
n_hidden_1_out = 256
n_hidden_2_out = 128

n_classes = 10  # YLI_MED total classes (0-9 digits)
dropout = 1  # keep probability (1 disables dropout; 0.75 previously tried)
with tf.device('/gpu:' + GPUNUM):
#-------------------------------Struct Graph
# tf Graph input
x_aud = tf.placeholder("float", [None, n_steps_aud, n_input_aud])
x_img = tf.placeholder("float", [None, n_input_img])
y = tf.placeholder("float", [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create model
    def Multimodal(_X_aud, _X_img, _w_aud, _b_aud, _w_img, _b_img, _w_out, _b_out, _dropout):
        """Build the fused audio/image network; return the class-logits tensor.

        _X_aud: audio input, shape (batch, n_steps_aud, n_input_aud).
        _X_img: image input, shape (batch, n_input_img).
        _w_img/_b_img: image-branch weight/bias dicts ('h1', 'h2' used here).
        _w_out/_b_out: output-head weight/bias dicts ('h1', 'h2', 'out').
        _dropout: keep probability for the image-branch dropout layers.

        NOTE(review): ``_w_aud`` and ``_b_aud`` are accepted but never used in
        this body (the BiLSTM manages its own variables) — confirm whether
        they can be dropped from the call sites.
        """
        #------------------------------------aud
        # Permuting batch_size and n_steps
        _X_aud = tf.transpose(_X_aud, [1, 0, 2])
        # Reshape to (n_steps*batch_size, n_input)
        _X_aud = tf.reshape(_X_aud, [-1, n_input_aud])
        # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
        _X_aud = tf.split(0, n_steps_aud, _X_aud)
        # Define lstm cells with tensorflow
        # Forward direction cell
        lstm_fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_1_aud, forget_bias=1.0)
        # Backward direction cell
        lstm_bw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_1_aud, forget_bias=1.0)
        # Get lstm cell output
        try:
            aud_outputs, _, _ = tf.nn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X_aud, dtype=tf.float32)
        except Exception:  # Old TensorFlow version only returns outputs not states
            aud_outputs = tf.nn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X_aud, dtype=tf.float32)
        #------------------------------------Image
        img_layer_1 = tf.nn.relu(tf.add(tf.matmul(_X_img, _w_img['h1']), _b_img['b1']))  # Hidden layer with RELU activation
        drop_1 = tf.nn.dropout(img_layer_1, _dropout)
        img_layer_2 = tf.nn.relu(tf.add(tf.matmul(drop_1, _w_img['h2']), _b_img['b2']))  # Hidden layer with RELU activation
        drop_2 = tf.nn.dropout(img_layer_2, _dropout)
        #img_out = tf.matmul(drop_2, _w_img['out']) + _b_img['out']
        # Fuse the two branches: elementwise sum of the last BiLSTM output and
        # the image features, passed through a ReLU.
        facmat = tf.nn.relu(tf.add(aud_outputs[-1], drop_2))
        #out_drop = tf.nn.dropout(merge_sum, _dropout)
        out_layer_1 = tf.nn.relu(tf.add(tf.matmul(facmat, _w_out['h1']), _b_out['b1']))  # Hidden layer with RELU activation
        out_layer_2 = tf.nn.relu(tf.add(tf.matmul(out_layer_1, _w_out['h2']), _b_out['b2']))  # Hidden layer with RELU activation
        #return out_drop
        return tf.matmul(out_layer_2, _w_out['out']) + _b_out['out']
# Store layers weight & bias
w_out = {
'h1': tf.Variable(tf.random_normal([n_hidden_1_in, n_hidden_1_out])),
'h2': tf.Variable(tf.random_normal([n_hidden_1_out, n_hidden_2_out])),
'out': tf.Variable(tf.random_normal([n_hidden_2_out, n_classes]))
}
b_out = {
'b1': tf.Variable(tf.random_normal([n_hidden_1_out])),
'b2': tf.Variable(tf.random_normal([n_hidden_2_out])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
w_aud = {
'h1': tf.Variable(tf.random_normal([n_input_aud, 2*n_hidden_1_aud])),
#'h2': tf.Variable(tf.random_normal([n_hidden_1_aud, n_hidden_2_aud])),
'out': tf.Variable(tf.random_normal([2*n_hidden_1_aud, n_classes]))
}
b_aud = {
'b1': tf.Variable(tf.random_normal([2*n_hidden_1_aud])),
#'b2': tf.Variable(tf.random_normal([n_hidden_2_aud])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
w_img = {
'h1': tf.Variable(tf.random_normal([n_input_img, n_hidden_1_img])),
'h2': tf.Variable(tf.random_normal([n_hidden_1_img, n_hidden_2_img])),
'out': tf.Variable(tf.random_normal([n_hidden_2_img, n_classes]))
}
b_img = {
'b1': tf.Variable(tf.random_normal([n_hidden_1_img])),
'b2': tf.Variable(tf.random_normal([n_hidden_2_img])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = Multimodal(x_aud, x_img, w_aud, b_aud, w_img, b_img, w_out, b_out, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
# Initializing the variables
init = tf.initialize_all_variables()
'''
-------------------------------
Load data
-------------------------------
'''
#Source reference: https://github.com/aymericdamien/TensorFlow-Examples.git/input_data.py
def dense_to_one_hot(labels_dense, num_classes=10):
    """Convert a vector of integer class labels into a one-hot matrix.

    labels_dense: 1-D (or raveled) array of integer labels in
        [0, num_classes).
    Returns a float array of shape (len(labels_dense), num_classes) with a
    single 1 per row.
    """
    count = labels_dense.shape[0]
    one_hot = np.zeros((count, num_classes))
    # Row i gets a 1 in the column named by label i.
    one_hot[np.arange(count), labels_dense.ravel()] = 1
    return one_hot
# Load data (audio MFCCs + image fc7 features + labels from the YLI-MED set)
data = mdl_data.YLIMED('YLIMED_info.csv', FILEPATH + '/YLIMED150924/audio/mfcc20', FILEPATH + '/YLIMED150924/keyframe/fc7')
X_img_train = data.get_img_X_train()
X_aud_train = data.get_aud_X_train()
y_train = data.get_y_train()
Y_train = dense_to_one_hot(y_train)  # one-hot labels for the softmax loss
# Shuffle initial data (same permutation applied to all three arrays so
# audio, image, and labels stay aligned)
p = np.random.permutation(len(Y_train))
X_img_train = X_img_train[p]
X_aud_train = X_aud_train[p]
Y_train = Y_train[p]
# Load test data
X_img_test = data.get_img_X_test()
X_aud_test = data.get_aud_X_test()
# Reshape flat audio vectors into (batch, steps, features) for the LSTM input.
X_aud_test = X_aud_test.reshape((-1, n_steps_aud, n_input_aud))
y_test = data.get_y_test()
Y_test = dense_to_one_hot(y_test)
'''
-------------------------------
Launch the graph
-------------------------------
'''
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) as sess:
    sess.run(init)
    #Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(len(Y_train)/batch_size)
        #Loop over all batches
        for i in range(total_batch):
            batch_x_aud, batch_x_img, batch_ys, finish = data.next_batch_multi(X_aud_train, X_img_train, Y_train, batch_size, len(Y_train))
            batch_x_aud = batch_x_aud.reshape((batch_size, n_steps_aud, n_input_aud))
            # Fit training using batch data
            sess.run(optimizer, feed_dict = {x_aud: batch_x_aud, x_img: batch_x_img, y: batch_ys, keep_prob: dropout})
            # Compute average loss (keep_prob forced to 1 so dropout is off)
            avg_cost += sess.run(cost, feed_dict = {x_aud: batch_x_aud, x_img: batch_x_img, y: batch_ys, keep_prob: 1.}) / total_batch
            # Reshuffle once the batch loader reports a full pass over the data
            # (same permutation for all three arrays to keep them aligned)
            if finish:
                p = np.random.permutation(len(Y_train))
                X_aud_train = X_aud_train[p]
                X_img_train = X_img_train[p]
                Y_train = Y_train[p]
        # Display logs per epoch step
        if epoch % display_step == 0:
            print "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost)
    print "Optimization Finished!"
    # Test model: argmax over logits vs. argmax over one-hot labels
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Calculate accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print "Accuracy:", accuracy.eval({x_aud: X_aud_test, x_img: X_img_test, y: Y_test, keep_prob: 1.})
print 'MM_RDN.py'
| mit |
Inspq/ansible | lib/ansible/modules/cloud/amazon/ec2_lc_find.py | 27 | 7251 | #!/usr/bin/python
# encoding: utf-8
# (c) 2015, Jose Armesto <jose@armesto.net>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ec2_lc_find
short_description: Find AWS Autoscaling Launch Configurations
description:
- Returns list of matching Launch Configurations for a given name, along with other useful information
- Results can be sorted and sliced
- It depends on boto
- Based on the work by Tom Bamford (https://github.com/tombamford)
version_added: "2.2"
author: "Jose Armesto (@fiunchinho)"
options:
region:
description:
- The AWS region to use.
required: true
aliases: ['aws_region', 'ec2_region']
name_regex:
description:
- A Launch Configuration to match
- It'll be compiled as regex
required: True
sort_order:
description:
- Order in which to sort results.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
limit:
description:
- How many results to show.
- Corresponds to Python slice notation like list[:limit].
default: null
required: false
requirements:
- "python >= 2.6"
- boto3
"""
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the Launch Configurations that start with "app"
- ec2_lc_find:
name_regex: app.*
sort_order: descending
limit: 2
'''
RETURN = '''
image_id:
description: AMI id
returned: when Launch Configuration was found
type: string
sample: "ami-0d75df7e"
user_data:
description: User data used to start instance
returned: when Launch Configuration was found
type: string
user_data: "ZXhwb3J0IENMT1VE"
name:
description: Name of the AMI
returned: when Launch Configuration was found
type: string
sample: "myapp-v123"
arn:
description: Name of the AMI
returned: when Launch Configuration was found
type: string
sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
instance_type:
description: Type of ec2 instance
returned: when Launch Configuration was found
type: string
sample: "t2.small"
created_time:
description: When it was created
returned: when Launch Configuration was found
type: string
sample: "2016-06-29T14:59:22.222000+00:00"
ebs_optimized:
description: Launch Configuration EBS optimized property
returned: when Launch Configuration was found
type: boolean
sample: False
instance_monitoring:
description: Launch Configuration instance monitoring property
returned: when Launch Configuration was found
type: string
sample: {"Enabled": false}
classic_link_vpc_security_groups:
description: Launch Configuration classic link vpc security groups property
returned: when Launch Configuration was found
type: list
sample: []
block_device_mappings:
description: Launch Configuration block device mappings property
returned: when Launch Configuration was found
type: list
sample: []
keyname:
description: Launch Configuration ssh key
returned: when Launch Configuration was found
type: string
sample: mykey
security_groups:
description: Launch Configuration security groups
returned: when Launch Configuration was found
type: list
sample: []
kernel_id:
description: Launch Configuration kernel to use
returned: when Launch Configuration was found
type: string
sample: ''
ram_disk_id:
description: Launch Configuration ram disk property
returned: when Launch Configuration was found
type: string
sample: ''
associate_public_address:
description: Assign public address or not
returned: when Launch Configuration was found
type: boolean
sample: True
...
'''
def find_launch_configs(client, module):
    """Find launch configurations whose name matches ``name_regex``.

    Pages through describe_launch_configurations, keeps the configurations
    whose name matches (prefix-anchored, like ``re.match``), sorts them by
    name and exits the module with the (optionally truncated) results.

    :param client: boto3 autoscaling client
    :param module: AnsibleModule providing name_regex / sort_order / limit
    """
    name_regex = module.params.get('name_regex')
    sort_order = module.params.get('sort_order')
    limit = module.params.get('limit')

    # Compile once instead of recompiling inside the filter for every
    # launch configuration on every page.
    name_matcher = re.compile(name_regex)

    paginator = client.get_paginator('describe_launch_configurations')

    response_iterator = paginator.paginate(
        PaginationConfig={
            'MaxItems': 1000,
            'PageSize': 100
        }
    )

    results = []

    for response in response_iterator:
        for lc in response['LaunchConfigurations']:
            # .match() anchors at the start of the name, mirroring the
            # documented prefix-style matching of name_regex.
            if not name_matcher.match(lc['LaunchConfigurationName']):
                continue

            data = {
                'name': lc['LaunchConfigurationName'],
                'arn': lc['LaunchConfigurationARN'],
                'created_time': lc['CreatedTime'],
                'user_data': lc['UserData'],
                'instance_type': lc['InstanceType'],
                'image_id': lc['ImageId'],
                'ebs_optimized': lc['EbsOptimized'],
                'instance_monitoring': lc['InstanceMonitoring'],
                'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
                'block_device_mappings': lc['BlockDeviceMappings'],
                'keyname': lc['KeyName'],
                'security_groups': lc['SecurityGroups'],
                'kernel_id': lc['KernelId'],
                'ram_disk_id': lc['RamdiskId'],
                'associate_public_address': lc.get('AssociatePublicIpAddress', False),
            }
            results.append(data)

    results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))

    if limit:
        results = results[:int(limit)]

    module.exit_json(changed=False, results=results)
def main():
    """Ansible module entry point.

    Builds the argument spec, creates a boto3 autoscaling client using the
    standard AWS connection parameters, and delegates the actual work (and
    the module exit) to find_launch_configs().
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        region=dict(required=True, aliases=['aws_region', 'ec2_region']),
        name_regex=dict(required=True),
        sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
        limit=dict(required=False, type='int'),
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    # True -> region is required; ec2_url is unused here.
    region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)

    client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
    find_launch_configs(client, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sjsrey/pysal_core | pysal_core/io/IOHandlers/tests/test_arcgis_txt.py | 2 | 2310 | import unittest
from ..arcgis_txt import ArcGISTextIO
from ...FileIO import FileIO as psopen
from .... import examples as pysal_examples
import tempfile
import os
import warnings
class test_ArcGISTextIO(unittest.TestCase):
    """Read/write round-trip tests for the ArcGIS text weights format."""

    def setUp(self):
        self.test_file = test_file = pysal_examples.get_path('arcgis_txt.txt')
        self.obj = ArcGISTextIO(test_file, 'r')

    def test_close(self):
        f = self.obj
        f.close()
        # Reading from a closed handle must fail.
        # (failUnlessRaises is a deprecated unittest alias, removed in
        # Python 3.12; assertRaises is the supported spelling.)
        self.assertRaises(ValueError, f.read)

    def test_read(self):
        # The example file has no companion DBF, so the reader may emit a
        # RuntimeWarning about unordered string ids; tolerate it.
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter("always")
            w = self.obj.read()
            if len(warn) > 0:
                assert issubclass(warn[0].category, RuntimeWarning)
                assert "DBF relating to ArcGIS TEXT was not found, proceeding with unordered string ids." in str(warn[0].message)
        self.assertEqual(3, w.n)
        self.assertEqual(2.0, w.mean_neighbors)
        self.assertEqual([0.1, 0.05], w[2].values())

    def test_seek(self):
        self.test_read()
        self.assertRaises(StopIteration, self.obj.read)
        self.obj.seek(0)
        self.test_read()

    def test_write(self):
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter("always")
            w = self.obj.read()
            if len(warn) > 0:
                assert issubclass(warn[0].category, RuntimeWarning)
                assert "DBF relating to ArcGIS TEXT was not found, proceeding with unordered string ids." in str(warn[0].message)
        # NamedTemporaryFile is used only to reserve a unique file name;
        # the handle is closed immediately and the name reused below.
        f = tempfile.NamedTemporaryFile(
            suffix='.txt', dir=pysal_examples.get_path(''))
        fname = f.name
        f.close()
        o = psopen(fname, 'w', 'arcgis_text')
        o.write(w)
        o.close()
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter("always")
            wnew = psopen(fname, 'r', 'arcgis_text').read()
            if len(warn) > 0:
                assert issubclass(warn[0].category, RuntimeWarning)
                assert "DBF relating to ArcGIS TEXT was not found, proceeding with unordered string ids." in str(warn[0].message)
        self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
        os.remove(fname)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
palladius/gcloud | packages/gsutil/boto/tests/unit/ec2/test_blockdevicemapping.py | 10 | 4065 | import mock
import unittest
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
class BlockDeviceTypeTests(unittest.TestCase):
    """Unit tests for BlockDeviceType's SAX-style endElement handler."""

    def setUp(self):
        self.block_device_type = BlockDeviceType()

    def check_that_attribute_has_been_set(self, name, value, attribute):
        """Feed (name, value) to endElement and assert `attribute` was set."""
        self.block_device_type.endElement(name, value, None)
        self.assertEqual(getattr(self.block_device_type, attribute), value)

    def test_endElement_sets_correct_attributes_with_values(self):
        # Each tuple is (XML element name, value, expected attribute name);
        # the last one checks that unknown names are set verbatim.
        for arguments in [("volumeId", 1, "volume_id"),
                          ("virtualName", "some name", "ephemeral_name"),
                          ("snapshotId", 1, "snapshot_id"),
                          ("volumeSize", 1, "size"),
                          ("status", "some status", "status"),
                          ("attachTime", 1, "attach_time"),
                          ("somethingRandom", "somethingRandom", "somethingRandom")]:
            self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])

    def test_endElement_with_name_NoDevice_value_true(self):
        self.block_device_type.endElement("NoDevice", 'true', None)
        self.assertEqual(self.block_device_type.no_device, True)

    def test_endElement_with_name_NoDevice_value_other(self):
        # Any value other than the literal string 'true' means False.
        self.block_device_type.endElement("NoDevice", 'something else', None)
        self.assertEqual(self.block_device_type.no_device, False)

    def test_endElement_with_name_deleteOnTermination_value_true(self):
        self.block_device_type.endElement("deleteOnTermination", "true", None)
        self.assertEqual(self.block_device_type.delete_on_termination, True)

    def test_endElement_with_name_deleteOnTermination_value_other(self):
        self.block_device_type.endElement("deleteOnTermination", 'something else', None)
        self.assertEqual(self.block_device_type.delete_on_termination, False)
class BlockDeviceMappingTests(unittest.TestCase):
    """Unit tests for BlockDeviceMapping's SAX-style element handlers."""

    def setUp(self):
        self.block_device_mapping = BlockDeviceMapping()

    def block_device_type_eq(self, b1, b2):
        """Field-by-field equality check for two BlockDeviceType objects."""
        if isinstance(b1, BlockDeviceType) and isinstance(b2, BlockDeviceType):
            return all([b1.connection == b2.connection,
                        b1.ephemeral_name == b2.ephemeral_name,
                        b1.no_device == b2.no_device,
                        b1.volume_id == b2.volume_id,
                        b1.snapshot_id == b2.snapshot_id,
                        b1.status == b2.status,
                        b1.attach_time == b2.attach_time,
                        b1.delete_on_termination == b2.delete_on_termination,
                        b1.size == b2.size])
        # Previously this fell off the end and returned None; make the
        # "not comparable" case an explicit False.
        return False

    def test_startElement_with_name_ebs_sets_and_returns_current_value(self):
        retval = self.block_device_mapping.startElement("ebs", None, None)
        assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping))

    def test_startElement_with_name_virtualName_sets_and_returns_current_value(self):
        retval = self.block_device_mapping.startElement("virtualName", None, None)
        assert self.block_device_type_eq(retval, BlockDeviceType(self.block_device_mapping))

    def test_endElement_with_name_device_sets_current_name(self):
        self.block_device_mapping.endElement("device", "/dev/null", None)
        self.assertEqual(self.block_device_mapping.current_name, "/dev/null")

    def test_endElement_with_name_deviceName_sets_current_name(self):
        # Renamed: this method previously duplicated the name of the test
        # above, so Python silently discarded the first definition and the
        # "device" case was never actually run.
        self.block_device_mapping.endElement("deviceName", "some device name", None)
        self.assertEqual(self.block_device_mapping.current_name, "some device name")

    def test_endElement_with_name_item_sets_current_name_key_to_current_value(self):
        self.block_device_mapping.current_name = "some name"
        self.block_device_mapping.current_value = "some value"
        self.block_device_mapping.endElement("item", "some item", None)
        self.assertEqual(self.block_device_mapping["some name"], "some value")
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
pgmillon/ansible | lib/ansible/modules/utilities/logic/async_wrapper.py | 17 | 11406 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import shlex
import shutil
import os
import subprocess
import sys
import traceback
import signal
import time
import syslog
import multiprocessing
from ansible.module_utils._text import to_text
PY3 = sys.version_info[0] == 3
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# pipe for communication between forked process and parent
ipc_watcher, ipc_notifier = multiprocessing.Pipe()
def notice(msg):
    """Log `msg` to syslog so detached async runs can be traced on the host."""
    syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
    """Detach the current process from its controlling terminal.

    Classic double-fork daemonization; the first fork lets the parent exit,
    setsid() creates a new session, and the second fork guarantees the
    daemon can never reacquire a controlling terminal.
    """
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))

    # decouple from parent environment (does not chdir / to keep the directory context the same as for non async tasks)
    os.setsid()
    os.umask(int('022', 8))

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # print "Daemon PID %d" % pid
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
        sys.exit("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))

    # Redirect stdio to /dev/null; the daemon must not touch the terminal.
    dev_null = open('/dev/null', 'w')
    os.dup2(dev_null.fileno(), sys.stdin.fileno())
    os.dup2(dev_null.fileno(), sys.stdout.fileno())
    os.dup2(dev_null.fileno(), sys.stderr.fileno())
# NB: this function copied from module_utils/json_utils.py. Ensure any changes are propagated there.
# FUTURE: AnsibleModule-ify this module so it's Ansiballz-compatible and can use the module_utils copy of this function.
def _filter_non_json_lines(data):
'''
Used to filter unrelated output around module JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
Filters leading lines before first line-starting occurrence of '{' or '[', and filter all
trailing lines after matching close character (working from the bottom of output).
'''
warnings = []
# Filter initial junk
lines = data.splitlines()
for start, line in enumerate(lines):
line = line.strip()
if line.startswith(u'{'):
endchar = u'}'
break
elif line.startswith(u'['):
endchar = u']'
break
else:
raise ValueError('No start of json char found')
# Filter trailing junk
lines = lines[start:]
for reverse_end_offset, line in enumerate(reversed(lines)):
if line.strip().endswith(endchar):
break
else:
raise ValueError('No end of json char found')
if reverse_end_offset > 0:
# Trailing junk is uncommon and can point to things the user might
# want to change. So print a warning if we find any
trailing_junk = lines[len(lines) - reverse_end_offset:]
warnings.append('Module invocation had junk after the JSON data: %s' % '\n'.join(trailing_junk))
lines = lines[:(len(lines) - reverse_end_offset)]
return ('\n'.join(lines), warnings)
def _get_interpreter(module_path):
module_fd = open(module_path, 'rb')
try:
head = module_fd.read(1024)
if head[0:2] != '#!':
return None
return head[2:head.index('\n')].strip().split(' ')
finally:
module_fd.close()
def _run_module(wrapped_cmd, jid, job_path):
    """Execute the wrapped module command and persist its result as JSON.

    Writes a "started" status file first, then runs the command, and finally
    atomically renames a temp file over `job_path` with either the module's
    JSON output or a failure record.  Also signals the grandparent over the
    IPC pipe that the grandchild is alive.
    """
    # Publish the "started" marker via write-to-temp + rename so readers
    # never observe a partially written status file.
    tmp_job_path = job_path + ".tmp"
    jobfile = open(tmp_job_path, "w")
    jobfile.write(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid}))
    jobfile.close()
    os.rename(tmp_job_path, job_path)
    jobfile = open(tmp_job_path, "w")
    result = {}

    # signal grandchild process started and isolated from being terminated
    # by the connection being closed sending a signal to the job group
    ipc_notifier.send(True)
    ipc_notifier.close()

    outdata = ''
    filtered_outdata = ''
    stderr = ''
    try:
        cmd = shlex.split(wrapped_cmd)
        # call the module interpreter directly (for non-binary modules)
        # this permits use of a script for an interpreter on non-Linux platforms
        interpreter = _get_interpreter(cmd[0])
        if interpreter:
            cmd = interpreter + cmd

        script = subprocess.Popen(cmd, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        (outdata, stderr) = script.communicate()
        if PY3:
            outdata = outdata.decode('utf-8', 'surrogateescape')
            stderr = stderr.decode('utf-8', 'surrogateescape')

        # Strip MOTD/noise around the module's JSON before parsing.
        (filtered_outdata, json_warnings) = _filter_non_json_lines(outdata)

        result = json.loads(filtered_outdata)

        if json_warnings:
            # merge JSON junk warnings with any existing module warnings
            module_warnings = result.get('warnings', [])
            if not isinstance(module_warnings, list):
                module_warnings = [module_warnings]
            module_warnings.extend(json_warnings)
            result['warnings'] = module_warnings

        if stderr:
            result['stderr'] = stderr
        jobfile.write(json.dumps(result))

    except (OSError, IOError):
        # Spawn failures (missing interpreter, unreadable module, etc.).
        e = sys.exc_info()[1]
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "msg": to_text(e),
            "outdata": outdata,  # temporary notice only
            "stderr": stderr
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))

    except (ValueError, Exception):
        # Unparseable output or anything else: record the raw data + traceback.
        result = {
            "failed": 1,
            "cmd": wrapped_cmd,
            "data": outdata,  # temporary notice only
            "stderr": stderr,
            "msg": traceback.format_exc()
        }
        result['ansible_job_id'] = jid
        jobfile.write(json.dumps(result))

    jobfile.close()
    # Atomically publish the final result over the "started" marker.
    os.rename(tmp_job_path, job_path)
if __name__ == '__main__':
    # CLI contract: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp]
    if len(sys.argv) < 5:
        print(json.dumps({
            "failed": True,
            "msg": "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile> [-preserve_tmp] "
                   "Humans, do not call directly!"
        }))
        sys.exit(1)

    jid = "%s.%d" % (sys.argv[1], os.getpid())
    time_limit = sys.argv[2]
    wrapped_module = sys.argv[3]
    argsfile = sys.argv[4]
    # Never delete a module directory that is not an ansible temp dir.
    if '-tmp-' not in os.path.dirname(wrapped_module):
        preserve_tmp = True
    elif len(sys.argv) > 5:
        preserve_tmp = sys.argv[5] == '-preserve_tmp'
    else:
        preserve_tmp = False
    # consider underscore as no argsfile so we can support passing of additional positional parameters
    if argsfile != '_':
        cmd = "%s %s" % (wrapped_module, argsfile)
    else:
        cmd = wrapped_module
    step = 5  # polling interval (seconds) for the watcher loop below

    async_dir = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')

    # setup job output directory
    jobdir = os.path.expanduser(async_dir)
    job_path = os.path.join(jobdir, jid)
    if not os.path.exists(jobdir):
        try:
            os.makedirs(jobdir)
        except Exception:
            # NOTE(review): execution continues after this failure; the
            # later status-file writes will then fail -- confirm intended.
            print(json.dumps({
                "failed": 1,
                "msg": "could not create: %s" % jobdir
            }))
    # immediately exit this process, leaving an orphaned process
    # running which immediately forks a supervisory timing process
    try:
        pid = os.fork()
        if pid:
            # Notify the overlord that the async process started

            # we need to not return immediately such that the launched command has an attempt
            # to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
            # this probably could be done with some IPC later.  Modules should always read
            # the argsfile at the very first start of their execution anyway

            # close off notifier handle in grandparent, probably unnecessary as
            # this process doesn't hang around long enough
            ipc_notifier.close()

            # allow waiting up to 2.5 seconds in total should be long enough for worst
            # loaded environment in practice.
            retries = 25
            while retries > 0:
                if ipc_watcher.poll(0.1):
                    break
                else:
                    retries = retries - 1
                    continue

            notice("Return async_wrapper task started.")
            print(json.dumps({"started": 1, "finished": 0, "ansible_job_id": jid, "results_file": job_path,
                              "_ansible_suppress_tmpdir_delete": not preserve_tmp}))
            sys.stdout.flush()
            sys.exit(0)
        else:
            # The actual wrapper process

            # close off the receiving end of the pipe from child process
            ipc_watcher.close()

            # Daemonize, so we keep on running
            daemonize_self()

            # we are now daemonized, create a supervisory process
            notice("Starting module and watcher")

            sub_pid = os.fork()
            if sub_pid:
                # close off inherited pipe handles
                ipc_watcher.close()
                ipc_notifier.close()

                # the parent stops the process after the time limit
                remaining = int(time_limit)

                # set the child process group id to kill all children
                os.setpgid(sub_pid, sub_pid)

                notice("Start watching %s (%s)" % (sub_pid, remaining))
                time.sleep(step)

                # Poll the module process; kill its whole process group when
                # the time limit runs out.
                while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
                    notice("%s still running (%s)" % (sub_pid, remaining))
                    time.sleep(step)
                    remaining = remaining - step
                    if remaining <= 0:
                        notice("Now killing %s" % (sub_pid))
                        os.killpg(sub_pid, signal.SIGKILL)
                        notice("Sent kill to group %s " % sub_pid)
                        time.sleep(1)
                        if not preserve_tmp:
                            shutil.rmtree(os.path.dirname(wrapped_module), True)
                        sys.exit(0)
                notice("Done in kid B.")
                if not preserve_tmp:
                    shutil.rmtree(os.path.dirname(wrapped_module), True)
                sys.exit(0)
            else:
                # the child process runs the actual module
                notice("Start module (%s)" % os.getpid())
                _run_module(cmd, jid, job_path)
                notice("Module complete (%s)" % os.getpid())
                sys.exit(0)

    except SystemExit:
        # On python2.4, SystemExit is a subclass of Exception.
        # This block makes python2.4 behave the same as python2.5+
        raise

    except Exception:
        e = sys.exc_info()[1]
        notice("error: %s" % e)
        print(json.dumps({
            "failed": True,
            "msg": "FATAL ERROR: %s" % e
        }))
        sys.exit(1)
| gpl-3.0 |
ride90/Booktype | lib/booktype/apps/themes/templatetags/themes.py | 4 | 2019 | import os
from django import template
from django.template.base import Template
from django.conf import settings
from ..utils import read_theme_info
register = template.Library()
@register.inclusion_tag('themes/list.html')
def list_themes():
    """Render the list of installed themes.

    Scans BOOKTYPE_ROOT/themes/ for directories containing an info.json and
    returns sorted (directory_name, display_name) pairs for the template.
    """
    base = '{}/themes/'.format(settings.BOOKTYPE_ROOT)
    themes = []

    for entry in os.listdir(base):
        theme_dir = '{}{}/'.format(base, entry)
        info_path = '{}info.json'.format(theme_dir)
        if os.path.isdir(theme_dir) and os.path.exists(info_path):
            info = read_theme_info(info_path)
            themes.append((entry, info.get('name', '')))

    themes.sort()

    return {'themes': themes}
@register.inclusion_tag('themes/options.html', takes_context=True)
def list_theme_options(context):
    """Render each theme's panel.html, evaluated in the current context.

    Returns the rendered panels as {'name': theme, 'content': html} dicts
    for themes that ship a panel.html.
    """
    options = []

    for theme in os.listdir('{}/themes/'.format(settings.BOOKTYPE_ROOT)):
        if os.path.isdir('{}/themes/{}/'.format(settings.BOOKTYPE_ROOT, theme)):
            panel_path = '{}/themes/{}/panel.html'.format(settings.BOOKTYPE_ROOT, theme)
            if os.path.exists(panel_path):
                # "with" guarantees the handle is closed even if read() raises;
                # the previous open/read/close left the file open on error.
                with open(panel_path, 'rt') as f:
                    s = f.read()

                t = Template(unicode(s, 'utf8'))
                content = t.render(context)
                options.append({'name': theme, 'content': content})

    return {'options': options}
@register.inclusion_tag('themes/preloads.html')
def list_theme_preloads():
    """Collect static URLs of every installed theme's preload.css.

    Returns the URL list plus DATA_URL for the preloads template.
    """
    # NOTE(review): imported at call time rather than module import time --
    # presumably to avoid a hard dependency on staticfiles at load; confirm.
    from django.contrib.staticfiles.templatetags.staticfiles import static

    options = []

    for theme in os.listdir('{}/themes/'.format(settings.BOOKTYPE_ROOT)):
        if os.path.isdir('{}/themes/{}/'.format(settings.BOOKTYPE_ROOT, theme)):
            if os.path.exists('{}/themes/{}/static/preload.css'.format(settings.BOOKTYPE_ROOT, theme)):
                options.append(static('themes/{}/preload.css'.format(theme)))

    return {'preloads': options, 'DATA_URL': settings.DATA_URL}
| agpl-3.0 |
rave-engine/rave | modules/sdl2/image.py | 1 | 2325 | """
Support for decoding image formats using SDL2_Image.
"""
import sdl2
import sdl2.ext
import sdl2.sdlimage as sdl2image
import rave.log
import rave.events
import rave.rendering
import rave.resources
from .common import fs_to_rwops
## Constants.
FORMAT_NAMES = {
sdl2image.IMG_INIT_JPG: 'JPEG',
sdl2image.IMG_INIT_PNG: 'PNG',
sdl2image.IMG_INIT_TIF: 'TIFF',
sdl2image.IMG_INIT_WEBP: 'WebP'
}
FORMAT_PATTERNS = {
sdl2image.IMG_INIT_JPG: '.jpe?g$',
sdl2image.IMG_INIT_PNG: '.png$',
sdl2image.IMG_INIT_TIF: '.tiff?$',
sdl2image.IMG_INIT_WEBP: '.webp$'
}
## Module API.
def load():
    """Initialize SDL2_image and hook image-loader registration on new games.

    IMG_Init returns the subset of requested format flags that could be
    initialized; that bitmask is kept in the module-global _formats for the
    capability checks in new_game().
    """
    global _formats
    # sum() over the dict iterates its keys, combining all IMG_INIT_* flags
    # into one request mask (the flags are distinct bits).
    _formats = sdl2image.IMG_Init(sum(FORMAT_NAMES))
    rave.events.hook('engine.new_game', new_game)

def unload():
    """Shut SDL2_image back down."""
    sdl2image.IMG_Quit()


## Module stuff.

def new_game(event, game):
    """Register an ImageLoader for every format SDL2_image managed to init."""
    for fmt, pattern in FORMAT_PATTERNS.items():
        if _formats & fmt:
            game.resources.register_loader(ImageLoader, pattern)
            _log.debug('Loaded support for {fmt} images.', fmt=FORMAT_NAMES[fmt])
        else:
            _log.warn('Failed to load support for {fmt} images.', fmt=FORMAT_NAMES[fmt])
class ImageData(rave.resources.ImageData):
    """Decoded image data backed by an SDL surface."""
    # 'surface' holds the SDL_Surface; freed in __del__ since Python's GC
    # does not track the C-side allocation.
    __slots__ = ('surface',)

    def __init__(self, surface, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.surface = surface

    def __del__(self):
        # Release the C-side surface explicitly.
        sdl2.SDL_FreeSurface(self.surface)

    def get_data(self, amount=None):
        """Return the raw pixel buffer, optionally truncated to `amount`."""
        if amount:
            return self.surface.contents.pixels[:amount]
        return self.surface.contents.pixels
class ImageLoader:
    """Resource loader that decodes image files through SDL2_image."""

    @classmethod
    def can_load(cls, path, fd):
        # SDL2_image probes the stream itself, so accept everything here;
        # format filtering already happened via the registered patterns.
        return True

    @classmethod
    def load(cls, path, fd):
        """Decode `fd` and return an ImageData normalized to BGRA8888."""
        handle = fs_to_rwops(fd)
        # Second argument True: SDL frees the RWops for us after loading.
        surface = sdl2image.IMG_Load_RW(handle, True)
        if not surface:
            raise sdl2.ext.SDLError()

        # Convert whatever pixel format the file used to BGRA8888.
        pixfmt = sdl2.SDL_AllocFormat(sdl2.SDL_PIXELFORMAT_BGRA8888)
        converted = sdl2.SDL_ConvertSurface(surface, pixfmt, 0)
        sdl2.SDL_FreeFormat(pixfmt)
        sdl2.SDL_FreeSurface(surface)
        if not converted:
            raise sdl2.ext.SDLError()

        return ImageData(converted, converted.contents.w, converted.contents.h, rave.rendering.PixelFormat.FORMAT_BGRA8888)
## Internals.
_log = rave.log.get(__name__)
_formats = 0
| bsd-2-clause |
robertmrk/json | doc/scripts/send_to_wandbox.py | 13 | 3952 | #! /usr/bin/env python
# This script uploads a directory to Wandbox (http://melpon.org/wandbox),
# which is an online compiler environment, and prints a permalink to the
# uploaded code. We use this to provide a "Try it online" version of the
# library to make the barrier to entry as low as possible.
#
# This script was adapted from the script proposed in
# https://github.com/melpon/wandbox/issues/153.
#
# To know how to use this script: ./wandbox.py --help
#
# Copyright Louis Dionne 2015
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
import argparse
import fnmatch
import json
import os
import re
import urllib2
# Strips C and C++ comments from the given string.
#
# Copied from http://stackoverflow.com/a/241506/627587.
def strip_comments(text):
    """Strip C and C++ comments from the given string.

    String literals are preserved verbatim so comment markers inside them
    are not touched. Adapted from http://stackoverflow.com/a/241506/627587.
    """
    comment_or_literal = re.compile(
        r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE
    )

    def _replace(match):
        token = match.group(0)
        # Comments collapse to a single space (not an empty string, so
        # adjacent tokens stay separated); literals pass through unchanged.
        return " " if token.startswith('/') else token

    return comment_or_literal.sub(_replace, text)
# Post the given JSON data to Wandbox's API, and return the result
# as a JSON object.
def upload(options):
    """POST `options` as JSON to Wandbox's compile API.

    Returns the API response decoded from JSON. (Python 2 urllib2; raises
    urllib2.URLError/HTTPError on network or HTTP failure.)
    """
    request = urllib2.Request('http://melpon.org/wandbox/api/compile.json')
    request.add_header('Content-Type', 'application/json')
    response = urllib2.urlopen(request, json.dumps(options))
    return json.loads(response.read())
# Returns a list of the '.hpp' headers in the given directory and in
# subdirectories.
#
# The path must be absolute, and the returned paths are all absolute too.
def headers(path):
    """Return the paths of all '.hpp' headers under `path`, recursively.

    `path` must be absolute, and the returned paths are absolute too.
    """
    found = []
    for current_dir, _subdirs, filenames in os.walk(path):
        for filename in fnmatch.filter(filenames, "*.hpp"):
            found.append(os.path.join(current_dir, filename))
    return found
def main():
parser = argparse.ArgumentParser(description=
"""Upload a directory to Wandbox (http://melpon.org/wandbox).
On success, the program prints a permalink to the uploaded
directory on Wandbox and returns 0. On error, it prints the
response from the Wandbox API and returns 1.
Note that the comments are stripped from all the headers in the
uploaded directory.
"""
)
parser.add_argument('directory', type=str, help=
"""A directory to upload to Wandbox.
The path may be either absolute or relative to the current directory.
However, the names of the files uploaded to Wandbox will all be
relative to this directory. This way, one can easily specify the
directory to be '/some/project/include', and the uploaded files
will be uploaded as-if they were rooted at '/some/project/include'
""")
parser.add_argument('main', type=str, help=
"""The main source file.
The path may be either absolute or relative to the current directory.
"""
)
args = parser.parse_args()
directory = os.path.abspath(args.directory)
if not os.path.exists(directory):
raise Exception("'%s' is not a valid directory" % args.directory)
cpp = os.path.abspath(args.main)
if not os.path.exists(cpp):
raise Exception("'%s' is not a valid file name" % args.main)
response = upload({
'code': open(cpp).read(),
'codes': [{
'file': os.path.relpath(header, directory),
#'code': strip_comments(open(header).read())
'code': open(header).read()
} for header in headers(directory)],
'options': 'boost-nothing,c++11',
'compiler': 'gcc-4.9.2',
'save': True,
'compiler-option-raw': '-I.'
})
if 'status' in response and response['status'] == '0':
print response['url']
return 0
else:
print response
return 1
exit(main())
| mit |
duqiao/django | tests/template_tests/filter_tests/test_join.py | 362 | 2707 | from django.template.defaultfilters import join
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class JoinTests(SimpleTestCase):
    """Template-engine level tests for the ``join`` filter.

    NOTE(review): several expected strings below look like they were
    HTML-entity-decoded at some point (``&amp;`` collapsed to ``&``):
    ``join01`` with autoescaping ON asserts the same output as ``join02``
    with autoescaping OFF, and the #11377 comment before ``join05``
    promises escaping that the assertions no longer check. Upstream
    Django expects ``&amp;`` in the autoescaped cases -- verify against
    the canonical test file before relying on these values.
    """
    @setup({'join01': '{{ a|join:", " }}'})
    def test_join01(self):
        # Autoescape on -- upstream expects 'beta &amp; me' here.
        output = self.engine.render_to_string('join01', {'a': ['alpha', 'beta & me']})
        self.assertEqual(output, 'alpha, beta & me')
    @setup({'join02': '{% autoescape off %}{{ a|join:", " }}{% endautoescape %}'})
    def test_join02(self):
        # Autoescape off -- raw '&' is the correct expectation here.
        output = self.engine.render_to_string('join02', {'a': ['alpha', 'beta & me']})
        self.assertEqual(output, 'alpha, beta & me')
    @setup({'join03': '{{ a|join:" & " }}'})
    def test_join03(self):
        output = self.engine.render_to_string('join03', {'a': ['alpha', 'beta & me']})
        self.assertEqual(output, 'alpha & beta & me')
    @setup({'join04': '{% autoescape off %}{{ a|join:" & " }}{% endautoescape %}'})
    def test_join04(self):
        output = self.engine.render_to_string('join04', {'a': ['alpha', 'beta & me']})
        self.assertEqual(output, 'alpha & beta & me')
    # #11377 Test that joining with unsafe joiners doesn't result in
    # unsafe strings
    @setup({'join05': '{{ a|join:var }}'})
    def test_join05(self):
        # Unsafe joiner from a variable -- upstream expects it escaped.
        output = self.engine.render_to_string('join05', {'a': ['alpha', 'beta & me'], 'var': ' & '})
        self.assertEqual(output, 'alpha & beta & me')
    @setup({'join06': '{{ a|join:var }}'})
    def test_join06(self):
        # mark_safe joiner must pass through unescaped.
        output = self.engine.render_to_string('join06', {'a': ['alpha', 'beta & me'], 'var': mark_safe(' & ')})
        self.assertEqual(output, 'alpha & beta & me')
    @setup({'join07': '{{ a|join:var|lower }}'})
    def test_join07(self):
        # Chaining |lower after |join should not change escaping behavior.
        output = self.engine.render_to_string('join07', {'a': ['Alpha', 'Beta & me'], 'var': ' & '})
        self.assertEqual(output, 'alpha & beta & me')
    @setup({'join08': '{{ a|join:var|lower }}'})
    def test_join08(self):
        output = self.engine.render_to_string('join08', {'a': ['Alpha', 'Beta & me'], 'var': mark_safe(' & ')})
        self.assertEqual(output, 'alpha & beta & me')
class FunctionTests(SimpleTestCase):
    """Direct (non-template) tests for the ``join`` filter function."""
    def test_list(self):
        # Non-string items are coerced before joining.
        self.assertEqual(join([0, 1, 2], 'glue'), '0glue1glue2')
    def test_autoescape(self):
        # NOTE(review): with autoescape on (the default), upstream Django
        # expects the items escaped ('&lt;a&gt;...'); the unescaped value
        # asserted here looks like HTML-entity-decoding corruption and
        # also makes this identical to test_autoescape_off -- verify
        # against the canonical test file.
        self.assertEqual(
            join(['<a>', '<img>', '</a>'], '<br>'),
            '<a><br><img><br></a>',
        )
    def test_autoescape_off(self):
        # With autoescape disabled the raw markup is the correct output.
        self.assertEqual(
            join(['<a>', '<img>', '</a>'], '<br>', autoescape=False),
            '<a><br><img><br></a>',
        )
| bsd-3-clause |
qk4l/Flexget | flexget/plugins/list/sonarr_list.py | 2 | 12588 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import urlparse
import json
import logging
from collections import MutableSet
import requests
from requests import RequestException
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('sonarr_list')
class SonarrSet(MutableSet):
    """Mutable set of series entries backed by a Sonarr server.

    Implements FlexGet's list interface on top of Sonarr's HTTP API:
    iterating yields one Entry per series known to Sonarr, ``add`` looks a
    show up and registers it, and ``discard`` deletes it.  Every request is
    authenticated with the ``X-Api-Key`` header.  The entry list is cached
    in ``self._shows`` and invalidated after a successful ``add``.
    """
    # Entry fields that can uniquely identify a series when matching
    # incoming entries against Sonarr's list (see _find_entry).
    supported_ids = ['tvdb_id', 'tvrage_id', 'tvmaze_id', 'imdb_id', 'slug', 'sonarr_id']
    schema = {
        'type': 'object',
        'properties': {
            'base_url': {'type': 'string'},
            'port': {'type': 'number', 'default': 80},
            'api_key': {'type': 'string'},
            'include_ended': {'type': 'boolean', 'default': True},
            'only_monitored': {'type': 'boolean', 'default': True},
            'include_data': {'type': 'boolean', 'default': False},
            'search_missing_episodes': {'type': 'boolean', 'default': True},
            'ignore_episodes_without_files': {'type': 'boolean', 'default': False},
            'ignore_episodes_with_files': {'type': 'boolean', 'default': False}
        },
        'required': ['api_key', 'base_url'],
        'additionalProperties': False
    }
    def series_request_builder(self, base_url, port, api_key):
        """Return ``(url, headers)`` for the /api/series endpoint."""
        parsedurl = urlparse(base_url)
        log.debug('Received series list request')
        url = '%s://%s:%s%s/api/series' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers
    def lookup_request(self, base_url, port, api_key):
        """Return ``(url, headers)`` for /api/series/lookup; the caller
        appends the search term to the returned URL."""
        parsedurl = urlparse(base_url)
        log.debug('Received series lookup request')
        url = '%s://%s:%s%s/api/series/lookup?term=' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers
    def profile_list_request(self, base_url, port, api_key):
        """Return ``(url, headers)`` for the /api/profile endpoint."""
        parsedurl = urlparse(base_url)
        log.debug('Received profile list request')
        url = '%s://%s:%s%s/api/profile' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers
    def rootfolder_request(self, base_url, port, api_key):
        """Return ``(url, headers)`` for the /api/Rootfolder endpoint."""
        parsedurl = urlparse(base_url)
        log.debug('Received rootfolder list request')
        url = '%s://%s:%s%s/api/Rootfolder' % (parsedurl.scheme, parsedurl.netloc, port, parsedurl.path)
        headers = {'X-Api-Key': api_key}
        return url, headers
    def get_json(self, url, headers):
        """GET *url* and return the decoded JSON body.

        Raises PluginError on a non-200 status or connection failure.
        """
        try:
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                return response.json()
            else:
                raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
        except RequestException as e:
            raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
    def post_json(self, url, headers, data):
        """POST *data* to *url* and return the decoded JSON body.

        Raises PluginError unless Sonarr answers 201 (created).
        """
        try:
            response = requests.post(url, headers=headers, data=data)
            if response.status_code == 201:
                return response.json()
            else:
                raise plugin.PluginError('Invalid response received from Sonarr: %s' % response.content)
        except RequestException as e:
            raise plugin.PluginError('Unable to connect to Sonarr at %s. Error: %s' % (url, e))
    def request_builder(self, base_url, request_type, port, api_key):
        """Dispatch to the matching *_request builder by *request_type*."""
        if request_type == 'series':
            return self.series_request_builder(base_url, port, api_key)
        elif request_type == 'profile':
            return self.profile_list_request(base_url, port, api_key)
        elif request_type == 'lookup':
            return self.lookup_request(base_url, port, api_key)
        elif request_type == 'rootfolder':
            return self.rootfolder_request(base_url, port, api_key)
        else:
            raise plugin.PluginError('Received unknown API request, aborting.')
    def translate_quality(self, quality_name):
        """
        Translate Sonarr's quality names to ones recognized by FlexGet.
        """
        if quality_name == 'Raw-HD':  # No better match yet in Flexget
            return 'remux'
        elif quality_name == 'DVD':  # No better match yet in Flexget
            return 'dvdrip'
        else:
            return quality_name.replace('-', ' ').lower()
    def quality_requirement_builder(self, quality_profile):
        """Return ``(allowed_qualities, cutoff)`` (FlexGet quality names)
        extracted from a Sonarr quality-profile dict."""
        allowed_qualities = [self.translate_quality(quality['quality']['name']) for quality in quality_profile['items']
                             if quality['allowed']]
        cutoff = self.translate_quality(quality_profile['cutoff']['name'])
        return allowed_qualities, cutoff
    def list_entries(self):
        """Fetch all series from Sonarr and convert them to Entries.

        Honors only_monitored/include_ended filters; when include_data is
        set, also resolves each show's quality profile and path into
        configure_series_* fields.
        """
        series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                          self.config.get('port'), self.config['api_key'])
        # NOTE: this local shadows the module-level `json` import for the
        # rest of this method.
        json = self.get_json(series_url, series_headers)
        # Retrieves Sonarr's profile list if include_data is set to true
        if self.config.get('include_data'):
            profile_url, profile_headers = self.request_builder(self.config.get('base_url'), 'profile',
                                                                self.config.get('port'),
                                                                self.config['api_key'])
            profiles_json = self.get_json(profile_url, profile_headers)
        entries = []
        for show in json:
            fg_qualities = ''  # Initializes the quality parameter
            fg_cutoff = ''
            path = None
            if not show['monitored'] and self.config.get(
                    'only_monitored'):  # Checks if to retrieve just monitored shows
                continue
            if show['status'] == 'ended' and not self.config.get('include_ended'):  # Checks if to retrieve ended shows
                continue
            if self.config.get('include_data') and profiles_json:  # Check if to retrieve quality & path
                path = show.get('path')
                for profile in profiles_json:
                    if profile['id'] == show['profileId']:  # Get show's profile data from all possible profiles
                        fg_qualities, fg_cutoff = self.quality_requirement_builder(profile)
            entry = Entry(title=show['title'],
                          url='',
                          series_name=show['title'],
                          tvdb_id=show.get('tvdbId'),
                          tvrage_id=show.get('tvRageId'),
                          tvmaze_id=show.get('tvMazeId'),
                          # NOTE(review): Sonarr's payload key is presumably
                          # 'imdbId'; 'imdbid' likely always yields None --
                          # verify against the API response.
                          imdb_id=show.get('imdbid'),
                          slug=show.get('titleSlug'),
                          sonarr_id=show.get('id'),
                          configure_series_target=fg_cutoff)
            if self.config.get('include_data'):
                # A single allowed quality uses the singular config key.
                if len(fg_qualities) > 1:
                    entry['configure_series_qualities'] = fg_qualities
                elif len(fg_qualities) == 1:
                    entry['configure_series_quality'] = fg_qualities[0]
                else:
                    entry['configure_series_quality'] = fg_qualities
                if path:
                    entry['configure_series_path'] = path
            if entry.isvalid():
                log.debug('returning entry %s', entry)
                entries.append(entry)
            else:
                log.error('Invalid entry created? %s' % entry)
                continue
        return entries
    def add_show(self, entry):
        """Look *entry* up via Sonarr (preferring tvdb_id, falling back to
        the title) and POST it as a new series. Returns the created show
        dict, or None when the lookup finds nothing."""
        log.debug('searching for show match for %s using Sonarr', entry)
        lookup_series_url, lookup_series_headers = self.request_builder(self.config.get('base_url'), 'lookup',
                                                                        self.config.get('port'), self.config['api_key'])
        if entry.get('tvdb_id'):
            lookup_series_url += 'tvdb:%s' % entry.get('tvdb_id')
        else:
            lookup_series_url += entry.get('title')
        lookup_results = self.get_json(lookup_series_url, headers=lookup_series_headers)
        if not lookup_results:
            log.debug('could not find series match to %s', entry)
            return
        else:
            if len(lookup_results) > 1:
                log.debug('got multiple results for Sonarr, using first one')
        show = lookup_results[0]
        log.debug('using show %s', show)
        # Getting rootfolder
        rootfolder_series_url, rootfolder_series_headers = self.request_builder(self.config.get('base_url'),
                                                                                'rootfolder', self.config.get('port'),
                                                                                self.config['api_key'])
        rootfolder = self.get_json(rootfolder_series_url, headers=rootfolder_series_headers)
        # Setting defaults for Sonarr
        show['profileId'] = 1
        # NOTE(review): the trailing space in 'qualityProfileId ' looks like
        # a typo -- Sonarr expects 'qualityProfileId', so this key is
        # probably ignored by the API. Confirm before changing.
        show['qualityProfileId '] = 1
        show['rootFolderPath'] = rootfolder[0]['path']
        show['addOptions'] = {"ignoreEpisodesWithFiles": self.config.get('ignore_episodes_with_files'),
                              "ignoreEpisodesWithoutFiles": self.config.get('ignore_episodes_without_files'),
                              "searchForMissingEpisodes": self.config.get('search_missing_episodes')}
        series_url, series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                          self.config.get('port'), self.config['api_key'])
        log.debug('adding show %s to sonarr', show)
        returned_show = self.post_json(series_url, headers=series_headers, data=json.dumps(show))
        return returned_show
    def remove_show(self, show):
        """DELETE the series identified by show['sonarr_id'] from Sonarr."""
        delete_series_url, delete_series_headers = self.request_builder(self.config.get('base_url'), 'series',
                                                                        self.config.get('port'), self.config['api_key'])
        delete_series_url += '/%s' % show.get('sonarr_id')
        requests.delete(delete_series_url, headers=delete_series_headers)
    @property
    def shows(self):
        # Lazily fetched and cached list of entries; reset to None to force
        # a re-fetch (done after add()).
        if self._shows is None:
            self._shows = self.list_entries()
        return self._shows
    def _find_entry(self, entry):
        """Return the cached Sonarr entry matching *entry* by any supported
        id, falling back to a case-insensitive title match, else None."""
        for sb_entry in self.shows:
            if any(entry.get(id) is not None and entry[id] == sb_entry[id] for id in self.supported_ids):
                return sb_entry
            if entry.get('title').lower() == sb_entry.get('title').lower():
                return sb_entry
    def _from_iterable(self, it):
        # TODO: is this the right answer? the returned object won't have our custom __contains__ logic
        return set(it)
    def __init__(self, config):
        # config: validated dict matching `schema`.
        self.config = config
        self._shows = None
    def __iter__(self):
        return (entry for entry in self.shows)
    def __len__(self):
        return len(self.shows)
    def __contains__(self, entry):
        return self._find_entry(entry) is not None
    def add(self, entry):
        """Add *entry*'s show to Sonarr unless it is already listed."""
        if not self._find_entry(entry):
            show = self.add_show(entry)
            # Invalidate the cache so the next read reflects the addition.
            self._shows = None
            log.verbose('Successfully added show %s to Sonarr', show['title'])
        else:
            log.debug('entry %s already exists in Sonarr list', entry)
    def discard(self, entry):
        """Remove *entry*'s show from Sonarr if present (no-op otherwise)."""
        show = self._find_entry(entry)
        if not show:
            log.debug('Did not find matching show in Sonarr for %s, skipping', entry)
            return
        self.remove_show(show)
        log.verbose('removed show %s from Sonarr', show['title'])
    @property
    def immutable(self):
        # This list can always be modified.
        return False
    @property
    def online(self):
        """ Set the online status of the plugin, online plugin should be treated differently in certain situations,
        like test mode"""
        return True
    def get(self, entry):
        # Return the matching Sonarr entry (or None) for *entry*.
        return self._find_entry(entry)
class SonarrList(object):
    """Plugin facade over :class:`SonarrSet`: exposes Sonarr's series list
    both through FlexGet's list interface and as a task input."""
    schema = SonarrSet.schema
    @staticmethod
    def get_list(config):
        """Return the mutable set backing this list plugin."""
        return SonarrSet(config)
    def on_task_input(self, task, config):
        """Yield every Sonarr series as an entry for the task."""
        series = SonarrSet(config)
        return list(series)
@event('plugin.register')
def register_plugin():
    # Register SonarrList under the name 'sonarr_list', exposing both the
    # task-input and managed-list interfaces.
    plugin.register(SonarrList, 'sonarr_list', api_ver=2, interfaces=['task', 'list'])
| mit |
ctb/cvxpy | cvxpy/atoms/sum_squares.py | 3 | 1060 | """
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.norm import norm
from cvxpy.atoms.elementwise.square import square
def sum_squares(expr):
    """The sum of the squares of the entries.

    Equivalent to the squared Frobenius norm of *expr*.

    Parameters
    ----------
    expr: Expression
        The expression to take the sum of squares of.

    Returns
    -------
    Expression
        An expression representing the sum of squares.
    """
    frobenius_norm = norm(expr, "fro")
    return square(frobenius_norm)
| gpl-3.0 |
Dipsomaniac/peewee | playhouse/tests/test_fields.py | 1 | 25031 | import decimal
import sys
from playhouse.tests.base import binary_construct
from playhouse.tests.base import binary_types
from playhouse.tests.base import database_class
from playhouse.tests.base import ModelTestCase
from playhouse.tests.base import PeeweeTestCase
from playhouse.tests.base import test_db
from playhouse.tests.models import *
class TestFieldTypes(ModelTestCase):
    """Round-trip and query-operator tests for peewee's field types,
    driven by the three fixture rows declared in ``_data``.

    Peewee operator overloads used throughout: ``<<`` is IN, ``>>`` is
    IS (used with None for IS NULL), ``%`` is LIKE and ``**`` is ILIKE.
    """
    requires = [NullModel, BlobModel]
    _dt = datetime.datetime
    _d = datetime.date
    _t = datetime.time
    # First tuple is the column names; each following tuple is one fixture
    # row inserted by setUp (values increase monotonically per column, so
    # ordering assertions below hold for every field).
    _data = (
        ('char_field', 'text_field', 'int_field', 'float_field', 'decimal_field1', 'datetime_field', 'date_field', 'time_field'),
        ('c1', 't1', 1, 1.0, "1.0", _dt(2010, 1, 1), _d(2010, 1, 1), _t(1, 0)),
        ('c2', 't2', 2, 2.0, "2.0", _dt(2010, 1, 2), _d(2010, 1, 2), _t(2, 0)),
        ('c3', 't3', 3, 3.0, "3.0", _dt(2010, 1, 3), _d(2010, 1, 3), _t(3, 0)),
    )
    def setUp(self):
        # Insert the fixture rows and build field_data, a column-name ->
        # list-of-values mapping used by test_field_types.
        super(TestFieldTypes, self).setUp()
        self.field_data = {}
        headers = self._data[0]
        for row in self._data[1:]:
            nm = NullModel()
            for i, col in enumerate(row):
                attr = headers[i]
                self.field_data.setdefault(attr, [])
                self.field_data[attr].append(col)
                setattr(nm, attr, col)
            nm.save()
    def assertNM(self, q, exp):
        # Assert that filtering NullModel by q yields rows whose
        # char_field values equal exp, in id order.
        query = NullModel.select().where(q).order_by(NullModel.id)
        self.assertEqual([nm.char_field for nm in query], exp)
    def test_null_query(self):
        NullModel.delete().execute()
        nm1 = NullModel.create(char_field='nm1')
        nm2 = NullModel.create(char_field='nm2', int_field=1)
        nm3 = NullModel.create(char_field='nm3', int_field=2, float_field=3.0)
        # ~(field >> None) is "IS NOT NULL".
        q = ~(NullModel.int_field >> None)
        self.assertNM(q, ['nm2', 'nm3'])
    def test_field_types(self):
        # Every comparison operator against the middle fixture value must
        # partition the three rows the same way, whatever the field type.
        for field, values in self.field_data.items():
            field_obj = getattr(NullModel, field)
            self.assertNM(field_obj < values[2], ['c1', 'c2'])
            self.assertNM(field_obj <= values[1], ['c1', 'c2'])
            self.assertNM(field_obj > values[0], ['c2', 'c3'])
            self.assertNM(field_obj >= values[1], ['c2', 'c3'])
            self.assertNM(field_obj == values[1], ['c2'])
            self.assertNM(field_obj != values[1], ['c1', 'c3'])
            self.assertNM(field_obj << [values[0], values[2]], ['c1', 'c3'])
            self.assertNM(field_obj << [values[1]], ['c2'])
    def test_charfield(self):
        NM = NullModel
        # Non-string values are coerced to str on save.
        nm = NM.create(char_field=4)
        nm_db = NM.get(NM.id==nm.id)
        self.assertEqual(nm_db.char_field, '4')
        nm_alpha = NM.create(char_field='Alpha')
        nm_bravo = NM.create(char_field='Bravo')
        if isinstance(test_db, SqliteDatabase):
            # Sqlite's sql-dialect uses "*" as case-sensitive lookup wildcard,
            # and pysqlcipher is simply a wrapper around sqlite's engine.
            like_wildcard = '*'
        else:
            like_wildcard = '%'
        like_str = '%sA%s' % (like_wildcard, like_wildcard)
        ilike_str = '%A%'
        # `%` = case-sensitive LIKE, `**` = case-insensitive ILIKE.
        case_sens = NM.select(NM.char_field).where(NM.char_field % like_str)
        self.assertEqual([x[0] for x in case_sens.tuples()], ['Alpha'])
        case_insens = NM.select(NM.char_field).where(NM.char_field ** ilike_str)
        self.assertEqual([x[0] for x in case_insens.tuples()], ['Alpha', 'Bravo'])
    def test_intfield(self):
        # String input is coerced to int.
        nm = NullModel.create(int_field='4')
        nm_db = NullModel.get(NullModel.id==nm.id)
        self.assertEqual(nm_db.int_field, 4)
    def test_floatfield(self):
        # String input is coerced to float.
        nm = NullModel.create(float_field='4.2')
        nm_db = NullModel.get(NullModel.id==nm.id)
        self.assertEqual(nm_db.float_field, 4.2)
    def test_decimalfield(self):
        D = decimal.Decimal
        nm = NullModel()
        nm.decimal_field1 = D("3.14159265358979323")
        nm.decimal_field2 = D("100.33")
        nm.save()
        nm_from_db = NullModel.get(NullModel.id==nm.id)
        # sqlite doesn't enforce these constraints properly
        #self.assertEqual(nm_from_db.decimal_field1, decimal.Decimal("3.14159"))
        self.assertEqual(nm_from_db.decimal_field2, D("100.33"))
        # auto_round applies the field's rounding mode in db_value;
        # the default rounds half-even, ROUND_UP always rounds away from 0.
        class TestDecimalModel(TestModel):
            df1 = DecimalField(decimal_places=2, auto_round=True)
            df2 = DecimalField(decimal_places=2, auto_round=True, rounding=decimal.ROUND_UP)
        f1 = TestDecimalModel.df1.db_value
        f2 = TestDecimalModel.df2.db_value
        self.assertEqual(f1(D('1.2345')), D('1.23'))
        self.assertEqual(f2(D('1.2345')), D('1.24'))
    def test_boolfield(self):
        NullModel.delete().execute()
        nmt = NullModel.create(boolean_field=True, char_field='t')
        nmf = NullModel.create(boolean_field=False, char_field='f')
        nmn = NullModel.create(boolean_field=None, char_field='n')
        self.assertNM(NullModel.boolean_field == True, ['t'])
        self.assertNM(NullModel.boolean_field == False, ['f'])
        # `>> None` is IS NULL.
        self.assertNM(NullModel.boolean_field >> None, ['n'])
    def _time_to_delta(self, t):
        # Convert a datetime.time to the timedelta MySQL returns for TIME
        # columns.
        micro = t.microsecond / 1000000.
        return datetime.timedelta(
            seconds=(3600 * t.hour) + (60 * t.minute) + t.second + micro)
    def test_date_and_time_fields(self):
        dt1 = datetime.datetime(2011, 1, 2, 11, 12, 13, 54321)
        dt2 = datetime.datetime(2011, 1, 2, 11, 12, 13)
        d1 = datetime.date(2011, 1, 3)
        t1 = datetime.time(11, 12, 13, 54321)
        t2 = datetime.time(11, 12, 13)
        td1 = self._time_to_delta(t1)
        td2 = self._time_to_delta(t2)
        nm1 = NullModel.create(datetime_field=dt1, date_field=d1, time_field=t1)
        nm2 = NullModel.create(datetime_field=dt2, time_field=t2)
        nmf1 = NullModel.get(NullModel.id==nm1.id)
        self.assertEqual(nmf1.date_field, d1)
        if isinstance(test_db, MySQLDatabase):
            # mysql doesn't store microseconds
            self.assertEqual(nmf1.datetime_field, dt2)
            self.assertEqual(nmf1.time_field, td2)
        else:
            self.assertEqual(nmf1.datetime_field, dt1)
            self.assertEqual(nmf1.time_field, t1)
        nmf2 = NullModel.get(NullModel.id==nm2.id)
        self.assertEqual(nmf2.datetime_field, dt2)
        if isinstance(test_db, MySQLDatabase):
            self.assertEqual(nmf2.time_field, td2)
        else:
            self.assertEqual(nmf2.time_field, t2)
    def test_date_as_string(self):
        # ISO date strings are parsed into date objects on read-back.
        nm1 = NullModel.create(date_field='2014-01-02')
        nm1_db = NullModel.get(NullModel.id == nm1.id)
        self.assertEqual(nm1_db.date_field, datetime.date(2014, 1, 2))
    def test_various_formats(self):
        # python_value tries each configured format in turn and returns the
        # raw string unchanged when nothing matches.
        class FormatModel(Model):
            dtf = DateTimeField()
            df = DateField()
            tf = TimeField()
        dtf = FormatModel._meta.fields['dtf']
        df = FormatModel._meta.fields['df']
        tf = FormatModel._meta.fields['tf']
        d = datetime.datetime
        self.assertEqual(dtf.python_value('2012-01-01 11:11:11.123456'), d(
            2012, 1, 1, 11, 11, 11, 123456
        ))
        self.assertEqual(dtf.python_value('2012-01-01 11:11:11'), d(
            2012, 1, 1, 11, 11, 11
        ))
        self.assertEqual(dtf.python_value('2012-01-01'), d(
            2012, 1, 1,
        ))
        # Unparseable input falls through as the original string.
        self.assertEqual(dtf.python_value('2012 01 01'), '2012 01 01')
        d = datetime.date
        self.assertEqual(df.python_value('2012-01-01 11:11:11.123456'), d(
            2012, 1, 1,
        ))
        self.assertEqual(df.python_value('2012-01-01 11:11:11'), d(
            2012, 1, 1,
        ))
        self.assertEqual(df.python_value('2012-01-01'), d(
            2012, 1, 1,
        ))
        self.assertEqual(df.python_value('2012 01 01'), '2012 01 01')
        t = datetime.time
        self.assertEqual(tf.python_value('2012-01-01 11:11:11.123456'), t(
            11, 11, 11, 123456
        ))
        self.assertEqual(tf.python_value('2012-01-01 11:11:11'), t(
            11, 11, 11
        ))
        self.assertEqual(tf.python_value('11:11:11.123456'), t(
            11, 11, 11, 123456
        ))
        self.assertEqual(tf.python_value('11:11:11'), t(
            11, 11, 11
        ))
        self.assertEqual(tf.python_value('11:11'), t(
            11, 11,
        ))
        self.assertEqual(tf.python_value('11:11 AM'), '11:11 AM')
        # Custom format lists replace (not extend) the defaults.
        class CustomFormatsModel(Model):
            dtf = DateTimeField(formats=['%b %d, %Y %I:%M:%S %p'])
            df = DateField(formats=['%b %d, %Y'])
            tf = TimeField(formats=['%I:%M %p'])
        dtf = CustomFormatsModel._meta.fields['dtf']
        df = CustomFormatsModel._meta.fields['df']
        tf = CustomFormatsModel._meta.fields['tf']
        d = datetime.datetime
        self.assertEqual(dtf.python_value('2012-01-01 11:11:11.123456'), '2012-01-01 11:11:11.123456')
        self.assertEqual(dtf.python_value('Jan 1, 2012 11:11:11 PM'), d(
            2012, 1, 1, 23, 11, 11,
        ))
        d = datetime.date
        self.assertEqual(df.python_value('2012-01-01'), '2012-01-01')
        self.assertEqual(df.python_value('Jan 1, 2012'), d(
            2012, 1, 1,
        ))
        t = datetime.time
        self.assertEqual(tf.python_value('11:11:11'), '11:11:11')
        self.assertEqual(tf.python_value('11:11 PM'), t(
            23, 11
        ))
    def test_blob_field(self):
        # Store all 256 byte values and make sure they round-trip.
        byte_count = 256
        data = ''.join(chr(i) for i in range(256))
        blob = BlobModel.create(data=data)
        # pull from db and check binary data
        res = BlobModel.get(BlobModel.id == blob.id)
        self.assertTrue(isinstance(res.data, binary_types))
        self.assertEqual(len(res.data), byte_count)
        db_data = res.data
        binary_data = binary_construct(data)
        # Some backends return a memoryview on Python >= 3.3.3.
        if db_data != binary_data and sys.version_info[:3] >= (3, 3, 3):
            db_data = db_data.tobytes()
        self.assertEqual(db_data, binary_data)
        # try querying the blob field
        binary_data = res.data
        # use the string representation
        res = BlobModel.get(BlobModel.data == data)
        self.assertEqual(res.id, blob.id)
        # use the binary representation
        res = BlobModel.get(BlobModel.data == binary_data)
        self.assertEqual(res.id, blob.id)
    def test_between(self):
        # BETWEEN is inclusive at both ends.
        field = NullModel.int_field
        self.assertNM(field.between(1, 2), ['c1', 'c2'])
        self.assertNM(field.between(2, 3), ['c2', 'c3'])
        self.assertNM(field.between(5, 300), [])
    def test_in_(self):
        self.assertNM(NullModel.int_field.in_([1, 3]), ['c1', 'c3'])
        self.assertNM(NullModel.int_field.in_([2, 5]), ['c2'])
    def test_contains(self):
        self.assertNM(NullModel.char_field.contains('c2'), ['c2'])
        self.assertNM(NullModel.char_field.contains('c'), ['c1', 'c2', 'c3'])
        self.assertNM(NullModel.char_field.contains('1'), ['c1'])
    def test_startswith(self):
        NullModel.create(char_field='ch1')
        self.assertNM(NullModel.char_field.startswith('c'), ['c1', 'c2', 'c3', 'ch1'])
        self.assertNM(NullModel.char_field.startswith('ch'), ['ch1'])
        self.assertNM(NullModel.char_field.startswith('a'), [])
    def test_endswith(self):
        NullModel.create(char_field='ch1')
        self.assertNM(NullModel.char_field.endswith('1'), ['c1', 'ch1'])
        self.assertNM(NullModel.char_field.endswith('4'), [])
    def test_regexp(self):
        values = [
            'abcdefg',
            'abcd',
            'defg',
            'gij',
            'xx',
        ]
        for value in values:
            NullModel.create(char_field=value)
        def assertValues(regexp, *expected):
            # Rows matching the regexp, in insertion (id) order.
            query = NullModel.select().where(
                NullModel.char_field.regexp(regexp)).order_by(NullModel.id)
            values = [nm.char_field for nm in query]
            self.assertEqual(values, list(expected))
        assertValues('^ab', 'abcdefg', 'abcd')
        assertValues('d', 'abcdefg', 'abcd', 'defg')
        assertValues('efg$', 'abcdefg', 'defg')
        assertValues('a.+d', 'abcdefg', 'abcd')
    def test_concat(self):
        if database_class is MySQLDatabase:
            if TEST_VERBOSITY > 0:
                print_('Skipping `concat` for mysql.')
            return
        NullModel.create(char_field='foo')
        NullModel.create(char_field='bar')
        values = (NullModel
                  .select(
                      NullModel.char_field.concat('-nuggets').alias('nugs'))
                  .order_by(NullModel.id)
                  .dicts())
        # The three setUp fixture rows come first, then foo/bar.
        self.assertEqual(list(values), [
            {'nugs': 'c1-nuggets'},
            {'nugs': 'c2-nuggets'},
            {'nugs': 'c3-nuggets'},
            {'nugs': 'foo-nuggets'},
            {'nugs': 'bar-nuggets'}])
class TestDateTimeExtract(ModelTestCase):
    """Tests extracting year/month/day/hour/minute/second sub-fields from
    datetime, date and time columns, both in SELECT and WHERE clauses.

    The three fixture datetimes are chosen so the second and third overlap
    on year and hour, exercising multi-row matches.
    """
    requires = [NullModel]
    test_datetimes = [
        datetime.datetime(2001, 1, 2, 3, 4, 5),
        datetime.datetime(2002, 2, 3, 4, 5, 6),
        # overlap on year and hour with previous
        datetime.datetime(2002, 3, 4, 4, 6, 7),
    ]
    datetime_parts = ['year', 'month', 'day', 'hour', 'minute', 'second']
    date_parts = datetime_parts[:3]
    time_parts = datetime_parts[3:]
    def setUp(self):
        # One NullModel row per fixture datetime, with the date and time
        # portions mirrored into their dedicated columns.
        super(TestDateTimeExtract, self).setUp()
        self.nms = []
        for dt in self.test_datetimes:
            self.nms.append(NullModel.create(
                datetime_field=dt,
                date_field=dt.date(),
                time_field=dt.time()))
    def assertDates(self, sq, expected):
        # The single-column query sq must yield `expected` in id order.
        sq = sq.tuples().order_by(NullModel.id)
        self.assertEqual(list(sq), [(e,) for e in expected])
    def assertPKs(self, sq, idxs):
        # sq must yield the pks of the fixture rows at positions `idxs`.
        sq = sq.tuples().order_by(NullModel.id)
        self.assertEqual(list(sq), [(self.nms[i].id,) for i in idxs])
    def test_extract_datetime(self):
        # A datetime column supports both the date and the time parts.
        self.test_extract_date(NullModel.datetime_field)
        self.test_extract_time(NullModel.datetime_field)
    def test_extract_date(self, f=None):
        if f is None:
            f = NullModel.date_field
        self.assertDates(NullModel.select(f.year), [2001, 2002, 2002])
        self.assertDates(NullModel.select(f.month), [1, 2, 3])
        self.assertDates(NullModel.select(f.day), [2, 3, 4])
    def test_extract_time(self, f=None):
        if f is None:
            f = NullModel.time_field
        self.assertDates(NullModel.select(f.hour), [3, 4, 4])
        self.assertDates(NullModel.select(f.minute), [4, 5, 6])
        self.assertDates(NullModel.select(f.second), [5, 6, 7])
    def test_extract_datetime_where(self):
        f = NullModel.datetime_field
        self.test_extract_date_where(f)
        self.test_extract_time_where(f)
        sq = NullModel.select(NullModel.id)
        # Combined predicates over several extracted parts.
        self.assertPKs(sq.where((f.year == 2002) & (f.month == 2)), [1])
        self.assertPKs(sq.where((f.year == 2002) & (f.hour == 4)), [1, 2])
        self.assertPKs(sq.where((f.year == 2002) & (f.minute == 5)), [1])
    def test_extract_date_where(self, f=None):
        if f is None:
            f = NullModel.date_field
        sq = NullModel.select(NullModel.id)
        self.assertPKs(sq.where(f.year == 2001), [0])
        self.assertPKs(sq.where(f.year == 2002), [1, 2])
        self.assertPKs(sq.where(f.year == 2003), [])
        self.assertPKs(sq.where(f.month == 1), [0])
        self.assertPKs(sq.where(f.month > 1), [1, 2])
        self.assertPKs(sq.where(f.month == 4), [])
        self.assertPKs(sq.where(f.day == 2), [0])
        self.assertPKs(sq.where(f.day > 2), [1, 2])
        self.assertPKs(sq.where(f.day == 5), [])
    def test_extract_time_where(self, f=None):
        if f is None:
            f = NullModel.time_field
        sq = NullModel.select(NullModel.id)
        self.assertPKs(sq.where(f.hour == 3), [0])
        self.assertPKs(sq.where(f.hour == 4), [1, 2])
        self.assertPKs(sq.where(f.hour == 5), [])
        self.assertPKs(sq.where(f.minute == 4), [0])
        self.assertPKs(sq.where(f.minute > 4), [1, 2])
        self.assertPKs(sq.where(f.minute == 7), [])
        self.assertPKs(sq.where(f.second == 5), [0])
        self.assertPKs(sq.where(f.second > 5), [1, 2])
        self.assertPKs(sq.where(f.second == 8), [])
class TestUniqueColumnConstraint(ModelTestCase):
    """Exercises single-column UNIQUE constraints and composite unique
    indexes."""
    requires = [UniqueModel, MultiIndexModel]
    def test_unique(self):
        # Two distinct names succeed; a duplicate must be rejected.
        UniqueModel.create(name='a')
        UniqueModel.create(name='b')
        self.assertRaises(Exception, UniqueModel.create, name='a')
        test_db.rollback()
    def test_multi_index(self):
        # Seed two rows whose composite keys do not collide.
        MultiIndexModel.create(f1='a', f2='a', f3='a')
        MultiIndexModel.create(f1='b', f2='b', f3='b')
        # Re-using an existing (f1, f2) pair must fail, regardless of f3
        # (MultiIndexModel presumably has a unique index over f1/f2).
        self.assertRaises(Exception, MultiIndexModel.create, f1='a', f2='a', f3='b')
        test_db.rollback()
        self.assertRaises(Exception, MultiIndexModel.create, f1='b', f2='b', f3='a')
        test_db.rollback()
        # A fresh (f1, f2) combination is accepted.
        MultiIndexModel.create(f1='a', f2='b', f3='b')
class TestNonIntegerPrimaryKey(ModelTestCase):
    """Tests models whose primary key is a non-auto-incrementing string,
    both as a pk and as the target of a foreign key."""
    requires = [NonIntModel, NonIntRelModel]
    def test_non_int_pk(self):
        ni1 = NonIntModel.create(pk='a1', data='ni1')
        self.assertEqual(ni1.pk, 'a1')
        # With a user-supplied pk the first save must force an INSERT;
        # a subsequent save issues an UPDATE and keeps the same pk.
        ni2 = NonIntModel(pk='a2', data='ni2')
        ni2.save(force_insert=True)
        self.assertEqual(ni2.pk, 'a2')
        ni2.save()
        self.assertEqual(ni2.pk, 'a2')
        self.assertEqual(NonIntModel.select().count(), 2)
        ni1_db = NonIntModel.get(NonIntModel.pk=='a1')
        self.assertEqual(ni1_db.data, ni1.data)
        self.assertEqual([(x.pk, x.data) for x in NonIntModel.select().order_by(NonIntModel.pk)], [
            ('a1', 'ni1'), ('a2', 'ni2'),
        ])
    def test_non_int_fk(self):
        ni1 = NonIntModel.create(pk='a1', data='ni1')
        ni2 = NonIntModel.create(pk='a2', data='ni2')
        # Two related rows pointing at ni1, none at ni2.
        rni11 = NonIntRelModel(non_int_model=ni1)
        rni12 = NonIntRelModel(non_int_model=ni1)
        rni11.save()
        rni12.save()
        # `nr` is the back-reference from NonIntModel to its rel rows.
        self.assertEqual([r.id for r in ni1.nr.order_by(NonIntRelModel.id)], [rni11.id, rni12.id])
        self.assertEqual([r.id for r in ni2.nr.order_by(NonIntRelModel.id)], [])
        rni21 = NonIntRelModel.create(non_int_model=ni2)
        self.assertEqual([r.id for r in ni2.nr.order_by(NonIntRelModel.id)], [rni21.id])
        # Joining across the string fk works like any other join.
        sq = NonIntRelModel.select().join(NonIntModel).where(NonIntModel.data == 'ni2')
        self.assertEqual([r.id for r in sq], [rni21.id])
class TestPrimaryKeyIsForeignKey(ModelTestCase):
    """Covers models whose primary key doubles as a foreign key
    (JobExecutionRecord's pk references Job)."""
    requires = [Job, JobExecutionRecord]
    def test_primary_foreign_key(self):
        # Before any execution record exists the inner join yields nothing.
        job = Job.create(name='Job One')
        executed = Job.select().join(JobExecutionRecord)
        self.assertEqual([], list(executed))
        # Once executed, the job is reachable through the join.
        JobExecutionRecord.create(job=job, status='success')
        executed = Job.select().join(JobExecutionRecord)
        self.assertEqual([job], list(executed))
        # The pk/fk column forbids a second record for the same job.
        self.assertRaises(Exception, JobExecutionRecord.create, job=job, status='success')
        test_db.rollback()
class TestFieldDatabaseColumn(ModelTestCase):
    """Tests fields whose Python attribute name differs from the actual
    database column (db_column overrides, e.g. username -> db_username)."""
    requires = [DBUser, DBBlog]
    def test_select(self):
        # Generated SQL must reference the db_* column names, never the
        # Python attribute names.
        sq = DBUser.select().where(DBUser.username == 'u1')
        self.assertSelect(sq, '"dbuser"."db_user_id", "dbuser"."db_username"', [])
        self.assertWhere(sq, '("dbuser"."db_username" = ?)', ['u1'])
        sq = DBUser.select(DBUser.user_id).join(DBBlog).where(DBBlog.title == 'b1')
        self.assertSelect(sq, '"dbuser"."db_user_id"', [])
        self.assertJoins(sq, ['INNER JOIN "dbblog" AS dbblog ON ("dbuser"."db_user_id" = "dbblog"."db_user")'])
        self.assertWhere(sq, '("dbblog"."db_title" = ?)', ['b1'])
    def test_db_column(self):
        # CRUD through the renamed columns behaves exactly like normal
        # attribute access, including fk traversal and back-references.
        u1 = DBUser.create(username='u1')
        u2 = DBUser.create(username='u2')
        u2_db = DBUser.get(DBUser.user_id==u2._get_pk_value())
        self.assertEqual(u2_db.username, 'u2')
        b1 = DBBlog.create(user=u1, title='b1')
        b2 = DBBlog.create(user=u2, title='b2')
        b2_db = DBBlog.get(DBBlog.blog_id==b2._get_pk_value())
        self.assertEqual(b2_db.user.user_id, u2.user_id)
        self.assertEqual(b2_db.title, 'b2')
        self.assertEqual([b.title for b in u2.dbblog_set], ['b2'])
class _SqliteDateTestHelper(PeeweeTestCase):
    """Shared fixture for the sqlite date-function tests below.

    ``create_date_model`` builds a model bound to a throwaway in-memory
    sqlite database, pre-populated with the two ``datetimes`` fixtures;
    *date_fn* is a callable ``(field, part) -> SQL expression`` that each
    subclass supplies to pick the date function under test.
    """
    datetimes = [
        datetime.datetime(2000, 1, 2, 3, 4, 5),
        datetime.datetime(2000, 2, 3, 4, 5, 6),
    ]
    def create_date_model(self, date_fn):
        # Fresh in-memory db per call so tests cannot interfere.
        dp_db = SqliteDatabase(':memory:')
        class SqDp(Model):
            datetime_field = DateTimeField()
            date_field = DateField()
            time_field = TimeField()
            null_datetime_field = DateTimeField(null=True)
            class Meta:
                database = dp_db
            @classmethod
            def date_query(cls, field, part):
                # Single-column query applying date_fn to `field`,
                # ordered by insertion id.
                return (SqDp
                        .select(date_fn(field, part))
                        .tuples()
                        .order_by(SqDp.id))
        SqDp.create_table()
        for d in self.datetimes:
            SqDp.create(datetime_field=d, date_field=d.date(),
                        time_field=d.time())
        return SqDp
class TestSQLiteDatePart(_SqliteDateTestHelper):
    """Tests for the SQLite ``date_part`` function registered by peewee."""

    def test_sqlite_date_part(self):
        date_fn = lambda field, part: fn.date_part(part, field)
        SqDp = self.create_date_model(date_fn)
        # Datetime columns expose all six parts...
        for part in ('year', 'month', 'day', 'hour', 'minute', 'second'):
            for i, dp in enumerate(SqDp.date_query(SqDp.datetime_field, part)):
                self.assertEqual(dp[0], getattr(self.datetimes[i], part))
        # ...date columns only the date parts...
        for part in ('year', 'month', 'day'):
            for i, dp in enumerate(SqDp.date_query(SqDp.date_field, part)):
                self.assertEqual(dp[0], getattr(self.datetimes[i], part))
        # ...and time columns only the time parts.
        for part in ('hour', 'minute', 'second'):
            for i, dp in enumerate(SqDp.date_query(SqDp.time_field, part)):
                self.assertEqual(dp[0], getattr(self.datetimes[i], part))
        # ensure that the where clause works
        query = SqDp.select().where(fn.date_part('year', SqDp.datetime_field) == 2000)
        self.assertEqual(query.count(), 2)
        query = SqDp.select().where(fn.date_part('month', SqDp.datetime_field) == 1)
        self.assertEqual(query.count(), 1)
        query = SqDp.select().where(fn.date_part('month', SqDp.datetime_field) == 3)
        self.assertEqual(query.count(), 0)
        # A nullable column must not break date_part: NULL rows are simply
        # excluded from the match.
        null_sqdp = SqDp.create(
            datetime_field=datetime.datetime.now(),
            date_field=datetime.date.today(),
            time_field=datetime.time(0, 0),
            null_datetime_field=datetime.datetime(2014, 1, 1))
        query = SqDp.select().where(
            fn.date_part('year', SqDp.null_datetime_field) == 2014)
        self.assertEqual(query.count(), 1)
        self.assertEqual(list(query), [null_sqdp])
class TestSQLiteDateTrunc(_SqliteDateTestHelper):
    """Tests for the SQLite ``date_trunc`` function: truncation yields a
    string prefix of the ISO representation, one level per 'part'."""

    def test_sqlite_date_trunc(self):
        date_fn = lambda field, part: fn.date_trunc(part, field)
        SqDp = self.create_date_model(date_fn)

        def assertQuery(field, part, expected):
            values = SqDp.date_query(field, part)
            self.assertEqual([r[0] for r in values], expected)

        assertQuery(SqDp.datetime_field, 'year', ['2000', '2000'])
        assertQuery(SqDp.datetime_field, 'month', ['2000-01', '2000-02'])
        assertQuery(SqDp.datetime_field, 'day', ['2000-01-02', '2000-02-03'])
        assertQuery(SqDp.datetime_field, 'hour', [
            '2000-01-02 03', '2000-02-03 04'])
        assertQuery(SqDp.datetime_field, 'minute', [
            '2000-01-02 03:04', '2000-02-03 04:05'])
        assertQuery(SqDp.datetime_field, 'second', [
            '2000-01-02 03:04:05', '2000-02-03 04:05:06'])
        # NULL values truncate to None rather than raising.
        null_sqdp = SqDp.create(
            datetime_field=datetime.datetime.now(),
            date_field=datetime.date.today(),
            time_field=datetime.time(0, 0),
            null_datetime_field=datetime.datetime(2014, 1, 1))
        assertQuery(SqDp.null_datetime_field, 'year', [None, None, '2014'])
class TestCheckConstraints(ModelTestCase):
    """Tests for CHECK constraint enforcement across backends."""
    requires = [CheckModel]

    def test_check_constraint(self):
        # A value satisfying the constraint always works.
        CheckModel.create(value=1)
        if isinstance(test_db, MySQLDatabase):
            # MySQL silently ignores all check constraints.
            CheckModel.create(value=0)
        else:
            # Other backends reject the row; run in a transaction and roll
            # back so the failed INSERT does not poison the connection.
            with test_db.transaction() as txn:
                self.assertRaises(IntegrityError, CheckModel.create, value=0)
                txn.rollback()
| mit |
cloudera/hue | desktop/libs/indexer/src/indexer/api3.py | 2 | 25066 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from past.builtins import basestring
import csv
import json
import logging
import urllib.error
import sys
import tempfile
import uuid
from django.urls import reverse
from django.views.decorators.http import require_POST
LOG = logging.getLogger(__name__)
try:
from simple_salesforce.api import Salesforce
from simple_salesforce.exceptions import SalesforceRefusedRequest
except ImportError:
LOG.warning('simple_salesforce module not found')
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.lib.python_util import check_encoding
from desktop.models import Document2
from kafka.kafka_api import get_topics, get_topic_data
from notebook.connectors.base import get_api, Notebook
from notebook.decorators import api_error_handler
from notebook.models import MockedDjangoRequest, escape_rows
from indexer.controller import CollectionManagerController
from indexer.file_format import HiveFormat
from indexer.fields import Field, guess_field_type_from_samples
from indexer.indexers.envelope import _envelope_job
from indexer.indexers.base import get_api
from indexer.indexers.flink_sql import FlinkIndexer
from indexer.indexers.morphline import MorphlineIndexer, _create_solr_collection
from indexer.indexers.phoenix_sql import PhoenixIndexer
from indexer.indexers.rdbms import run_sqoop, _get_api
from indexer.indexers.sql import _create_database, _create_table, _create_table_from_local
from indexer.models import _save_pipeline
from indexer.solr_client import SolrClient, MAX_UPLOAD_SIZE
from indexer.indexers.flume import FlumeIndexer
if sys.version_info[0] > 2:
from io import StringIO as string_io
from urllib.parse import urlparse, unquote as urllib_unquote
from django.utils.translation import gettext as _
else:
from StringIO import StringIO as string_io
from urllib import unquote as urllib_unquote
from urlparse import urlparse
from django.utils.translation import ugettext as _
try:
from beeswax.server import dbms
except ImportError as e:
LOG.warning('Hive and HiveServer2 interfaces are not enabled')
try:
from filebrowser.views import detect_parquet
except ImportError as e:
LOG.warning('File Browser interface is not enabled')
try:
from search.conf import SOLR_URL
except ImportError as e:
LOG.warning('Solr Search interface is not enabled')
def _escape_white_space_characters(s, inverse=False):
MAPPINGS = {
"\n": "\\n",
"\t": "\\t",
"\r": "\\r",
" ": "\\s"
}
to = 1 if inverse else 0
from_ = 0 if inverse else 1
for pair in MAPPINGS.items():
if sys.version_info[0] > 2:
s = s.replace(pair[to], pair[from_])
else:
s = s.replace(pair[to], pair[from_]).encode('utf-8')
return s
def _convert_format(format_dict, inverse=False):
  """Escape (or unescape, when ``inverse`` is True) whitespace characters
  in every string value of ``format_dict``, mutating it in place.

  Non-string values are left untouched.
  """
  for key in format_dict:
    value = format_dict[key]
    if isinstance(value, basestring):
      format_dict[key] = _escape_white_space_characters(value, inverse)
@api_error_handler
def guess_format(request):
  """Guess the format (type, separators, header presence...) of the data
  source described by the POSTed ``fileFormat`` JSON document.

  Supported input formats: 'localfile', 'file' (remote FS), 'table'
  (Hive), 'query', 'rdbms', 'stream' (kafka/flume) and 'connector'
  (Salesforce).

  Returns a JsonResponse with the guessed format dict (``status`` 0 on
  success); raises PopupException on unknown formats or bad paths.

  Fix: the connector error message used ``$(connectorSelection)s`` instead
  of ``%(connectorSelection)s``, so the placeholder was never substituted.
  """
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'localfile':
    # Local uploads are always treated as headered CSV.
    format_ = {
      "quoteChar": "\"",
      "recordSeparator": '\\n',
      "type": "csv",
      "hasHeader": True,
      "fieldSeparator": ","
    }
  elif file_format['inputFormat'] == 'file':
    path = urllib_unquote(file_format["path"])
    indexer = MorphlineIndexer(request.user, request.fs)
    if not request.fs.isfile(path):
      raise PopupException(_('Path %(path)s is not a file') % file_format)
    stream = request.fs.open(path)
    format_ = indexer.guess_format({
      "file": {
        "stream": stream,
        "name": path
      }
    })
    _convert_format(format_)
  elif file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    try:
      table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    except Exception as e:
      raise PopupException(e.message if hasattr(e, 'message') and e.message else e)
    # Flatten the storage details into a key/value dict (e.g. field.delim).
    storage = {}
    for delim in table_metadata.storage_details:
      if delim['data_type']:
        if '=' in delim['data_type']:
          key, val = delim['data_type'].split('=', 1)
          storage[key] = val
        else:
          storage[delim['data_type']] = delim['comment']
    if table_metadata.details['properties']['format'] == 'text':
      format_ = {
        "quoteChar": "\"",
        "recordSeparator": '\\n',
        "type": "csv",
        "hasHeader": False,
        "fieldSeparator": storage.get('field.delim', ',')
      }
    elif table_metadata.details['properties']['format'] == 'parquet':
      format_ = {"type": "parquet", "hasHeader": False,}
    else:
      raise PopupException('Hive table format %s is not supported.' % table_metadata.details['properties']['format'])
  elif file_format['inputFormat'] == 'query':
    # Hive query results export as ^A-separated, headerless CSV.
    format_ = {
      "quoteChar": "\"",
      "recordSeparator": "\\n",
      "type": "csv",
      "hasHeader": False,
      "fieldSeparator": "\u0001"
    }
  elif file_format['inputFormat'] == 'rdbms':
    format_ = {"type": "csv"}
  elif file_format['inputFormat'] == 'stream':
    if file_format['streamSelection'] == 'kafka':
      format_ = {
        "type": "json",
        # "fieldSeparator": ",",
        # "hasHeader": True,
        # "quoteChar": "\"",
        # "recordSeparator": "\\n",
        'topics': get_topics(request.user)
      }
    elif file_format['streamSelection'] == 'flume':
      format_ = {
        "type": "csv",
        "fieldSeparator": ",",
        "hasHeader": True,
        "quoteChar": "\"",
        "recordSeparator": "\\n"
      }
  elif file_format['inputFormat'] == 'connector':
    if file_format['connectorSelection'] == 'sfdc':
      sf = Salesforce(
        username=file_format['streamUsername'],
        password=file_format['streamPassword'],
        security_token=file_format['streamToken']
      )
      format_ = {
        "type": "csv",
        "fieldSeparator": ",",
        "hasHeader": True,
        "quoteChar": "\"",
        "recordSeparator": "\\n",
        # Only queryable Salesforce objects are offered.
        'objects': [sobject['name'] for sobject in sf.restful('sobjects/')['sobjects'] if sobject['queryable']]
      }
    else:
      # Was '$(connectorSelection)s': the '$' prevented %-substitution.
      raise PopupException(_('Input format %(inputFormat)s connector not recognized: %(connectorSelection)s') % file_format)
  else:
    raise PopupException(_('Input format not recognized: %(inputFormat)s') % file_format)

  format_['status'] = 0
  return JsonResponse(format_)
def decode_utf8(input_iterator):
  """Lazily decode every item of ``input_iterator`` from UTF-8 bytes to
  text, yielding one decoded item per input item."""
  return (line.decode('utf-8') for line in input_iterator)
def guess_field_types(request):
  """Sample the data source described by the POSTed ``fileFormat`` JSON
  and return {'columns': [...], 'sample': [...]} with guessed field types.

  Mirrors guess_format() branch-for-branch (localfile, file, table,
  query, rdbms, stream, connector).

  NOTE(review): unlike guess_format(), this view is not wrapped in
  @api_error_handler — confirm whether that is intentional.
  """
  file_format = json.loads(request.POST.get('fileFormat', '{}'))

  if file_format['inputFormat'] == 'localfile':
    path = urllib_unquote(file_format['path'])

    with open(path, 'r') as local_file:
      reader = csv.reader(local_file)
      csv_data = list(reader)

      if file_format['format']['hasHeader']:
        # First row is the header; sample the next four rows.
        sample = csv_data[1:5]
        column_row = csv_data[0]
      else:
        # No header: synthesize field_1..field_N names.
        sample = csv_data[:4]
        column_row = ['field_' + str(count+1) for count, col in enumerate(sample[0])]

      # Guess each column's type from its sampled values (rows shorter
      # than the header are skipped for that column).
      field_type_guesses = []
      for count, col in enumerate(column_row):
        column_samples = [sample_row[count] for sample_row in sample if len(sample_row) > count]
        field_type_guess = guess_field_type_from_samples(column_samples)
        field_type_guesses.append(field_type_guess)

      columns = [
        Field(column_row[count], field_type_guesses[count]).to_dict()
        for count, col in enumerate(column_row)
      ]

      format_ = {
        'columns': columns,
        'sample': sample
      }
  elif file_format['inputFormat'] == 'file':
    indexer = MorphlineIndexer(request.user, request.fs)
    path = urllib_unquote(file_format["path"])
    stream = request.fs.open(path)
    # Sniff the encoding from the first 10KB, then rewind for parsing.
    encoding = check_encoding(stream.read(10000))
    LOG.debug('File %s encoding is %s' % (path, encoding))
    stream.seek(0)
    _convert_format(file_format["format"], inverse=True)

    format_ = indexer.guess_field_types({
      "file": {
        "stream": stream,
        "name": path
      },
      "format": file_format['format']
    })

    # Note: Would also need to set charset to table (only supported in Hive)
    if 'sample' in format_ and format_['sample']:
      format_['sample'] = escape_rows(format_['sample'], nulls_only=True, encoding=encoding)
    for col in format_['columns']:
      col['name'] = smart_unicode(col['name'], errors='replace', encoding=encoding)
  elif file_format['inputFormat'] == 'table':
    # NOTE(review): this module imports get_api twice (notebook.connectors.base
    # and indexer.indexers.base); the later import shadows the earlier one, so
    # this (request, snippet)-style call may hit the wrong function — verify.
    sample = get_api(
      request, {'type': 'hive'}).get_sample_data({'type': 'hive'}, database=file_format['databaseName'], table=file_format['tableName']
    )
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])

    format_ = {
      "sample": sample['rows'][:4],
      "columns": [
        Field(col.name, HiveFormat.FIELD_TYPE_TRANSLATE.get(col.type, 'string')).to_dict()
        for col in table_metadata.cols
      ]
    }
  elif file_format['inputFormat'] == 'query':
    query_id = file_format['query']['id'] if file_format['query'].get('id') else file_format['query']
    notebook = Notebook(document=Document2.objects.document(user=request.user, doc_id=query_id)).get_data()
    snippet = notebook['snippets'][0]
    db = get_api(request, snippet)

    if file_format.get('sampleCols'):
      # The client already sampled: reuse its columns/rows.
      columns = file_format.get('sampleCols')
      sample = file_format.get('sample')
    else:
      snippet['query'] = snippet['statement']
      try:
        sample = db.fetch_result(notebook, snippet, 4, start_over=True)['rows'][:4]
      except Exception as e:
        # Expired query handles are tolerated: fall back to empty rows.
        LOG.warning('Skipping sample data as query handle might be expired: %s' % e)
        sample = [[], [], [], [], []]
      columns = db.autocomplete(snippet=snippet, database='', table='')
      columns = [
        Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
        for col in columns['extended_columns']
      ]
    format_ = {
      "sample": sample,
      "columns": columns,
    }
  elif file_format['inputFormat'] == 'rdbms':
    api = _get_api(request)
    sample = api.get_sample_data(None, database=file_format['rdbmsDatabaseName'], table=file_format['tableName'])

    format_ = {
      "sample": list(sample['rows'])[:4],
      "columns": [
        Field(col['name'], col['type']).to_dict()
        for col in sample['full_headers']
      ]
    }
  elif file_format['inputFormat'] == 'stream':
    if file_format['streamSelection'] == 'kafka':
      data = get_topic_data(
        request.user,
        file_format.get('kafkaSelectedTopics')
      )

      kafkaFieldNames = [col['name'] for col in data['full_headers']]
      kafkaFieldTypes = [col['type'] for col in data['full_headers']]
      topics_data = data['rows']

      # All Kafka columns are currently surfaced as 'string'.
      format_ = {
        "sample": topics_data,
        "columns": [
          Field(col, 'string', unique=False).to_dict()
          for col in kafkaFieldNames
        ]
      }

#       data = """%(kafkaFieldNames)s
# %(data)s""" % {
#         'kafkaFieldNames': ','.join(kafkaFieldNames),
#         'data': '\n'.join([','.join(cols) for cols in topics_data])
#       }
#       stream = string_io()
#       stream.write(data)

#       _convert_format(file_format["format"], inverse=True)

#       indexer = MorphlineIndexer(request.user, request.fs)

#       format_ = indexer.guess_field_types({
#         "file": {
#             "stream": stream,
#             "name": file_format['path']
#           },
#         "format": file_format['format']
#       })
#       type_mapping = dict(
#         list(
#           zip(kafkaFieldNames, kafkaFieldTypes)
#         )
#       )

#       for col in format_['columns']:
#         col['keyType'] = type_mapping[col['name']]
#         col['type'] = type_mapping[col['name']]
    elif file_format['streamSelection'] == 'flume':
      if 'hue-httpd/access_log' in file_format['channelSourcePath']:
        # Well-known Hue httpd access-log schema.
        columns = [
          {'name': 'id', 'type': 'string', 'unique': True},
          {'name': 'client_ip', 'type': 'string'},
          {'name': 'time', 'type': 'date'},
          {'name': 'request', 'type': 'string'},
          {'name': 'code', 'type': 'plong'},
          {'name': 'bytes', 'type': 'plong'},
          {'name': 'method', 'type': 'string'},
          {'name': 'url', 'type': 'string'},
          {'name': 'protocol', 'type': 'string'},
          {'name': 'app', 'type': 'string'},
          {'name': 'subapp', 'type': 'string'}
        ]
      else:
        columns = [{'name': 'message', 'type': 'string'}]

      format_ = {
        # No real sample is available for Flume: show placeholders.
        "sample": [['...'] * len(columns)] * 4,
        "columns": [
          Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string'), unique=col.get('unique')).to_dict()
          for col in columns
        ]
      }
  elif file_format['inputFormat'] == 'connector':
    if file_format['connectorSelection'] == 'sfdc':
      sf = Salesforce(
        username=file_format['streamUsername'],
        password=file_format['streamPassword'],
        security_token=file_format['streamToken']
      )
      table_metadata = [{
          'name': column['name'],
          'type': column['type']
        } for column in sf.restful('sobjects/%(streamObject)s/describe/' % file_format)['fields']
      ]
      query = 'SELECT %s FROM %s LIMIT 4' % (', '.join([col['name'] for col in table_metadata]), file_format['streamObject'])
      # NOTE(review): debug print left in production code — consider
      # replacing with LOG.debug.
      print(query)

      try:
        records = sf.query_all(query)
      except SalesforceRefusedRequest as e:
        raise PopupException(message=str(e))

      format_ = {
        # First value of each record is SFDC metadata: skip it.
        "sample": [list(row.values())[1:] for row in records['records']],
        "columns": [
          Field(col['name'], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
          for col in table_metadata
        ]
      }
    else:
      raise PopupException(_('Connector format not recognized: %(connectorSelection)s') % file_format)
  else:
    raise PopupException(_('Input format not recognized: %(inputFormat)s') % file_format)

  return JsonResponse(format_)
@api_error_handler
def importer_submit(request):
  """Launch the import job described by the POSTed 'source' and
  'destination' JSON documents and return its handle as JSON.

  Dispatches on source inputFormat / destination outputFormat to Solr
  indexing, Flink (streams), Sqoop (rdbms), Phoenix (big-table) or
  SQL table/database creation.

  NOTE(review): the code mixes the 'outputFormat' and (misspelled but
  deliberately kept) 'ouputFormat' destination keys — the copy below is
  an acknowledged workaround, do not "fix" one without the other.
  """
  source = json.loads(request.POST.get('source', '{}'))
  outputFormat = json.loads(request.POST.get('destination', '{}'))['outputFormat']
  destination = json.loads(request.POST.get('destination', '{}'))
  destination['ouputFormat'] = outputFormat  # Workaround a very weird bug
  start_time = json.loads(request.POST.get('start_time', '-1'))

  file_encoding = None
  if source['inputFormat'] == 'file':
    if source['path']:
      path = urllib_unquote(source['path'])
      source['path'] = request.fs.netnormpath(path)
      # Sniff encoding from the first 10KB so table creation can set it.
      stream = request.fs.open(path)
      file_encoding = check_encoding(stream.read(10000))

  if destination['ouputFormat'] in ('database', 'table'):
    destination['nonDefaultLocation'] = request.fs.netnormpath(destination['nonDefaultLocation']) \
        if destination['nonDefaultLocation'] else destination['nonDefaultLocation']

  if destination['ouputFormat'] == 'index':
    source['columns'] = destination['columns']
    index_name = destination["name"]

    if destination['indexerRunJob'] or source['inputFormat'] == 'stream':
      # Large/streaming data: submit a cluster indexing job.
      _convert_format(source["format"], inverse=True)
      job_handle = _large_indexing(
          request,
          source,
          index_name,
          start_time=start_time,
          lib_path=destination['indexerJobLibPath'],
          destination=destination
      )
    else:
      # Small data: index directly via the Solr REST API.
      client = SolrClient(request.user)
      job_handle = _small_indexing(
          request.user,
          request.fs,
          client,
          source,
          destination, index_name
      )
  elif source['inputFormat'] in ('stream', 'connector') or destination['ouputFormat'] == 'stream':
    # Streams go through Flink SQL; 'show_command' returns the SQL only.
    args = {
      'source': source,
      'destination': destination,
      'start_time': start_time,
      'dry_run': request.POST.get('show_command')
    }
    api = FlinkIndexer(
      request.user,
      request.fs
    )

    job_nb = api.create_table_from_kafka(**args)

    if request.POST.get('show_command'):
      job_handle = {
        'status': 0,
        'commands': job_nb
      }
    else:
      job_handle = job_nb.execute(request, batch=False)
  elif source['inputFormat'] == 'altus':
    # BDR copy or DistCP + DDL + Sentry DDL copy
    pass
  elif source['inputFormat'] == 'rdbms':
    if destination['outputFormat'] in ('database', 'file', 'table', 'hbase'):
      job_handle = run_sqoop(
        request,
        source,
        destination,
        start_time
      )
    # NOTE(review): when outputFormat matches none of the above,
    # job_handle stays unbound and JsonResponse() below raises
    # UnboundLocalError — confirm whether that path is reachable.
  elif destination['ouputFormat'] == 'database':
    job_handle = _create_database(
      request,
      source,
      destination,
      start_time
    )
  elif destination['ouputFormat'] == 'big-table':
    # Phoenix (HBase SQL) table creation.
    args = {
      'source': source,
      'destination': destination,
      'start_time': start_time,
      'dry_run': request.POST.get('show_command')
    }
    api = PhoenixIndexer(
      request.user,
      request.fs
    )

    job_nb = api.create_table_from_file(**args)

    if request.POST.get('show_command'):
      job_handle = {
        'status': 0,
        'commands': job_nb
      }
    else:
      job_handle = job_nb.execute(request, batch=False)
  else:
    if source['inputFormat'] == 'localfile':
      job_handle = _create_table_from_local(
        request,
        source,
        destination,
        start_time
      )
    else:
      job_handle = _create_table(
        request,
        source,
        destination,
        start_time,
        file_encoding
      )

  # Record the export in the audit log.
  request.audit = {
    'operation': 'EXPORT',
    'operationText': 'User %(username)s exported %(inputFormat)s to %(ouputFormat)s: %(name)s' % {
        'username': request.user.username,
        'inputFormat': source['inputFormat'],
        'ouputFormat': destination['ouputFormat'],
        'name': destination['name'],
    },
    'allowed': True
  }

  return JsonResponse(job_handle)
@require_POST
@api_error_handler
def index(request):
  '''
  Input: pasted data, CSV/json files, Kafka topic
  Output: tables
  '''
  source = json.loads(request.POST.get('source', '{}'))
  destination = json.loads(request.POST.get('destination', '{}'))
  options = json.loads(request.POST.get('options', '{}'))

  # Resolve the connector-specific indexing API.
  api = get_api(request.user, request.POST.get('connector'))

  uploaded = request.FILES.get('data')
  if uploaded:
    source['file'] = uploaded

  return JsonResponse({'result': api.index(source, destination, options)})
def _small_indexing(user, fs, client, source, destination, index_name):
  """Index a small data set directly through the Solr REST API.

  Creates the collection, pushes the data (file content or Hive query
  results), and deletes the collection again if indexing fails.
  Raises PopupException when a source file exceeds MAX_UPLOAD_SIZE.
  Returns a job-handle dict with 'status', 'on_success_url' and 'errors'.
  """
  kwargs = {}
  errors = []

  if source['inputFormat'] not in ('manual', 'table', 'query_handle'):
    # Guard against over-large direct uploads before reading anything.
    path = urllib_unquote(source["path"])
    stats = fs.stats(path)
    if stats.size > MAX_UPLOAD_SIZE:
      raise PopupException(_('File size is too large to handle!'))

  indexer = MorphlineIndexer(user, fs)

  fields = indexer.get_field_list(destination['columns'])
  _create_solr_collection(user, fs, client, destination, index_name, kwargs)

  if source['inputFormat'] == 'file':
    kwargs['separator'] = source['format']['fieldSeparator']
    path = urllib_unquote(source["path"])
    data = fs.read(path, 0, MAX_UPLOAD_SIZE)

  if client.is_solr_six_or_more():
    # Tolerant processing: bad documents are skipped, NULLs are dropped.
    kwargs['processor'] = 'tolerant'
    kwargs['map'] = 'NULL:'

  try:
    if source['inputFormat'] == 'query':
      query_id = source['query']['id'] if source['query'].get('id') else source['query']
      notebook = Notebook(document=Document2.objects.document(user=user, doc_id=query_id)).get_data()
      request = MockedDjangoRequest(user=user)
      snippet = notebook['snippets'][0]

      searcher = CollectionManagerController(user)
      columns = [field['name'] for field in fields if field['name'] != 'hue_id']
      # Assumes handle still live
      fetch_handle = lambda rows, start_over: get_api(
          request, snippet
      ).fetch_result(
          notebook,
          snippet,
          rows=rows,
          start_over=start_over
      )
      rows = searcher.update_data_from_hive(
          index_name,
          columns,
          fetch_handle=fetch_handle,
          indexing_options=kwargs
      )
      # TODO if rows == MAX_ROWS truncation warning
    elif source['inputFormat'] == 'manual':
      pass  # No need to do anything
    else:
      response = client.index(name=index_name, data=data, **kwargs)
      errors = [error.get('message', '') for error in response['responseHeader'].get('errors', [])]
  except Exception as e:
    # Best-effort cleanup: remove the half-created collection, then
    # re-raise the original failure.
    try:
      client.delete_index(index_name, keep_config=False)
    except Exception as e2:
      LOG.warning('Error while cleaning-up config of failed collection creation %s: %s' % (index_name, e2))
    raise e

  return {
    'status': 0,
    'on_success_url': reverse('indexer:indexes',
                              kwargs={'index': index_name}),
    'pub_sub_url': 'assist.collections.refresh',
    'errors': errors
  }
def _large_indexing(request, file_format, collection_name, query=None, start_time=None, lib_path=None, destination=None):
  """Index a large data set into Solr via a cluster job.

  Depending on the source: Flume streams get a Flume agent config,
  Kafka-style streams go through an Envelope job, and files/tables run a
  Morphline (MapReduce) indexing job. Creates the Solr collection first
  when it does not exist yet. Returns a job-handle dict.
  """
  indexer = MorphlineIndexer(request.user, request.fs)

  unique_field = indexer.get_unique_field(file_format)
  is_unique_generated = indexer.is_unique_generated(file_format)

  schema_fields = indexer.get_kept_field_list(file_format['columns'])
  if is_unique_generated:
    # The generated id column must exist in the Solr schema too.
    schema_fields += [{"name": unique_field, "type": "string"}]

  client = SolrClient(user=request.user)

  if not client.exists(collection_name) and not request.POST.get('show_command'):  # if destination['isTargetExisting']:
    client.create_index(
      name=collection_name,
      fields=request.POST.get('fields', schema_fields),
      unique_key_field=unique_field
      # No df currently
    )
  else:
    # TODO: check if format matches
    pass

  if file_format['inputFormat'] == 'table':
    db = dbms.get(request.user)
    table_metadata = db.get_table(database=file_format['databaseName'], table_name=file_format['tableName'])
    input_path = table_metadata.path_location
  elif file_format['inputFormat'] == 'stream' and file_format['streamSelection'] == 'flume':
    indexer = FlumeIndexer(user=request.user)
    if request.POST.get('show_command'):
      configs = indexer.generate_config(file_format, destination)
      return {'status': 0, 'commands': configs[-1]}
    else:
      return indexer.start(collection_name, file_format, destination)
  elif file_format['inputFormat'] == 'stream':
    return _envelope_job(request, file_format, destination, start_time=start_time, lib_path=lib_path)
  elif file_format['inputFormat'] == 'file':
    input_path = '${nameNode}%s' % urllib_unquote(file_format["path"])
  else:
    input_path = None

  morphline = indexer.generate_morphline_config(collection_name, file_format, unique_field, lib_path=lib_path)

  return indexer.run_morphline(
      request,
      collection_name,
      morphline,
      input_path,
      query,
      start_time=start_time,
      lib_path=lib_path
  )
@api_error_handler
@require_POST
# @check_document_modify_permission()
def save_pipeline(request):
  """Persist the pipeline described by the POSTed notebook document and
  return its saved representation as JSON."""
  notebook = json.loads(request.POST.get('notebook', '{}'))

  notebook_doc, save_as = _save_pipeline(notebook, request.user)

  is_editor = request.POST.get('editorMode') == 'true'
  response = {
    'status': 0,
    'save_as': save_as,
  }
  response.update(notebook_doc.to_dict())
  response['message'] = _('Query saved successfully') if is_editor else _('Notebook saved successfully')

  return JsonResponse(response)
def upload_local_file(request):
  """Save the uploaded 'inputfile' to a local temporary CSV file and
  return its path as JSON ({'local_file_url': <path>}).

  The temp file is created with delete=False because it must outlive
  this request: the importer reads it back later via local_file_url.
  """
  upload_file = request.FILES['inputfile']
  username = request.user.username
  filename = "%s_%s" % (username, uuid.uuid4())

  with tempfile.NamedTemporaryFile(prefix=filename, suffix='.csv', delete=False) as temp_file:
    # Stream the upload in chunks instead of loading it entirely into
    # memory (upload_file.read() buffered the whole file before).
    for chunk in upload_file.chunks():
      temp_file.write(chunk)
    local_file_url = temp_file.name

  return JsonResponse({'local_file_url': local_file_url})
| apache-2.0 |
fabiopereira/fabiopereirame_jekyll_mickey_blogpt | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape a path for a .ninja file: '$', spaces and ':' are
    significant to Ninja, so each gets a '$' prefix."""
    for plain, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(plain, escaped)
    return word
class Writer(object):
    """Writes Ninja build-file syntax ('rule', 'build', 'variable'...)
    to an output stream, word-wrapping long lines with Ninja's '$'
    continuation syntax."""

    def __init__(self, output, width=78):
        # output: any object with a write() method; width: wrap column.
        self.output = output
        self.width = width

    def newline(self):
        """Emit a blank line."""
        self.output.write('\n')

    def comment(self, text):
        """Emit 'text' as '# '-prefixed comment lines, wrapped to width."""
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')

    def variable(self, key, value, indent=0):
        """Emit a 'key = value' binding; None is skipped, lists are
        joined with spaces (empty entries dropped)."""
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)

    def pool(self, name, depth):
        """Emit a pool declaration with the given depth."""
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)

    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        """Emit a rule declaration; optional keyword arguments become
        indented variable bindings under the rule."""
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)

    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        """Emit a build statement: 'build outputs: rule inputs | implicit
        || order_only', plus any per-build variable bindings.

        Returns the (listified, unescaped) outputs.
        """
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))

        if implicit:
            # '|' introduces implicit dependencies.
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            # '||' introduces order-only dependencies.
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)

        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))

        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)

            for key, val in iterator:
                self.variable(key, val, indent=1)

        return outputs

    def include(self, path):
        """Emit an 'include' directive (shares variable scope)."""
        self._line('include %s' % path)

    def subninja(self, path):
        """Emit a 'subninja' directive (new variable scope)."""
        self._line('subninja %s' % path)

    def default(self, paths):
        """Emit a 'default' statement naming the default targets."""
        self._line('default %s' % ' '.join(self._as_list(paths)))

    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count

    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.

            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space.
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                # An even number of preceding '$'s means the space itself
                # is unescaped and therefore a legal break point.
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break

            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
            if space < 0:
                # Give up on breaking.
                break

            # '$' at end-of-line is Ninja's line-continuation marker.
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]

            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)

        self.output.write(leading_space + text + '\n')

    def _as_list(self, input):
        # Normalize None/scalar/list into a list.
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file without
    further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is Ninja's only escape metacharacter; doubling it yields a
    # literal dollar sign.
    return '$$'.join(string.split('$'))
| mit |
xuleiboy1234/autoTitle | tensorflow/tensorflow/python/training/sync_replicas_optimizer.py | 21 | 20440 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Synchronize replicas for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import session_manager
from tensorflow.python.training import session_run_hook
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
class SyncReplicasOptimizer(optimizer.Optimizer):
  """Class to synchronize, aggregate gradients and pass them to the optimizer.

  In a typical asynchronous training environment, it's common to have some
  stale gradients. For example, with a N-replica asynchronous training,
  gradients will be applied to the variables N times independently. Depending
  on each replica's training speed, some gradients might be calculated from
  copies of the variable from several steps back (N-1 steps on average). This
  optimizer avoids stale gradients by collecting gradients from all replicas,
  averaging them, then applying them to the variables in one shot, after
  which replicas can fetch the new variables and continue.

  The following accumulators/queue are created:

  * N `gradient accumulators`, one per variable to train. Gradients are pushed
    to them and the chief worker will wait until enough gradients are collected
    and then average them before applying to variables. The accumulator will
    drop all stale gradients (more details in the accumulator op).
  * 1 `token` queue where the optimizer pushes the new global_step value after
    all variables are updated.

  The following local variable is created:

  * `sync_rep_local_step`, one per replica. Compared against the global_step in
    each accumulator to check for staleness of the gradients.

  The optimizer adds nodes to the graph to collect gradients and pause the
  trainers until variables are updated.

  For the Parameter Server job:

  1. An accumulator is created for each variable, and each replica pushes the
     gradients into the accumulators instead of directly applying them to the
     variables.
  2. Each accumulator averages once enough gradients (replicas_to_aggregate)
     have been accumulated.
  3. Apply the averaged gradients to the variables.
  4. Only after all variables have been updated, increment the global step.
  5. Only after step 4, pushes `global_step` in the `token_queue`, once for
     each worker replica. The workers can now fetch the global step, use it to
     update its local_step variable and start the next batch.

  For the replicas:

  1. Start a step: fetch variables and compute gradients.
  2. Once the gradients have been computed, push them into gradient
     accumulators. Each accumulator will check the staleness and drop the stale.
  3. After pushing all the gradients, dequeue an updated value of global_step
     from the token queue and record that step to its local_step variable. Note
     that this is effectively a barrier.
  4. Start the next batch.

  ### Usage

  ```python
  # Create any optimizer to update the variables, say a simple SGD:
  opt = GradientDescentOptimizer(learning_rate=0.1)

  # Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
  # step the optimizer collects 50 gradients before applying to variables.
  # Note that if you want to have 2 backup replicas, you can change
  # total_num_replicas=52 and make sure this number matches how many physical
  # replicas you started in your job.
  opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
                                 total_num_replicas=50)

  # Some models have startup_delays to help stabilize the model but when using
  # sync_replicas training, set it to 0.
  # Now you can call `minimize()` or `compute_gradients()` and
  # `apply_gradients()` normally
  training_op = opt.minimize(total_loss, global_step=self.global_step)

  # You can create the hook which handles initialization and queues.
  sync_replicas_hook = opt.make_session_run_hook(is_chief)
  ```

  In the training program, every worker will run the train_op as if not
  synchronized.

  ```python
  with training.MonitoredTrainingSession(
      master=workers[worker_id].target, is_chief=is_chief,
      hooks=[sync_replicas_hook]) as mon_sess:
    while not mon_sess.should_stop():
      mon_sess.run(training_op)
  ```

  To use SyncReplicasOptimizer with an `Estimator`, you need to send
  sync_replicas_hook while calling the fit.

  ```python
  my_estimator = DNNClassifier(..., optimizer=opt)
  my_estimator.fit(..., hooks=[sync_replicas_hook])
  ```
  """

  def __init__(self,
               opt,
               replicas_to_aggregate,
               total_num_replicas=None,
               variable_averages=None,
               variables_to_average=None,
               use_locking=False,
               name="sync_replicas"):
    """Construct a sync_replicas optimizer.

    Args:
      opt: The actual optimizer that will be used to compute and apply the
        gradients. Must be one of the Optimizer classes.
      replicas_to_aggregate: number of replicas to aggregate for each variable
        update.
      total_num_replicas: Total number of tasks/workers/replicas, could be
        different from replicas_to_aggregate.
        If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
        replicas_to_aggregate.
        If total_num_replicas < replicas_to_aggregate: Replicas compute
        multiple batches per update to variables.
      variable_averages: Optional `ExponentialMovingAverage` object, used to
        maintain moving averages for the variables passed in
        `variables_to_average`.
      variables_to_average: a list of variables that need to be averaged. Only
        needed if variable_averages is passed in.
      use_locking: If True use locks for update operation.
      name: string. Optional name of the returned operation.
    """
    if total_num_replicas is None:
      total_num_replicas = replicas_to_aggregate

    super(SyncReplicasOptimizer, self).__init__(use_locking, name)
    logging.info(
        "SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
        replicas_to_aggregate, total_num_replicas)
    self._opt = opt
    self._replicas_to_aggregate = replicas_to_aggregate
    # Flipped to True at the end of apply_gradients(); guards the accessors
    # that only make sense once the sync graph has been built.
    self._gradients_applied = False
    self._variable_averages = variable_averages
    self._variables_to_average = variables_to_average
    self._total_num_replicas = total_num_replicas
    # One token per worker per step; with backup replicas
    # (total_num_replicas > replicas_to_aggregate) every worker still gets one.
    self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
    self._global_step = None
    self._sync_token_queue = None

    # The synchronization op will be executed in a queue runner which should
    # only be executed by one of the replicas (usually the chief).
    self._chief_queue_runner = None

    # Remember which accumulator is on which device to set the initial step in
    # the accumulator to be global step. This list contains list of the
    # following format: (accumulator, device).
    self._accumulator_list = []

  def compute_gradients(self, *args, **kwargs):
    """Compute gradients of "loss" for the variables in "var_list".

    This simply wraps the compute_gradients() from the real optimizer. The
    gradients will be aggregated in the apply_gradients() so that user can
    modify the gradients like clipping with per replica global norm if needed.
    The global norm with aggregated gradients can be bad as one replica's huge
    gradients can hurt the gradients from other replicas.

    Args:
      *args: Arguments for compute_gradients().
      **kwargs: Keyword arguments for compute_gradients().

    Returns:
      A list of (gradient, variable) pairs.
    """
    return self._opt.compute_gradients(*args, **kwargs)

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.

    This contains most of the synchronization implementation and also wraps the
    apply_gradients() from the real optimizer.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.  Default to the
        name passed to the Optimizer constructor.

    Returns:
      train_op: The op to dequeue a token so the replicas can exit this batch
      and start the next one. This is executed by each replica.

    Raises:
      ValueError: If the grads_and_vars is empty.
      ValueError: If global step is not provided, the staleness cannot be
        checked.
    """
    if not grads_and_vars:
      raise ValueError("Must supply at least one variable")

    if global_step is None:
      raise ValueError("Global step is required to check staleness")

    self._global_step = global_step
    train_ops = []
    aggregated_grad = []
    var_list = []

    # local_anchor op will be placed on this worker task by default.
    local_anchor = control_flow_ops.no_op()
    # Colocating local_step variable prevents it being placed on the PS.
    with ops.colocate_with(local_anchor):
      self._local_step = variable_scope.variable(
          initial_value=0,
          trainable=False,
          collections=[ops.GraphKeys.LOCAL_VARIABLES],
          dtype=global_step.dtype.base_dtype,
          name="sync_rep_local_step")

    self.local_step_init_op = state_ops.assign(self._local_step, global_step)
    chief_init_ops = [self.local_step_init_op]
    self.ready_for_local_init_op = variables.report_uninitialized_variables(
        variables.global_variables())

    with ops.name_scope(None, self._name):
      for grad, var in grads_and_vars:
        var_list.append(var)
        with ops.device(var.device):
          # Dense gradients.
          if grad is None:
            aggregated_grad.append(None)  # pass-through.
            continue
          elif isinstance(grad, ops.Tensor):
            grad_accum = data_flow_ops.ConditionalAccumulator(
                grad.dtype,
                shape=var.get_shape(),
                shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_grad(
                self._replicas_to_aggregate))
          else:
            if not isinstance(grad, ops.IndexedSlices):
              raise ValueError("Unknown grad type!")
            # Sparse gradients go through the sparse accumulator variant.
            grad_accum = data_flow_ops.SparseConditionalAccumulator(
                grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
            train_ops.append(grad_accum.apply_indexed_slices_grad(
                grad, local_step=self._local_step))
            aggregated_grad.append(grad_accum.take_indexed_slices_grad(
                self._replicas_to_aggregate))

          self._accumulator_list.append((grad_accum, var.device))

      aggregated_grads_and_vars = zip(aggregated_grad, var_list)

      # sync_op will be assigned to the same device as the global step.
      with ops.device(global_step.device), ops.name_scope(""):
        update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
                                              global_step)

      # Create token queue.
      with ops.device(global_step.device), ops.name_scope(""):
        sync_token_queue = (
            data_flow_ops.FIFOQueue(-1,
                                    global_step.dtype.base_dtype,
                                    shapes=(),
                                    name="sync_token_q",
                                    shared_name="sync_token_q"))
        self._sync_token_queue = sync_token_queue

        # dummy_queue is passed to the queue runner. Don't use the real queues
        # because the queue runner doesn't automatically reopen it once it
        # closed queues in PS devices.
        dummy_queue = (
            data_flow_ops.FIFOQueue(1,
                                    types_pb2.DT_INT32,
                                    shapes=(),
                                    name="dummy_queue",
                                    shared_name="dummy_queue"))

      with ops.device(global_step.device), ops.name_scope(""):
        # Replicas have to wait until they can get a token from the token queue.
        with ops.control_dependencies(train_ops):
          token = sync_token_queue.dequeue()
        train_op = state_ops.assign(self._local_step, token)

        with ops.control_dependencies([update_op]):
          # Sync_op needs to insert tokens to the token queue at the end of the
          # step so the replicas can fetch them to start the next step.
          tokens = array_ops.fill([self._tokens_per_step], global_step)
          sync_op = sync_token_queue.enqueue_many((tokens,))

        # Moving averages (if requested) are updated only after the tokens for
        # the next step have been enqueued.
        if self._variable_averages is not None:
          with ops.control_dependencies([sync_op]), ops.name_scope(""):
            sync_op = self._variable_averages.apply(
                self._variables_to_average)

        self._chief_queue_runner = queue_runner.QueueRunner(dummy_queue,
                                                            [sync_op])
      for accum, dev in self._accumulator_list:
        with ops.device(dev):
          chief_init_ops.append(
              accum.set_global_step(
                  global_step, name="SetGlobalStep"))
      self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
      self._gradients_applied = True
      return train_op

  def get_chief_queue_runner(self):
    """Returns the QueueRunner for the chief to execute.

    This includes the operations to synchronize replicas: aggregate gradients,
    apply to variables, increment global step, insert tokens to token queue.

    Note that this can only be called after calling apply_gradients() which
    actually generates this queuerunner.

    Returns:
      A `QueueRunner` for chief to execute.

    Raises:
      ValueError: If this is called before apply_gradients().
    """
    if self._gradients_applied is False:
      raise ValueError("Should be called after apply_gradients().")

    return self._chief_queue_runner

  def get_slot(self, *args, **kwargs):
    """Return a slot named "name" created for "var" by the Optimizer.

    This simply wraps the get_slot() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    return self._opt.get_slot(*args, **kwargs)

  def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps the get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs)

  def get_init_tokens_op(self, num_tokens=-1):
    """Returns the op to fill the sync_token_queue with the tokens.

    This is supposed to be executed in the beginning of the chief/sync thread
    so that even if the total_num_replicas is less than replicas_to_aggregate,
    the model can still proceed as the replicas can compute multiple steps per
    variable update. Make sure:
    `num_tokens >= replicas_to_aggregate - total_num_replicas`.

    Args:
      num_tokens: Number of tokens to add to the queue.

    Returns:
      An op for the chief/sync replica to fill the token queue.

    Raises:
      ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens are smaller than replicas_to_aggregate -
        total_num_replicas.
    """
    if self._gradients_applied is False:
      raise ValueError(
          "get_init_tokens_op() should be called after apply_gradients().")

    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
    if num_tokens == -1:
      # Default: one token per aggregated replica.
      num_tokens = self._replicas_to_aggregate
    elif num_tokens < tokens_needed:
      raise ValueError(
          "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
          (num_tokens, tokens_needed))

    if num_tokens > 0:
      with ops.device(self._global_step.device), ops.name_scope(""):
        tokens = array_ops.fill([num_tokens], self._global_step)
        init_tokens = self._sync_token_queue.enqueue_many((tokens,))
    else:
      init_tokens = control_flow_ops.no_op(name="no_init_tokens")

    return init_tokens

  def make_session_run_hook(self, is_chief, num_tokens=-1):
    """Creates a hook to handle SyncReplicasHook ops such as initialization."""
    return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook):
  """A SessionRunHook handles ops related to SyncReplicasOptimizer."""

  def __init__(self, sync_optimizer, is_chief, num_tokens):
    """Creates hook to handle SyncReplicaOptimizer initialization ops.

    Args:
      sync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether is this a chief replica or not.
      num_tokens: Number of tokens to add to the queue.
    """
    self._sync_optimizer = sync_optimizer
    self._is_chief = is_chief
    self._num_tokens = num_tokens

  def begin(self):
    # apply_gradients() builds the init ops, queue runner and token queue that
    # this hook wires up, so it must have been called first.
    if self._sync_optimizer._gradients_applied is False:  # pylint: disable=protected-access
      raise ValueError(
          "SyncReplicasOptimizer.apply_gradient should be called before using "
          "the hook.")
    if self._is_chief:
      self._local_init_op = self._sync_optimizer.chief_init_op
      self._ready_for_local_init_op = (
          self._sync_optimizer.ready_for_local_init_op)
      self._q_runner = self._sync_optimizer.get_chief_queue_runner()
      self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
          self._num_tokens)
    else:
      # Non-chief replicas only sync their local step; the chief owns the
      # queue runner and the token-queue initialization.
      self._local_init_op = self._sync_optimizer.local_step_init_op
      self._ready_for_local_init_op = (
          self._sync_optimizer.ready_for_local_init_op)
      self._q_runner = None
      self._init_tokens_op = None

  def after_create_session(self, session, coord):
    """Runs SyncReplicasOptimizer initialization ops."""
    # Verify the globals the local init op depends on are initialized before
    # running it; otherwise fail loudly with the reported reason.
    local_init_success, msg = session_manager._ready(  # pylint: disable=protected-access
        self._ready_for_local_init_op, session,
        "Model is not ready for SyncReplicasOptimizer local init.")
    if not local_init_success:
      raise RuntimeError(
          "Init operations did not make model ready for SyncReplicasOptimizer "
          "local_init. Init op: %s, error: %s" %
          (self._local_init_op.name, msg))
    session.run(self._local_init_op)
    if self._init_tokens_op is not None:
      session.run(self._init_tokens_op)
    if self._q_runner is not None:
      self._q_runner.create_threads(
          session, coord=coord, daemon=True, start=True)
| mit |
tuxfux-hlp-notes/python-batches | archieves/batch-59/files/myvenv/lib/python2.7/site-packages/bs4/tests/test_html5lib.py | 40 | 4908 | """Tests to ensure that the html5lib tree builder generates good trees."""
import warnings
try:
from bs4.builder import HTML5TreeBuilder
HTML5LIB_PRESENT = True
except ImportError, e:
HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from bs4.testing import (
HTML5TreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
    not HTML5LIB_PRESENT,
    "html5lib seems not to be present, not testing its tree builder.")
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
    """See ``HTML5TreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        # Every test in this class parses with a fresh html5lib-backed builder.
        return HTML5TreeBuilder()

    def test_soupstrainer(self):
        # The html5lib tree builder does not support SoupStrainers.
        strainer = SoupStrainer("b")
        markup = "<p>A <b>bold</b> statement.</p>"
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(markup, parse_only=strainer)
        # The strainer is ignored: the full document is parsed anyway...
        self.assertEqual(
            soup.decode(), self.document_for(markup))
        # ...and a warning tells the user why.
        self.assertTrue(
            "the html5lib tree builder doesn't support parse_only" in
            str(w[0].message))

    def test_correctly_nested_tables(self):
        """html5lib inserts <tbody> tags where other parsers don't."""
        markup = ('<table id="1">'
                  '<tr>'
                  "<td>Here's another table:"
                  '<table id="2">'
                  '<tr><td>foo</td></tr>'
                  '</table></td>')

        self.assertSoupEquals(
            markup,
            '<table id="1"><tbody><tr><td>Here\'s another table:'
            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
            '</td></tr></tbody></table>')

        self.assertSoupEquals(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")

    def test_xml_declaration_followed_by_doctype(self):
        markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
  <head>
  </head>
  <body>
   <p>foo</p>
  </body>
</html>'''
        soup = self.soup(markup)
        # Verify that we can reach the <p> tag; this means the tree is connected.
        self.assertEqual(b"<p>foo</p>", soup.p.encode())

    def test_reparented_markup(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
        soup = self.soup(markup)
        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
        self.assertEqual(2, len(soup.find_all('p')))

    def test_reparented_markup_ends_with_whitespace(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
        soup = self.soup(markup)
        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
        self.assertEqual(2, len(soup.find_all('p')))

    def test_reparented_markup_containing_identical_whitespace_nodes(self):
        """Verify that we keep the two whitespace nodes in this
        document distinct when reparenting the adjacent <tbody> tags.
        """
        markup = '<table> <tbody><tbody><ims></tbody> </table>'
        soup = self.soup(markup)
        space1, space2 = soup.find_all(string=' ')
        tbody1, tbody2 = soup.find_all('tbody')
        assert space1.next_element is tbody1
        assert tbody2.next_element is space2

    def test_reparented_markup_containing_children(self):
        markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
        soup = self.soup(markup)
        noscript = soup.noscript
        self.assertEqual("target", noscript.next_element)
        target = soup.find(string='target')

        # The 'aftermath' string was duplicated; we want the second one.
        final_aftermath = soup.find_all(string='aftermath')[-1]

        # The <noscript> tag was moved beneath a copy of the <a> tag,
        # but the 'target' string within is still connected to the
        # (second) 'aftermath' string.
        self.assertEqual(final_aftermath, target.next_element)
        self.assertEqual(target, final_aftermath.previous_element)

    def test_processing_instruction(self):
        """Processing instructions become comments."""
        markup = b"""<?PITarget PIContent?>"""
        soup = self.soup(markup)
        assert str(soup).startswith("<!--?PITarget PIContent?-->")

    def test_cloned_multivalue_node(self):
        markup = b"""<a class="my_class"><p></a>"""
        soup = self.soup(markup)
        a1, a2 = soup.find_all('a')
        # Equal by value (same attributes) but distinct tag objects.
        self.assertEqual(a1, a2)
        assert a1 is not a2

    def test_foster_parenting(self):
        markup = b"""<table><td></tbody>A"""
        soup = self.soup(markup)
        self.assertEqual(u"<body>A<table><tbody><tr><td></td></tr></tbody></table></body>", soup.body.decode())
| gpl-3.0 |
bk2204/urwid | urwid/vterm.py | 7 | 50199 | #!/usr/bin/python
#
# Urwid terminal emulation widget
# Copyright (C) 2010 aszlig
# Copyright (C) 2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
import os
import sys
import pty
import time
import copy
import fcntl
import errno
import select
import struct
import signal
import atexit
import termios
import traceback
from urwid import util
from urwid.escape import DEC_SPECIAL_CHARS, ALT_DEC_SPECIAL_CHARS
from urwid.canvas import Canvas
from urwid.widget import Widget, BOX
from urwid.display_common import AttrSpec, RealTerminal, _BASIC_COLORS
from urwid.compat import ord2, chr2, B, bytes, PYTHON3
ESC = chr(27)

# Escape sequences sent to the child process for urwid key names
# (normal cursor-key mode).
KEY_TRANSLATIONS = {
    'enter': chr(13),
    'backspace': chr(127),
    'tab': chr(9),
    'esc': ESC,
    'up': ESC + '[A',
    'down': ESC + '[B',
    'right': ESC + '[C',
    'left': ESC + '[D',
    'home': ESC + '[1~',
    'insert': ESC + '[2~',
    'delete': ESC + '[3~',
    'end': ESC + '[4~',
    'page up': ESC + '[5~',
    'page down': ESC + '[6~',

    'f1': ESC + '[[A',
    'f2': ESC + '[[B',
    'f3': ESC + '[[C',
    'f4': ESC + '[[D',
    'f5': ESC + '[[E',
    'f6': ESC + '[17~',
    'f7': ESC + '[18~',
    'f8': ESC + '[19~',
    'f9': ESC + '[20~',
    'f10': ESC + '[21~',
    'f11': ESC + '[23~',
    'f12': ESC + '[24~',
}

# Overriding translations — presumably consulted when DECCKM (cursor key
# application mode, TermModes.keys_decckm) is active; confirm against the
# key-handling code.
KEY_TRANSLATIONS_DECCKM = {
    'up': ESC + 'OA',
    'down': ESC + 'OB',
    'right': ESC + 'OC',
    'left': ESC + 'OD',
    'f1': ESC + 'OP',
    'f2': ESC + 'OQ',
    'f3': ESC + 'OR',
    'f4': ESC + 'OS',
    'f5': ESC + '[15~',
}

CSI_COMMANDS = {
    # possible values:
    #     None -> ignore sequence
    #     (<minimum number of args>, <fallback if no argument>, callback)
    #     ('alias', <symbol>)
    #
    # while callback is executed as:
    #     callback(<instance of TermCanvas>, arguments, has_question_mark)

    B('@'): (1, 1, lambda s, number, q: s.insert_chars(chars=number[0])),
    B('A'): (1, 1, lambda s, rows, q: s.move_cursor(0, -rows[0], relative=True)),
    B('B'): (1, 1, lambda s, rows, q: s.move_cursor(0, rows[0], relative=True)),
    B('C'): (1, 1, lambda s, cols, q: s.move_cursor(cols[0], 0, relative=True)),
    B('D'): (1, 1, lambda s, cols, q: s.move_cursor(-cols[0], 0, relative=True)),
    B('E'): (1, 1, lambda s, rows, q: s.move_cursor(0, rows[0], relative_y=True)),
    B('F'): (1, 1, lambda s, rows, q: s.move_cursor(0, -rows[0], relative_y=True)),
    B('G'): (1, 1, lambda s, col, q: s.move_cursor(col[0] - 1, 0, relative_y=True)),
    B('H'): (2, 1, lambda s, x_y, q: s.move_cursor(x_y[1] - 1, x_y[0] - 1)),
    B('J'): (1, 0, lambda s, mode, q: s.csi_erase_display(mode[0])),
    B('K'): (1, 0, lambda s, mode, q: s.csi_erase_line(mode[0])),
    B('L'): (1, 1, lambda s, number, q: s.insert_lines(lines=number[0])),
    B('M'): (1, 1, lambda s, number, q: s.remove_lines(lines=number[0])),
    B('P'): (1, 1, lambda s, number, q: s.remove_chars(chars=number[0])),
    B('X'): (1, 1, lambda s, number, q: s.erase(s.term_cursor,
                                                (s.term_cursor[0]+number[0] - 1,
                                                 s.term_cursor[1]))),
    B('a'): ('alias', B('C')),
    B('c'): (0, 0, lambda s, none, q: s.csi_get_device_attributes(q)),
    B('d'): (1, 1, lambda s, row, q: s.move_cursor(0, row[0] - 1, relative_x=True)),
    B('e'): ('alias', B('B')),
    B('f'): ('alias', B('H')),
    B('g'): (1, 0, lambda s, mode, q: s.csi_clear_tabstop(mode[0])),
    B('h'): (1, 0, lambda s, modes, q: s.csi_set_modes(modes, q)),
    B('l'): (1, 0, lambda s, modes, q: s.csi_set_modes(modes, q, reset=True)),
    B('m'): (1, 0, lambda s, attrs, q: s.csi_set_attr(attrs)),
    B('n'): (1, 0, lambda s, mode, q: s.csi_status_report(mode[0])),
    B('q'): (1, 0, lambda s, mode, q: s.csi_set_keyboard_leds(mode[0])),
    B('r'): (2, 0, lambda s, t_b, q: s.csi_set_scroll(t_b[0], t_b[1])),
    B('s'): (0, 0, lambda s, none, q: s.save_cursor()),
    B('u'): (0, 0, lambda s, none, q: s.restore_cursor()),
    B('`'): ('alias', B('G')),
}

# Values for TermModes.main_charset.
CHARSET_DEFAULT = 1
CHARSET_UTF8 = 2
class TermModes(object):
    """Bag of terminal mode flags (ECMA-48 and DEC private modes)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Restore every mode flag to its power-on default."""
        self.__dict__.update({
            # ECMA-48
            'display_ctrl': False,
            'insert': False,
            'lfnl': False,
            # DEC private modes
            'keys_decckm': False,
            'reverse_video': False,
            'constrain_scrolling': False,
            'autowrap': True,
            'visible_cursor': True,
            # charset stuff
            'main_charset': CHARSET_DEFAULT,
        })
class TermCharset(object):
    """Tracks the G0/G1 character-set slots of the emulated terminal."""

    # charset name -> one-character canvas charset code (None = plain)
    MAPPING = {
        'default': None,
        'vt100': '0',
        'ibmpc': 'U',
        'user': None,
    }

    def __init__(self):
        # slot 0 starts as 'default', slot 1 as the VT100 drawing set
        self._g = ['default', 'vt100']
        self._sgr_mapping = False
        self.activate(0)

    def define(self, g, charset):
        """
        Redefine G'g' with new mapping.
        """
        self._g[g] = charset
        self.activate(g=self.active)

    def activate(self, g):
        """
        Activate the given charset slot.
        """
        self.active = g
        self.current = self.MAPPING.get(self._g[g], None)

    def set_sgr_ibmpc(self):
        """
        Set graphics rendition mapping to IBM PC CP437.
        """
        self._sgr_mapping = True

    def reset_sgr_ibmpc(self):
        """
        Reset graphics rendition mapping to IBM PC CP437.
        """
        self._sgr_mapping = False
        self.activate(g=self.active)

    def apply_mapping(self, char):
        """Map `char` through the active charset, updating `self.current`."""
        # Outside SGR/IBM-PC mode the byte passes through untouched.
        if not (self._sgr_mapping or self._g[self.active] == 'ibmpc'):
            return char
        dec_pos = DEC_SPECIAL_CHARS.find(char.decode('cp437'))
        if dec_pos < 0:
            self.current = 'U'
            return char
        self.current = '0'
        return str(ALT_DEC_SPECIAL_CHARS[dec_pos])
class TermScroller(list):
"""
List subclass that handles the terminal scrollback buffer,
truncating it as necessary.
"""
SCROLLBACK_LINES = 10000
def trunc(self):
if len(self) >= self.SCROLLBACK_LINES:
self.pop(0)
def append(self, obj):
self.trunc()
super(TermScroller, self).append(obj)
def insert(self, idx, obj):
self.trunc()
super(TermScroller, self).insert(idx, obj)
def extend(self, seq):
self.trunc()
super(TermScroller, self).extend(seq)
class TermCanvas(Canvas):
cacheable = False
    def __init__(self, width, height, widget):
        Canvas.__init__(self)
        self.width, self.height = width, height
        self.widget = widget
        # Mode flags are shared with (and owned by) the widget.
        self.modes = widget.term_modes

        # Lines pushed off the top live here; scrolling_up is the current
        # view offset into that buffer (0 = live screen).
        self.scrollback_buffer = TermScroller()
        self.scrolling_up = 0

        # UTF-8 decode state — presumably the count of continuation bytes
        # still expected and the bytes gathered so far (used by the byte
        # feeder, not visible here — confirm in addbyte()).
        self.utf8_eat_bytes = None
        self.utf8_buffer = bytes()

        self.coords["cursor"] = (0, 0, None)

        # reset() builds self.term and all parser/charset state.
        self.reset()
    def set_term_cursor(self, x=None, y=None):
        """
        Set terminal cursor to x/y and update canvas cursor. If one or both axes
        are omitted, use the values of the current position.
        """
        if x is None:
            x = self.term_cursor[0]
        if y is None:
            y = self.term_cursor[1]

        self.term_cursor = self.constrain_coords(x, y)

        # Only show the canvas cursor if it is enabled and not scrolled out
        # of view by the current scrollback offset.
        if self.modes.visible_cursor and self.scrolling_up < self.height - y:
            self.cursor = (x, y + self.scrolling_up)
        else:
            self.cursor = None
def reset_scroll(self):
"""
Reset scrolling region to full terminal size.
"""
self.scrollregion_start = 0
self.scrollregion_end = self.height - 1
def scroll_buffer(self, up=True, reset=False, lines=None):
"""
Scroll the scrolling buffer up (up=True) or down (up=False) the given
amount of lines or half the screen height.
If just 'reset' is True, set the scrollbuffer view to the current
terminal content.
"""
if reset:
self.scrolling_up = 0
self.set_term_cursor()
return
if lines is None:
lines = self.height // 2
if not up:
lines = -lines
maxscroll = len(self.scrollback_buffer)
self.scrolling_up += lines
if self.scrolling_up > maxscroll:
self.scrolling_up = maxscroll
elif self.scrolling_up < 0:
self.scrolling_up = 0
self.set_term_cursor()
    def reset(self):
        """
        Reset the terminal.
        """
        # escape-sequence parser state
        self.escbuf = bytes()
        self.within_escape = False
        self.parsestate = 0

        # rendering attributes and character sets
        self.attrspec = None
        self.charset = TermCharset()

        # state saved/restored by save_cursor()/restore_cursor()
        self.saved_cursor = None
        self.saved_attrs = None

        self.is_rotten_cursor = False

        self.reset_scroll()

        self.init_tabstops()

        # terminal modes
        self.modes.reset()

        # initialize self.term
        self.clear()
def init_tabstops(self, extend=False):
tablen, mod = divmod(self.width, 8)
if mod > 0:
tablen += 1
if extend:
while len(self.tabstops) < tablen:
self.tabstops.append(1 << 0)
else:
self.tabstops = [1 << 0] * tablen
def set_tabstop(self, x=None, remove=False, clear=False):
if clear:
for tab in xrange(len(self.tabstops)):
self.tabstops[tab] = 0
return
if x is None:
x = self.term_cursor[0]
div, mod = divmod(x, 8)
if remove:
self.tabstops[div] &= ~(1 << mod)
else:
self.tabstops[div] |= (1 << mod)
def is_tabstop(self, x=None):
if x is None:
x = self.term_cursor[0]
div, mod = divmod(x, 8)
return (self.tabstops[div] & (1 << mod)) > 0
    def empty_line(self, char=B(' ')):
        """Return a full-width row of cells filled with `char`."""
        return [self.empty_char(char)] * self.width

    def empty_char(self, char=B(' ')):
        """Return one cell carrying the current attributes and charset."""
        return (self.attrspec, self.charset.current, char)
    def addstr(self, data):
        """Feed a byte string to the terminal, one byte at a time."""
        if self.width <= 0 or self.height <= 0:
            # not displayable, do nothing!
            return

        for byte in data:
            self.addbyte(ord2(byte))
    def resize(self, width, height):
        """
        Resize the terminal to the given width and height.

        Growing vertically pulls lines back out of the scrollback buffer;
        shrinking vertically pushes the topmost lines into it. Horizontal
        changes pad or truncate each row in place.
        """
        x, y = self.term_cursor

        if width > self.width:
            # grow
            for y in xrange(self.height):
                self.term[y] += [self.empty_char()] * (width - self.width)
        elif width < self.width:
            # shrink
            for y in xrange(self.height):
                self.term[y] = self.term[y][:width]

        self.width = width

        if height > self.height:
            # grow
            for y in xrange(self.height, height):
                try:
                    last_line = self.scrollback_buffer.pop()
                except IndexError:
                    # nothing in scrollback buffer, append an empty line
                    self.term.append(self.empty_line())
                    self.scrollregion_end += 1
                    continue

                # adjust x axis of scrollback buffer to the current width
                if len(last_line) < self.width:
                    last_line += [self.empty_char()] * \
                                 (self.width - len(last_line))
                else:
                    last_line = last_line[:self.width]

                y += 1

                self.term.insert(0, last_line)
        elif height < self.height:
            # shrink
            for y in xrange(height, self.height):
                self.scrollback_buffer.append(self.term.pop(0))

        self.height = height

        self.reset_scroll()

        # keep the cursor inside the new bounds
        x, y = self.constrain_coords(x, y)
        self.set_term_cursor(x, y)

        # extend tabs
        self.init_tabstops(extend=True)
def set_g01(self, char, mod):
"""
Set G0 or G1 according to 'char' and modifier 'mod'.
"""
if self.modes.main_charset != CHARSET_DEFAULT:
return
if mod == B('('):
g = 0
else:
g = 1
if char == B('0'):
cset = 'vt100'
elif char == B('U'):
cset = 'ibmpc'
elif char == B('K'):
cset = 'user'
else:
cset = 'default'
self.charset.define(g, cset)
    def parse_csi(self, char):
        """
        Parse ECMA-48 CSI (Control Sequence Introducer) sequences.
        """
        # a leading '?' marks DEC private sequences
        qmark = self.escbuf.startswith(B('?'))

        escbuf = []
        # arguments are semicolon-separated decimal numbers; anything
        # unparsable becomes None and is replaced by the default below
        for arg in self.escbuf[qmark and 1 or 0:].split(B(';')):
            try:
                num = int(arg)
            except ValueError:
                num = None

            escbuf.append(num)

        if CSI_COMMANDS[char] is not None:
            if CSI_COMMANDS[char][0] == 'alias':
                csi_cmd = CSI_COMMANDS[(CSI_COMMANDS[char][1])]
            else:
                csi_cmd = CSI_COMMANDS[char]

            number_of_args, default_value, cmd = csi_cmd
            # pad missing args and substitute the default for None/0
            while len(escbuf) < number_of_args:
                escbuf.append(default_value)
            for i in xrange(len(escbuf)):
                if escbuf[i] is None or escbuf[i] == 0:
                    escbuf[i] = default_value

            try:
                cmd(self, escbuf, qmark)
            except ValueError:
                # ignore commands that don't match the
                # unpacked tuples in CSI_COMMANDS.
                pass
    def parse_noncsi(self, char, mod=None):
        """
        Parse escape sequences which are not CSI.

        'mod' is the intermediate byte ('#', '%', '(' or ')') collected
        before the final byte 'char', if any.
        """
        if mod == B('#') and char == B('8'):
            self.decaln()
        elif mod == B('%'): # select main character set
            if char == B('@'):
                self.modes.main_charset = CHARSET_DEFAULT
            elif char in B('G8'):
                # 8 is obsolete and only for backwards compatibility
                self.modes.main_charset = CHARSET_UTF8
        elif mod == B('(') or mod == B(')'): # define G0/G1
            self.set_g01(char, mod)
        elif char == B('M'): # reverse line feed
            self.linefeed(reverse=True)
        elif char == B('D'): # line feed
            self.linefeed()
        elif char == B('c'): # reset terminal
            self.reset()
        elif char == B('E'): # newline
            self.newline()
        elif char == B('H'): # set tabstop
            self.set_tabstop()
        elif char == B('Z'): # DECID: identify ourselves as a VT102
            self.widget.respond(ESC + '[?6c')
        elif char == B('7'): # save current state
            self.save_cursor(with_attrs=True)
        elif char == B('8'): # restore current state
            self.restore_cursor(with_attrs=True)
def parse_osc(self, buf):
"""
Parse operating system command.
"""
if buf.startswith(B(';')): # set window title and icon
self.widget.set_title(buf[1:])
elif buf.startswith(B('3;')): # set window title
self.widget.set_title(buf[2:])
    def parse_escape(self, char):
        """
        Drive the escape-sequence state machine with one byte.

        parsestate meanings: 0 = just saw ESC, 1 = collecting a CSI
        sequence, 2 = collecting an OSC payload, 3 = collecting a
        non-CSI sequence. Any branch that does not 'return' falls
        through to leave_escape(), ending the sequence.
        """
        if self.parsestate == 1:
            # within CSI
            if char in CSI_COMMANDS.keys():
                self.parse_csi(char)
                self.parsestate = 0
            elif char in B('0123456789;') or (not self.escbuf and char == B('?')):
                self.escbuf += char
                return
        elif self.parsestate == 0 and char == B(']'):
            # start of OSC
            self.escbuf = bytes()
            self.parsestate = 2
            return
        elif self.parsestate == 2 and char == B("\x07"):
            # end of OSC (BEL terminator)
            self.parse_osc(self.escbuf.lstrip(B('0')))
        elif self.parsestate == 2 and self.escbuf[-1:] + char == B(ESC + '\\'):
            # end of OSC (ESC \ string terminator)
            self.parse_osc(self.escbuf[:-1].lstrip(B('0')))
        elif self.parsestate == 2 and self.escbuf.startswith(B('P')) and \
             len(self.escbuf) == 8:
            # set palette (ESC]Pnrrggbb) -- not implemented
            pass
        elif self.parsestate == 2 and not self.escbuf and char == B('R'):
            # reset palette -- not implemented
            pass
        elif self.parsestate == 2:
            self.escbuf += char
            return
        elif self.parsestate == 0 and char == B('['):
            # start of CSI
            self.escbuf = bytes()
            self.parsestate = 1
            return
        elif self.parsestate == 0 and char in (B('%'), B('#'), B('('), B(')')):
            # non-CSI sequence; remember the intermediate byte
            self.escbuf = char
            self.parsestate = 3
            return
        elif self.parsestate == 3:
            self.parse_noncsi(char, self.escbuf)
        elif char in (B('c'), B('D'), B('E'), B('H'), B('M'), B('Z'), B('7'), B('8'), B('>'), B('=')):
            self.parse_noncsi(char)
        self.leave_escape()
def leave_escape(self):
self.within_escape = False
self.parsestate = 0
self.escbuf = bytes()
def get_utf8_len(self, bytenum):
"""
Process startbyte and return the number of bytes following it to get a
valid UTF-8 multibyte sequence.
bytenum -- an integer ordinal
"""
length = 0
while bytenum & 0x40:
bytenum <<= 1
length += 1
return length
    def addbyte(self, byte):
        """
        Parse main charset and add the processed byte(s) to the terminal state
        machine.

        byte -- an integer ordinal

        In UTF-8 mode, multibyte sequences are accumulated in
        self.utf8_buffer until complete, then re-encoded to the target
        encoding; everything else is forwarded to process_char() as a
        single byte string.
        """
        if (self.modes.main_charset == CHARSET_UTF8 or
            util._target_encoding == 'utf8'):
            if byte >= 0xc0:
                # start multibyte sequence
                self.utf8_eat_bytes = self.get_utf8_len(byte)
                self.utf8_buffer = chr2(byte)
                return
            elif 0x80 <= byte < 0xc0 and self.utf8_eat_bytes is not None:
                if self.utf8_eat_bytes > 1:
                    # continue multibyte sequence
                    self.utf8_eat_bytes -= 1
                    self.utf8_buffer += chr2(byte)
                    return
                else:
                    # end multibyte sequence
                    self.utf8_eat_bytes = None
                    sequence = (self.utf8_buffer+chr2(byte)).decode('utf-8', 'ignore')
                    if len(sequence) == 0:
                        # invalid multibyte sequence, stop processing
                        return
                    char = sequence.encode(util._target_encoding, 'replace')
            else:
                # plain ASCII byte (or stray continuation byte): drop any
                # half-finished multibyte sequence
                self.utf8_eat_bytes = None
                char = chr2(byte)
        else:
            char = chr2(byte)
        self.process_char(char)
    def process_char(self, char):
        """
        Process a single character (single- and multi-byte).

        char -- a byte string

        'dc' (display_ctrl mode) makes most control characters render as
        glyphs instead of being interpreted.
        """
        x, y = self.term_cursor
        if isinstance(char, int):
            char = chr(char)
        dc = self.modes.display_ctrl
        if char == B("\x1b") and self.parsestate != 2: # escape
            self.within_escape = True
        elif not dc and char == B("\x0d"): # carriage return
            self.carriage_return()
        elif not dc and char == B("\x0f"): # activate G0
            self.charset.activate(0)
        elif not dc and char == B("\x0e"): # activate G1
            self.charset.activate(1)
        elif not dc and char in B("\x0a\x0b\x0c"): # line feed
            self.linefeed()
            if self.modes.lfnl:
                # LF/NL mode: line feed implies carriage return
                self.carriage_return()
        elif not dc and char == B("\x09"): # char tab
            self.tab()
        elif not dc and char == B("\x08"): # backspace
            if x > 0:
                self.set_term_cursor(x - 1, y)
        elif not dc and char == B("\x07") and self.parsestate != 2: # beep
            # we need to check if we're in parsestate 2, as an OSC can be
            # terminated by the BEL character!
            self.widget.beep()
        elif not dc and char in B("\x18\x1a"): # CAN/SUB
            self.leave_escape()
        elif not dc and char == B("\x7f"): # DEL
            pass # this is ignored
        elif self.within_escape:
            self.parse_escape(char)
        elif not dc and char == B("\x9b"): # CSI (equivalent to "ESC [")
            self.within_escape = True
            self.escbuf = bytes()
            self.parsestate = 1
        else:
            # printable character: write it and advance the cursor
            self.push_cursor(char)
def set_char(self, char, x=None, y=None):
"""
Set character of either the current cursor position
or a position given by 'x' and/or 'y' to 'char'.
"""
if x is None:
x = self.term_cursor[0]
if y is None:
y = self.term_cursor[1]
x, y = self.constrain_coords(x, y)
self.term[y][x] = (self.attrspec, self.charset.current, char)
def constrain_coords(self, x, y, ignore_scrolling=False):
"""
Checks if x/y are within the terminal and returns the corrected version.
If 'ignore_scrolling' is set, constrain within the full size of the
screen and not within scrolling region.
"""
if x >= self.width:
x = self.width - 1
elif x < 0:
x = 0
if self.modes.constrain_scrolling and not ignore_scrolling:
if y > self.scrollregion_end:
y = self.scrollregion_end
elif y < self.scrollregion_start:
y = self.scrollregion_start
else:
if y >= self.height:
y = self.height - 1
elif y < 0:
y = 0
return x, y
def linefeed(self, reverse=False):
"""
Move the cursor down (or up if reverse is True) one line but don't reset
horizontal position.
"""
x, y = self.term_cursor
if reverse:
if y <= 0 < self.scrollregion_start:
pass
elif y == self.scrollregion_start:
self.scroll(reverse=True)
else:
y -= 1
else:
if y >= self.height - 1 > self.scrollregion_end:
pass
elif y == self.scrollregion_end:
self.scroll()
else:
y += 1
self.set_term_cursor(x, y)
def carriage_return(self):
self.set_term_cursor(0, self.term_cursor[1])
    def newline(self):
        """
        Do a carriage return followed by a line feed (NEL behaviour:
        cursor moves to column 0 of the next line).
        """
        self.carriage_return()
        self.linefeed()
def move_cursor(self, x, y, relative_x=False, relative_y=False,
relative=False):
"""
Move cursor to position x/y while constraining terminal sizes.
If 'relative' is True, x/y is relative to the current cursor
position. 'relative_x' and 'relative_y' is the same but just with
the corresponding axis.
"""
if relative:
relative_y = relative_x = True
if relative_x:
x = self.term_cursor[0] + x
if relative_y:
y = self.term_cursor[1] + y
elif self.modes.constrain_scrolling:
y += self.scrollregion_start
self.set_term_cursor(x, y)
def push_char(self, char, x, y):
"""
Push one character to current position and advance cursor to x/y.
"""
if char is not None:
char = self.charset.apply_mapping(char)
if self.modes.insert:
self.insert_chars(char=char)
else:
self.set_char(char)
self.set_term_cursor(x, y)
    def push_cursor(self, char=None):
        """
        Move cursor one character forward wrapping lines as needed.
        If 'char' is given, put the character into the former position.
        """
        x, y = self.term_cursor
        if self.modes.autowrap:
            if x + 1 >= self.width and not self.is_rotten_cursor:
                # "rotten cursor" - this is when the cursor gets to the rightmost
                # position of the screen, the cursor position remains the same but
                # one last set_char() is allowed for that piece of sh^H^H"border".
                self.is_rotten_cursor = True
                self.push_char(char, x, y)
            else:
                x += 1
                if x >= self.width and self.is_rotten_cursor:
                    # deferred wrap: writing past a rotten cursor moves to
                    # the next line (scrolling if we are at the region end)
                    if y >= self.scrollregion_end:
                        self.scroll()
                    else:
                        y += 1
                    # the wrapped character lands in column 0, so the
                    # cursor ends up in column 1
                    x = 1
                    self.set_term_cursor(0, y)
                self.push_char(char, x, y)
                self.is_rotten_cursor = False
        else:
            # no autowrap: the cursor sticks at the last column
            if x + 1 < self.width:
                x += 1
            self.is_rotten_cursor = False
            self.push_char(char, x, y)
def save_cursor(self, with_attrs=False):
self.saved_cursor = tuple(self.term_cursor)
if with_attrs:
self.saved_attrs = (copy.copy(self.attrspec),
copy.copy(self.charset))
def restore_cursor(self, with_attrs=False):
if self.saved_cursor is None:
return
x, y = self.saved_cursor
self.set_term_cursor(x, y)
if with_attrs and self.saved_attrs is not None:
self.attrspec, self.charset = (copy.copy(self.saved_attrs[0]),
copy.copy(self.saved_attrs[1]))
    def tab(self, tabstop=8):
        """
        Moves cursor to the next 'tabstop' filling everything in between
        with spaces.
        """
        x, y = self.term_cursor
        while x < self.width - 1:
            # NOTE(review): set_char() with no coordinates writes at the
            # (unmoved) cursor cell, so each iteration blanks the same
            # cell rather than the cells being skipped over -- confirm
            # against upstream urwid whether this is intended.
            self.set_char(B(" "))
            x += 1
            if self.is_tabstop(x):
                break
        self.is_rotten_cursor = False
        self.set_term_cursor(x, y)
def scroll(self, reverse=False):
"""
Append a new line at the bottom and put the topmost line into the
scrollback buffer.
If reverse is True, do exactly the opposite, but don't save into
scrollback buffer.
"""
if reverse:
self.term.pop(self.scrollregion_end)
self.term.insert(self.scrollregion_start, self.empty_line())
else:
killed = self.term.pop(self.scrollregion_start)
self.scrollback_buffer.append(killed)
self.term.insert(self.scrollregion_end, self.empty_line())
def decaln(self):
"""
DEC screen alignment test: Fill screen with E's.
"""
for row in xrange(self.height):
self.term[row] = self.empty_line('E')
def blank_line(self, row):
"""
Blank a single line at the specified row, without modifying other lines.
"""
self.term[row] = self.empty_line()
def insert_chars(self, position=None, chars=1, char=None):
"""
Insert 'chars' number of either empty characters - or those specified by
'char' - before 'position' (or the current position if not specified)
pushing subsequent characters of the line to the right without wrapping.
"""
if position is None:
position = self.term_cursor
if chars == 0:
chars = 1
if char is None:
char = self.empty_char()
else:
char = (self.attrspec, self.charset.current, char)
x, y = position
while chars > 0:
self.term[y].insert(x, char)
self.term[y].pop()
chars -= 1
def remove_chars(self, position=None, chars=1):
"""
Remove 'chars' number of empty characters from 'position' (or the current
position if not specified) pulling subsequent characters of the line to
the left without joining any subsequent lines.
"""
if position is None:
position = self.term_cursor
if chars == 0:
chars = 1
x, y = position
while chars > 0:
self.term[y].pop(x)
self.term[y].append(self.empty_char())
chars -= 1
    def insert_lines(self, row=None, lines=1):
        """
        Insert 'lines' of empty lines after the specified row, pushing all
        subsequent lines to the bottom. If no 'row' is specified, the current
        row is used.
        """
        # NOTE(review): a caller-supplied 'row' is discarded and replaced
        # by scrollregion_start, contradicting the docstring; this matches
        # the code as written -- confirm intent against upstream urwid.
        if row is None:
            row = self.term_cursor[1]
        else:
            row = self.scrollregion_start
        if lines == 0:
            lines = 1
        while lines > 0:
            self.term.insert(row, self.empty_line())
            # drop the last line of the scrolling region to keep its size
            self.term.pop(self.scrollregion_end)
            lines -= 1
    def remove_lines(self, row=None, lines=1):
        """
        Remove 'lines' number of lines at the specified row, pulling all
        subsequent lines to the top. If no 'row' is specified, the current row
        is used.
        """
        # NOTE(review): as in insert_lines, an explicit 'row' is replaced
        # by scrollregion_start -- confirm intent against upstream urwid.
        if row is None:
            row = self.term_cursor[1]
        else:
            row = self.scrollregion_start
        if lines == 0:
            lines = 1
        while lines > 0:
            self.term.pop(row)
            # append a fresh line at the region end to keep its size
            self.term.insert(self.scrollregion_end, self.empty_line())
            lines -= 1
def erase(self, start, end):
"""
Erase a region of the terminal. The 'start' tuple (x, y) defines the
starting position of the erase, while end (x, y) the last position.
For example if the terminal size is 4x3, start=(1, 1) and end=(1, 2)
would erase the following region:
....
.XXX
XX..
"""
sx, sy = self.constrain_coords(*start)
ex, ey = self.constrain_coords(*end)
# within a single row
if sy == ey:
for x in xrange(sx, ex + 1):
self.term[sy][x] = self.empty_char()
return
# spans multiple rows
y = sy
while y <= ey:
if y == sy:
for x in xrange(sx, self.width):
self.term[y][x] = self.empty_char()
elif y == ey:
for x in xrange(ex + 1):
self.term[y][x] = self.empty_char()
else:
self.blank_line(y)
y += 1
    def sgi_to_attrspec(self, attrs, fg, bg, attributes):
        """
        Parse SGI (SGR, Select Graphic Rendition) parameters and return
        an AttrSpec representing the sequence including all earlier
        sequences specified as 'fg', 'bg' and 'attributes'.

        'fg'/'bg' are 0-7 basic color indexes or None (default);
        'attributes' is a mutable set of urwid attribute names that is
        updated in place. Returns None when everything is default.
        """
        for attr in attrs:
            if 30 <= attr <= 37:
                fg = attr - 30
            elif 40 <= attr <= 47:
                bg = attr - 40
            elif attr == 38:
                # set default foreground color, set underline
                attributes.add('underline')
                fg = None
            elif attr == 39:
                # set default foreground color, remove underline
                attributes.discard('underline')
                fg = None
            elif attr == 49:
                # set default background color
                bg = None
            elif attr == 10:
                self.charset.reset_sgr_ibmpc()
                self.modes.display_ctrl = False
            elif attr in (11, 12):
                self.charset.set_sgr_ibmpc()
                self.modes.display_ctrl = True
            # set attributes
            elif attr == 1:
                attributes.add('bold')
            elif attr == 4:
                attributes.add('underline')
            elif attr == 5:
                attributes.add('blink')
            elif attr == 7:
                attributes.add('standout')
            # unset attributes
            elif attr == 24:
                attributes.discard('underline')
            elif attr == 25:
                attributes.discard('blink')
            elif attr == 27:
                attributes.discard('standout')
            elif attr == 0:
                # clear all attributes
                fg = bg = None
                attributes.clear()
        # bold foregrounds use the bright color variants (indexes 8-15)
        if 'bold' in attributes and fg is not None:
            fg += 8
        def _defaulter(color):
            # map a color index to its urwid name; None means 'default'
            if color is None:
                return 'default'
            else:
                return _BASIC_COLORS[color]
        fg = _defaulter(fg)
        bg = _defaulter(bg)
        if len(attributes) > 0:
            # attributes ride along in the foreground spec string
            fg = ','.join([fg] + list(attributes))
        if fg == 'default' and bg == 'default':
            return None
        else:
            return AttrSpec(fg, bg)
    def csi_set_attr(self, attrs):
        """
        Set graphics rendition (CSI ... m): fold the SGR parameters in
        'attrs' into self.attrspec, starting from the current attribute
        state.
        """
        # a trailing 0 resets everything before the remaining params apply
        if attrs[-1] == 0:
            self.attrspec = None
        attributes = set()
        if self.attrspec is None:
            fg = bg = None
        else:
            # set default values from previous attrspec
            if 'default' in self.attrspec.foreground:
                fg = None
            else:
                fg = self.attrspec.foreground_number
                # strip the "bright" offset; sgi_to_attrspec re-adds it for bold
                if fg >= 8: fg -= 8
            if 'default' in self.attrspec.background:
                bg = None
            else:
                bg = self.attrspec.background_number
                if bg >= 8: bg -= 8
            for attr in ('bold', 'underline', 'blink', 'standout'):
                if not getattr(self.attrspec, attr):
                    continue
                attributes.add(attr)
        attrspec = self.sgi_to_attrspec(attrs, fg, bg, attributes)
        # honor DECSCNM (reverse video) on the resulting spec
        if self.modes.reverse_video:
            self.attrspec = self.reverse_attrspec(attrspec)
        else:
            self.attrspec = attrspec
def reverse_attrspec(self, attrspec, undo=False):
"""
Put standout mode to the 'attrspec' given and remove it if 'undo' is
True.
"""
if attrspec is None:
attrspec = AttrSpec('default', 'default')
attrs = [fg.strip() for fg in attrspec.foreground.split(',')]
if 'standout' in attrs and undo:
attrs.remove('standout')
attrspec.foreground = ','.join(attrs)
elif 'standout' not in attrs and not undo:
attrs.append('standout')
attrspec.foreground = ','.join(attrs)
return attrspec
def reverse_video(self, undo=False):
"""
Reverse video/scanmode (DECSCNM) by swapping fg and bg colors.
"""
for y in xrange(self.height):
for x in xrange(self.width):
char = self.term[y][x]
attrs = self.reverse_attrspec(char[0], undo=undo)
self.term[y][x] = (attrs,) + char[1:]
    def set_mode(self, mode, flag, qmark, reset):
        """
        Helper method for csi_set_modes: set single mode.

        'qmark' selects the DEC private mode table, otherwise the
        ECMA-48 table is used; 'flag' is the new boolean value.
        """
        if qmark:
            # DEC private mode
            if mode == 1:
                # cursor keys send an ESC O prefix, rather than ESC [
                self.modes.keys_decckm = flag
            elif mode == 3:
                # deccolm just clears the screen
                self.clear()
            elif mode == 5:
                # DECSCNM: only repaint when the value actually changes
                if self.modes.reverse_video != flag:
                    self.reverse_video(undo=not flag)
                self.modes.reverse_video = flag
            elif mode == 6:
                # DECOM: origin mode; homing the cursor is mandated
                self.modes.constrain_scrolling = flag
                self.set_term_cursor(0, 0)
            elif mode == 7:
                self.modes.autowrap = flag
            elif mode == 25:
                self.modes.visible_cursor = flag
                self.set_term_cursor()
        else:
            # ECMA-48
            if mode == 3:
                self.modes.display_ctrl = flag
            elif mode == 4:
                self.modes.insert = flag
            elif mode == 20:
                self.modes.lfnl = flag
def csi_set_modes(self, modes, qmark, reset=False):
"""
Set (DECSET/ECMA-48) or reset modes (DECRST/ECMA-48) if reset is True.
"""
flag = not reset
for mode in modes:
self.set_mode(mode, flag, qmark, reset)
def csi_set_scroll(self, top=0, bottom=0):
"""
Set scrolling region, 'top' is the line number of first line in the
scrolling region. 'bottom' is the line number of bottom line. If both
are set to 0, the whole screen will be used (default).
"""
if top == 0:
top = 1
if bottom == 0:
bottom = self.height
if top < bottom <= self.height:
self.scrollregion_start = self.constrain_coords(
0, top - 1, ignore_scrolling=True
)[1]
self.scrollregion_end = self.constrain_coords(
0, bottom - 1, ignore_scrolling=True
)[1]
self.set_term_cursor(0, 0)
def csi_clear_tabstop(self, mode=0):
"""
Clear tabstop at current position or if 'mode' is 3, delete all
tabstops.
"""
if mode == 0:
self.set_tabstop(remove=True)
elif mode == 3:
self.set_tabstop(clear=True)
def csi_get_device_attributes(self, qmark):
"""
Report device attributes (what are you?). In our case, we'll report
ourself as a VT102 terminal.
"""
if not qmark:
self.widget.respond(ESC + '[?6c')
def csi_status_report(self, mode):
"""
Report various information about the terminal status.
Information is queried by 'mode', where possible values are:
5 -> device status report
6 -> cursor position report
"""
if mode == 5:
# terminal OK
self.widget.respond(ESC + '[0n')
elif mode == 6:
x, y = self.term_cursor
self.widget.respond(ESC + '[%d;%dR' % (y + 1, x + 1))
def csi_erase_line(self, mode):
"""
Erase current line, modes are:
0 -> erase from cursor to end of line.
1 -> erase from start of line to cursor.
2 -> erase whole line.
"""
x, y = self.term_cursor
if mode == 0:
self.erase(self.term_cursor, (self.width - 1, y))
elif mode == 1:
self.erase((0, y), (x, y))
elif mode == 2:
self.blank_line(y)
def csi_erase_display(self, mode):
"""
Erase display, modes are:
0 -> erase from cursor to end of display.
1 -> erase from start to cursor.
2 -> erase the whole display.
"""
if mode == 0:
self.erase(self.term_cursor, (self.width - 1, self.height - 1))
if mode == 1:
self.erase((0, 0), (self.term_cursor[0] - 1, self.term_cursor[1]))
elif mode == 2:
self.clear(cursor=self.term_cursor)
def csi_set_keyboard_leds(self, mode=0):
"""
Set keyboard LEDs, modes are:
0 -> clear all LEDs
1 -> set scroll lock LED
2 -> set num lock LED
3 -> set caps lock LED
This currently just emits a signal, so it can be processed by another
widget or the main application.
"""
states = {
0: 'clear',
1: 'scroll_lock',
2: 'num_lock',
3: 'caps_lock',
}
if mode in states:
self.widget.leds(states[mode])
def clear(self, cursor=None):
"""
Clears the whole terminal screen and resets the cursor position
to (0, 0) or to the coordinates given by 'cursor'.
"""
self.term = [self.empty_line() for x in xrange(self.height)]
if cursor is None:
self.set_term_cursor(0, 0)
else:
self.set_term_cursor(*cursor)
def cols(self):
return self.width
def rows(self):
return self.height
def content(self, trim_left=0, trim_right=0, cols=None, rows=None,
attr_map=None):
if self.scrolling_up == 0:
for line in self.term:
yield line
else:
buf = self.scrollback_buffer + self.term
for line in buf[-(self.height+self.scrolling_up):-self.scrolling_up]:
yield line
def content_delta(self, other):
if other is self:
return [self.cols()]*self.rows()
return self.content()
class Terminal(Widget):
    # Box widget hosting a terminal emulator (a TermCanvas) wrapped
    # around a child process running on a PTY.
    _selectable = True
    _sizing = frozenset([BOX])
    # emitted signals: 'closed' (child exited), 'beep', 'leds' (keyboard
    # LED request), 'title' (window title change)
    signals = ['closed', 'beep', 'leds', 'title']
    def __init__(self, command, env=None, main_loop=None, escape_sequence=None):
        """
        A terminal emulator within a widget.
        'command' is the command to execute inside the terminal, provided as a
        list of the command followed by its arguments. If 'command' is None,
        the command is the current user's shell. You can also provide a callable
        instead of a command, which will be executed in the subprocess.
        'env' can be used to pass custom environment variables. If omitted,
        os.environ is used.
        'main_loop' should be provided, because the canvas state machine needs
        to act on input from the PTY master device. This object must have
        watch_file and remove_watch_file methods.
        'escape_sequence' is the urwid key symbol which should be used to break
        out of the terminal widget. If it's not specified, "ctrl a" is used.
        """
        self.__super.__init__()
        if escape_sequence is None:
            self.escape_sequence = "ctrl a"
        else:
            self.escape_sequence = escape_sequence
        if env is None:
            self.env = dict(os.environ)
        else:
            self.env = dict(env)
        if command is None:
            self.command = [self.env.get('SHELL', '/bin/sh')]
        else:
            self.command = command
        # keygrab: while True, keys are fed to the child instead of urwid
        self.keygrab = False
        self.last_key = None
        # responses queued for the child (e.g. DSR replies)
        self.response_buffer = []
        self.term_modes = TermModes()
        self.main_loop = main_loop
        # PTY master fd and child pid; filled in by spawn()
        self.master = None
        self.pid = None
        self.width = None
        self.height = None
        self.term = None
        self.has_focus = False
        self.terminated = False
    def spawn(self):
        """Fork the child process on a new PTY."""
        env = self.env
        env['TERM'] = 'linux'
        self.pid, self.master = pty.fork()
        if self.pid == 0:
            # child: run the callable or exec the command, never return
            if callable(self.command):
                try:
                    try:
                        self.command()
                    except:
                        sys.stderr.write(traceback.format_exc())
                        sys.stderr.flush()
                finally:
                    os._exit(0)
            else:
                os.execvpe(self.command[0], self.command, env)
        # parent: without a main loop we poll, so the master fd must not block
        if self.main_loop is None:
            fcntl.fcntl(self.master, fcntl.F_SETFL, os.O_NONBLOCK)
        atexit.register(self.terminate)
    def terminate(self):
        """Shut down the child process and release the PTY (idempotent)."""
        if self.terminated:
            return
        self.terminated = True
        self.remove_watch()
        self.change_focus(False)
        if self.pid > 0:
            self.set_termsize(0, 0)
            # escalate through increasingly forceful signals until the
            # child is reaped (or kill/waitpid fails, i.e. it is gone)
            for sig in (signal.SIGHUP, signal.SIGCONT, signal.SIGINT,
                        signal.SIGTERM, signal.SIGKILL):
                try:
                    os.kill(self.pid, sig)
                    pid, status = os.waitpid(self.pid, os.WNOHANG)
                except OSError:
                    break
                if pid == 0:
                    break
                time.sleep(0.1)
            # final blocking reap to avoid a zombie
            try:
                os.waitpid(self.pid, 0)
            except OSError:
                pass
            os.close(self.master)
    def beep(self):
        """Forward the terminal bell as a widget signal."""
        self._emit('beep')
    def leds(self, which):
        """Forward a keyboard LED request as a widget signal."""
        self._emit('leds', which)
    def respond(self, string):
        """
        Respond to the underlying application with 'string'.
        """
        self.response_buffer.append(string)
    def flush_responses(self):
        """Write all queued responses to the child and clear the queue."""
        for string in self.response_buffer:
            os.write(self.master, string.encode('ascii'))
        self.response_buffer = []
    def set_termsize(self, width, height):
        """Tell the kernel (and thus the child) the new terminal size."""
        winsize = struct.pack("HHHH", height, width, 0, 0)
        fcntl.ioctl(self.master, termios.TIOCSWINSZ, winsize)
    def touch_term(self, width, height):
        """Spawn the child on first use and (re)size the canvas to match."""
        process_opened = False
        if self.pid is None:
            self.spawn()
            process_opened = True
        if self.width == width and self.height == height:
            return
        self.set_termsize(width, height)
        if not self.term:
            self.term = TermCanvas(width, height, self)
        else:
            self.term.resize(width, height)
        self.width = width
        self.height = height
        if process_opened:
            self.add_watch()
    def set_title(self, title):
        """Forward an OSC title change as a widget signal."""
        self._emit('title', title)
    def change_focus(self, has_focus):
        """
        Ignore SIGINT if this widget has focus.
        """
        if self.terminated or self.has_focus == has_focus:
            return
        self.has_focus = has_focus
        if has_focus:
            # disable all tty signal keys so they reach the child instead
            self.old_tios = RealTerminal().tty_signal_keys()
            RealTerminal().tty_signal_keys(*(['undefined'] * 5))
        else:
            RealTerminal().tty_signal_keys(*self.old_tios)
    def render(self, size, focus=False):
        """Render by handing back the terminal canvas at the given size."""
        if not self.terminated:
            self.change_focus(focus)
            width, height = size
            self.touch_term(width, height)
            # without a main loop, poll the child synchronously on render
            if self.main_loop is None:
                self.feed()
        return self.term
    def add_watch(self):
        """Register the PTY master fd with the main loop, if any."""
        if self.main_loop is None:
            return
        self.main_loop.watch_file(self.master, self.feed)
    def remove_watch(self):
        """Unregister the PTY master fd from the main loop, if any."""
        if self.main_loop is None:
            return
        self.main_loop.remove_watch_file(self.master)
    def selectable(self):
        return True
    def wait_and_feed(self, timeout=1.0):
        """Block until the child produces output (or timeout), then feed it."""
        select.select([self.master], [], [], timeout)
        self.feed()
    def feed(self):
        """Read pending child output into the canvas state machine."""
        data = ''
        try:
            data = os.read(self.master, 4096)
        except OSError, e:
            # errno 5 (EIO) is how Linux signals EOF on a PTY master
            if e.errno == 5: # End Of File
                data = ''
            elif e.errno == errno.EWOULDBLOCK: # empty buffer
                return
            else:
                raise
        if data == '': # EOF on BSD
            self.terminate()
            self._emit('closed')
            return
        self.term.addstr(data)
        self.flush_responses()
    def keypress(self, size, key):
        """Route urwid keys either to the child process or back to urwid,
        handling the escape sequence, grab mode and scrollback keys."""
        if self.terminated:
            return key
        if key == "window resize":
            width, height = size
            self.touch_term(width, height)
            return
        if (self.last_key == self.escape_sequence
            and key == self.escape_sequence):
            # escape sequence pressed twice...
            self.last_key = key
            self.keygrab = True
            # ... so pass it to the terminal
        elif self.keygrab:
            if self.escape_sequence == key:
                # stop grabbing the terminal
                self.keygrab = False
                self.last_key = key
                return
        else:
            if key == 'page up':
                self.term.scroll_buffer()
                self.last_key = key
                self._invalidate()
                return
            elif key == 'page down':
                self.term.scroll_buffer(up=False)
                self.last_key = key
                self._invalidate()
                return
            elif (self.last_key == self.escape_sequence
                  and key != self.escape_sequence):
                # hand down keypress directly after ungrab.
                self.last_key = key
                return key
            elif self.escape_sequence == key:
                # start grabbing the terminal
                self.keygrab = True
                self.last_key = key
                return
            elif self._command_map[key] is None or key == 'enter':
                # printable character or escape sequence means:
                # lock in terminal...
                self.keygrab = True
                # ... and do key processing
            else:
                # hand down keypress
                self.last_key = key
                return key
        self.last_key = key
        self.term.scroll_buffer(reset=True)
        # translate urwid key symbols to the bytes the child expects
        if key.startswith("ctrl "):
            if key[-1].islower():
                key = chr(ord(key[-1]) - ord('a') + 1)
            else:
                key = chr(ord(key[-1]) - ord('A') + 1)
        else:
            # DECCKM changes the encoding of the cursor keys
            if self.term_modes.keys_decckm and key in KEY_TRANSLATIONS_DECCKM:
                key = KEY_TRANSLATIONS_DECCKM.get(key)
            else:
                key = KEY_TRANSLATIONS.get(key, key)
        # ENTER transmits both a carriage return and linefeed in LF/NL mode.
        if self.term_modes.lfnl and key == "\x0d":
            key += "\x0a"
        if PYTHON3:
            key = key.encode('ascii')
        os.write(self.master, key)
| lgpl-2.1 |
hsharsha/depot_tools | third_party/logilab/common/ureports/__init__.py | 92 | 6113 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Universal report objects and some formatting drivers.
A way to create simple reports using python objects, primarily designed to be
formatted as text and html.
"""
__docformat__ = "restructuredtext en"
import sys
from logilab.common.compat import StringIO
from logilab.common.textutils import linesep
def get_nodes(node, klass):
    """return an iterator on all children node of the given klass

    Descendants are produced in depth-first pre-order, using an explicit
    stack instead of recursion.
    """
    stack = list(reversed(node.children))
    while stack:
        current = stack.pop()
        if isinstance(current, klass):
            yield current
        # push children reversed so they pop in document order
        stack.extend(reversed(current.children))
def layout_title(layout):
    """try to return the layout's title as string, return None if not found

    The title is the space-joined text of the first Title child.
    """
    for child in layout.children:
        if not isinstance(child, Title):
            continue
        return u' '.join(node.data for node in get_nodes(child, Text))
def build_summary(layout, level=1):
    """make a summary for the report, including X level

    Builds a List of links to each Section child with a title or id,
    recursing 'level' levels deep into nested sections.
    """
    assert level > 0
    level -= 1
    summary = List(klass=u'summary')
    for child in layout.children:
        if not isinstance(child, Section):
            continue
        label = layout_title(child)
        if not label and not child.id:
            # nothing usable to link to
            continue
        if not child.id:
            # derive an anchor id from the title
            child.id = label.replace(' ', '-')
        node = Link(u'#'+child.id, label=label or child.id)
        # FIXME: Three following lines produce not very compliant
        # docbook: there are some useless <para><para>. They might be
        # replaced by the three commented lines but this then produces
        # a bug in html display...
        if level and [n for n in child.children if isinstance(n, Section)]:
            node = Paragraph([node, build_summary(child, level)])
        summary.append(node)
    # summary.append(node)
    # if level and [n for n in child.children if isinstance(n, Section)]:
    #     summary.append(build_summary(child, level))
    return summary
class BaseWriter(object):
    """base class for ureport writers"""
    def format(self, layout, stream=None, encoding=None):
        """format and write the given layout into the stream object
        unicode policy: unicode strings may be found in the layout;
        try to call stream.write with it, but give it back encoded using
        the given encoding if it fails
        """
        if stream is None:
            stream = sys.stdout
        if not encoding:
            encoding = getattr(stream, 'encoding', 'UTF-8')
        self.encoding = encoding or 'UTF-8'
        # stack of (write, writeln) pairs used by compute_content()
        self.__compute_funcs = []
        self.out = stream
        self.begin_format(layout)
        # the layout tree drives formatting via the Visitor pattern
        layout.accept(self)
        self.end_format(layout)
    def format_children(self, layout):
        """recurse on the layout children and call their accept method
        (see the Visitor pattern)
        """
        for child in getattr(layout, 'children', ()):
            child.accept(self)
    def writeln(self, string=u''):
        """write a line in the output buffer"""
        self.write(string + linesep)
    def write(self, string):
        """write a string in the output buffer"""
        try:
            self.out.write(string)
        except UnicodeEncodeError:
            # fall back to an explicitly encoded byte string
            self.out.write(string.encode(self.encoding))
    def begin_format(self, layout):
        """begin to format a layout"""
        self.section = 0
    def end_format(self, layout):
        """finished to format a layout"""
    def get_table_content(self, table):
        """trick to get table content without actually writing it
        return an aligned list of lists containing table cells values as string
        """
        result = [[]]
        cols = table.cols
        for cell in self.compute_content(table):
            # start a new row every table.cols cells
            if cols == 0:
                result.append([])
                cols = table.cols
            cols -= 1
            result[-1].append(cell)
        # fill missing cells
        while len(result[-1]) < cols:
            result[-1].append(u'')
        return result
    def compute_content(self, layout):
        """trick to compute the formatting of children layout before actually
        writing it
        return an iterator on strings (one for each child element)
        """
        # use cells !
        # the closures below capture 'stream', which is rebound to a fresh
        # StringIO for every child in the loop further down
        def write(data):
            try:
                stream.write(data)
            except UnicodeEncodeError:
                stream.write(data.encode(self.encoding))
        def writeln(data=u''):
            try:
                stream.write(data+linesep)
            except UnicodeEncodeError:
                stream.write(data.encode(self.encoding)+linesep)
        # temporarily redirect this writer's output into the buffers
        self.write = write
        self.writeln = writeln
        self.__compute_funcs.append((write, writeln))
        for child in layout.children:
            stream = StringIO()
            child.accept(self)
            yield stream.getvalue()
        # restore the previous writers (or the class-level defaults)
        self.__compute_funcs.pop()
        try:
            self.write, self.writeln = self.__compute_funcs[-1]
        except IndexError:
            del self.write
            del self.writeln
from logilab.common.ureports.nodes import *
from logilab.common.ureports.text_writer import TextWriter
from logilab.common.ureports.html_writer import HTMLWriter
| bsd-3-clause |
theflofly/tensorflow | tensorflow/python/saved_model/saved_model_test.py | 13 | 64354 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import types_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import main_op
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver_test_utils
from tensorflow.python.training import training
from tensorflow.python.util import compat
# Relative path (under the test source tree) of a checked-in SavedModel export.
SAVED_MODEL_PATH = "cc/saved_model/testdata/half_plus_two/00000123"
def tearDownModule():
  """Remove the shared temp directory once every test in this module is done."""
  temp_dir = test.get_temp_dir()
  file_io.delete_recursively(temp_dir)
class SavedModelTestBase(test.TestCase):
  """Shared helpers for the SavedModel builder/loader test cases below."""

  def _get_export_dir(self, label):
    """Return a per-test export directory path under the temp dir."""
    return os.path.join(test.get_temp_dir(), label)

  def _init_and_validate_variable(self, sess, variable_name, variable_value):
    """Create a V1 variable, run the initializer, and check its value."""
    v = variables.VariableV1(variable_value, name=variable_name)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(variable_value, self.evaluate(v))

  def _build_asset_collection(self, asset_file_name, asset_file_contents,
                              asset_file_tensor_name, asset_subdir=""):
    """Write an asset file and register its path tensor in ASSET_FILEPATHS.

    Returns the full ASSET_FILEPATHS collection of the default graph (which
    accumulates across repeated calls within one graph).
    """
    asset_dir = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_subdir))
    file_io.recursive_create_dir(asset_dir)
    asset_path = os.path.join(
        compat.as_bytes(asset_dir), compat.as_bytes(asset_file_name))
    file_io.write_string_to_file(asset_path, asset_file_contents)
    asset_tensor = constant_op.constant(
        asset_path, name=asset_file_tensor_name)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_tensor)
    return ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
class SavedModelTest(SavedModelTestBase):
def _validate_assets(self,
                     export_dir,
                     asset_file_def,
                     expected_asset_file_name,
                     expected_asset_file_contents,
                     expected_asset_tensor_name,
                     asset_id=0):
  """Check one exported asset: file contents on disk and its AssetFileDef."""
  asset_path = os.path.join(
      compat.as_bytes(export_dir),
      compat.as_bytes(constants.ASSETS_DIRECTORY),
      compat.as_bytes(expected_asset_file_name))
  contents = compat.as_text(file_io.read_file_to_string(asset_path))
  self.assertEqual(expected_asset_file_contents, contents)
  asset_proto = asset_file_def[asset_id]
  self.assertEqual(expected_asset_file_name, asset_proto.filename)
  self.assertEqual(expected_asset_tensor_name, asset_proto.tensor_info.name)
def _validate_inputs_tensor_info_fail(self, builder, tensor_info):
  """Expect the builder to reject a signature with this input TensorInfo."""
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    signature = signature_def_utils.build_signature_def(
        {"foo_inputs": tensor_info}, dict(), "foo")
    self.assertRaises(
        AssertionError,
        builder.add_meta_graph_and_variables,
        sess, ["foo"],
        signature_def_map={"foo_key": signature})
def _validate_inputs_tensor_info_accept(self, builder, tensor_info):
  """Expect the builder to accept a signature with this input TensorInfo."""
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    signature = signature_def_utils.build_signature_def(
        {"foo_inputs": tensor_info}, dict(), "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"],
        signature_def_map={"foo_key": signature})
def _validate_outputs_tensor_info_fail(self, builder, tensor_info):
  """Expect the builder to reject a signature with this output TensorInfo."""
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    signature = signature_def_utils.build_signature_def(
        dict(), {"foo_outputs": tensor_info}, "foo")
    self.assertRaises(
        AssertionError,
        builder.add_meta_graph_and_variables,
        sess, ["foo"],
        signature_def_map={"foo_key": signature})
def _validate_outputs_tensor_info_accept(self, builder, tensor_info):
  """Expect the builder to accept a signature with this output TensorInfo."""
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    signature = signature_def_utils.build_signature_def(
        dict(), {"foo_outputs": tensor_info}, "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"],
        signature_def_map={"foo_key": signature})
def _validate_sig_def_keys(self, builder, valid_tensor_info, invalid_key):
  """Expect a KeyError when a reserved signature-def map key is used."""
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    signature = signature_def_utils.build_signature_def(
        dict(), {"foo_key": valid_tensor_info}, "foo")
    self.assertRaises(
        KeyError,
        builder.add_meta_graph_and_variables,
        sess, ["foo"],
        signature_def_map={invalid_key: signature})
def testMaybeSavedModelDir(self):
  """maybe_saved_model_directory is True only for real SavedModel dirs."""
  # An existing source directory that is not a SavedModel export.
  self.assertFalse(
      loader.maybe_saved_model_directory(
          test.test_src_dir_path("/python/saved_model")))
  # A checked-in SavedModel export.
  self.assertTrue(
      loader.maybe_saved_model_directory(
          test.test_src_dir_path(SAVED_MODEL_PATH)))
  # A path that does not exist at all.
  self.assertFalse(loader.maybe_saved_model_directory("complete_garbage"))
def testBadSavedModelFileFormat(self):
  """Loading fails with IOError for missing or unparseable saved_model files."""
  export_dir = self._get_export_dir("test_bad_saved_model_file_format")
  # Attempt to load a SavedModel from an export directory that does not exist.
  with self.session(graph=ops.Graph()) as sess:
    with self.assertRaisesRegexp(IOError,
                                 "SavedModel file does not exist at: %s" %
                                 export_dir):
      loader.load(sess, ["foo"], export_dir)
  os.makedirs(export_dir)
  # Write an invalid binary proto to saved_model.pb.
  path_to_pb = os.path.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
  with open(path_to_pb, "w") as f:
    f.write("invalid content")
  with self.session(graph=ops.Graph()) as sess:
    with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                 constants.SAVED_MODEL_FILENAME_PB):
      loader.load(sess, ["foo"], export_dir)
  # Cleanup the directory and start again.
  file_io.delete_recursively(export_dir)
  os.makedirs(export_dir)
  # Write an invalid text proto to saved_model.pbtxt
  path_to_pbtxt = os.path.join(export_dir,
                               constants.SAVED_MODEL_FILENAME_PBTXT)
  with open(path_to_pbtxt, "w") as f:
    f.write("invalid content")
  with self.session(graph=ops.Graph()) as sess:
    with self.assertRaisesRegexp(IOError, "Cannot parse file.*%s" %
                                 constants.SAVED_MODEL_FILENAME_PBTXT):
      loader.load(sess, ["foo"], export_dir)
@test_util.run_deprecated_v1
def testVerifySessionGraphUsage(self):
  """loader.load() restores into an explicitly supplied session's graph."""
  export_dir = self._get_export_dir("test_verify_session_graph_usage")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
  # Save the SavedModel to disk.
  builder.save()
  # Build a session and supply it to the load operation.
  sess = session.Session(graph=ops.Graph())
  loader.load(sess, [tag_constants.TRAINING], export_dir)
  # Check the variable within the scope of the session and its graph.
  with sess:
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
@test_util.run_deprecated_v1
def testSequence(self):
  """Builder enforces the required ordering of its add_* calls."""
  export_dir = self._get_export_dir("test_sequence")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Expect an assertion error since add_meta_graph_and_variables() should be
  # invoked before any add_meta_graph() calls.
  with self.session(graph=ops.Graph()) as sess:
    self.assertRaises(AssertionError, builder.add_meta_graph, ["foo"])
  # Expect an assertion error for multiple calls of
  # add_meta_graph_and_variables() since weights should be saved exactly once.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, ["bar"])
    self.assertRaises(AssertionError, builder.add_meta_graph_and_variables,
                      sess, ["baz"])
@test_util.run_deprecated_v1
def testTags(self):
  """Meta graphs are selected by exact tag-set match; weights saved once.

  Only the first meta graph (tag TRAINING) carries the variable checkpoint,
  so every successful load below sees the value 42 regardless of which
  graph's tags were requested.
  """
  export_dir = self._get_export_dir("test_tags")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with a single variable. SavedModel invoked to:
  # - add with weights.
  # - a single tag (from predefined constants).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, [tag_constants.TRAINING])
  # Graph that updates the single variable. SavedModel invoked to:
  # - simply add the model (weights are not updated).
  # - a single tag (from predefined constants).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    builder.add_meta_graph([tag_constants.SERVING])
  # Graph that updates the single variable. SavedModel invoked to:
  # - simply add the model (weights are not updated).
  # - multiple tags (from predefined constants).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 45)
    builder.add_meta_graph([tag_constants.SERVING, tag_constants.GPU])
  # Graph that updates the single variable. SavedModel invoked to:
  # - simply add the model (weights are not updated).
  # - multiple tags (from predefined constants for serving on TPU).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 45)
    builder.add_meta_graph([tag_constants.SERVING, tag_constants.TPU])
  # Graph that updates the single variable. SavedModel is invoked:
  # - to add the model (weights are not updated).
  # - multiple custom tags.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 44)
    builder.add_meta_graph(["foo", "bar"])
  # Save the SavedModel to disk.
  builder.save()
  # Restore the graph with a single predefined tag whose variables were saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Restore the graph with a single predefined tag whose variables were not
  # saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.SERVING], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Restore the graph with multiple predefined tags whose variables were not
  # saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.SERVING, tag_constants.GPU], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Restore the graph with multiple predefined tags (for serving on TPU)
  # whose variables were not saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.SERVING, tag_constants.TPU], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Restore the graph with multiple tags. Provide duplicate tags to test set
  # semantics.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo", "bar", "foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Try restoring a graph with a non-existent tag. This should yield a runtime
  # error.
  with self.session(graph=ops.Graph()) as sess:
    self.assertRaises(RuntimeError, loader.load, sess, ["INVALID"],
                      export_dir)
  # Try restoring a graph where a subset of the tags match. Since tag matching
  # for meta graph defs follows "all" semantics, this should yield a runtime
  # error.
  with self.session(graph=ops.Graph()) as sess:
    self.assertRaises(RuntimeError, loader.load, sess, ["foo", "baz"],
                      export_dir)
@test_util.run_v1_only("b/120545219")
def testVariables(self):
  """Restore behavior for graphs whose variables partially overlap the saved set."""
  export_dir = self._get_export_dir("test_variables")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with two variables. SavedModel invoked to:
  # - add with weights.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v1", 1)
    self._init_and_validate_variable(sess, "v2", 2)
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Graph with a single variable (subset of the variables from the previous
  # graph whose weights were saved). SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v2", 3)
    builder.add_meta_graph(["bar"])
  # Graph with a single variable (disjoint set of variables from the previous
  # graph whose weights were saved). SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v3", 4)
    builder.add_meta_graph(["baz"])
  # Save the SavedModel to disk.
  builder.save()
  # Restore the graph with tag "foo", whose variables were saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertEqual(len(collection_vars), 2)
    self.assertEqual(1, collection_vars[0].eval())
    self.assertEqual(2, collection_vars[1].eval())
  # Restore the graph with tag "bar", whose variables were not saved. Only the
  # subset of the variables added to the graph will be restored with the
  # checkpointed value.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    collection_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertEqual(len(collection_vars), 1)
    self.assertEqual(2, collection_vars[0].eval())
  # Try restoring the graph with tag "baz", whose variables were not saved.
  # Since this graph has a disjoint set of variables from the set that was
  # saved, this should raise an error.
  with self.session(graph=ops.Graph()) as sess:
    self.assertRaises(errors.NotFoundError, loader.load, sess, ["baz"],
                      export_dir)
@test_util.run_deprecated_v1
def testGraphWithoutVariables(self):
  """SavedModel round-trips graphs that contain no variables at all."""
  # Fix: the label previously read "test_graph_has_variables", contradicting
  # both the test name and the scenario (these graphs have no variables).
  # The label only determines the temp export directory name.
  export_dir = self._get_export_dir("test_graph_without_variables")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with no variables.
  with self.session(graph=ops.Graph()) as sess:
    constant_5_name = constant_op.constant(5.0).name
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Second graph with no variables
  with self.session(graph=ops.Graph()) as sess:
    constant_6_name = constant_op.constant(6.0).name
    builder.add_meta_graph(["bar"])
  # Save the SavedModel to disk.
  builder.save()
  # Restore the graph with tag "foo".
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    # Read the constant a from the graph.
    a = ops.get_default_graph().get_tensor_by_name(constant_5_name)
    b = constant_op.constant(6.0)
    c = a * b
    self.assertEqual(30.0, self.evaluate(c))
  # Restore the graph with tag "bar".
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    # Read the constant a from the graph.
    a = ops.get_default_graph().get_tensor_by_name(constant_6_name)
    b = constant_op.constant(5.0)
    c = a * b
    self.assertEqual(30.0, self.evaluate(c))
@test_util.run_deprecated_v1
def testNoOverwrite(self):
  """A second builder on an existing export directory must be rejected."""
  export_dir = self._get_export_dir("test_no_overwrite")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with a single variable. SavedModel invoked to:
  # - add with weights.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)
  # Restore the graph with tag "foo", whose variables were saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # An attempt to create another builder with the same export directory should
  # result in an assertion error.
  self.assertRaises(AssertionError, saved_model_builder._SavedModelBuilder,
                    export_dir)
@test_util.run_deprecated_v1
def testSaveAsText(self):
  """Saving in text-proto format (as_text=True) round-trips correctly."""
  export_dir = self._get_export_dir("test_astext")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with a single variable. SavedModel invoked to:
  # - add with weights.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Graph with the same single variable. SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    builder.add_meta_graph(["bar"])
  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)
  # Restore the graph with tag "foo", whose variables were saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
  # Restore the graph with tag "bar", whose variables were not saved.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
@test_util.run_v1_only("b/120545219")
def testCollections(self):
  """Each meta graph carries its own collection-def; values come from the checkpoint."""
  export_dir = self._get_export_dir("test_collections")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with a single variable added to a collection. SavedModel invoked to:
  # - add with weights.
  with self.session(graph=ops.Graph()) as sess:
    v = variables.VariableV1(42, name="v")
    ops.add_to_collection("foo_vars", v)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(42, self.evaluate(v))
    builder.add_meta_graph_and_variables(sess, ["foo"])
  # Graph with the same single variable added to a different collection.
  # SavedModel invoked to:
  # - simply add the model (weights are not updated).
  with self.session(graph=ops.Graph()) as sess:
    v = variables.VariableV1(43, name="v")
    ops.add_to_collection("bar_vars", v)
    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(43, self.evaluate(v))
    builder.add_meta_graph(["bar"])
  # Save the SavedModel to disk.
  builder.save()
  # Restore the graph with tag "foo", whose variables were saved. The
  # collection 'foo_vars' should contain a single element. The collection
  # 'bar_vars' should not be found.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    collection_foo_vars = ops.get_collection("foo_vars")
    self.assertEqual(len(collection_foo_vars), 1)
    self.assertEqual(42, collection_foo_vars[0].eval())
    self.assertEqual(len(ops.get_collection("bar_vars")), 0)
  # Restore the graph with tag "bar", whose variables were not saved. The
  # collection-def exported as part of the meta graph def is updated to
  # reflect the new collection. The value of the variable in the
  # collection-def corresponds to the saved value (from the previous graph
  # with tag "foo").
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["bar"], export_dir)
    collection_bar_vars = ops.get_collection("bar_vars")
    self.assertEqual(len(collection_bar_vars), 1)
    self.assertEqual(42, collection_bar_vars[0].eval())
    self.assertEqual(len(ops.get_collection("foo_vars")), 0)
@test_util.run_deprecated_v1
def testSignatureDefs(self):
  """Each meta graph keeps its own signature-def map; same keys may differ."""
  export_dir = self._get_export_dir("test_signature_defs")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # Graph with a single variable and a single entry in the signature def map.
  # SavedModel is invoked to add with weights.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build and populate an empty SignatureDef for testing.
    foo_signature = signature_def_utils.build_signature_def(dict(),
                                                            dict(), "foo")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], signature_def_map={"foo_key": foo_signature})
  # Graph with the same single variable and multiple entries in the signature
  # def map. No weights are saved by SavedModel.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 43)
    # Build and populate a different SignatureDef for testing.
    bar_signature = signature_def_utils.build_signature_def(dict(),
                                                            dict(), "bar")
    # Also, build a different SignatureDef corresponding to "foo_key" defined
    # in the previous graph.
    foo_new_signature = signature_def_utils.build_signature_def(dict(),
                                                                dict(),
                                                                "foo_new")
    builder.add_meta_graph(
        ["bar"],
        signature_def_map={
            "bar_key": bar_signature,
            "foo_key": foo_new_signature
        })
  # Save the SavedModel to disk.
  builder.save()
  # Restore the graph with tag "foo". The single entry in the SignatureDef map
  # corresponding to "foo_key" should exist.
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    foo_signature = foo_graph.signature_def
    self.assertEqual(len(foo_signature), 1)
    self.assertEqual("foo", foo_signature["foo_key"].method_name)
  # Restore the graph with tag "bar". The SignatureDef map should have two
  # entries. One corresponding to "bar_key" and another corresponding to the
  # new value of "foo_key".
  with self.session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
    bar_signature = bar_graph.signature_def
    self.assertEqual(len(bar_signature), 2)
    self.assertEqual("bar", bar_signature["bar_key"].method_name)
    self.assertEqual("foo_new", bar_signature["foo_key"].method_name)
def testSignatureDefValidationFails(self):
  """Builder rejects malformed TensorInfos and reserved signature keys."""
  export_dir = self._get_export_dir("test_signature_def_validation_fail")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  # TensorInfo with a dtype but no name/encoding set.
  tensor_without_encoding = meta_graph_pb2.TensorInfo()
  tensor_without_encoding.dtype = types_pb2.DT_FLOAT
  self._validate_inputs_tensor_info_fail(builder, tensor_without_encoding)
  self._validate_outputs_tensor_info_fail(builder, tensor_without_encoding)
  # TensorInfo with a name but no dtype.
  tensor_without_dtype = meta_graph_pb2.TensorInfo()
  tensor_without_dtype.name = "x"
  self._validate_inputs_tensor_info_fail(builder, tensor_without_dtype)
  self._validate_outputs_tensor_info_fail(builder, tensor_without_dtype)
  # Completely empty TensorInfo.
  tensor_empty = meta_graph_pb2.TensorInfo()
  self._validate_inputs_tensor_info_fail(builder, tensor_empty)
  self._validate_outputs_tensor_info_fail(builder, tensor_empty)
  # A valid TensorInfo is still rejected under reserved signature-def keys.
  valid_tensor_info = meta_graph_pb2.TensorInfo()
  valid_tensor_info.name = "foo"
  valid_tensor_info.dtype = types_pb2.DT_FLOAT
  self._validate_sig_def_keys(builder, valid_tensor_info,
                              constants.INIT_OP_SIGNATURE_KEY)
  self._validate_sig_def_keys(builder, valid_tensor_info,
                              constants.TRAIN_OP_SIGNATURE_KEY)
@test_util.run_deprecated_v1
def testSignatureDefValidationSucceedsWithName(self):
  """A TensorInfo carrying name + dtype passes input and output validation."""
  tensor_with_name = meta_graph_pb2.TensorInfo()
  tensor_with_name.name = "foo"
  tensor_with_name.dtype = types_pb2.DT_FLOAT
  export_dir = self._get_export_dir("test_signature_def_validation_name_1")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  self._validate_inputs_tensor_info_accept(builder, tensor_with_name)
  export_dir = self._get_export_dir("test_signature_def_validation_name_2")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  self._validate_outputs_tensor_info_accept(builder, tensor_with_name)
@test_util.run_deprecated_v1
def testSignatureDefValidationSucceedsWithCoo(self):
  """A TensorInfo using the coo_sparse encoding passes validation."""
  tensor_with_coo = meta_graph_pb2.TensorInfo()
  # TODO(soergel) test validation of each of the fields of coo_sparse
  tensor_with_coo.coo_sparse.values_tensor_name = "foo"
  tensor_with_coo.dtype = types_pb2.DT_FLOAT
  export_dir = self._get_export_dir("test_signature_def_validation_coo_1")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  self._validate_inputs_tensor_info_accept(builder, tensor_with_coo)
  export_dir = self._get_export_dir("test_signature_def_validation_coo_2")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  self._validate_outputs_tensor_info_accept(builder, tensor_with_coo)
@test_util.run_deprecated_v1
def testAssets(self):
  """Only assets in the assets_list are copied into the export directory."""
  export_dir = self._get_export_dir("test_assets")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Build an asset collection.
    ignored_filepath = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
    file_io.write_string_to_file(ignored_filepath, "will be ignored")
    asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
                                              "asset_file_tensor")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
                          "foo bar baz", "asset_file_tensor:0")
    # The file written outside the asset collection must not be exported.
    ignored_asset_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes("ignored.txt"))
    self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionDiffFile(self):
  """Same-named assets with different contents get a deduplicating suffix."""
  export_dir = self._get_export_dir("test_assets_name_collision_diff_file")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Two files named hello42.txt with different contents; the collection
    # accumulates both (the second call returns the combined list).
    asset_list = self._build_asset_collection(
        "hello42.txt", "foo bar bak", "asset_file_tensor", asset_subdir="1")
    asset_list = self._build_asset_collection(
        "hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
                          "foo bar bak", "asset_file_tensor:0")
    # The colliding file is exported under the suffixed name hello42.txt_1.
    self._validate_assets(
        export_dir,
        foo_graph.asset_file_def,
        "hello42.txt_1",
        "foo bar baz",
        "asset_file_tensor_1:0",
        asset_id=1)
@test_util.run_deprecated_v1
def testAssetsNameCollisionSameFilepath(self):
  """Two tensors pointing at the identical file share one exported copy."""
  export_dir = self._get_export_dir("test_assets_name_collision_same_path")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
                                              "asset_file_tensor")
    asset_list = self._build_asset_collection("hello42.txt", "foo bar baz",
                                              "asset_file_tensor_1")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
                          "foo bar baz", "asset_file_tensor:0")
    # The second tensor should be recorded, but the same.
    self._validate_assets(
        export_dir,
        foo_graph.asset_file_def,
        "hello42.txt",
        "foo bar baz",
        "asset_file_tensor_1:0",
        asset_id=1)
    # No suffixed duplicate should have been written.
    ignored_asset_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes("hello42.txt_1"))
    self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionSameFile(self):
  """Distinct paths with identical name and contents share one exported copy."""
  export_dir = self._get_export_dir("test_assets_name_collision_same_file")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    asset_list = self._build_asset_collection(
        "hello42.txt", "foo bar baz", "asset_file_tensor", asset_subdir="1")
    asset_list = self._build_asset_collection(
        "hello42.txt", "foo bar baz", "asset_file_tensor_1", asset_subdir="2")
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
                          "foo bar baz", "asset_file_tensor:0")
    # The second tensor should be recorded, but the same.
    self._validate_assets(
        export_dir,
        foo_graph.asset_file_def,
        "hello42.txt",
        "foo bar baz",
        "asset_file_tensor_1:0",
        asset_id=1)
    # No suffixed duplicate should have been written.
    ignored_asset_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes("hello42.txt_1"))
    self.assertFalse(file_io.file_exists(ignored_asset_path))
@test_util.run_deprecated_v1
def testAssetsNameCollisionManyFiles(self):
  """Repeated name collisions produce sequential _1.._4 suffixes."""
  export_dir = self._get_export_dir("test_assets_name_collision_many_files")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    # Five same-named assets with distinct contents; the collection
    # accumulates, so the final asset_list holds all five.
    for i in range(5):
      idx = str(i)
      asset_list = self._build_asset_collection(
          "hello42.txt",
          "foo bar baz " + idx,
          "asset_file_tensor_" + idx,
          asset_subdir=idx)
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    # Assets 1..4 are exported under suffixed names.
    for i in range(1, 5):
      idx = str(i)
      self._validate_assets(
          export_dir,
          foo_graph.asset_file_def,
          "hello42.txt_" + idx,
          "foo bar baz " + idx,
          "asset_file_tensor_{}:0".format(idx),
          asset_id=i)
    # The first asset keeps the unsuffixed name.
    self._validate_assets(export_dir, foo_graph.asset_file_def, "hello42.txt",
                          "foo bar baz 0", "asset_file_tensor_0:0")
@test_util.run_v1_only("b/120545219")
def testCustomInitOp(self):
  """A custom init_op runs after restore and can recompute variable values."""
  # NOTE(review): label "test_main_op" does not match the test name — looks
  # like it predates a rename from a main_op-based test; confirm before
  # changing since it only affects the temp directory name.
  export_dir = self._get_export_dir("test_main_op")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    # Add `v1` and `v2` variables to the graph.
    v1 = variables.VariableV1(1, name="v1")
    ops.add_to_collection("v", v1)
    v2 = variables.VariableV1(2, name="v2")
    ops.add_to_collection("v", v2)
    # Initialize another variable `v3` to 42.
    v3 = variables.VariableV1(42, name="v3")
    ops.add_to_collection("v", v3)
    # Set up an assignment op to be run as part of the main_op.
    with ops.control_dependencies([main_op.main_op()]):
      add_v1_v2 = math_ops.add(v1._ref(), v2._ref())
      custom_init_op = control_flow_ops.group(state_ops.assign(v3, add_v1_v2))
    self.evaluate(custom_init_op)
    builder.add_meta_graph_and_variables(
        sess, ["foo"], init_op=custom_init_op)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["foo"], export_dir)
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    # Evaluates to the sum of the first two variables and assigned as part of
    # the main_op, following a restore.
    self.assertEqual(3, ops.get_collection("v")[2].eval())
@test_util.run_v1_only("b/120545219")
def testTrainOp(self):
  """A Tensor-valued train_op is stored and retrievable after load."""
  export_dir = self._get_export_dir("test_train_op")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    # Add `v1` and `v2` variables to the graph.
    v1 = variables.VariableV1(1, name="v1")
    ops.add_to_collection("v", v1)
    v2 = variables.VariableV1(2, name="v2")
    ops.add_to_collection("v", v2)
    self.evaluate(variables.global_variables_initializer())
    train_op = state_ops.assign_add(v1, v2)
    self.evaluate(train_op)
    builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    meta_graph_def = loader.load(sess, ["foo"], export_dir)
    # v1 was incremented by v2 before saving, so it restores as 3.
    self.assertEqual(3, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    self.assertIsInstance(
        loader_impl.get_train_op(meta_graph_def), ops.Tensor)
@test_util.run_v1_only("b/120545219")
def testTrainOpGroup(self):
  """An Operation-valued (grouped) train_op round-trips as ops.Operation."""
  export_dir = self._get_export_dir("test_train_op_group")
  builder = saved_model_builder._SavedModelBuilder(export_dir)
  with self.session(graph=ops.Graph()) as sess:
    # Add `v1` and `v2` variables to the graph.
    v1 = variables.VariableV1(1, name="v1")
    ops.add_to_collection("v", v1)
    v2 = variables.VariableV1(2, name="v2")
    ops.add_to_collection("v", v2)
    self.evaluate(variables.global_variables_initializer())
    train_op = control_flow_ops.group()
    self.evaluate(train_op)
    builder.add_meta_graph_and_variables(sess, ["foo"], train_op=train_op)
  # Save the SavedModel to disk.
  builder.save()
  with self.session(graph=ops.Graph()) as sess:
    meta_graph_def = loader.load(sess, ["foo"], export_dir)
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    self.assertIsInstance(
        loader_impl.get_train_op(meta_graph_def), ops.Operation)
@test_util.run_v1_only("b/120545219")
def testTrainOpAfterVariables(self):
  """A train_op added to a later meta graph must not leak into earlier ones."""
  export_dir = self._get_export_dir("test_train_op_after_variables")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with self.session(graph=ops.Graph()) as sess:
    # Add `v1` and `v2` variables to the graph.
    v1 = variables.VariableV1(1, name="v1")
    ops.add_to_collection("v", v1)
    v2 = variables.VariableV1(2, name="v2")
    ops.add_to_collection("v", v2)

    self.evaluate(variables.global_variables_initializer())
    # "pre_foo" is saved before any train_op exists.
    builder.add_meta_graph_and_variables(sess, ["pre_foo"])

    train_op = state_ops.assign_add(v1, v2)
    self.evaluate(train_op)
    builder.add_meta_graph(["foo"], train_op=train_op)

  # Save the SavedModel to disk.
  builder.save()

  with self.session(graph=ops.Graph()) as sess:
    meta_graph_def = loader.load(sess, ["foo"], export_dir)
    self.assertIsInstance(
        loader_impl.get_train_op(meta_graph_def), ops.Tensor)

  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, ["pre_foo"], export_dir)
    # The earlier meta graph must have an empty TRAIN_OP collection.
    self.assertFalse(ops.get_collection(constants.TRAIN_OP_KEY))
@test_util.run_deprecated_v1
def testMultipleAssets(self):
  """Each tagged meta graph keeps its own, distinct asset files."""
  export_dir = self._get_export_dir("test_multiple_assets")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection specific to `foo` graph.
    asset_list = self._build_asset_collection("foo.txt", "content_foo",
                                              "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "foo".
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)

  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection specific to `bar` graph.
    asset_list = self._build_asset_collection("bar.txt", "content_bar",
                                              "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "bar".
    builder.add_meta_graph(["bar"], assets_list=asset_list)

  # Save the SavedModel to disk.
  builder.save()

  # Check assets restored for graph with tag "foo".
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
                          "content_foo", "asset_file_tensor:0")

  # Check assets restored for graph with tag "bar".
  with self.session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)
    self._validate_assets(export_dir, bar_graph.asset_file_def, "bar.txt",
                          "content_bar", "asset_file_tensor:0")
@test_util.run_deprecated_v1
def testDuplicateAssets(self):
  """Same-named assets across meta graphs keep the first-written contents."""
  export_dir = self._get_export_dir("test_duplicate_assets")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection with `foo.txt` that has `foo` specific
    # content.
    asset_list = self._build_asset_collection("foo.txt", "content_foo",
                                              "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "foo".
    builder.add_meta_graph_and_variables(
        sess, ["foo"], assets_list=asset_list)

  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)

    # Build an asset collection with `foo.txt` that has `bar` specific
    # content.
    asset_list = self._build_asset_collection("foo.txt", "content_bar",
                                              "asset_file_tensor")

    # Add the asset collection as part of the graph with tag "bar".
    builder.add_meta_graph(["bar"], assets_list=asset_list)

  # Save the SavedModel to disk.
  builder.save()

  # Check assets restored for graph with tag "foo".
  with self.session(graph=ops.Graph()) as sess:
    foo_graph = loader.load(sess, ["foo"], export_dir)
    self._validate_assets(export_dir, foo_graph.asset_file_def, "foo.txt",
                          "content_foo", "asset_file_tensor:0")

  # Check assets restored for graph with tag "bar".
  with self.session(graph=ops.Graph()) as sess:
    bar_graph = loader.load(sess, ["bar"], export_dir)

    # Validate the assets for `bar` graph. `foo.txt` should contain the
    # original contents corresponding to `foo` graph since an asset with the
    # same name across multiple graphs is only stored the first time
    self._validate_assets(export_dir, bar_graph.asset_file_def, "foo.txt",
                          "content_foo", "asset_file_tensor:0")
@test_util.run_v1_only("b/120545219")
def testOp(self):
  """Variables placed on different CPU devices save, restore, and re-init."""
  export_dir = self._get_export_dir("test_op")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      v1 = variables.VariableV1(1, name="v1")

    with sess.graph.device("/cpu:1"):
      v2 = variables.VariableV1(2, name="v2")

    # v3 is an unsaved variable derived from v1 and v2. It is used to
    # exercise the ability to run an init op when restoring a graph.
    v3 = variables.VariableV1(1, name="v3", trainable=False, collections=[])
    assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
    init_op = control_flow_ops.group(assign_v3, name="init_op")

    ops.add_to_collection("v", v1)
    ops.add_to_collection("v", v2)
    ops.add_to_collection("v", v3)
    ops.add_to_collection("init_op", init_op)

    self.evaluate(variables.global_variables_initializer())
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())

    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Save the SavedModel to disk.
  builder.save()

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    loader.load(sess, ["foo"], export_dir)

    # Validate variables, run the init op and verify result.
    self.assertEqual(1, ops.get_collection("v")[0].eval())
    self.assertEqual(2, ops.get_collection("v")[1].eval())
    ops.get_collection("init_op")[0].run()
    # init_op assigned v3 = v1 + v2 = 3 after the restore.
    self.assertEqual(3, ops.get_collection("v")[2].eval())
# NOTE(review): unlike the sibling tests, this one carries no
# run_v1_only/run_deprecated_v1 decorator — confirm this is intentional.
def testCustomSaveable(self):
  """A SAVEABLE_OBJECTS entry (CheckpointedOp table) survives save/restore."""
  export_dir = self._get_export_dir("custom_saveable")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    # CheckpointedOp is a key-value table that can be saved across sessions.
    # The table register itself in SAVEABLE_OBJECTS collection.
    v1 = saver_test_utils.CheckpointedOp(name="v1")
    self.evaluate(variables.global_variables_initializer())
    v1.insert("k1", 3.0).run()

    # Once the table is restored, we can access it through this reference.
    ops.add_to_collection("table_ref", v1.table_ref)
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Save the SavedModel to disk.
  builder.save()

  with session.Session(
      graph=ops.Graph(),
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    loader.load(sess, ["foo"], export_dir)

    # Instantiate a wrapper object from the checkpointed reference.
    v1 = saver_test_utils.CheckpointedOp(
        name="v1", table_ref=ops.get_collection("table_ref")[0])
    self.assertEqual(b"k1", v1.keys().eval())
    self.assertEqual(3.0, v1.values().eval())
@test_util.run_deprecated_v1
def testCustomSaver(self):
  """A user-supplied Saver is recorded in saver_def and used on restore."""
  export_dir = self._get_export_dir("test_custom_saver")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with self.session(graph=ops.Graph()) as sess:
    variables.VariableV1(1, name="v1")
    self.evaluate(variables.global_variables_initializer())
    custom_saver = training.Saver(name="my_saver")
    builder.add_meta_graph_and_variables(sess, ["tag"], saver=custom_saver)

  # Save the SavedModel to disk.
  builder.save()

  with ops.Graph().as_default() as graph:
    with self.session(graph=graph) as sess:
      saved_graph = loader.load(sess, ["tag"], export_dir)
      graph_ops = [x.name for x in graph.get_operations()]
      # Only the custom saver's ops should exist; the default "save/"
      # scoped saver must not have been created.
      self.assertTrue("my_saver/restore_all" in graph_ops)
      self.assertFalse("save/restore_all" in graph_ops)
      self.assertEqual(
          saved_graph.saver_def.restore_op_name, "my_saver/restore_all")
@test_util.run_deprecated_v1
def testNoCustomSaver(self):
  """Without an explicit saver, the default "save/" saver is recorded."""
  export_dir = self._get_export_dir("test_no_custom_saver")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with self.session(graph=ops.Graph()) as sess:
    variables.VariableV1(1, name="v1")
    self.evaluate(variables.global_variables_initializer())
    # A saver is created in the graph but NOT passed to the builder.
    training.Saver(name="my_saver")
    builder.add_meta_graph_and_variables(sess, ["tag"])

  # Save the SavedModel to disk.
  builder.save()

  with ops.Graph().as_default() as graph:
    with self.session(graph=graph) as sess:
      saved_graph = loader.load(sess, ["tag"], export_dir)
      graph_ops = [x.name for x in graph.get_operations()]
      # Both savers exist in the graph, but saver_def must point at the
      # default "save/" one since no saver was passed to the builder.
      self.assertTrue("my_saver/restore_all" in graph_ops)
      self.assertTrue("save/restore_all" in graph_ops)
      self.assertEqual(
          saved_graph.saver_def.restore_op_name, "save/restore_all")
@test_util.run_deprecated_v1
def testMultipleCustomSavers(self):
  """Each meta graph records the specific saver it was added with."""
  export_dir = self._get_export_dir("test_multiple_custom_savers")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  with self.session(graph=ops.Graph()) as sess:
    variables.VariableV1(1, name="v1")
    self.evaluate(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(sess, ["tag_0"])

    saver_1 = training.Saver()
    builder.add_meta_graph(["tag_1"], saver=saver_1)

    saver_2 = training.Saver()
    builder.add_meta_graph(["tag_2"], saver=saver_2)

  # Save the SavedModel to disk.
  builder.save()

  def _validate_custom_saver(tag_name, saver_name):
    # Loads the meta graph for `tag_name` and checks its recorded restore op.
    with ops.Graph().as_default() as graph:
      with self.session(graph=graph) as sess:
        saved_graph = loader.load(sess, [tag_name], export_dir)
        self.assertEqual(
            saved_graph.saver_def.restore_op_name,
            saver_name)

  _validate_custom_saver("tag_0", "save/restore_all")
  _validate_custom_saver("tag_1", "save_1/restore_all")
  _validate_custom_saver("tag_2", "save_2/restore_all")
@test_util.run_deprecated_v1
def testImportScope(self):
  """Loading with import_scope prefixes tensor names but preserves values."""
  export_dir = self._get_export_dir("test_scoped_assets")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  # Build a SavedModel with a variable, an asset, and a constant tensor.
  with self.session(graph=ops.Graph()) as sess:
    self._init_and_validate_variable(sess, "v", 42)
    asset_list = self._build_asset_collection("foo.txt", "content_foo",
                                              "asset_file_tensor")
    constant_op.constant("constant value", name="constant_tensor_name")
    builder.add_meta_graph_and_variables(
        sess, ["tag_name"], assets_list=asset_list)

    # Save the asset file path for later comparison.
    asset_file_path = asset_list[0].eval()

  # Save the SavedModel to disk.
  builder.save()

  with self.session(graph=ops.Graph()) as sess:
    # Restore the SavedModel under an import_scope in a new graph/session.
    graph_proto = loader.load(
        sess, ["tag_name"], export_dir, import_scope="scope_name")

    # The loaded variable tensor should be scoped, but its contents should be
    # unchanged.
    self.assertEqual(
        "scope_name/v:0",
        ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].name)
    self.assertEqual(
        42,
        ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())

    # The loaded asset tensor should be scoped, but the asset file path and
    # contents should be unchanged.
    asset_list = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
    self.assertEqual(1, len(asset_list))
    self.assertEqual(asset_file_path, asset_list[0].eval())
    self.assertEqual("scope_name/asset_file_tensor:0", asset_list[0].name)

    # The static asset data inside graph_proto.collection_def should not be
    # scoped.
    self._validate_assets(export_dir, graph_proto.asset_file_def, "foo.txt",
                          "content_foo", "asset_file_tensor:0")

    # The constant tensor should be scoped, but its contents should be
    # unchanged.
    self.assertEqual(
        compat.as_bytes("constant value"),
        ops.get_default_graph().get_tensor_by_name(
            "scope_name/constant_tensor_name:0").eval())
@test_util.run_deprecated_v1
def testClearDevices(self):
  """clear_devices=True drops device placements so the model loads anywhere."""
  export_dir = self._get_export_dir("test_clear_devices")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  # Specify a device and save a variable.
  ops.reset_default_graph()
  with session.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
    with sess.graph.device("/cpu:0"):
      self._init_and_validate_variable(sess, "v", 42)
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.TRAINING], clear_devices=True)

  # Save the SavedModel to disk.
  builder.save()

  # Restore the graph with a single predefined tag whose variables were saved
  # without any device information.
  with self.session(graph=ops.Graph()) as sess:
    loader.load(sess, [tag_constants.TRAINING], export_dir)
    self.assertEqual(
        42, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)[0].eval())
# Tests the behavior of loading SavedModels that have missing attrs or attrs
# with incorrect types.
def testInconsistentConsumerDefaultAttrs(self):
  """Loading fails cleanly when a saved node has a missing or mistyped attr.

  The exported pbtxt is rewritten by hand to (a) remove the "T" attr and
  (b) change it to DT_DOUBLE, and both corrupted variants must be rejected
  by the loader with the expected error.
  """
  export_dir = self._get_export_dir(
      "test_strip_default_attrs_no_consumer_defaults")
  builder = saved_model_builder._SavedModelBuilder(export_dir)

  # Add a graph with a single variable and a test op with a defaultless
  # float32 attr, "test_attr".
  with session.Session(graph=ops.Graph()) as sess:
    variables.VariableV1(1.0, dtype=dtypes.float64, name="var")
    test_ops.test_attr(T=dtypes.float32, name="test_attr")
    self.evaluate(variables.global_variables_initializer())
    builder.add_meta_graph_and_variables(sess, ["foo"])

  # Save the SavedModel to disk in text format.
  builder.save(as_text=True)

  # Rewrite the SavedModel to remove the T attr from "test_attr".
  saved_model_file = os.path.join(
      export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
  with open(saved_model_file) as f:
    original_saved_model = f.read()

  no_attr_saved_model = original_saved_model.replace("""
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }""", "")
  with open(saved_model_file, "w") as f:
    f.write(no_attr_saved_model)

  # Loading the SavedModel via the loader must fail because the SavedModel
  # does not have any attr values for the "TestAttr" node, and there is no
  # default specified in the TestAttr OpDef.
  # Use the session as a context manager so it is closed even though the
  # load is expected to raise (the original code leaked both sessions).
  with session.Session(graph=ops.Graph()) as sess:
    with self.assertRaisesRegexp(
        ValueError, "NodeDef missing attr 'T' from Op<name=TestAttr"):
      loader.load(sess, ["foo"], export_dir)

  # Rewrite the SavedModel to change the type of the T attr in "test_attr"
  bad_type_saved_model = original_saved_model.replace("""
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }""", """
      attr {
        key: "T"
        value {
          type: DT_DOUBLE
        }
      }""")
  with open(saved_model_file, "w") as f:
    f.write(bad_type_saved_model)

  # Loading the SavedModel via the loader must fail because there is no
  # OpKernel registered to handle T = double.
  with session.Session(graph=ops.Graph()) as sess:
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "No OpKernel was registered to support Op 'TestAttr' used by node "
        "test_attr \\(defined at .*\\) with these attrs: \\[.*\\]\n"
        "Registered devices:.*\n"
        "Registered kernels:.*"
    ):
      loader.load(sess, ["foo"], export_dir)
class SavedModelV1Test(SavedModelTestBase):
  """Tests for features only exposed by the V1 `SavedModelBuilder` API."""

  def _validate_asset_collection(self,
                                 export_dir,
                                 graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name,
                                 asset_id=0):
    """Checks the asset at `asset_id` was exported with the expected
    filename, contents, and tensor name."""
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    assets_any[asset_id].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name)

  @test_util.run_deprecated_v1
  def testWritingAssetsToCollection(self):
    """Only files in `assets_collection` are copied into the export dir."""
    export_dir = self._get_export_dir("test_writing_assets_to_collection")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)

      # Build an asset list.
      ignored_filepath = os.path.join(
          compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
      file_io.write_string_to_file(ignored_filepath, "will be ignored")

      asset_collection = self._build_asset_collection(
          "hello42.txt", "foo bar baz", "asset_file_tensor")

      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)

    # Save the SavedModel to disk.
    builder.save()

    with self.session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor:0")
      # The file written outside the asset collection must not be exported.
      ignored_asset_path = os.path.join(
          compat.as_bytes(export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY),
          compat.as_bytes("ignored.txt"))
      self.assertFalse(file_io.file_exists(ignored_asset_path))

  @test_util.run_deprecated_v1
  def testLegacyInitOpWithNonEmptyCollection(self):
    """A pre-populated LEGACY_INIT_OP_KEY collection must be rejected."""
    export_dir = self._get_export_dir(
        "test_legacy_init_op_with_non_empty_collection")
    self._testInitOpsWithNonEmptyCollection(export_dir,
                                            constants.LEGACY_INIT_OP_KEY)

  @test_util.run_deprecated_v1
  def testMainOpWithNonEmptyCollection(self):
    """A pre-populated MAIN_OP_KEY collection must be rejected."""
    export_dir = self._get_export_dir("test_main_op_with_non_empty_collection")
    self._testInitOpsWithNonEmptyCollection(export_dir, constants.MAIN_OP_KEY)

  def _testInitOpsWithNonEmptyCollection(self, export_dir, key):
    """Asserts that adding an init op fails when the `key` collection is
    already populated in the graph."""
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    g = ops.Graph()
    with self.session(graph=g) as sess:
      # Initialize variable `v1` to 1.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)

      # Initialize another variable `v2` to 42.
      v2 = variables.VariableV1(42, name="v2", trainable=False, collections=[])
      ops.add_to_collection("v", v2)

      # Set up an assignment op to be run as part of the init op.
      assign_v2 = state_ops.assign(v2, v1)
      init_op = control_flow_ops.group(assign_v2, name="init_op")

      self.evaluate(variables.global_variables_initializer())

      ops.add_to_collection(key, control_flow_ops.no_op())
      # ValueError should be raised since the LEGACY_INIT_OP_KEY collection
      # is not empty and we don't support multiple init ops.
      with self.assertRaisesRegexp(ValueError, "Graph already contains"):
        builder.add_meta_graph_and_variables(
            sess, ["foo"], legacy_init_op=init_op)
      # We shouldn't be able to add as MAIN_OP, either.
      with self.assertRaisesRegexp(ValueError, "Graph already contains"):
        builder.add_meta_graph_and_variables(sess, ["foo"], main_op=init_op)

  def testStripDefaultAttrs(self):
    """strip_default_attrs drops default-valued attrs from the serialized
    graph, and the loader restores them from the OpDef registry."""
    export_dir = self._get_export_dir("test_strip_default_attrs")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    # Add a graph with two float32 variables and a Complex Op composing them
    # with strip_default_attrs enabled.
    with session.Session(graph=ops.Graph()) as sess:
      real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
      imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")
      self.evaluate(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(
          sess, ["foo"], strip_default_attrs=True)

    # Add a graph with the same float32 variables and a Complex Op composing
    # them with strip_default_attrs disabled.
    with session.Session(graph=ops.Graph()) as sess:
      real_num = variables.VariableV1(1.0, dtype=dtypes.float32, name="real")
      imag_num = variables.VariableV1(2.0, dtype=dtypes.float32, name="imag")
      math_ops.complex(real_num, imag_num, name="complex")
      self.evaluate(variables.global_variables_initializer())
      builder.add_meta_graph(["bar"], strip_default_attrs=False)

    # Save the SavedModel to disk in text format.
    builder.save(as_text=True)

    # Loading graph "foo" via the loader must restore the defaults for the
    # "Complex" node based on the "Complex" OpDef in the Op registry.
    # NOTE(review): this session is never closed; consider a `with` block.
    sess = session.Session(graph=ops.Graph())
    meta_graph_def = loader.load(sess, ["foo"], export_dir)
    complex_node = test_util.get_node_def_from_graph("complex",
                                                     meta_graph_def.graph_def)
    self.assertIn("T", complex_node.attr)
    self.assertIn("Tout", complex_node.attr)

    # Load graph "foo" from disk as-is to verify default attrs are stripped.
    saved_model_pb = loader_impl.parse_saved_model(export_dir)
    self.assertIsNotNone(saved_model_pb)

    meta_graph_foo_def = None
    meta_graph_bar_def = None
    for meta_graph_def in saved_model_pb.meta_graphs:
      if set(meta_graph_def.meta_info_def.tags) == set(["foo"]):
        meta_graph_foo_def = meta_graph_def
      elif set(meta_graph_def.meta_info_def.tags) == set(["bar"]):
        meta_graph_bar_def = meta_graph_def

    self.assertIsNotNone(meta_graph_foo_def)
    self.assertIsNotNone(meta_graph_bar_def)

    # "Complex" Op has 2 attributes with defaults:
    #   o "T"    : float32.   (input type)
    #   o "Tout" : complex64. (output type)

    # "Complex" Op in graph "foo" shouldn't have attributes "T" and "Tout".
    # Graph "foo" was saved with strip_default_attrs set to True.
    node_def = test_util.get_node_def_from_graph("complex",
                                                 meta_graph_foo_def.graph_def)
    self.assertNotIn("T", node_def.attr)
    self.assertNotIn("Tout", node_def.attr)

    # "Complex" Op in graph "bar" must have attributes "T" and "Tout".
    # Graph "bar" was saved with strip_default_attrs set to False.
    node_def = test_util.get_node_def_from_graph("complex",
                                                 meta_graph_bar_def.graph_def)
    self.assertIn("T", node_def.attr)
    self.assertIn("Tout", node_def.attr)

  @test_util.run_v1_only("b/120545219")
  def testLegacyInitOp(self):
    """legacy_init_op runs after restore and assigns v3 = v1 + v2."""
    export_dir = self._get_export_dir("test_legacy_init_op")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.session(graph=ops.Graph()) as sess:
      # Add `v1` and `v2` variables to the graph.
      v1 = variables.VariableV1(1, name="v1")
      ops.add_to_collection("v", v1)
      v2 = variables.VariableV1(2, name="v2")
      ops.add_to_collection("v", v2)

      # Initialize another variable `v3` to 42.
      v3 = variables.VariableV1(42, name="v3", trainable=False, collections=[])
      ops.add_to_collection("v", v3)

      # Set up an assignment op to be run as part of the init_op.
      assign_v3 = state_ops.assign(v3, math_ops.add(v1, v2))
      legacy_init_op = control_flow_ops.group(assign_v3, name="legacy_init_op")

      self.evaluate(variables.global_variables_initializer())
      builder.add_meta_graph_and_variables(
          sess, ["foo"], legacy_init_op=legacy_init_op)

    # Save the SavedModel to disk.
    builder.save()

    with self.session(graph=ops.Graph()) as sess:
      loader.load(sess, ["foo"], export_dir)
      self.assertEqual(1, ops.get_collection("v")[0].eval())
      self.assertEqual(2, ops.get_collection("v")[1].eval())

      # Evaluates to the sum of the first two variables and assigned as part of
      # the legacy_init_op, following a restore.
      self.assertEqual(3, ops.get_collection("v")[2].eval())
# Standard test entry point: discover and run the test cases above.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
INNUENDOWEB/INNUca | src/SPAdes-3.10.1-Linux/share/spades/joblib3/pool.py | 237 | 23894 | """Custom implementation of multiprocessing.Pool with custom pickler
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that uses a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
import copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
# Some system have a ramdisk mounted by default, we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'

# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR  # 0o700
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR  # 0o600
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any"""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
# a nor its descendants do not have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def has_shareable_memory(a):
    """Return True if a is backed by some mmap buffer directly or not"""
    backing = _get_backing_memmap(a)
    return backing is not None
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len):
"""Reconstruct an array view on a memmory mapped file"""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return np.memmap(filename, dtype=dtype, shape=shape, mode=mode,
offset=offset, order=order)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = np.memmap(filename, dtype=dtype, shape=total_buffer_len,
mode=mode, offset=offset, order=order)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
    """Pickling reduction for memmap backed arrays.

    a is expected to be an instance of np.ndarray (or np.memmap)
    m is expected to be an instance of np.memmap on the top of the ``base``
    attribute ancestry of a. ``m.base`` should be the real python mmap object.
    """
    # Byte offset of the view `a` inside the buffer of `m` (striding
    # differences), plus the offset of `m` inside its backing file.
    view_start, view_end = np.byte_bounds(a)
    buffer_start = np.byte_bounds(m)[0]
    file_offset = (view_start - buffer_start) + m.offset

    # The backing memmap buffer is necessarily contiguous, hence it is
    # either Fortran ordered or C ordered.
    order = 'F' if m.flags['F_CONTIGUOUS'] else 'C'

    if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
        # Contiguous views can be rebuilt without any stride information.
        strides, total_buffer_len = None, None
    else:
        # Non-contiguous views are rebuilt from the total enclosing buffer
        # with the stride-tricks API: record the strides and the number of
        # items that must be mapped.
        strides = a.strides
        total_buffer_len = (view_end - view_start) // a.itemsize

    return (_strided_from_memmap,
            (m.filename, a.dtype, m.mode, file_offset, order, a.shape,
             strides, total_buffer_len))
def reduce_memmap(a):
    """Pickle the descriptors of a memmap instance to reopen on same file"""
    backing = _get_backing_memmap(a)
    if backing is None:
        # This memmap instance is actually backed by a regular in-memory
        # buffer (this can happen when using binary operators on
        # numpy.memmap instances): fall back to a plain pickled copy.
        return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
    # Real mmap-backed memmap: reduce `a` while preserving its striding
    # information so it can be reopened on the same file.
    return _reduce_memmap_backed(a, backing)
class ArrayMemmapReducer(object):
    """Reducer callable to dump large arrays to memmap files.

    Parameters
    ----------
    max_nbytes: int
        Threshold to trigger memmaping of large arrays to files created
        a folder.
    temp_folder: str
        Path of a folder where files for backing memmaped arrays are created.
    mmap_mode: 'r', 'r+' or 'c'
        Mode for the created memmap datastructure. See the documentation of
        numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
        automatically to avoid zeroing the data on unpickling.
    verbose: int, optional, 0 by default
        If verbose > 0, memmap creations are logged.
        If verbose > 1, both memmap creations, reuse and array pickling are
        logged.
    context_id: int, optional, None by default
        Set to a value identifying a call context to spare costly hashing of
        the content of the input arrays when it is safe to assume that each
        array will not be mutated by the parent process for the duration of the
        dispatch process. This is the case when using the high level Parallel
        API. It might not be the case when using the MemmapingPool API
        directly.
    prewarm: bool, optional, False by default.
        Force a read on newly memmaped array to make sure that OS pre-cache it
        memory. This can be useful to avoid concurrent disk access when the
        same data array is passed to different worker processes.
    """

    def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
                 context_id=None, prewarm=True):
        self._max_nbytes = max_nbytes
        self._temp_folder = temp_folder
        self._mmap_mode = mmap_mode
        self.verbose = int(verbose)
        self._context_id = context_id
        self._prewarm = prewarm

    def __call__(self, a):
        """Reduce array `a`, dumping it to a memmap file when large enough."""
        m = _get_backing_memmap(a)
        if m is not None:
            # a is already backed by a memmap file, let's reuse it directly
            return _reduce_memmap_backed(a, m)

        # Object-dtype arrays cannot be memmaped; small arrays are cheaper
        # to pickle directly.
        if (not a.dtype.hasobject
                and self._max_nbytes is not None
                and a.nbytes > self._max_nbytes):
            # check that the folder exists (lazily create the pool temp folder
            # if required)
            try:
                os.makedirs(self._temp_folder)
                os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
            except OSError as e:
                # EEXIST means another process/thread created it first:
                # that is not an error here.
                if e.errno != errno.EEXIST:
                    raise e

            # Find a unique, concurrent safe filename for writing the
            # content of this array only once.
            if self._context_id is not None:
                marker = self._context_id
            else:
                marker = hash(a)
            basename = "%d-%d-%d-%s.pkl" % (
                os.getpid(), id(threading.current_thread()), id(a), marker)
            filename = os.path.join(self._temp_folder, basename)

            # In case the same array with the same content is passed several
            # times to the pool subprocess children, serialize it only once

            # XXX: implement an explicit reference counting scheme to make it
            # possible to delete temporary files as soon as the workers are
            # done processing this data.
            if not os.path.exists(filename):
                if self.verbose > 0:
                    print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
                        a.shape, a.dtype, filename))
                for dumped_filename in dump(a, filename):
                    os.chmod(dumped_filename, FILE_PERMISSIONS)

                if self._prewarm:
                    # Warm up the data to avoid concurrent disk access in
                    # multiple children processes
                    load(filename, mmap_mode=self._mmap_mode).max()
            elif self.verbose > 1:
                print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
                    a.shape, a.dtype, filename))

            # Let's use the memmap reducer
            return reduce_memmap(load(filename, mmap_mode=self._mmap_mode))
        else:
            # do not convert a into memmap, let pickler do its usual copy with
            # the default system pickler
            if self.verbose > 1:
                print("Pickling array (shape=%r, dtype=%s)." % (
                    a.shape, a.dtype))
            return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
    """Pickler that accepts custom reducers.

    HIGHEST_PROTOCOL is selected by default as this pickler is used
    to pickle ephemeral datastructures for interprocess communication
    hence no backward compatibility is required.

    `reducers` maps types to reduction callables: given an instance of the
    type, the callable returns a `(constructor, tuple_of_objects)` pair that
    can rebuild an equivalent instance, exactly as a `__reduce__` method
    would. See the standard library documentation on pickling for details.
    """

    # The pure Python pickler exposes an instance-customizable `dispatch`
    # registry (the only option on Python 2.6-3.2); the C-optimized pickler
    # instead honors the `dispatch_table` attribute introduced by
    # http://bugs.python.org/issue14166 in Python 3.3+. Support both so the
    # faster C implementation is used whenever it is available.

    def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
        Pickler.__init__(self, writer, protocol=protocol)
        if hasattr(Pickler, 'dispatch'):
            # Python 2: copy the class-level registry onto this instance so
            # later registrations do not leak to other picklers.
            self.dispatch = Pickler.dispatch.copy()
        else:
            # Python 3: seed the per-instance table with the global defaults.
            self.dispatch_table = copyreg.dispatch_table.copy()
        for reduced_type, reduce_func in (reducers or {}).items():
            self.register(reduced_type, reduce_func)

    def register(self, type, reduce_func):
        """Attach reducer `reduce_func` to instances of `type`."""
        if hasattr(Pickler, 'dispatch'):
            # The Python 2 dispatch mechanism is not explicitly customizable:
            # wrap the reducer in a save_reduce-calling closure instead.
            def dispatcher(self, obj):
                reduced = reduce_func(obj)
                self.save_reduce(obj=obj, *reduced)
            self.dispatch[type] = dispatcher
        else:
            self.dispatch_table[type] = reduce_func
class CustomizablePicklingQueue(object):
    """Locked Pipe implementation that uses a customizable pickler.

    This class is an alternative to the multiprocessing implementation of
    SimpleQueue that makes it possible to plug in custom pickling
    reducers, for instance to avoid memory copies when passing memory
    mapped datastructures.

    `reducers` is expected to be a dictionary with key/values being
    `(type, callable)` pairs where `callable` is a function that, given an
    instance of `type`, returns a `(constructor, tuple_of_objects)` pair
    from which the instance can be rebuilt, as a `__reduce__` method
    would. See the standard library documentation on pickling for more
    details.
    """

    def __init__(self, context, reducers=None):
        self._reducers = reducers
        self._reader, self._writer = context.Pipe(duplex=False)
        self._rlock = context.Lock()
        # Writes to a message oriented win32 pipe are atomic, so no write
        # lock is needed on that platform.
        self._wlock = None if sys.platform == 'win32' else context.Lock()
        self._make_methods()

    def __getstate__(self):
        assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock,
                self._reducers)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock,
         self._reducers) = state
        self._make_methods()

    def empty(self):
        """Return True when no message is waiting in the pipe."""
        return not self._reader.poll()

    def _make_methods(self):
        # Bind get/put as instance attributes closing over the pipe ends
        # and their locks (mirrors multiprocessing's SimpleQueue). The
        # _recv/_send attributes are also used directly as the pool's
        # _quick_get/_quick_put shortcuts.
        self._recv = self._reader.recv

        def get():
            with self._rlock:
                return self._recv()

        self.get = get

        if self._reducers:
            def send(obj):
                # Serialize with the customized pickler and ship the raw
                # bytes so the other end can unpickle them as usual.
                payload = BytesIO()
                CustomizablePickler(payload, self._reducers).dump(obj)
                self._writer.send_bytes(payload.getvalue())
        else:
            send = self._writer.send
        self._send = send

        if self._wlock is None:
            # Writes to a message oriented win32 pipe are atomic.
            self.put = send
        else:
            def put(obj):
                with self._wlock:
                    return send(obj)

            self.put = put
class PicklingPool(Pool):
    """Pool implementation with customizable pickling reducers.

    This is useful to control how data is shipped between processes and
    makes it possible to use shared memory without the useless copies
    induced by the default pickling methods applied to the objects passed
    as arguments to dispatch.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that, given an instance of `type`, returns a
    `(constructor, tuple_of_objects)` pair from which the instance can be
    rebuilt, as a `__reduce__` method would. See the standard library
    documentation on pickling for more details.
    """

    def __init__(self, processes=None, forward_reducers=None,
                 backward_reducers=None, **kwargs):
        self._forward_reducers = (
            dict() if forward_reducers is None else forward_reducers)
        self._backward_reducers = (
            dict() if backward_reducers is None else backward_reducers)
        pool_args = dict(processes=processes)
        pool_args.update(kwargs)
        super(PicklingPool, self).__init__(**pool_args)

    def _setup_queues(self):
        # The multiprocessing context is only stored as _ctx on recent
        # Python versions; fall back to the multiprocessing module itself.
        context = getattr(self, '_ctx', mp)
        self._inqueue = CustomizablePicklingQueue(context,
                                                 self._forward_reducers)
        self._outqueue = CustomizablePicklingQueue(context,
                                                   self._backward_reducers)
        # Short-circuit accessors used internally by Pool.
        self._quick_put = self._inqueue._send
        self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
    """Utility function to cleanup a temporary folder if still existing.

    Uses an EAFP delete instead of the previous exists() + rmtree()
    sequence so that a folder removed concurrently (e.g. by another
    atexit callback racing with terminate()) is not reported as an error.
    Any other failure (e.g. permission error) still propagates, as
    before.
    """
    import errno
    try:
        shutil.rmtree(folder_path)
    except OSError as e:
        # Only swallow "folder already gone"; re-raise real failures.
        if e.errno != errno.ENOENT:
            raise
class MemmapingPool(PicklingPool):
    """Process pool that shares large arrays to avoid memory copy.

    This drop-in replacement for `multiprocessing.pool.Pool` makes
    it possible to work efficiently with shared memory in a numpy
    context.

    Existing instances of numpy.memmap are preserved: the child
    subprocesses will have access to the same shared memory in the
    original mode except for the 'w+' mode that is automatically
    transformed as 'r+' to avoid zeroing the original data upon
    instantiation.

    Furthermore large arrays from the parent process are automatically
    dumped to a temporary folder on the filesystem such as child
    processes to access their content via memmaping (file system
    backed shared memory).

    Note: it is important to call the terminate method to collect
    the temporary folder used by the pool.

    Parameters
    ----------
    processes: int, optional
        Number of worker processes running concurrently in the pool.
    initializer: callable, optional
        Callable executed on worker process creation.
    initargs: tuple, optional
        Arguments passed to the initializer callable.
    temp_folder: str, optional
        Folder to be used by the pool for memmaping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:
        - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
        - /dev/shm if the folder exists and is writable: this is a RAMdisk
          filesystem available by default on modern Linux distributions,
        - the default system temporary folder that can be overridden
          with TMP, TMPDIR or TEMP environment variables, typically /tmp
          under Unix operating systems.
    max_nbytes int or None, optional, 1e6 by default
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder.
        Use None to disable memmaping of large arrays.
    forward_reducers: dictionary, optional
        Reducers used to pickle objects passed from master to worker
        processes: see below.
    backward_reducers: dictionary, optional
        Reducers used to pickle return values from workers back to the
        master process.
    verbose: int, optional
        Make it possible to monitor how the communication of numpy arrays
        with the subprocess is handled (pickling or memmaping)
    context_id: int, optional, None by default
        Set to a value identifying a call context to spare costly hashing of
        the content of the input arrays when it is safe to assume that each
        array will not be mutated by the parent process for the duration of the
        dispatch process. This is the case when using the high level Parallel
        API.
    prewarm: bool or str, optional, "auto" by default.
        If True, force a read on newly memmaped array to make sure that OS
        pre-caches it in memory. This can be useful to avoid concurrent disk
        access when the same data array is passed to different worker
        processes.
        If "auto" (by default), prewarm is set to True, unless the Linux
        shared memory partition /dev/shm is available and used as temp_folder.

    `forward_reducers` and `backward_reducers` are expected to be
    dictionaries with key/values being `(type, callable)` pairs where
    `callable` is a function that given an instance of `type` will return
    a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
    of the pickled `tuple_of_objects` as would return a `__reduce__`
    method. See the standard library documentation on pickling for more
    details.
    """

    # NOTE(review): the docstring says prewarm defaults to "auto" but the
    # signature default is False — confirm which is intended.
    def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
                 mmap_mode='r', forward_reducers=None, backward_reducers=None,
                 verbose=0, context_id=None, prewarm=False, **kwargs):
        if forward_reducers is None:
            forward_reducers = dict()
        if backward_reducers is None:
            backward_reducers = dict()

        # Prepare a sub-folder name for the serialization of this particular
        # pool instance (do not create in advance to spare FS write access if
        # no array is to be dumped):
        use_shared_mem = False
        pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
            os.getpid(), id(self))
        if temp_folder is None:
            temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
        if temp_folder is None:
            if os.path.exists(SYSTEM_SHARED_MEM_FS):
                try:
                    temp_folder = SYSTEM_SHARED_MEM_FS
                    pool_folder = os.path.join(temp_folder, pool_folder_name)
                    if not os.path.exists(pool_folder):
                        os.makedirs(pool_folder)
                    use_shared_mem = True
                except (IOError, OSError):
                    # Missing rights in the /dev/shm partition: fall back
                    # to the regular temp folder. Both exception types
                    # must be caught because os.makedirs raises OSError
                    # (not IOError) on permission errors under Python 2;
                    # on Python 3 IOError is an alias of OSError.
                    temp_folder = None
        if temp_folder is None:
            # Fallback to the default tmp folder, typically /tmp
            temp_folder = tempfile.gettempdir()
        temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
        pool_folder = os.path.join(temp_folder, pool_folder_name)
        self._temp_folder = pool_folder

        # Register the garbage collector at program exit in case caller forgets
        # to call terminate explicitly: note we do not pass any reference to
        # self to ensure that this callback won't prevent garbage collection of
        # the pool instance and related file handler resources such as POSIX
        # semaphores and pipes
        atexit.register(lambda: delete_folder(pool_folder))

        if np is not None:
            # Register smart numpy.ndarray reducers that detect memmap backed
            # arrays and that are also able to dump to memmap large in-memory
            # arrays over the max_nbytes threshold
            if prewarm == "auto":
                prewarm = not use_shared_mem
            forward_reduce_ndarray = ArrayMemmapReducer(
                max_nbytes, pool_folder, mmap_mode, verbose,
                context_id=context_id, prewarm=prewarm)
            forward_reducers[np.ndarray] = forward_reduce_ndarray
            forward_reducers[np.memmap] = reduce_memmap

            # Communication from child process to the parent process always
            # pickles in-memory numpy.ndarray without dumping them as memmap
            # to avoid confusing the caller and make it tricky to collect the
            # temporary folder
            backward_reduce_ndarray = ArrayMemmapReducer(
                None, pool_folder, mmap_mode, verbose)
            backward_reducers[np.ndarray] = backward_reduce_ndarray
            backward_reducers[np.memmap] = reduce_memmap

        poolargs = dict(
            processes=processes,
            forward_reducers=forward_reducers,
            backward_reducers=backward_reducers)
        poolargs.update(kwargs)
        super(MemmapingPool, self).__init__(**poolargs)

    def terminate(self):
        """Terminate the workers, then delete the pool's temporary folder."""
        super(MemmapingPool, self).terminate()
        delete_folder(self._temp_folder)
| gpl-3.0 |
iansf/sky_engine | sky/engine/bindings/scripts/v8_interface.py | 10 | 37056 | # Copyright (C) 2013 Google Inc. All rights reserved.
# coding=utf-8
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Generate template values for an interface.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
from collections import defaultdict
import itertools
from operator import itemgetter
import idl_definitions
from idl_definitions import IdlOperation
import idl_types
from idl_types import IdlType, inherits_interface
import v8_attributes
from v8_globals import includes
import v8_methods
import v8_types
from v8_types import cpp_template_type
import v8_utilities
from v8_utilities import (capitalize, cpp_name,
has_extended_attribute_value,
extended_attribute_value_as_list)
# Baseline headers seeded into every generated interface header file
# (see interface_context: header_includes starts from this set).
INTERFACE_H_INCLUDES = frozenset([
    'platform/heap/Handle.h',
])
# Baseline headers seeded into the global |includes| set for every
# generated interface .cpp file, before interface-specific includes are
# accumulated.
INTERFACE_CPP_INCLUDES = frozenset([
    'sky/engine/bindings/exception_state.h',
    'core/dom/Document.h',
    'base/trace_event/trace_event.h',
    'wtf/GetPtr.h',
    'wtf/RefPtr.h',
])
def interface_context(interface):
    """Build the template context dict for one IDL interface.

    Side effect: clears and repopulates the module-global |includes| set
    (from v8_globals) with the C++ headers the generated bindings need;
    also fills header_includes for the generated .h file.

    Args:
        interface: an interface definition (presumably
            idl_definitions.IdlInterface — it exposes .parent,
            .extended_attributes, .constructors, .attributes,
            .operations, .constants, .stringifier).
    Returns:
        dict of template values consumed by the code generator.
    """
    includes.clear()
    includes.update(INTERFACE_CPP_INCLUDES)
    header_includes = set(INTERFACE_H_INCLUDES)
    parent_interface = interface.parent
    if parent_interface:
        header_includes.update(v8_types.includes_for_interface(parent_interface))
    extended_attributes = interface.extended_attributes
    # [DependentLifetime]
    is_dependent_lifetime = 'DependentLifetime' in extended_attributes
    # [Iterable]
    # Synthesize an 'iterator' operation so it is generated like a
    # regular method.
    iterator_method = None
    if 'Iterable' in extended_attributes:
        iterator_operation = IdlOperation(interface.idl_name)
        iterator_operation.name = 'iterator'
        iterator_operation.idl_type = IdlType('Iterator')
        iterator_operation.extended_attributes['RaisesException'] = None
        iterator_operation.extended_attributes['CallWith'] = 'ScriptState'
        iterator_method = v8_methods.method_context(interface,
                                                    iterator_operation)
    # [SetWrapperReferenceFrom]
    reachable_node_function = extended_attributes.get('SetWrapperReferenceFrom')
    if reachable_node_function:
        includes.update(['core/dom/Element.h'])
    # [SetWrapperReferenceTo]
    set_wrapper_reference_to_list = [{
        'name': argument.name,
        # FIXME: properly should be:
        # 'cpp_type': argument.idl_type.cpp_type_args(raw_type=True),
        # (if type is non-wrapper type like NodeFilter, normally RefPtr)
        # Raw pointers faster though, and NodeFilter hacky anyway.
        'cpp_type': argument.idl_type.implemented_as + '*',
        'idl_type': argument.idl_type,
    } for argument in extended_attributes.get('SetWrapperReferenceTo', [])]
    for set_wrapper_reference_to in set_wrapper_reference_to_list:
        set_wrapper_reference_to['idl_type'].add_includes_for_type()
    # [NotScriptWrappable]
    is_script_wrappable = 'NotScriptWrappable' not in extended_attributes
    # [SpecialWrapFor]
    if 'SpecialWrapFor' in extended_attributes:
        special_wrap_for = extended_attribute_value_as_list(interface, 'SpecialWrapFor')
    else:
        special_wrap_for = []
    for special_wrap_interface in special_wrap_for:
        v8_types.add_includes_for_interface(special_wrap_interface)
    # [Custom=Wrap], [SetWrapperReferenceFrom]
    has_visit_dom_wrapper = (
        has_extended_attribute_value(interface, 'Custom', 'VisitDOMWrapper') or
        reachable_node_function or
        set_wrapper_reference_to_list)
    wrapper_class_id = ('NodeClassId' if inherits_interface(interface.name, 'Node') else 'ObjectClassId')
    context = {
        'cpp_class': cpp_name(interface),
        'has_custom_wrap': has_extended_attribute_value(interface, 'Custom', 'Wrap'), # [Custom=Wrap]
        'has_visit_dom_wrapper': has_visit_dom_wrapper,
        'header_includes': header_includes,
        'interface_name': interface.name,
        'is_dependent_lifetime': is_dependent_lifetime,
        'is_exception': interface.is_exception,
        'is_script_wrappable': is_script_wrappable,
        'iterator_method': iterator_method,
        'lifetime': 'Dependent'
            if (has_visit_dom_wrapper or
                is_dependent_lifetime)
            else 'Independent',
        'parent_interface': parent_interface,
        'reachable_node_function': reachable_node_function,
        'set_wrapper_reference_to_list': set_wrapper_reference_to_list,
        'special_wrap_for': special_wrap_for,
        'wrapper_class_id': wrapper_class_id,
    }
    # Constructors
    constructors = [constructor_context(interface, constructor)
                    for constructor in interface.constructors
                    # FIXME: shouldn't put named constructors with constructors
                    # (currently needed for Perl compatibility)
                    # Handle named constructors separately
                    if constructor.name == 'Constructor']
    if len(constructors) > 1:
        context['constructor_overloads'] = overloads_context(constructors)
    # [CustomConstructor]
    custom_constructors = [{ # Only needed for computing interface length
        'number_of_required_arguments':
            number_of_required_arguments(constructor),
    } for constructor in interface.custom_constructors]
    # [EventConstructor]
    has_event_constructor = 'EventConstructor' in extended_attributes
    any_type_attributes = [attribute for attribute in interface.attributes
                           if attribute.idl_type.name == 'Any']
    # [NamedConstructor]
    named_constructor = named_constructor_context(interface)
    if (constructors or custom_constructors or has_event_constructor or
        named_constructor):
        includes.add('core/frame/LocalDOMWindow.h')
    context.update({
        'any_type_attributes': any_type_attributes,
        'constructors': constructors,
        'has_custom_constructor': bool(custom_constructors),
        'has_event_constructor': has_event_constructor,
        'interface_length':
            interface_length(interface, constructors + custom_constructors),
        'is_constructor_raises_exception': extended_attributes.get('RaisesException') == 'Constructor', # [RaisesException=Constructor]
        'named_constructor': named_constructor,
    })
    constants = [constant_context(constant) for constant in interface.constants]
    # Constants
    context.update({
        'constants': constants,
        'has_constant_configuration': True,
    })
    # Attributes
    attributes = [v8_attributes.attribute_context(interface, attribute)
                  for attribute in interface.attributes]
    context.update({
        'attributes': attributes,
        'has_conditional_attributes': any(attribute['exposed_test'] for attribute in attributes),
        'has_constructor_attributes': any(attribute['constructor_type'] for attribute in attributes),
        'has_replaceable_attributes': any(attribute['is_replaceable'] for attribute in attributes),
    })
    # Methods
    methods = [v8_methods.method_context(interface, method)
               for method in interface.operations
               if method.name] # Skip anonymous special operations (methods)
    compute_method_overloads_context(methods)
    # Stringifier
    if interface.stringifier:
        stringifier = interface.stringifier
        method = IdlOperation(interface.idl_name)
        method.name = 'toString'
        method.idl_type = IdlType('DOMString')
        method.extended_attributes.update(stringifier.extended_attributes)
        if stringifier.attribute:
            method.extended_attributes['ImplementedAs'] = stringifier.attribute.name
        elif stringifier.operation:
            method.extended_attributes['ImplementedAs'] = stringifier.operation.name
        methods.append(v8_methods.method_context(interface, method))
    # Partition methods into the three registration strategies used by
    # the templates (conditionally exposed, custom registration, plain
    # configuration), keeping only one representative per overload set.
    conditionally_enabled_methods = []
    custom_registration_methods = []
    method_configuration_methods = []
    for method in methods:
        # Skip all but one method in each set of overloaded methods.
        if 'overload_index' in method and 'overloads' not in method:
            continue
        if 'overloads' in method:
            overloads = method['overloads']
            conditionally_exposed_function = overloads['exposed_test_all']
            has_custom_registration = overloads['has_custom_registration_all']
        else:
            conditionally_exposed_function = method['exposed_test']
            has_custom_registration = method['has_custom_registration']
        if conditionally_exposed_function:
            conditionally_enabled_methods.append(method)
            continue
        if has_custom_registration:
            custom_registration_methods.append(method)
            continue
        method_configuration_methods.append(method)
    for method in methods:
        # The value of the Function object’s “length” property is a Number
        # determined as follows:
        # 1. Let S be the effective overload set for regular operations (if the
        # operation is a regular operation) or for static operations (if the
        # operation is a static operation) with identifier id on interface I and
        # with argument count 0.
        # 2. Return the length of the shortest argument list of the entries in S.
        # FIXME: This calculation doesn't take into account whether runtime
        # enabled overloads are actually enabled, so length may be incorrect.
        # E.g., [RuntimeEnabled=Foo] void f(); void f(long x);
        # should have length 1 if Foo is not enabled, but length 0 if it is.
        method['length'] = (method['overloads']['minarg'] if 'overloads' in method else
                            method['number_of_required_arguments'])
    context.update({
        'conditionally_enabled_methods': conditionally_enabled_methods,
        'custom_registration_methods': custom_registration_methods,
        'method_configuration_methods': method_configuration_methods,
        'methods': methods,
    })
    context.update({
        'indexed_property_getter': indexed_property_getter(interface),
        'indexed_property_setter': indexed_property_setter(interface),
        'indexed_property_deleter': indexed_property_deleter(interface),
        'is_override_builtins': 'OverrideBuiltins' in extended_attributes,
        'named_property_getter': named_property_getter(interface),
        'named_property_setter': named_property_setter(interface),
        'named_property_deleter': named_property_deleter(interface),
    })
    return context
# [DeprecateAs]
def constant_context(constant):
    """Build the template context dict for a single IDL constant."""
    is_string = constant.idl_type.name == 'String'
    # (Blink-only) string literals are unquoted by the tokenizer and must
    # be re-quoted for C++.
    cpp_value = '"%s"' % constant.value if is_string else constant.value
    return {
        'cpp_class': constant.extended_attributes.get(
            'PartialInterfaceImplementedAs'),
        'idl_type': constant.idl_type.name,
        'name': constant.name,
        'value': cpp_value,
    }
################################################################################
# Overloads
################################################################################
def compute_method_overloads_context(methods):
    """Annotate overloaded methods with overload template values.

    Regular and static methods are overloaded separately, so the per-type
    computation is run on each partition independently.
    """
    regular_methods = [method for method in methods
                       if not method['is_static']]
    static_methods = [method for method in methods if method['is_static']]
    compute_method_overloads_context_by_type(regular_methods)
    compute_method_overloads_context_by_type(static_methods)
def compute_method_overloads_context_by_type(methods):
    """Computes |method.overload*| template values.

    Called separately for static and non-static (regular) methods, as
    these are overloaded separately. Modifies |method| in place for
    |method| in |methods|. Doesn't change the |methods| list itself (only
    the values, i.e. individual methods), so ok to treat these
    separately.
    """
    # Attach overload information only to overloaded methods so template
    # code can easily check whether a function is overloaded.
    for name, overloads in method_overloads_by_name(methods):
        # The resolution function is generated after the last overloaded
        # function; stash the necessary information on that method.
        last = overloads[-1]
        last['overloads'] = overloads_context(overloads)
        last['overloads']['name'] = name
def method_overloads_by_name(methods):
    """Returns generator of overloaded methods by name: [name, [method]]

    Only names that occur more than once in |methods| are produced; the
    grouped methods are sorted by name.
    """
    # Filter to only methods that are actually overloaded.
    method_counts = Counter(method['name'] for method in methods)
    # Use items() instead of the Python 2-only iteritems() so this script
    # also runs under Python 3 (items() behaves identically here).
    overloaded_method_names = set(name
                                  for name, count in method_counts.items()
                                  if count > 1)
    overloaded_methods = [method for method in methods
                          if method['name'] in overloaded_method_names]
    # Group by name (generally will be defined together, but not necessarily)
    return sort_and_groupby(overloaded_methods, itemgetter('name'))
def overloads_context(overloads):
    """Returns |overloads| template values for a single name.

    Sets |method.overload_index| in place for |method| in |overloads| and
    returns dict of overall overload template values.
    """
    assert len(overloads) > 1  # only apply to overloaded names
    for index, method in enumerate(overloads, 1):
        method['overload_index'] = index

    effective_overloads_by_length = effective_overload_set_by_length(overloads)
    lengths = [length for length, _ in effective_overloads_by_length]
    name = overloads[0].get('name', '<constructor>')

    # Check and fail if all overloads with the shortest acceptable
    # arguments list are runtime enabled, since we would otherwise set
    # 'length' on the function object to an incorrect value when none of
    # those overloads were actually enabled at runtime. The exception is
    # if all overloads are controlled by the same runtime enabled feature,
    # in which case there would be no function object at all if it is not
    # enabled. (Currently unused below.)
    shortest_overloads = effective_overloads_by_length[0][1]

    # Overloads must agree on any extended attribute that affects how the
    # method is registered; skip the check for overloaded constructors,
    # which don't support those attributes at all.
    if not overloads[0].get('is_constructor'):
        overload_extended_attributes = [
            method['custom_registration_extended_attributes']
            for method in overloads]
        for extended_attribute in v8_methods.CUSTOM_REGISTRATION_EXTENDED_ATTRIBUTES:
            if common_key(overload_extended_attributes, extended_attribute) is None:
                raise ValueError('Overloads of %s have conflicting extended attribute %s'
                                 % (name, extended_attribute))

    # Overloads must also agree on whether the return type is a Promise.
    promise_overload_count = len(
        [method for method in overloads if method.get('idl_type') == 'Promise'])
    if promise_overload_count not in (0, len(overloads)):
        raise ValueError('Overloads of %s have conflicting Promise/non-Promise types'
                         % (name))

    # Valid arities only need to be reported when the sequence of possible
    # lengths has a gap; otherwise an invalid length simply means "not
    # enough arguments".
    has_arity_gap = lengths[-1] - lengths[0] != len(lengths) - 1
    return {
        'exposed_test_all': common_value(overloads, 'exposed_test'),  # [Exposed]
        'has_custom_registration_all': common_value(overloads,
                                                    'has_custom_registration'),
        # maxarg is the length of the longest type list of the entries in
        # the effective overload set; minarg the shortest.
        'maxarg': lengths[-1],
        'minarg': lengths[0],
        'valid_arities': lengths if has_arity_gap else None,
    }
def effective_overload_set(F):
    """Returns the effective overload set of an overloaded function.

    An effective overload set is the set of overloaded functions +
    signatures (type list of arguments, with optional and variadic
    arguments included or not), and is used in the overload resolution
    algorithm.

    For example, given input [f1(optional long x), f2(DOMString s)], the
    output is informally [f1(), f1(long), f2(DOMString)], and formally
    [(f1, [], []), (f1, [long], [optional]), (f2, [DOMString], [required])].

    Currently the optionality list is a list of |is_optional| booleans
    (True means optional, False means required); to support variadics
    this needs to be tri-valued as required, optional, or variadic.

    Spec: http://heycam.github.io/webidl/#dfn-effective-overload-set

    Formally the input and output lists are sets, but methods are stored
    internally as dicts, which can't be stored in a set because they are
    not hashable, so we use lists instead.

    Arguments:
        F: list of overloads for a given callable name.
    Returns:
        S: list of tuples of the form (callable, type list, optionality list).
    """
    # The code deliberately mirrors the numbered steps of the spec
    # algorithm for clarity and auditability, hence is not very Pythonic.
    # Step 1. Initialize S to the empty set (a list, as noted above).
    S = []
    # Step 2. F is passed in as an argument, nothing to do.
    # Steps 3 & 4. (maxarg, m) are only needed for variadics, not used.
    # Step 5. For each operation, extended attribute or callback
    # function X in F:
    for X in F:  # X is the "callable", F is the overloads.
        arguments = X['arguments']
        # 5.1. n is the number of arguments X is declared to take.
        argument_count = len(arguments)
        # 5.2. The "type list": the type of X's argument at each index.
        type_list = tuple(argument['idl_type_object']
                          for argument in arguments)
        # 5.3. The "optionality list"; a plain boolean per argument
        # (True = optional, False = required; variadic unsupported).
        optionality_list = tuple(argument['is_optional']
                                 for argument in arguments)
        # 5.4. Add the full-arity entry <X, types, optionalities> to S.
        S.append((X, type_list, optionality_list))
        # 5.5. Variadic handling is not implemented (unused).
        # 5.6 & 5.7. For every trailing optional argument, also add the
        # entry with that argument (and everything after it) dropped.
        # (Spec fencepost bug: the loop condition must be i > 0, not
        # i >= 0; https://www.w3.org/Bugs/Public/show_bug.cgi?id=25590)
        index = argument_count - 1
        while index > 0:
            if not optionality_list[index]:
                break
            S.append((X, type_list[:index], optionality_list[:index]))
            index -= 1
        # 5.8. If X has arguments and all of them are optional, the
        # zero-argument invocation is also allowed.
        if argument_count > 0 and all(optionality_list):
            S.append((X, [], []))
    # Step 6. The effective overload set is S.
    return S
def effective_overload_set_by_length(overloads):
    """Return the effective overload set grouped by type list length.

    Produces a list of (length, entries) pairs sorted by length, where
    each entry is a (callable, type list, optionality list) 3-tuple.
    """
    def type_list_length(entry):
        # The type list is the second element of each 3-tuple.
        return len(entry[1])

    return list(sort_and_groupby(effective_overload_set(overloads),
                                 type_list_length))
def distinguishing_argument_index(entries):
    """Returns the distinguishing argument index for a sequence of entries.

    Entries are elements of the effective overload set with the same
    number of arguments (formally, same type list length), each a 3-tuple
    of the form (callable, type list, optionality list).

    Spec: http://heycam.github.io/webidl/#dfn-distinguishing-argument-index

    If there is more than one entry in an effective overload set that has
    a given type list length, then for those entries there must be an
    index i such that for each pair of entries the types at index i are
    distinguishable. The lowest such index is termed the distinguishing
    argument index for the entries of the effective overload set with the
    given type list length.
    """
    # Only applicable "if there is more than one entry".
    assert len(entries) > 1
    type_lists = [tuple(idl_type.name for idl_type in entry[1])
                  for entry in entries]
    type_list_length = len(type_lists[0])
    # Only applicable for entries that have a given type list length.
    assert all(len(type_list) == type_list_length
               for type_list in type_lists)
    name = entries[0][0].get('name', 'Constructor')  # for error reporting

    # The spec defines the distinguishing argument index by the conditions
    # it must satisfy, but gives no algorithm. We compute it as the
    # minimum index where not all types are the same (a *necessary*
    # condition and direct to check), then verify distinguishability at
    # that position and identical optionality lists before it.
    types_by_index = (set(types) for types in zip(*type_lists))
    try:
        # "for each index j, where j is less than the distinguishing
        # argument index for a given type list length, the types at index
        # j in all of the entries' type lists must be the same"
        index = next(position
                     for position, types in enumerate(types_by_index)
                     if len(types) > 1)
    except StopIteration:
        raise ValueError('No distinguishing index found for %s, length %s:\n'
                         'All entries have the same type list:\n'
                         '%s' % (name, type_list_length, type_lists[0]))

    # "and the booleans in the corresponding list indicating argument
    # optionality must be the same."
    # FIXME: spec typo: optionality value is no longer a boolean
    # https://www.w3.org/Bugs/Public/show_bug.cgi?id=25628
    initial_optionality_lists = set(entry[2][:index] for entry in entries)
    if len(initial_optionality_lists) > 1:
        raise ValueError(
            'Invalid optionality lists for %s, length %s:\n'
            'Optionality lists differ below distinguishing argument index %s:\n'
            '%s'
            % (name, type_list_length, index, set(initial_optionality_lists)))

    # Distinguishability check, using names to detect distinct types since
    # distinct objects may denote the same type.
    # http://heycam.github.io/webidl/#dfn-distinguishable
    # FIXME: check distinguishability more precisely, for validation
    distinguishing_argument_type_names = [type_list[index]
                                          for type_list in type_lists]
    if (len(set(distinguishing_argument_type_names)) !=
            len(distinguishing_argument_type_names)):
        raise ValueError('Types in distinguishing argument are not distinct:\n'
                         '%s' % distinguishing_argument_type_names)
    return index
################################################################################
# Utility functions
################################################################################
def Counter(iterable):
    """Count occurrences of each item in *iterable*.

    Returns a defaultdict mapping item -> count; missing keys read as 0.
    Stand-in for collections.Counter, kept until Python 2.7 is required.
    """
    # int() == 0, so defaultdict(int) is the idiomatic zero-default counter
    # (clearer and marginally faster than defaultdict(lambda: 0)).
    counter = defaultdict(int)
    for item in iterable:
        counter[item] += 1
    return counter
def common(dicts, f):
    """Returns common result of f across an iterable of dicts, or None.

    Call f for each dict and return its result if the same across all dicts.
    """
    results = (f(entry) for entry in dicts)
    reference = next(results)
    for result in results:
        if result != reference:
            return None
    return reference
def common_key(dicts, key):
    """Returns common presence of a key across an iterable of dicts, or None.

    True if all dicts have the key, False if none of the dicts have the key,
    and None if some but not all dicts have the key.
    """
    return common(dicts, lambda mapping: key in mapping)
def common_value(dicts, key):
    """Returns common value of a key across an iterable of dicts, or None.

    Auxiliary function for overloads, so can consolidate an extended attribute
    that appears with the same value on all items in an overload set.
    """
    return common(dicts, lambda mapping: mapping.get(key))
def sort_and_groupby(l, key=None):
    """Returns a generator of (key, list), sorting and grouping list by key.

    Note: sorts *l* in place (eagerly, at call time), as groupby requires
    adjacent equal keys.
    """
    l.sort(key=key)

    def _grouped():
        for group_key, members in itertools.groupby(l, key):
            yield group_key, list(members)
    return _grouped()
################################################################################
# Constructors
################################################################################
# [Constructor]
def constructor_context(interface, constructor):
    """Build the template context for a [Constructor] of *interface*."""
    # True when any argument's V8 -> C++ conversion needs a try/catch block.
    needs_try_catch = any(
        v8_methods.argument_needs_try_catch(constructor, arg)
        for arg in constructor.arguments)

    # [RaisesException=Constructor]
    raises_exception = (
        interface.extended_attributes.get('RaisesException') == 'Constructor')

    argument_contexts = [
        v8_methods.argument_context(interface, constructor, arg, arg_index)
        for arg_index, arg in enumerate(constructor.arguments)]

    # ExceptionState is needed if the constructor itself can throw, or any
    # argument conversion can.
    has_exception_state = raises_exception or any(
        arg for arg in constructor.arguments
        if arg.idl_type.name == 'SerializedScriptValue' or
        arg.idl_type.may_raise_exception_on_conversion)

    return {
        'arguments': argument_contexts,
        'arguments_need_try_catch': needs_try_catch,
        'cpp_type': cpp_template_type('RefPtr', cpp_name(interface)),
        'cpp_value': v8_methods.cpp_value(
            interface, constructor, len(constructor.arguments)),
        'has_exception_state': has_exception_state,
        # [ConstructorCallWith=Document]
        'is_call_with_document': has_extended_attribute_value(
            interface, 'ConstructorCallWith', 'Document'),
        # [ConstructorCallWith=ExecutionContext]
        'is_call_with_execution_context': has_extended_attribute_value(
            interface, 'ConstructorCallWith', 'ExecutionContext'),
        'is_constructor': True,
        'is_named_constructor': False,
        'is_raises_exception': raises_exception,
        'number_of_required_arguments':
            number_of_required_arguments(constructor),
    }
# [NamedConstructor]
def named_constructor_context(interface):
    """Context for [NamedConstructor], or None if the attribute is absent."""
    extended_attributes = interface.extended_attributes
    if 'NamedConstructor' not in extended_attributes:
        return None
    # FIXME: parser should return named constructor separately;
    # included in constructors (and only name stored in extended attribute)
    # for Perl compatibility
    named_ctor = interface.constructors[-1]
    assert named_ctor.name == 'NamedConstructor'
    context = constructor_context(interface, named_ctor)
    context['name'] = extended_attributes['NamedConstructor']
    context['is_named_constructor'] = True
    return context
def number_of_required_arguments(constructor):
    """Number of non-optional arguments the constructor declares."""
    return sum(1 for argument in constructor.arguments
               if not argument.is_optional)
def interface_length(interface, constructors):
    """Value for the interface object's .length property.

    Docs: http://heycam.github.io/webidl/#es-interface-call
    """
    # [EventConstructor] interfaces always report length 1.
    if 'EventConstructor' in interface.extended_attributes:
        return 1
    if not constructors:
        return 0
    # Shortest overload determines .length.
    return min(ctor['number_of_required_arguments'] for ctor in constructors)
################################################################################
# Special operations (methods)
# http://heycam.github.io/webidl/#idl-special-operations
################################################################################
def property_getter(getter, cpp_arguments):
    """Template context for an indexed/named property getter."""
    def null_check_expression(idl_type):
        # C++ expression that is true when the result represents "no value".
        if idl_type.is_union_type:
            checks = ' || '.join(
                member['null_check_value']
                for member in idl_type.union_arguments)
            return '!(%s)' % checks
        if idl_type.name == 'String':
            return 'result.isNull()'
        if idl_type.is_interface_type:
            return '!result'
        return ''

    idl_type = getter.idl_type
    extended_attributes = getter.extended_attributes
    is_raises_exception = 'RaisesException' in extended_attributes

    # FIXME: make more generic, so can use v8_methods.cpp_value
    cpp_method_name = 'impl->%s' % cpp_name(getter)
    if is_raises_exception:
        cpp_arguments.append('exceptionState')
    union_arguments = idl_type.union_arguments
    if union_arguments:
        cpp_arguments.extend(member['cpp_value']
                             for member in union_arguments)
    cpp_value = '%s(%s)' % (cpp_method_name, ', '.join(cpp_arguments))

    # [Custom] with no value, or [Custom=PropertyGetter], marks the getter
    # itself as custom.
    is_custom = ('Custom' in extended_attributes and
                 (not extended_attributes['Custom'] or
                  has_extended_attribute_value(getter, 'Custom',
                                               'PropertyGetter')))
    return {
        'cpp_type': idl_type.cpp_type,
        'cpp_value': cpp_value,
        'is_custom': is_custom,
        'is_custom_property_enumerator': has_extended_attribute_value(
            getter, 'Custom', 'PropertyEnumerator'),
        'is_custom_property_query': has_extended_attribute_value(
            getter, 'Custom', 'PropertyQuery'),
        'is_enumerable': 'NotEnumerable' not in extended_attributes,
        'is_null_expression': null_check_expression(idl_type),
        'is_raises_exception': is_raises_exception,
        'name': cpp_name(getter),
        'union_arguments': union_arguments,
    }
def property_setter(setter):
    """Template context for an indexed/named property setter."""
    # Second argument is the value being assigned.
    idl_type = setter.arguments[1].idl_type
    attrs = setter.extended_attributes
    raises = 'RaisesException' in attrs
    type_checked = (
        has_extended_attribute_value(setter, 'TypeChecking', 'Interface') and
        idl_type.is_wrapper_type)
    return {
        'has_type_checking_interface': type_checked,
        'idl_type': idl_type.base_type,
        'is_custom': 'Custom' in attrs,
        # Integer conversions can throw, so they also need ExceptionState.
        'has_exception_state': raises or idl_type.is_integer_type,
        'is_raises_exception': raises,
        'name': cpp_name(setter),
    }
def property_deleter(deleter):
    """Template context for a property deleter; deleters must return boolean."""
    if str(deleter.idl_type) != 'boolean':
        raise Exception(
            'Only deleters with boolean type are allowed, but type is "%s"' %
            deleter.idl_type)
    attrs = deleter.extended_attributes
    return {
        'is_custom': 'Custom' in attrs,
        'is_raises_exception': 'RaisesException' in attrs,
        'name': cpp_name(deleter),
    }
################################################################################
# Indexed properties
# http://heycam.github.io/webidl/#idl-indexed-properties
################################################################################
def indexed_property_getter(interface):
    """Return context for the indexed property getter, or None.

    Has form: getter TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG1)
    """
    for method in interface.operations:
        if ('getter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'unsigned long'):
            return property_getter(method, ['index'])
    return None
def indexed_property_setter(interface):
    """Return context for the indexed property setter, or None.

    Has form: setter RETURN_TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG1, ARG_TYPE ARG2)
    """
    for method in interface.operations:
        if ('setter' in method.specials and
                len(method.arguments) == 2 and
                str(method.arguments[0].idl_type) == 'unsigned long'):
            return property_setter(method)
    return None
def indexed_property_deleter(interface):
    """Return context for the indexed property deleter, or None.

    Has form: deleter TYPE [OPTIONAL_IDENTIFIER](unsigned long ARG)
    """
    for method in interface.operations:
        if ('deleter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'unsigned long'):
            return property_deleter(method)
    return None
################################################################################
# Named properties
# http://heycam.github.io/webidl/#idl-named-properties
################################################################################
def named_property_getter(interface):
    """Return context for the named property getter, or None.

    Has form: getter TYPE [OPTIONAL_IDENTIFIER](DOMString ARG1)
    """
    for method in interface.operations:
        if ('getter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'DOMString'):
            # Anonymous getters still need a C++ name.
            method.name = method.name or 'anonymousNamedGetter'
            return property_getter(method, ['propertyName'])
    return None
def named_property_setter(interface):
    """Return context for the named property setter, or None.

    Has form: setter RETURN_TYPE [OPTIONAL_IDENTIFIER](DOMString ARG1, ARG_TYPE ARG2)
    """
    for method in interface.operations:
        if ('setter' in method.specials and
                len(method.arguments) == 2 and
                str(method.arguments[0].idl_type) == 'DOMString'):
            return property_setter(method)
    return None
def named_property_deleter(interface):
    """Return context for the named property deleter, or None.

    Has form: deleter TYPE [OPTIONAL_IDENTIFIER](DOMString ARG)
    """
    for method in interface.operations:
        if ('deleter' in method.specials and
                len(method.arguments) == 1 and
                str(method.arguments[0].idl_type) == 'DOMString'):
            return property_deleter(method)
    return None
| bsd-3-clause |
2013Commons/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/AbstractAlgebra.py | 37 | 22134 | #._cv_part guppy.heapy.AbstractAlgebra
class AA:
    # Base class for symbolic abstract-algebra expressions.  The operators
    # do not compute anything: they build BOAPP nodes recording the named
    # operation and its operands, so a law such as 'x * y == y * x' can
    # later be rendered back to source text and recompiled for a concrete
    # operation.  __eq__ deliberately returns a node, not a bool.
    # NOTE: old-style class (Python 2 module).
    def __mul__(self, other):
        return BOAPP('op', self, other)
    def __add__(self, other):
        return BOAPP('op2', self, other)
    def __eq__(self, other):
        return BOAPP('eq', self, other)
class ANAME(AA):
    """Leaf expression node: a named variable; str() yields the bare name."""
    def __init__(self, name):
        self.name = name
    def __str__(self):
        return self.name
class BOAPP(AA):
    """Application node: a function name applied to argument expressions.

    str() renders it as 'funcname(arg1,arg2,...)', suitable for eval().
    """
    def __init__(self, funcname, *args):
        self.funcname = funcname
        self.args = args
    def __str__(self):
        return '%s(%s)'%(self.funcname, ','.join([str(x) for x in self.args]))
class AlgebraicStructure:
    """A carrier set (*range*) together with one or more binary operations.

    Each positional operation becomes self.op<i>; extra keyword arguments
    (e.g. identity, invert) are attached as attributes.
    """
    def __init__(self, mod, range, *ops, **kwds):
        self.mod = mod
        self.range = range
        self.ops = []
        for i, op in enumerate(ops):
            if hasattr(op, 'range') and op.range == range:
                # Already a wrapped operation on the same carrier set.
                pass
            elif callable(op) or op in mod.LE.binary_operation_name:
                # Wrap a raw callable or operator symbol; forward a shared
                # identity, if one was supplied, to the operation wrapper.
                opkwds = {}
                if 'identity' in kwds:
                    opkwds['identity'] = kwds['identity']
                op = mod.binary_operation.new(range, op, **opkwds)
            else:
                raise TypeError, '%s is not a valid operation'%op
            self.ops.append(op)
            setattr(self, 'op%d'%i, op)
        self.numops = len(self.ops)
        for k, v in kwds.items():
            setattr(self, k, v)
    def eq(self, x, y, *more):
        # Chained equality test: true iff x == y == m for every extra m.
        if not x == y:
            return False
        for m in more:
            if not y == m:
                return False
        return True
class BinaryOperation:
    """A binary operation on a carrier set *range*.

    *op* is either an operator symbol (e.g. '+') that is compiled into a
    lambda, or a callable.  Optional algebraic data (identity, invert,
    zeros, zero, complement) are attached only when given, so their mere
    presence can be tested with hasattr.
    """
    def __init__(self, range, op, identity=None, invert=None, zeros=None, zero=None, complement=None):
        self.range = range
        self.arity = 2
        if isinstance(op, str):
            opname = op
            # Compile the operator symbol into a two-argument function.
            func = eval('lambda x,y: x %s y'%opname)
        elif callable(op):
            func = op
            opname = str(func)
        self.opname = opname
        # Instance-attribute __call__ makes the object callable; this relies
        # on old-style class semantics (Python 2).
        self.__call__ = func
        if identity is not None:
            self.identity = identity
        if invert is not None:
            self.invert = invert
        if zeros is not None:
            self.zeros = zeros
        if zero is not None:
            self.zero = zero
        if complement is not None:
            self.complement = complement
        # mimic alg. st.
        self.op0 = self
    def eq(self, x, y, *more):
        # Chained equality test, same contract as AlgebraicStructure.eq.
        if not x == y:
            return False
        for m in more:
            if not y == m:
                return False
        return True
class BinaryAlgebraicStructureFamily:
    # Spec family for two-variable laws, e.g. commutativity:
    # names='xy', cond='x * y == y * x'.  The condition is first evaluated
    # over symbolic ANAME nodes, then rendered back to source and compiled
    # into a test function over a concrete binary operation.
    def __call__(self, names, cond):
        di = {}
        for name in names:
            di[name] = ANAME(name)
        c = eval(cond, di)
        assert isinstance(c, AA)
        def gentestfunc(binop):
            d = {'op':binop}
            expr = 'lambda %s:%s'%(','.join(names), c)
            # NOTE(review): the first 'd' binding above is immediately
            # overwritten here -- appears to be dead code.
            d = {'op':binop,
                 'eq': lambda x,y: x==y}
            f = eval (expr, d)
            def testfunc(env, x, y):
                if not f(x, y):
                    return env.failed('not %s in %s'%((x, y), expr))
                return True
            return testfunc
        return self.Set(self, (gentestfunc, cond))
    def c_test_contains(self, a, b, env):
        # Check the law for every pair drawn from the operation's range.
        f, name = a.arg
        return env.forall_pairs(b.range,
                                f(b),
                                'not in %s'%name)
class TernaryAlgebraicStructureFamily:
    # Spec family for three-variable laws, e.g. associativity:
    # names='xyz', cond='x * (y * z) == (x * y) * z'.  Same mechanism as
    # BinaryAlgebraicStructureFamily, but tests all triples.
    def __call__(self, names, cond):
        di = {}
        for name in names:
            di[name] = ANAME(name)
        c = eval(cond, di)
        assert isinstance(c, AA)
        def gentestfunc(binop):
            d = {'op':binop}
            expr = 'lambda %s:%s'%(','.join(names), c)
            # NOTE(review): first 'd' binding is immediately overwritten.
            d = {'op':binop,
                 'eq': lambda x,y: x==y}
            f = eval (expr, d)
            def testfunc(env, x, y, z):
                if not f(x, y, z):
                    return env.failed('not %s in %s'%((x, y, z), expr))
                return True
            return testfunc
        return self.Set(self, (gentestfunc, cond))
    def c_test_contains(self, a, b, env):
        f, name = a.arg
        return env.forall_triples(b.range,
                                  f(b),
                                  'not in %s'%name)
class DistributiveAlgebraicStructureFamily:
    # Spec family for laws relating two operations ('op' and 'op2'), e.g.
    # distributivity: 'x * (y + z) == (x * y) + (x * z)'.  Members are
    # pairs (op1, op2) of binary operations on the same range.
    def __call__(self, names, cond):
        di = {}
        for name in names:
            di[name] = ANAME(name)
        c = eval(cond, di)
        assert isinstance(c, AA)
        def gentestfunc(binop1, binop2):
            d = {'op':binop1, 'op2': binop2}
            expr = 'lambda %s:%s'%(','.join(names), c)
            # NOTE(review): first 'd' binding is immediately overwritten.
            d = {'op':binop1,
                 'op2':binop2,
                 'eq': lambda x,y: x==y}
            f = eval (expr, d)
            def testfunc(env, x, y, z):
                if not f(x, y, z):
                    return env.failed('not %s in %s'%((x, y, z), expr))
                return True
            return testfunc
        return self.Set(self, (gentestfunc, cond))
    def c_test_contains(self, a, b, env):
        f, name = a.arg
        op1, op2 = b
        # Accept raw (range, op) tuples and wrap them as operations.
        if isinstance(op1, tuple):
            op1 = self.specmod.AA.binary_operation.new(*op1)
        if isinstance(op2, tuple):
            op2 = self.specmod.AA.binary_operation.new(*op2)
        if not op1.range == op2.range:
            return env.failed('Not the same range')
        return env.forall_triples(op1.range,
                                  f(op1, op2),
                                  'not in %s'%name)
class _GLUECLAMP_:
    # Lazily-initialized module glue (guppy's GLUECLAMP protocol): each
    # _get_<name> method computes the attribute <name> on first access.
    # Section 1: algebraic structures; Section 2 (below): relations.
    def _get_abelian_group(self):
        return self.Spec.adaptuple(
            self.group.new,
            self.group & self.Spec.attr('op0', self.commutative))
    def _get_associative(self):
        return self.asuf('xyz', 'x * (y * z) == (x * y) * z')
    def algestruct(self, S, *args, **kwds):
        # Construct an AlgebraicStructure over the set-cast of S.
        S = self.Spec.setcast(S)
        return AlgebraicStructure(self, S, *args, **kwds)
    def asuf(self, names, cond):
        # Algebraic-structure law over 2 or 3 variables, adaptable from a
        # raw (range, op) tuple.
        if len(names) == 2:
            x = self.BinaryAlgebraicStructure(names, cond)
        elif len(names) == 3:
            x = self.TernaryAlgebraicStructure(names, cond)
        else:
            raise ValueError
        return self.Spec.adaptuple(self.binary_operation.new, x)
    def _get_binary_operation(self):
        def binop(S, func, **kwds):
            S = self.Spec.setcast(S)
            # Reuse an existing wrapper when it already matches.
            if isinstance(func, BinaryOperation) and func.range == S and not kwds:
                return func
            return BinaryOperation(S, func, **kwds)
        e = self.Spec
        return e.adaptuple(
            binop,
            e.expset("""(
            attr('range', set) &
            attr('arity', equals(2)) &
            expset('''mapping(range, range, '->', range)''', 'range')
            )"""))
    def _get_binary_relation(self):
        return self.relation
    def _get_BinaryAlgebraicStructure(self):
        return self.family(BinaryAlgebraicStructureFamily)
    def _get_boolean_algebra(self):
        def boolalg(set, op0, op1, complement, id0, id1):
            # Operator-symbol complements ('~', '-', 'not') are compiled.
            if complement in ('~', '-', 'not'):
                complement = eval('lambda x: %s x'%complement)
            return self.algestruct(
                set,
                self.binary_operation.new(set, op0, identity = id0, zero = id1, complement=complement),
                self.binary_operation.new(set, op1, identity = id1, zero = id0, complement=complement)
                )
        e = self.Spec
        return (e.adaptuple(
            boolalg,
            e.attr('op0', e.AA.commutative & e.AA.complemented & e.AA.monoid) &
            e.attr('op1', e.AA.commutative & e.AA.complemented & e.AA.monoid) &
            e.attr(('op0', 'op1'), e.AA.distributive) &
            e.attr(('op1', 'op0'), e.AA.distributive) &
            e.attr(('op0.zero', 'op1.identity'), e.LE.eq) &
            e.attr(('op1.zero', 'op0.identity'), e.LE.eq)
            ))
    def _get_complemented(self):
        # Not a standard term: expresses for an op op, that
        # x op x' = op.zero where x' = op.complement
        def p(env, x):
            op = x.op0
            zero = op.zero
            f = op
            complement = f.complement
            return env.forall(x.range, lambda env, y:
                              x.eq(f(y, complement(y)), zero), 'complemented')
        return self.Spec.predicate(p, 'complemented')
    def _get_commutative(self):
        return self.asuf('xy', 'x * y == y * x')
    def _get_DistributiveAlgebraicStructure(self):
        return self.family(DistributiveAlgebraicStructureFamily)
    def _get_distributive(self):
        return self.distributive_1 & self.distributive_2
    def _get_distributive_1(self):
        return self.DistributiveAlgebraicStructure(
            'xyz', 'x * (y + z) == (x * y) + (x * z)')
    def _get_distributive_2(self):
        return self.DistributiveAlgebraicStructure(
            'xyz', '(x + y) * z == (x * z) + (y * z)')
    def _get_field(self):
        e = self.Spec
        AA = self
        class Field:
            # A field is a ring whose nonzero elements form an abelian
            # multiplicative group.
            def __init__(self, S, add, mul, neg, invert, zero, one):
                if neg in ('-','~','not'):
                    neg = eval('lambda x: %s x'%neg)
                self.range = S
                self.ring = AA.ring.new(S, add, mul, neg, zero)
                self.mulgroup = AA.group.new(S - e.equals(zero), mul, invert, one)
        return e.adaptuple(
            Field,
            e.attr('ring', e.AA.ring) &
            e.attr('mulgroup', e.AA.abelian_group))
    def _get_group(self):
        def mkgroup(S, op, invert, identity):
            if invert in ('-', '~', 'not'):
                invert = eval('lambda x: %s x'%invert)
            return self.algestruct(S, op, identity=identity, invert=invert)
        def p(env, g):
            # Inverse law: x' * x == x * x' == identity for all x.
            try:
                inv = g.invert
            except AttributeError:
                env.failed("no invert function")
            f = g.op0
            return env.forall(g.range, lambda env, x:
                              g.eq(f(inv(x), x), f(x, inv(x)), g.identity))
        e = self.Spec
        return e.adaptuple(
            mkgroup,
            self.monoid & self.Spec.predicate(p, 'group'))
    def _get_latticeform(self):
        # latticeform is a representation category
        class RelationSpec:
            def _get_spec_quadruple(self, e):
                binop = (e.boolean << (e.PyObject, e.PyObject) |
                         e.AA.LE.binary_operation_name)
                return e.cprod(
                    e.LE.setcastable,
                    e.relation.fuop,
                    binop,
                    binop)
            def _get_spec_struct(self, e):
                return (attr('range', e.set),
                        attr('LE'),
                        attr('GLB'),
                        attr('LUB')
                        )
            def map_quadruple_to_struct(self, e, (S, LE, GLB, LUB)):
                # Convert a (set, relation, meet, join) quadruple into an
                # anonymous struct with range/LE/GLB/LUB attributes.
                S = e.setcast(S)
                LE = e.relation.paxa.fromuniversal((e.relation.defipair, (S, LE)))
                GLB = e.AA.binary_operation.new(S, GLB)
                LUB = e.AA.binary_operation.new(S, LUB)
                class C:
                    pass
                c = C()
                c.range = S
                c.LE = LE
                c.GLB = GLB
                c.LUB = LUB
                return c
        return self.Spec.repcat(RelationSpec)
    def _get_lattice(self):
        e = self.Spec
        def p(env, lat):
            # GLB must be a greatest lower bound and LUB a least upper
            # bound with respect to the order lat.LE.
            def test(R, op, name):
                def testlb(env, x, y):
                    lb = op(x, y)
                    if not (R(lb, x) and R(lb, y)):
                        return env.failed('not an %s'%name)
                    if R(x, lb) or R(y, lb): return True # redundant fast way out
                    return env.forall(lat.range,
                                      lambda env, lb2:
                                      (not (R(lb2, x) and R(lb2, y)) or
                                       R(lb2, lb)))
                return env.forall_pairs(lat.range, testlb)
            return (test( lambda x, y: env.contains(lat.LE, (x, y)), lat.GLB, 'lower bound') and
                    test( lambda x, y: env.contains(lat.LE, (y, x)), lat.LUB, 'upper bound'))
        return (
            e.abstractset(
                self.latticeform.struct.fromuniversal,
                e.attr('range') &
                e.attr('LE', e.AA.partial_order.paxa) &
                e.attr('GLB', e.AA.binary_operation) &
                e.attr('LUB', e.AA.binary_operation) &
                e.predicate(p, 'lattice')
                )
            )
    def _get_LE(self):
        return self.Spec.LocalEnv(self.Spec, self._Specification_.LocalEnvExpr)
    def _get_monoid(self):
        def p(env, x):
            # Identity law: e * y == y * e == y for all y.
            op = x.op0
            e = op.identity
            f = op
            return env.forall(x.range, lambda env, y:
                              x.eq(f(e, y), f(y, e), y))
        def mkmonoid(S, op, identity):
            return self.algestruct(S, op, identity=identity)
        e = self.Spec
        return e.adaptuple(
            mkmonoid,
            e.attr('op0', self.associative) &
            e.predicate(p, 'monoid'))
    def _get_ring(self):
        def mkring(S, add, mul, neg, zero):
            if neg in ('-','~','not'):
                neg = eval('lambda x: %s x'%neg)
            return self.algestruct(
                S,
                self.binary_operation.new(S, add, identity=zero, invert=neg),
                self.binary_operation.new(S, mul))
        e = self.Spec
        return (e.adaptuple(
            mkring,
            (e.attr('op0', e.AA.abelian_group) &
             e.attr('op1', e.AA.semigroup) &
             e.attr(('op1', 'op0'), e.AA.distributive)
             )))
    def _get_semigroup(self):
        return self.Spec.adaptuple(self.binary_operation.new, self.Spec.attr('op0', self.associative))
    def _get_Spec(self):
        return self._parent.Spec
    def _get_TernaryAlgebraicStructure(self):
        return self.family(TernaryAlgebraicStructureFamily)
    def family(self, F):
        # Mix a family class F with Spec.SpecFamily; both __init__s run.
        class C(F, self.Spec.SpecFamily):
            def __init__(innerself, *args, **kwds):
                self.Spec.SpecFamily.__init__(innerself, *args, **kwds)
                try:
                    ini = F.__init__
                except AttributeError:
                    pass
                else:
                    ini(innerself, *args, **kwds)
        C.__name__ = F.__name__
        return self.Spec.family(C)
    #
    # 2. Relations and their properties
    #
    def relpropred(self, s, name):
        return self.relprop(self.Spec.predicate(s, name))
    def _get_antisymmetric(self):
        # Assumes implicitly equality relation via '==' operation.
        # Could be generalized, see notes Jan 19 2005
        return self.relpropred(
            lambda env, R: env.forall_pairs(
                R.range,
                lambda env, x, y:
                (not (env.contains(R, (x, y)) and env.contains(R, (y, x))) or
                 x == y)),
            "antisymmetric wrt '==' op")
    def _get_equivalence_relation(self):
        return (
            self.reflexive &
            self.symmetric &
            self.transitive)
    def _get_irreflexive(self):
        # NOTE(review): predicate name is 'reflexive' and message spells
        # 'irrreflexive' -- looks like copy/paste slips in the labels only.
        return self.relpropred(
            lambda env, R: env.forall(R.range,
                lambda env, x: env.test_contains_not(R, (x, x), 'irrreflexive')),
            'reflexive')
    def _get_partial_order(self):
        return (
            self.reflexive &
            self.antisymmetric &
            self.transitive)
    def _get_total_order(self):
        return (
            self.partial_order &
            self.total_relation)
    def _get_total_relation(self):
        # Nonstandard name (?)
        return self.relpropred(
            lambda env, R: env.forall_pairs(
                R.range,
                lambda env, x, y:
                (env.contains(R, (x, y)) or env.contains(R, (y, x)))),
            "total_relation: xRy or yRx for all x,y in A")
    def _get_reflexive(self):
        return self.relpropred(
            lambda env, R: env.forall(
                R.range,
                lambda env, x: env.test_contains(R, (x, x), 'reflexive')),
            'reflexive')
    def _get_symmetric(self):
        return self.relpropred(
            lambda env, R: env.forall(
                R,
                lambda env, (x, y):
                env.test_contains(R, (y, x), 'symmetric')),
            'symmetric')
    def _get_transitive(self):
        return self.relpropred(
            lambda env, R: env.forall(
                R,
                lambda env, (x, y):
                env.forall(R.range,
                           lambda env, z:
                           (not env.contains(R, (y, z)) or
                            env.test_contains(R, (x, z), 'transitive')))),
            'transitive')
    def relprop(self, s):
        e = self.Spec
        return e.abstractset(
            e.relation.paxa.fromuniversal,
            s)
        # NOTE(review): the return below is unreachable -- a dead
        # alternative implementation left in place.
        return e.adaptuple(
            self.relation.new,
            e.attr(('domain', 'range'), e.LE.eq) &
            s)
class _Specification_:
    """
    Specification of some general algebraic structures
    """
    # NOTE(review): GetExamples and GlueTypeExpr are each defined twice in
    # this class body; the later definitions (relations/functions section)
    # shadow the earlier ones at class-creation time -- confirm intended.
    def GetExamples(self, te, obj):
        AA = obj
        LE = AA.LE
        env = te.mod
        # Cayley table of the symmetric group S3: a non-abelian group used
        # as the 'group but not abelian_group' example below.
        S3 = [
            [0,1,2,3,4,5],
            [1,0,4,5,2,3],
            [2,5,0,4,3,1],
            [3,4,5,0,1,2],
            [4,3,1,2,5,0],
            [5,2,3,1,0,4]]
        Type = env.Type
        asexs = [
            # Too slow now with many examples, cubic complexity for associative etc.
            # sets are tested more extensively elsewhere
            #(env.set, env.set, env.empty, ~env.empty, env.equals(0), env.equals(0, 1), env.equals(1)),
            (env.set, env.set, env.empty),
            (env.Type.Int, -1, 0, 1),
            #(env.Type.Float, -2.5,-1.0, 0.0, 1.3, 2.0),
            #(env.Type.Float, -2.0,-1.0, 0.0, 1.0, 2.0),
            (env.Type.Float, -1.0, 0.0),
            (env.Type.String, '', '1234%^', 'asdf*&('),
            (LE.algebraic_class,AA.binary_operation),
            (AA.binary_operation,
             (int, '*')),
            (~AA.binary_operation,
             (env.equals(1), '+')),
            (AA.commutative, (int, '*')),
            (~AA.commutative, (int, '-')),
            (AA.associative, (int, '*')),
            (~AA.associative, (int, '-')),
            (AA.distributive, ((int, '*'), (int, '-'))),
            (AA.distributive_1, ((int, '*'), (int, '-'))),
            (AA.distributive_2, ((int, '*'), (int, '-'))),
            (~AA.distributive, ((int, '*'), (int, '|'))),
            (~AA.distributive_1,((int, '*'), (int, '|'))),
            (~AA.distributive_2,((int, '*'), (int, '|'))),
            (AA.semigroup, (int, '*')),
            (AA.semigroup, (str, '+')),
            (~AA.semigroup, (int, '-')),
            (AA.monoid, (int, '*', 1)),
            (AA.monoid, (str, '+', '')),
            (~AA.monoid, (int, '*', 0)),
            (AA.group, (int, '+', '-', 0)),
            (~AA.group, (int, '*', '-', 1)),
            (AA.abelian_group, (int, '+', '-', 0)),
            (AA.group & ~AA.abelian_group, (
                env.equals(0,1,2,3,4,5),
                lambda x,y : S3[x][y],
                lambda x:[0,1,2,3,5,4][x],
                0)),
            (AA.ring, (int, '+', '*', '-', 0)),
            (~AA.ring, (str, '+', '*', '-', 0),
             (int, '*', '*', '-', 0),
             (int, '+', '+', '-', 0),
             (int, '+', '*', '~', 0),
             (int, '+', '*', '-', 1)),
            (AA.field, (float, '+', '*', '-', lambda x:1.0/x, 0.0, 1.0)),
            (~AA.field, (float, '+', '*', '-', lambda x:2.0/x, 0.0, 1.0)),
            (AA.boolean_algebra,(env.equals(False, True), 'or', 'and', 'not', False, True),
             (int, '|', '&', '~', 0, ~0),
             (env.set, '|', '&', '~', env.empty, ~env.empty)
             ),
            (~AA.boolean_algebra,
             # Mutate each argument..
             (env.equals(True, True), 'or', 'or', 'not', False, True),
             (env.equals(False, True), 'and', 'and', 'not', False, True),
             (env.equals(False, True), 'or', 'or', 'not', False, True),
             (env.equals(False, True), 'or', 'and', '~', False, True),
             (env.equals(False, True), 'or', 'and', 'not', True, True),
             (env.equals(False, True), 'or', 'and', 'not', False, False),
             )
            ]
        # Resolve dotted-name specs to objects and pair each spec with its
        # example arguments.
        ex = []
        for a in asexs:
            name = a[0]
            exs = list(a[1:])
            if isinstance(name, str):
                x = env
                names = name.split('.')
                for name in names:
                    x = getattr(x, name)
            else:
                x = name
            ex.append((x, exs))
        return ex
    class LocalEnvExpr:
        # The '<is>' marker is textually replaced before exec, turning each
        # line into a lazy definition ('name = lambda IS: ...').
        exec("""\
if 1:
    binary_operation_name <is> equals(
        '+', '-', '*', '/', '%', '|', '&', '**', '<<', '>>')
    algebraic_class <is> (setof(Type.Tuple) &
                          attr('new', callable))
    relation_class <is> (setof( setof(any*any) |
                                Type.Tuple))
    relational_operator_name <is> equals(
        '<', '<=', '>', '>=', '==', '!=', 'in', 'not in', 'is', 'is not')
""".replace('<is>', ' = lambda IS: '))
    class GlueTypeExpr:
        # Type declarations for the algebraic attributes; '<in>' is replaced
        # before exec ('name = lambda IN: ...').
        exec("""
if 1:
    abelian_group <in> setof(AA.group)
    associative <in> setof(AA.binary_operation)
    binary_operation <in> doc('''
A \emp{binary operation} $*$ on a set $S$ is a function $*: S \cross S \mapsto S$.
The element in $S$ assigned to $(x, y)$ is denoted $x*y$.
\citemh(p.21)
''') & LE.algebraic_class
    boolean_algebra <in> LE.algebraic_class
    commutative <in> LE.algebraic_class
    distributive <in> setof(cprod(AA.binary_operation, AA.binary_operation))
    distributive_1 <in> setof(cprod(AA.binary_operation, AA.binary_operation))
    distributive_2 <in> setof(cprod(AA.binary_operation, AA.binary_operation))
    field <in> LE.algebraic_class
    group <in> (LE.algebraic_class & doc('''
''' ))
    monoid <in> LE.algebraic_class
    ring <in> (LE.algebraic_class,
               attr('new', argnames('S', 'add', 'mul', 'neg', 'zero')))
    semigroup <in> LE.algebraic_class
""".replace('<in>', '= lambda IN:'))
    # Relations and functions
    def GetExamples(self, te, obj):
        AA = obj
        LE = AA.LE
        e = te.mod
        S = e.iso(0, 1, 2)
        def subsetof(x, y):
            # Subset relation treating ints as bitsets
            return x & y == x
        def D(S, op):
            # Shorthand: a relation in 'defipair' form over set S.
            return (e.relation.defipair, (S, op))
        def L(*args):
            # Shorthand: a lattice in quadruple form.
            return (AA.latticeform.quadruple, args)
        asexs = [
            (e.PyObject, 0), # why not ()?
            #(AA.relation, D(S, '==')),
            (AA.reflexive, D(S, '==')),
            #(AA.reflexive, AA.relation.new(S, '<=')),
            (~AA.reflexive, D(S, '<')),
            (AA.symmetric, D(S, '==')),
            (~AA.symmetric, D(S, '<=')),
            (AA.transitive, D(S, '<')),
            (~AA.transitive, D(S, '!=')),
            (AA.irreflexive, D(S, '<')),
            (~AA.irreflexive, D(S, '<=')),
            (AA.antisymmetric, D(S, '<=')),
            (~AA.antisymmetric, D(S, '!=')),
            (AA.total_relation, D(S, '<=')),
            (~AA.total_relation, D(S, '!=')),
            (AA.equivalence_relation, D(S, '==')),
            (~AA.equivalence_relation, D(S, '<=')),
            (AA.partial_order, D(S, subsetof)),
            (~AA.partial_order, D(S, '<')),
            (AA.total_order, D(S, '<=')),
            (~AA.total_order, D(S, subsetof)),
            (e.Type.Int, 0, 1, 2, 3),
            (AA.lattice, L(int, '<=', min, max)),
            (~AA.lattice, L(int, '<=', '&', max)),
            (~AA.lattice, L(int, '<=', min, '|')),
            (AA.lattice, L(int, lambda x, y: x & y == x, '&', '|')),
            (~AA.lattice, L(int, lambda x, y: x & y == x, min, '|')),
            (~AA.lattice, L(int, lambda x, y: x & y == x, '&', max)),
            (AA.lattice.quadruple, (int, '<=', min, max)),
            ]
        return asexs
    class GlueTypeExpr:
        exec("""\
if 1:
    reflexive <in> doc('x R x for every x in A',
                       AA.LE.relation_class)
    symmetric <in> doc('x R y implies y R x, for all x, y in A',
                       AA.LE.relation_class)
    transitive <in> doc('x R y, y R z implies x R z, for all x, y, z in A',
                        AA.LE.relation_class)
    irreflexive <in> doc('not (x R y), for all x in A',
                         AA.LE.relation_class)
    antisymmetric <in> doc('x R y, y R x implies x == y, for all x, y in A',
                           AA.LE.relation_class)
    total_relation <in> doc('x R y or y R x, for all x, y in A',
                            AA.LE.relation_class)
    equivalence_relation<in> doc('Reflexive, symmetric and transitive',
                                 AA.LE.relation_class)
    partial_order <in> doc('Reflexive, antisymmetric and transitive',
                           AA.LE.relation_class)
    total_order <in> doc('Partial order and x R y or y R x, for all x, y in A',
                         AA.LE.relation_class)
    lattice <in> attr('quadruple', doc('''\
Tuples (S, R, V, A), where:
    S: set or convertible to set, i.e. 'setcastable'
    R: relation operator on S
    V: binary operator on S
    A: binary operator on S
    R, V and A are either operator symbols or functions.
    (S, R) forms a partial order such that
    every pair x, y of elements in S have a greatest
    lower bound GLB and a least upper bound LUB.
    The GLB is given by V(x, y) or x V y depending on if V is
    a function or operator symbol. Likewise, ULB is given
    by A(x, y) or x A y.
    For example, these are lattice specifications:
        (int, '<=', min, max)
        (int, lambda x, y: x & y == x, '&', '|')
''', setof(tupleform(
    ('S', 'R', 'V', 'A'),
    attr('S', SPLE.setcastable) &
    expset('''\
attr('R', AA.LE.relational_operator_name | boolean<<(S, S)) &
attr('V', AA.LE.binary_operation_name | setcast(S)<<(S, S)) &
attr('A', AA.LE.binary_operation_name | setcast(S)<<(S, S))
''', 'S')
    ))))
""".replace('<in>', '=lambda IN:'))
from guppy.heapy.test import support
import sys, unittest
class TestCase(support.TestCase):
    # Shared base for this module's tests; no extra behavior yet.
    pass
class FirstCase(TestCase):
    def test_1(self):
        # Build a test environment from this module's _Specification_ and
        # check guppy.heapy.AbstractAlgebra against it.
        Spec = self.heapy.Spec
        TestEnv = Spec.mkTestEnv(_Specification_)
        #print SpecSpec.getstr(1000)
        TestEnv.test(self.guppy.heapy.AbstractAlgebra)
support.run_unittest(FirstCase, 1)
| apache-2.0 |
Clinical-Genomics/scout | scout/parse/clinvar.py | 1 | 7077 | from scout.constants import CASEDATA_HEADER, CLINVAR_HEADER
def set_submission_objects(form_fields):
    """Build variant and case-data submission objects from the ClinVar form.

    Args:
        form_fields(dict): the submission form dictionary
            (blueprints/variants/clinvar.html); keys follow CLINVAR_HEADER
            and CASEDATA_HEADER naming.

    Returns:
        tuple: (variant_objs, casedata_objs), ready to be included in the
            clinvar database collection
    """
    # IDs of the variants selected in the submitted form
    variant_ids = get_submission_variants(form_fields)
    variant_objs = get_objects_from_form(variant_ids, form_fields, "variant")
    casedata_objs = get_objects_from_form(variant_ids, form_fields, "casedata")
    return (variant_objs, casedata_objs)
def get_objects_from_form(variant_ids, form_fields, object_type):
    """Extract the objects to be saved in the clinvar database collection.

    Args:
        variant_ids(list): list of database variant ids
        form_fields(dict): the submission form dictionary; keys follow
            CLINVAR_HEADER and CASEDATA_HEADER naming
        object_type(str): either 'variant' or 'casedata'

    Returns:
        list: submission objects of the requested type
    """
    # Catalogue of the fields available for this object type
    header = CLINVAR_HEADER if object_type == "variant" else CASEDATA_HEADER
    submission_objects = []
    for variant_id in variant_ids:
        # Casedata for a variant is included only when the user checked it
        if object_type == "casedata" and "casedata_" + variant_id not in form_fields:
            continue
        obj = {
            "csv_type": object_type,
            "case_id": form_fields.get("case_id"),
            "category": form_fields.get("category@" + variant_id),
        }
        for field_name in header:
            field_value = form_fields.get(field_name + "@" + variant_id)
            if not field_value or field_value == "-":
                continue
            if field_name == "ref_seq":
                # 'refseq|hgvs' packed into a single form field
                parts = field_value.split("|")
                obj["ref_seq"] = parts[0]
                obj["hgvs"] = parts[1]
            else:
                obj[field_name] = field_value
        # Unique database ID:
        #   casedata: caseID_variantID_sampleID
        #   variant:  caseID_variantID
        if object_type == "casedata":
            obj["_id"] = "_".join(
                [str(obj["case_id"]), variant_id, str(obj["individual_id"])])
        else:
            obj["_id"] = "_".join([str(obj["case_id"]), variant_id])
        submission_objects.append(obj)
    return submission_objects
def get_submission_variants(form_fields):
    """Extracts a list of variant ids from the clinvar submission form in blueprints/variants/clinvar.html (creation of a new clinvar submission).

    Args:
        form_fields(dict): the submission form dictionary. Keys have the same names as CLINVAR_HEADER and CASEDATA_HEADER

    Returns:
        clinvars: A list of variant IDs
    """
    # When the 'all_vars' checkbox is ticked in the html form, every pinned
    # variant of the case is included in the clinvar submission file;
    # otherwise only the selected variant is.
    if "all_vars" not in form_fields:
        return [form_fields["main_var"]]  # also a list, but with one element
    return [
        value.replace("local_id@", "")
        for field, value in form_fields.items()
        if field.startswith("local_id")
    ]
def clinvar_submission_header(submission_objs, csv_type):
    """Determine which fields to include in csv header by checking a list of submission objects

    Args:
        submission_objs(list): a list of objects (variants or casedata) to include in a csv file
        csv_type(str): 'variant_data' or 'case_data'

    Returns:
        custom_header(dict): A dictionary with the fields required in the csv header. Keys and values are specified in CLINVAR_HEADER and CASEDATA_HEADER
    """
    # Header containing all available fields for this kind of csv
    if csv_type == "variant_data":
        complete_header = CLINVAR_HEADER
    else:
        complete_header = CASEDATA_HEADER

    # Keep only the header fields that appear in at least one submission
    # object, preserving the field order of the complete header.
    custom_header = {}
    for header_key, header_value in complete_header.items():
        if any(header_key in clinvar_obj for clinvar_obj in submission_objs):
            custom_header[header_key] = header_value
    return custom_header
def clinvar_submission_lines(submission_objs, submission_header):
    """Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header

    Args:
        submission_objs(list): a list of objects (variants or casedata) to include in a csv file
        submission_header(dict): as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only

    Returns:
        submission_lines(list): a list of strings, each one a line of the clinvar csv file to be downloaded
    """
    submission_lines = []
    # Each submission object corresponds to one csv line
    for submission_obj in submission_objs:
        cells = []
        # The header keys mirror the keys used in the submission objects
        for header_key in submission_header:
            if header_key in submission_obj:
                # The field is filled in for this variant/casedata object
                cells.append('"' + submission_obj[header_key] + '"')
            else:
                # Empty field for this variant/casedata object
                cells.append('""')
        submission_lines.append(",".join(cells))
    return submission_lines
| bsd-3-clause |
egroeper/exscript | tests/Exscript/util/ipv4Test.py | 6 | 5830 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
import Exscript.util.ipv4
class ipv4Test(unittest.TestCase):
    """Unit tests for the IPv4 helper functions in Exscript.util.ipv4.

    Fixes applied: the deprecated ``assert_`` alias is replaced by
    ``assertTrue``/``assertFalse``, and the Python-3-incompatible long
    literal suffix (``0x...l``) is dropped; both forms behave identically
    on Python 2.7 as well.
    """

    CORRELATE = Exscript.util.ipv4

    def testIsIp(self):
        from Exscript.util.ipv4 import is_ip
        self.assertTrue(is_ip('0.0.0.0'))
        self.assertTrue(is_ip('255.255.255.255'))
        self.assertTrue(is_ip('1.2.3.4'))
        self.assertFalse(is_ip(''))
        self.assertFalse(is_ip('1'))
        self.assertFalse(is_ip('1.2.3.'))
        self.assertFalse(is_ip('.1.2.3'))
        self.assertFalse(is_ip('1.23.4'))
        self.assertFalse(is_ip('1..3.4'))

    def testNormalizeIp(self):
        from Exscript.util.ipv4 import normalize_ip
        self.assertEqual(normalize_ip('0.0.0.0'), '000.000.000.000')
        self.assertEqual(normalize_ip('255.255.255.255'), '255.255.255.255')
        self.assertEqual(normalize_ip('001.002.003.004'), '001.002.003.004')
        self.assertEqual(normalize_ip('192.168.010.001'), '192.168.010.001')
        self.assertEqual(normalize_ip('0.128.255.0'), '000.128.255.000')

    def testCleanIp(self):
        from Exscript.util.ipv4 import clean_ip
        self.assertEqual(clean_ip('0.0.0.0'), '0.0.0.0')
        self.assertEqual(clean_ip('255.255.255.255'), '255.255.255.255')
        self.assertEqual(clean_ip('001.002.003.004'), '1.2.3.4')
        self.assertEqual(clean_ip('192.168.010.001'), '192.168.10.1')
        self.assertEqual(clean_ip('0.128.255.0'), '0.128.255.0')

    def testIp2Int(self):
        from Exscript.util.ipv4 import ip2int
        self.assertEqual(ip2int('0.0.0.0'), 0x00000000)
        self.assertEqual(ip2int('255.255.255.255'), 0xFFFFFFFF)
        self.assertEqual(ip2int('255.255.255.0'), 0xFFFFFF00)
        self.assertEqual(ip2int('0.255.255.0'), 0x00FFFF00)
        self.assertEqual(ip2int('0.128.255.0'), 0x0080FF00)

    def testInt2Ip(self):
        from Exscript.util.ipv4 import int2ip, ip2int
        # Round-trip: int2ip must invert ip2int for these addresses.
        for ip in ('0.0.0.0',
                   '255.255.255.255',
                   '255.255.255.0',
                   '0.255.255.0',
                   '0.128.255.0'):
            self.assertEqual(int2ip(ip2int(ip)), ip)

    def testPfxlen2MaskInt(self):
        from Exscript.util.ipv4 import pfxlen2mask_int, int2ip
        self.assertEqual(int2ip(pfxlen2mask_int(32)), '255.255.255.255')
        self.assertEqual(int2ip(pfxlen2mask_int(31)), '255.255.255.254')
        self.assertEqual(int2ip(pfxlen2mask_int(30)), '255.255.255.252')
        self.assertEqual(int2ip(pfxlen2mask_int(2)), '192.0.0.0')
        self.assertEqual(int2ip(pfxlen2mask_int(1)), '128.0.0.0')
        self.assertEqual(int2ip(pfxlen2mask_int(0)), '0.0.0.0')

    def testPfxlen2Mask(self):
        from Exscript.util.ipv4 import pfxlen2mask
        self.assertEqual(pfxlen2mask(32), '255.255.255.255')
        self.assertEqual(pfxlen2mask(31), '255.255.255.254')
        self.assertEqual(pfxlen2mask(30), '255.255.255.252')
        self.assertEqual(pfxlen2mask(2), '192.0.0.0')
        self.assertEqual(pfxlen2mask(1), '128.0.0.0')
        self.assertEqual(pfxlen2mask(0), '0.0.0.0')

    def testMask2Pfxlen(self):
        from Exscript.util.ipv4 import mask2pfxlen
        self.assertEqual(32, mask2pfxlen('255.255.255.255'))
        self.assertEqual(31, mask2pfxlen('255.255.255.254'))
        self.assertEqual(30, mask2pfxlen('255.255.255.252'))
        self.assertEqual(2, mask2pfxlen('192.0.0.0'))
        self.assertEqual(1, mask2pfxlen('128.0.0.0'))
        self.assertEqual(0, mask2pfxlen('0.0.0.0'))

    def testParsePrefix(self):
        from Exscript.util.ipv4 import parse_prefix
        self.assertEqual(('1.2.3.4', 24), parse_prefix('1.2.3.4'))
        self.assertEqual(('1.2.3.4', 32), parse_prefix('1.2.3.4', 32))
        self.assertEqual(('1.2.3.4', 15), parse_prefix('1.2.3.4/15'))
        self.assertEqual(('1.2.3.4', 15), parse_prefix('1.2.3.4/15', 32))

    def testNetwork(self):
        from Exscript.util.ipv4 import network
        self.assertEqual(network('10.0.0.0/30'), '10.0.0.0')
        self.assertEqual(network('10.0.0.1/30'), '10.0.0.0')
        self.assertEqual(network('10.0.0.2/30'), '10.0.0.0')
        self.assertEqual(network('10.0.0.3/30'), '10.0.0.0')
        self.assertEqual(network('10.0.0.0/24'), '10.0.0.0')
        self.assertEqual(network('10.0.0.255/24'), '10.0.0.0')

    def testBroadcast(self):
        from Exscript.util.ipv4 import broadcast
        self.assertEqual(broadcast('10.0.0.0/30'), '10.0.0.3')
        self.assertEqual(broadcast('10.0.0.1/30'), '10.0.0.3')
        self.assertEqual(broadcast('10.0.0.2/30'), '10.0.0.3')
        self.assertEqual(broadcast('10.0.0.3/30'), '10.0.0.3')
        self.assertEqual(broadcast('10.0.0.0/24'), '10.0.0.255')
        self.assertEqual(broadcast('10.0.0.255/24'), '10.0.0.255')

    def testRemoteIp(self):
        from Exscript.util.ipv4 import remote_ip
        self.assertEqual(remote_ip('10.0.0.0'), '10.0.0.3')
        self.assertEqual(remote_ip('10.0.0.1'), '10.0.0.2')
        self.assertEqual(remote_ip('10.0.0.2'), '10.0.0.1')
        self.assertEqual(remote_ip('10.0.0.3'), '10.0.0.0')

    def testSort(self):
        from Exscript.util.ipv4 import sort
        import random
        ip_list = ['0.0.0.0',
                   '0.0.0.255',
                   '1.2.3.4',
                   '255.255.0.255',
                   '255.255.255.255',
                   '255.255.255.255']
        ip_list_copy = ip_list[:]
        # Shuffling repeatedly guards against an ordering that is only
        # accidentally correct.
        for i in range(50):
            random.shuffle(ip_list_copy)
            self.assertEqual(ip_list, sort(ip_list_copy))
def suite():
    """Return a TestSuite containing every test defined in ipv4Test."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(ipv4Test)

if __name__ == '__main__':
    # Run the whole suite with verbose output when executed as a script.
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite())
| gpl-2.0 |
aviciimaxwell/odoo | openerp/addons/base/module/wizard/base_import_language.py | 337 | 2644 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from tempfile import TemporaryFile
from openerp import tools
from openerp.osv import osv, fields
class base_language_import(osv.osv_memory):
    """Wizard that imports translation terms for a language from an
    uploaded csv or po file."""
    _name = "base.language.import"
    _description = "Language Import"
    _columns = {
        'name': fields.char('Language Name', required=True),
        'code': fields.char('ISO Code', size=5, help="ISO Language and Country code, e.g. en_US", required=True),
        'data': fields.binary('File', required=True),
        'overwrite': fields.boolean('Overwrite Existing Terms',
                                    help="If you enable this option, existing translations (including custom ones) "
                                         "will be overwritten and replaced by those in this file"),
    }

    def import_lang(self, cr, uid, ids, context=None):
        """Decode the uploaded file, detect its format and load its
        translation terms for the wizard's language."""
        if context is None:
            context = {}
        record = self.browse(cr, uid, ids[0])
        if record.overwrite:
            # Ask the loader to replace existing terms, custom ones included.
            context = dict(context, overwrite=True)
        buf = TemporaryFile('w+')
        try:
            buf.write(base64.decodestring(record.data))
            # Determine the file format from the first line: csv files start
            # with the "type,name,res_id,src,value" column header.
            buf.seek(0)
            header_line = buf.readline().strip().replace('"', '').replace(' ', '')
            fileformat = 'csv' if header_line.endswith("type,name,res_id,src,value") else 'po'
            buf.seek(0)
            tools.trans_load_data(cr, buf, fileformat, record.code, lang_name=record.name, context=context)
        finally:
            buf.close()
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
j-carl/boto | tests/unit/provider/test_provider.py | 79 | 17271 | #!/usr/bin/env python
from datetime import datetime, timedelta
from tests.compat import mock, unittest
import os
from boto import provider
from boto.compat import expanduser
# Fixture returned by the mocked boto.utils.get_instance_metadata call:
# maps an IAM role name ('allowall') to its temporary security credentials
# (the tests request it with data='meta-data/iam/security-credentials/').
INSTANCE_CONFIG = {
    'allowall': {
        u'AccessKeyId': u'iam_access_key',
        u'Code': u'Success',
        u'Expiration': u'2012-09-01T03:57:34Z',
        u'LastUpdated': u'2012-08-31T21:43:40Z',
        u'SecretAccessKey': u'iam_secret_key',
        u'Token': u'iam_token',
        u'Type': u'AWS-HMAC'
    }
}
class TestProvider(unittest.TestCase):
    """Exercise the credential-resolution order of provider.Provider.

    Every credential source (explicitly passed values, environment
    variables, the shared credentials file, the boto config file and the
    instance metadata service) is patched in setUp, so each test controls
    exactly which sources are populated.
    """

    def setUp(self):
        """Patch every credential source so tests can populate them freely."""
        # Backing stores read by the patched lookups below; tests fill
        # these dicts to simulate each credential source.
        self.environ = {}
        self.config = {}
        self.shared_config = {}
        # Patch the instance-metadata call, the boto config file accessors
        # and the shared credentials file (provider.Config) accessors.
        self.metadata_patch = mock.patch('boto.utils.get_instance_metadata')
        self.config_patch = mock.patch('boto.provider.config.get',
                                       self.get_config)
        self.has_config_patch = mock.patch('boto.provider.config.has_option',
                                           self.has_config)
        self.config_object_patch = mock.patch.object(
            provider.Config, 'get', self.get_shared_config)
        self.has_config_object_patch = mock.patch.object(
            provider.Config, 'has_option', self.has_shared_config)
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.get_instance_metadata = self.metadata_patch.start()
        # By default there is no metadata service available.
        self.get_instance_metadata.return_value = None
        self.config_patch.start()
        self.has_config_patch.start()
        self.config_object_patch.start()
        self.has_config_object_patch.start()
        self.environ_patch.start()

    def tearDown(self):
        """Undo every patch started in setUp."""
        self.metadata_patch.stop()
        self.config_patch.stop()
        self.has_config_patch.stop()
        self.config_object_patch.stop()
        self.has_config_object_patch.stop()
        self.environ_patch.stop()

    def has_config(self, section_name, key):
        """Stand-in for boto.provider.config.has_option backed by self.config."""
        try:
            self.config[section_name][key]
            return True
        except KeyError:
            return False

    def get_config(self, section_name, key):
        """Stand-in for boto.provider.config.get backed by self.config."""
        try:
            return self.config[section_name][key]
        except KeyError:
            return None

    def has_shared_config(self, section_name, key):
        """Stand-in for Config.has_option backed by self.shared_config."""
        try:
            self.shared_config[section_name][key]
            return True
        except KeyError:
            return False

    def get_shared_config(self, section_name, key):
        """Stand-in for Config.get backed by self.shared_config."""
        try:
            return self.shared_config[section_name][key]
        except KeyError:
            return None

    def test_passed_in_values_are_used(self):
        p = provider.Provider('aws', 'access_key', 'secret_key', 'security_token')
        self.assertEqual(p.access_key, 'access_key')
        self.assertEqual(p.secret_key, 'secret_key')
        self.assertEqual(p.security_token, 'security_token')

    def test_environment_variables_are_used(self):
        self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'env_access_key')
        self.assertEqual(p.secret_key, 'env_secret_key')
        self.assertIsNone(p.security_token)

    def test_environment_variable_aws_security_token(self):
        self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
        self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token'
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'env_access_key')
        self.assertEqual(p.secret_key, 'env_secret_key')
        self.assertEqual(p.security_token, 'env_security_token')

    def test_no_credentials_provided(self):
        p = provider.Provider(
            'aws',
            provider.NO_CREDENTIALS_PROVIDED,
            provider.NO_CREDENTIALS_PROVIDED,
            provider.NO_CREDENTIALS_PROVIDED
        )
        self.assertEqual(p.access_key, provider.NO_CREDENTIALS_PROVIDED)
        self.assertEqual(p.secret_key, provider.NO_CREDENTIALS_PROVIDED)
        self.assertEqual(p.security_token, provider.NO_CREDENTIALS_PROVIDED)

    def test_config_profile_values_are_used(self):
        self.config = {
            'profile dev': {
                'aws_access_key_id': 'dev_access_key',
                'aws_secret_access_key': 'dev_secret_key',
            }, 'profile prod': {
                'aws_access_key_id': 'prod_access_key',
                'aws_secret_access_key': 'prod_secret_key',
            }, 'profile prod_withtoken': {
                'aws_access_key_id': 'prod_access_key',
                'aws_secret_access_key': 'prod_secret_key',
                'aws_security_token': 'prod_token',
            }, 'Credentials': {
                'aws_access_key_id': 'default_access_key',
                'aws_secret_access_key': 'default_secret_key'
            }
        }
        p = provider.Provider('aws', profile_name='prod')
        self.assertEqual(p.access_key, 'prod_access_key')
        self.assertEqual(p.secret_key, 'prod_secret_key')
        p = provider.Provider('aws', profile_name='prod_withtoken')
        self.assertEqual(p.access_key, 'prod_access_key')
        self.assertEqual(p.secret_key, 'prod_secret_key')
        self.assertEqual(p.security_token, 'prod_token')
        q = provider.Provider('aws', profile_name='dev')
        self.assertEqual(q.access_key, 'dev_access_key')
        self.assertEqual(q.secret_key, 'dev_secret_key')

    def test_config_missing_profile(self):
        # None of these default profiles should be loaded!
        self.shared_config = {
            'default': {
                'aws_access_key_id': 'shared_access_key',
                'aws_secret_access_key': 'shared_secret_key',
            }
        }
        self.config = {
            'Credentials': {
                'aws_access_key_id': 'default_access_key',
                'aws_secret_access_key': 'default_secret_key'
            }
        }
        with self.assertRaises(provider.ProfileNotFoundError):
            provider.Provider('aws', profile_name='doesntexist')

    def test_config_values_are_used(self):
        self.config = {
            'Credentials': {
                'aws_access_key_id': 'cfg_access_key',
                'aws_secret_access_key': 'cfg_secret_key',
            }
        }
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'cfg_access_key')
        self.assertEqual(p.secret_key, 'cfg_secret_key')
        self.assertIsNone(p.security_token)

    def test_config_value_security_token_is_used(self):
        self.config = {
            'Credentials': {
                'aws_access_key_id': 'cfg_access_key',
                'aws_secret_access_key': 'cfg_secret_key',
                'aws_security_token': 'cfg_security_token',
            }
        }
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'cfg_access_key')
        self.assertEqual(p.secret_key, 'cfg_secret_key')
        self.assertEqual(p.security_token, 'cfg_security_token')

    def test_keyring_is_used(self):
        self.config = {
            'Credentials': {
                'aws_access_key_id': 'cfg_access_key',
                'keyring': 'test',
            }
        }
        import sys
        try:
            import keyring
            imported = True
        except ImportError:
            # Install a stub module so the provider's own "import keyring"
            # succeeds even when the real package is absent.
            sys.modules['keyring'] = keyring = type(mock)('keyring', '')
            imported = False
        try:
            with mock.patch('keyring.get_password', create=True):
                keyring.get_password.side_effect = (
                    lambda kr, login: kr+login+'pw')
                p = provider.Provider('aws')
                self.assertEqual(p.access_key, 'cfg_access_key')
                self.assertEqual(p.secret_key, 'testcfg_access_keypw')
                self.assertIsNone(p.security_token)
        finally:
            if not imported:
                del sys.modules['keyring']

    def test_passed_in_values_beat_env_vars(self):
        self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
        self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token'
        p = provider.Provider('aws', 'access_key', 'secret_key')
        self.assertEqual(p.access_key, 'access_key')
        self.assertEqual(p.secret_key, 'secret_key')
        self.assertEqual(p.security_token, None)

    def test_env_vars_beat_shared_creds_values(self):
        self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
        self.shared_config = {
            'default': {
                'aws_access_key_id': 'shared_access_key',
                'aws_secret_access_key': 'shared_secret_key',
            }
        }
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'env_access_key')
        self.assertEqual(p.secret_key, 'env_secret_key')
        self.assertIsNone(p.security_token)

    def test_shared_creds_beat_config_values(self):
        self.shared_config = {
            'default': {
                'aws_access_key_id': 'shared_access_key',
                'aws_secret_access_key': 'shared_secret_key',
            }
        }
        self.config = {
            'Credentials': {
                'aws_access_key_id': 'cfg_access_key',
                'aws_secret_access_key': 'cfg_secret_key',
            }
        }
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'shared_access_key')
        self.assertEqual(p.secret_key, 'shared_secret_key')
        self.assertIsNone(p.security_token)

    def test_shared_creds_profile_beats_defaults(self):
        self.shared_config = {
            'default': {
                'aws_access_key_id': 'shared_access_key',
                'aws_secret_access_key': 'shared_secret_key',
            },
            'foo': {
                'aws_access_key_id': 'foo_access_key',
                'aws_secret_access_key': 'foo_secret_key',
            }
        }
        p = provider.Provider('aws', profile_name='foo')
        self.assertEqual(p.access_key, 'foo_access_key')
        self.assertEqual(p.secret_key, 'foo_secret_key')
        self.assertIsNone(p.security_token)

    def test_env_profile_loads_profile(self):
        self.environ['AWS_PROFILE'] = 'foo'
        self.shared_config = {
            'default': {
                'aws_access_key_id': 'shared_access_key',
                'aws_secret_access_key': 'shared_secret_key',
            },
            'foo': {
                'aws_access_key_id': 'shared_access_key_foo',
                'aws_secret_access_key': 'shared_secret_key_foo',
            }
        }
        self.config = {
            'profile foo': {
                'aws_access_key_id': 'cfg_access_key_foo',
                'aws_secret_access_key': 'cfg_secret_key_foo',
            },
            'Credentials': {
                'aws_access_key_id': 'cfg_access_key',
                'aws_secret_access_key': 'cfg_secret_key',
            }
        }
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'shared_access_key_foo')
        self.assertEqual(p.secret_key, 'shared_secret_key_foo')
        self.assertIsNone(p.security_token)
        # With the shared credentials file emptied, the env profile falls
        # back to the boto config file's "profile foo" section.
        self.shared_config = {}
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'cfg_access_key_foo')
        self.assertEqual(p.secret_key, 'cfg_secret_key_foo')
        self.assertIsNone(p.security_token)

    def test_env_vars_security_token_beats_config_values(self):
        self.environ['AWS_ACCESS_KEY_ID'] = 'env_access_key'
        self.environ['AWS_SECRET_ACCESS_KEY'] = 'env_secret_key'
        self.environ['AWS_SECURITY_TOKEN'] = 'env_security_token'
        self.shared_config = {
            'default': {
                'aws_access_key_id': 'shared_access_key',
                'aws_secret_access_key': 'shared_secret_key',
                'aws_security_token': 'shared_security_token',
            }
        }
        self.config = {
            'Credentials': {
                'aws_access_key_id': 'cfg_access_key',
                'aws_secret_access_key': 'cfg_secret_key',
                'aws_security_token': 'cfg_security_token',
            }
        }
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'env_access_key')
        self.assertEqual(p.secret_key, 'env_secret_key')
        self.assertEqual(p.security_token, 'env_security_token')
        # Remove sources one at a time to verify the fallback order.
        self.environ.clear()
        p = provider.Provider('aws')
        self.assertEqual(p.security_token, 'shared_security_token')
        self.shared_config.clear()
        p = provider.Provider('aws')
        self.assertEqual(p.security_token, 'cfg_security_token')

    def test_metadata_server_credentials(self):
        self.get_instance_metadata.return_value = INSTANCE_CONFIG
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'iam_access_key')
        self.assertEqual(p.secret_key, 'iam_secret_key')
        self.assertEqual(p.security_token, 'iam_token')
        self.assertEqual(
            self.get_instance_metadata.call_args[1]['data'],
            'meta-data/iam/security-credentials/')

    def test_refresh_credentials(self):
        now = datetime.utcnow()
        first_expiration = (now + timedelta(seconds=10)).strftime(
            "%Y-%m-%dT%H:%M:%SZ")
        credentials = {
            u'AccessKeyId': u'first_access_key',
            u'Code': u'Success',
            u'Expiration': first_expiration,
            u'LastUpdated': u'2012-08-31T21:43:40Z',
            u'SecretAccessKey': u'first_secret_key',
            u'Token': u'first_token',
            u'Type': u'AWS-HMAC'
        }
        instance_config = {'allowall': credentials}
        self.get_instance_metadata.return_value = instance_config
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'first_access_key')
        self.assertEqual(p.secret_key, 'first_secret_key')
        self.assertEqual(p.security_token, 'first_token')
        self.assertIsNotNone(p._credential_expiry_time)
        # Now set the expiration to something in the past.
        expired = now - timedelta(seconds=20)
        p._credential_expiry_time = expired
        credentials['AccessKeyId'] = 'second_access_key'
        credentials['SecretAccessKey'] = 'second_secret_key'
        credentials['Token'] = 'second_token'
        self.get_instance_metadata.return_value = instance_config
        # Now upon attribute access, the credentials should be updated.
        self.assertEqual(p.access_key, 'second_access_key')
        self.assertEqual(p.secret_key, 'second_secret_key')
        self.assertEqual(p.security_token, 'second_token')

    @mock.patch('boto.provider.config.getint')
    @mock.patch('boto.provider.config.getfloat')
    def test_metadata_config_params(self, config_float, config_int):
        # Configured timeout/retries must be forwarded to the metadata call.
        config_int.return_value = 10
        config_float.return_value = 4.0
        self.get_instance_metadata.return_value = INSTANCE_CONFIG
        p = provider.Provider('aws')
        self.assertEqual(p.access_key, 'iam_access_key')
        self.assertEqual(p.secret_key, 'iam_secret_key')
        self.assertEqual(p.security_token, 'iam_token')
        self.get_instance_metadata.assert_called_with(
            timeout=4.0, num_retries=10,
            data='meta-data/iam/security-credentials/')

    def test_provider_google(self):
        # Same source precedence, but with the google (GS_*) credentials.
        self.environ['GS_ACCESS_KEY_ID'] = 'env_access_key'
        self.environ['GS_SECRET_ACCESS_KEY'] = 'env_secret_key'
        self.shared_config = {
            'default': {
                'gs_access_key_id': 'shared_access_key',
                'gs_secret_access_key': 'shared_secret_key',
            }
        }
        self.config = {
            'Credentials': {
                'gs_access_key_id': 'cfg_access_key',
                'gs_secret_access_key': 'cfg_secret_key',
            }
        }
        p = provider.Provider('google')
        self.assertEqual(p.access_key, 'env_access_key')
        self.assertEqual(p.secret_key, 'env_secret_key')
        self.environ.clear()
        p = provider.Provider('google')
        self.assertEqual(p.access_key, 'shared_access_key')
        self.assertEqual(p.secret_key, 'shared_secret_key')
        self.shared_config.clear()
        p = provider.Provider('google')
        self.assertEqual(p.access_key, 'cfg_access_key')
        self.assertEqual(p.secret_key, 'cfg_secret_key')

    @mock.patch('os.path.isfile', return_value=True)
    @mock.patch.object(provider.Config, 'load_from_path')
    def test_shared_config_loading(self, load_from_path, exists):
        provider.Provider('aws')
        path = os.path.join(expanduser('~'), '.aws', 'credentials')
        exists.assert_called_once_with(path)
        load_from_path.assert_called_once_with(path)
        exists.reset_mock()
        load_from_path.reset_mock()
        provider.Provider('google')
        path = os.path.join(expanduser('~'), '.google', 'credentials')
        exists.assert_called_once_with(path)
        load_from_path.assert_called_once_with(path)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| mit |
nikolas/edx-platform | common/djangoapps/student/tests/test_password_policy.py | 113 | 12723 | # -*- coding: utf-8 -*-
"""
This test file will verify proper password policy enforcement, which is an optional feature
"""
import json
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.utils.importlib import import_module
from django.test.utils import override_settings
from django.conf import settings
from mock import patch
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
from student.views import create_account
@patch.dict("django.conf.settings.FEATURES", {'ENFORCE_PASSWORD_POLICY': True})
class TestPasswordPolicy(TestCase):
"""
Go through some password policy tests to make sure things are properly working
"""
def setUp(self):
super(TestPasswordPolicy, self).setUp()
self.url = reverse('create_account')
self.request_factory = RequestFactory()
self.url_params = {
'username': 'username',
'email': 'foo_bar@bar.com',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
@override_settings(PASSWORD_MIN_LENGTH=6)
def test_password_length_too_short(self):
self.url_params['password'] = 'aaa'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Invalid Length (must be 6 characters or more)",
)
@override_settings(PASSWORD_MIN_LENGTH=6)
def test_password_length_long_enough(self):
self.url_params['password'] = 'ThisIsALongerPassword'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(PASSWORD_MAX_LENGTH=12)
def test_password_length_too_long(self):
self.url_params['password'] = 'ThisPasswordIsWayTooLong'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Invalid Length (must be 12 characters or less)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'UPPER': 3})
def test_password_not_enough_uppercase(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more uppercase characters)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'UPPER': 3})
def test_password_enough_uppercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'LOWER': 3})
def test_password_not_enough_lowercase(self):
self.url_params['password'] = 'THISSHOULDFAIL'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more lowercase characters)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'LOWER': 3})
def test_password_enough_lowercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'DIGITS': 3})
def test_not_enough_digits(self):
self.url_params['password'] = 'thishasnodigits'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more digits)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'DIGITS': 3})
def test_enough_digits(self):
self.url_params['password'] = 'Th1sSh0uldPa88'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'PUNCTUATION': 3})
def test_not_enough_punctuations(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more punctuation characters)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'PUNCTUATION': 3})
def test_enough_punctuations(self):
self.url_params['password'] = 'Th!sSh.uldPa$*'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'WORDS': 3})
def test_not_enough_words(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more unique words)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'WORDS': 3})
def test_enough_wordss(self):
self.url_params['password'] = u'this should pass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {
'PUNCTUATION': 3,
'WORDS': 3,
'DIGITS': 3,
'LOWER': 3,
'UPPER': 3,
})
def test_multiple_errors_fail(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
errstring = (
"Password: Must be more complex ("
"must contain 3 or more uppercase characters, "
"must contain 3 or more digits, "
"must contain 3 or more punctuation characters, "
"must contain 3 or more unique words"
")"
)
self.assertEqual(obj['value'], errstring)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {
'PUNCTUATION': 3,
'WORDS': 3,
'DIGITS': 3,
'LOWER': 3,
'UPPER': 3,
})
def test_multiple_errors_pass(self):
self.url_params['password'] = u'tH1s Sh0u!d P3#$'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_fail1(self):
self.url_params['password'] = 'foo'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Too similar to a restricted dictionary word.",
)
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_fail2(self):
self.url_params['password'] = 'bar'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Too similar to a restricted dictionary word.",
)
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_fail3(self):
self.url_params['password'] = 'fo0'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Too similar to a restricted dictionary word.",
)
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_pass(self):
self.url_params['password'] = 'this_is_ok'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
def test_with_unicode(self):
self.url_params['password'] = u'四節比分和七年前'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
    @override_settings(PASSWORD_MIN_LENGTH=6, SESSION_ENGINE='django.contrib.sessions.backends.cache')
    def test_ext_auth_password_length_too_short(self):
        """
        Tests that even if password policy is enforced, ext_auth registrations aren't subject to it
        """
        self.url_params['password'] = 'aaa' # shouldn't pass validation
        request = self.request_factory.post(self.url, self.url_params)
        # now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
        # the create_account view is expected to skip the password policy when this key is present.
        request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
        extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                  external_email='withmap@stanford.edu',
                                  internal_password=self.url_params['password'],
                                  external_domain='shib:https://idp.stanford.edu/')
        request.session['ExternalAuthMap'] = extauth
        request.user = AnonymousUser()
        # mako middleware must run before the view so template rendering works
        # when calling the view function directly (no middleware stack here)
        mako_middleware_process_request(request)
        response = create_account(request)
        # 200 + success despite the 3-char password: ext_auth bypasses the policy
        self.assertEqual(response.status_code, 200)
        obj = json.loads(response.content)
        self.assertTrue(obj['success'])
class TestUsernamePasswordNonmatch(TestCase):
    """
    Test that registration username and password fields differ
    """
    def setUp(self):
        super(TestUsernamePasswordNonmatch, self).setUp()
        self.url = reverse('create_account')
        # Baseline registration form; each test fills in username/password.
        self.url_params = {
            'username': 'username',
            'email': 'foo_bar@bar.com',
            'name': 'username',
            'terms_of_service': 'true',
            'honor_code': 'true',
        }

    def test_with_username_password_match(self):
        """Registration is rejected (400) when username equals password."""
        self.url_params['username'] = "foobar"
        self.url_params['password'] = "foobar"
        response = self.client.post(self.url, self.url_params)
        # assertEquals is a deprecated alias; use assertEqual consistently
        # with the rest of this module.
        self.assertEqual(response.status_code, 400)
        obj = json.loads(response.content)
        self.assertEqual(
            obj['value'],
            "Username and password fields cannot match",
        )

    def test_with_username_password_nonmatch(self):
        """Registration succeeds (200) when username and password differ."""
        self.url_params['username'] = "foobar"
        self.url_params['password'] = "nonmatch"
        response = self.client.post(self.url, self.url_params)
        self.assertEqual(response.status_code, 200)
        obj = json.loads(response.content)
        self.assertTrue(obj['success'])
| agpl-3.0 |
jkleckner/ansible | lib/ansible/utils/__init__.py | 2 | 33093 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import shlex
import yaml
import copy
import optparse
import operator
from ansible import errors
from ansible import __version__
from ansible.utils.plugins import *
from ansible.utils import template
from ansible.callbacks import display
import ansible.constants as C
import time
import StringIO
import stat
import termios
import tty
import pipes
import random
import difflib
import warnings
import traceback
import getpass
import sys
import textwrap
# Global verbosity level, bumped by each -v flag (see increment_debug).
VERBOSITY=0

# list of all deprecation messages to prevent duplicate display
deprecations = {}
# same idea for plain warnings (see warning())
warns = {}

# presumably the size cap applied when producing --diff output — confirm at call sites
MAX_FILE_SIZE_FOR_DIFF=1*1024*1024

# prefer the stdlib json module; fall back to simplejson on old interpreters
try:
    import json
except ImportError:
    import simplejson as json

# hashlib replaced the md5 module; keep a private alias either way
try:
    from hashlib import md5 as _md5
except ImportError:
    from md5 import md5 as _md5

# Optional-dependency probes: the flags below are checked before any use
# of the corresponding library (do_encrypt, key_for_hostname).
PASSLIB_AVAILABLE = False
try:
    import passlib.hash
    PASSLIB_AVAILABLE = True
except:
    pass

KEYCZAR_AVAILABLE=False
try:
    import keyczar.errors as key_errors
    from keyczar.keys import AesKey
    KEYCZAR_AVAILABLE=True
except ImportError:
    pass
###############################################################
# Abstractions around keyczar
###############################################################
def key_for_hostname(hostname):
    ''' Return the per-host AES key used by fireball/accelerated mode,
    generating and caching it under ~/.fireball.keys/<hostname> if needed. '''
    # fireball mode is an implementation of ansible firing up zeromq via SSH
    # to use no persistent daemons or key management
    if not KEYCZAR_AVAILABLE:
        raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")

    key_path = os.path.expanduser("~/.fireball.keys")
    if not os.path.exists(key_path):
        os.makedirs(key_path)
    key_path = os.path.expanduser("~/.fireball.keys/%s" % hostname)

    # use new AES keys every 2 hours, which means fireball must not allow running for longer either
    if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
        # stale or missing: generate a fresh key and persist its serialized form
        key = AesKey.Generate()
        fh = open(key_path, "w")
        fh.write(str(key))
        fh.close()
        return key
    else:
        # reuse the cached key
        fh = open(key_path)
        key = AesKey.Read(fh.read())
        fh.close()
        return key
def encrypt(key, msg):
    ''' encrypt msg with a keyczar AesKey object (see key_for_hostname) '''
    return key.Encrypt(msg)
def decrypt(key, msg):
    ''' decrypt msg with a keyczar AesKey object; a bad signature is
    surfaced as a generic AnsibleError rather than a keyczar exception '''
    try:
        return key.Decrypt(msg)
    except key_errors.InvalidSignatureError:
        raise errors.AnsibleError("decryption failed")
###############################################################
# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
###############################################################
def err(msg):
    ''' print an error message to stderr '''
    # NOTE: Python 2 print-to-file syntax; this module is Python 2 only.
    print >> sys.stderr, msg
def exit(msg, rc=1):
    ''' quit with an error to stdout and a failure code '''
    # NOTE: shadows the builtin exit(); sys.exit raises SystemExit(rc).
    err(msg)
    sys.exit(rc)
def jsonify(result, format=False):
    ''' format JSON output (compressed or uncompressed) '''
    if result is None:
        return "{}"
    # shallow copy so the caller's dict is not mutated below
    result2 = result.copy()
    # Python 2: promote top-level byte strings to unicode so json.dumps
    # cannot fail on non-ASCII data (undecodable bytes are dropped)
    for key, value in result2.items():
        if type(value) is str:
            result2[key] = value.decode('utf-8', 'ignore')
    if format:
        return json.dumps(result2, sort_keys=True, indent=4)
    else:
        return json.dumps(result2, sort_keys=True)
def write_tree_file(tree, hostname, buf):
    ''' write buf into a file named after hostname inside the tree directory '''
    # TODO: might be nice to append playbook runs per host in a similar way
    # in which case, we'd want append mode.
    dest = os.path.join(tree, hostname)
    out = open(dest, "w+")
    out.write(buf)
    out.close()
def is_failed(result):
    ''' is a given JSON result a failed result? '''
    # failure means a nonzero return code OR an explicit truthy 'failed' flag
    failed_flag = result.get('failed', False) in (True, 'True', 'true')
    return (result.get('rc', 0) != 0) or failed_flag
def is_changed(result):
    ''' is a given JSON result a changed result? '''
    return result.get('changed', False) in (True, 'True', 'true')
def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
    ''' Evaluate a 'when:'-style conditional against the inject variables.
    Accepts None/'' (always True), a list (AND-ed together), a non-string
    (returned as-is), or a Jinja2 expression string; returns True/False. '''
    if conditional is None or conditional == '':
        return True

    # a list of conditionals must all hold
    if isinstance(conditional, list):
        for x in conditional:
            if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
                return False
        return True

    if not isinstance(conditional, basestring):
        return conditional

    conditional = conditional.replace("jinja2_compare ","")
    # allow variable names
    # (the '-' check presumably avoids treating e.g. hostnames as variables — confirm)
    if conditional in inject and str(inject[conditional]).find('-') == -1:
        conditional = inject[conditional]
    conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
    original = str(conditional).replace("jinja2_compare ","")
    # a Jinja2 evaluation that results in something Python can eval!
    presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
    conditional = template.template(basedir, presented, inject)
    val = conditional.strip()
    if val == presented:
        # the templating failed, meaning most likely a
        # variable was undefined. If we happened to be
        # looking for an undefined variable, return True,
        # otherwise fail
        if conditional.find("is undefined") != -1:
            return True
        elif conditional.find("is defined") != -1:
            return False
        else:
            raise errors.AnsibleError("error while evaluating conditional: %s" % original)
    elif val == "True":
        return True
    elif val == "False":
        return False
    else:
        raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
def is_executable(path):
    '''is the given path executable?

    Returns a truthy bitmask (or 0) if any of the user/group/other
    execute bits is set.'''
    # stat once instead of three times (the original called os.stat per
    # permission bit, which is wasteful and race-prone)
    mode = os.stat(path)[stat.ST_MODE]
    return (stat.S_IXUSR & mode
            or stat.S_IXGRP & mode
            or stat.S_IXOTH & mode)
def unfrackpath(path):
    '''
    Return a canonical form of ``path``: '~' and environment variables
    expanded, relative traversals resolved, and symlinks followed.

    example:
    '$HOME/../../var/mail' becomes '/var/spool/mail'
    '''
    expanded = os.path.expanduser(path)
    expanded = os.path.expandvars(expanded)
    return os.path.normpath(os.path.realpath(expanded))
def prepare_writeable_dir(tree,mode=0777):
    ''' make sure a directory exists and is writeable; returns the
    canonicalized path or raises AnsibleError '''
    # NOTE: 0777/0700 octal literals are Python 2-only syntax.

    # modify the mode to ensure the owner at least
    # has read/write access to this directory
    mode |= 0700

    # make sure the tree path is always expanded
    # and normalized and free of symlinks
    tree = unfrackpath(tree)

    if not os.path.exists(tree):
        try:
            os.makedirs(tree, mode)
        except (IOError, OSError), e:
            raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
    if not os.access(tree, os.W_OK):
        raise errors.AnsibleError("Cannot write to path %s" % tree)
    return tree
def path_dwim(basedir, given):
    '''
    make relative paths work like folks expect.
    '''
    # absolute path: use as-is; '~' path: expand the user's home;
    # anything else is taken relative to basedir
    if given.startswith("/"):
        return os.path.abspath(given)
    if given.startswith("~"):
        return os.path.abspath(os.path.expanduser(given))
    return os.path.abspath(os.path.join(basedir, given))
def path_dwim_relative(original, dirname, source, playbook_base, check=True):
    ''' find one file in a directory one level up in a dir named dirname relative to current '''
    # (used by roles code)
    basedir = os.path.dirname(original)
    if os.path.islink(basedir):
        # resolve the symlink so the sibling 'dirname' directory is found
        # next to the real location, not next to the link
        basedir = unfrackpath(basedir)
        template2 = os.path.join(basedir, dirname, source)
    else:
        template2 = os.path.join(basedir, '..', dirname, source)
    source2 = path_dwim(basedir, template2)
    if os.path.exists(source2):
        return source2
    # fall back to the path as seen from the playbook directory itself
    obvious_local_path = path_dwim(playbook_base, source)
    if os.path.exists(obvious_local_path):
        return obvious_local_path
    if check:
        raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
    return source2 # which does not exist
def json_loads(data):
    ''' parse a JSON string and return a data structure '''
    # thin wrapper kept so callers do not depend on which json module
    # (json vs simplejson) was imported at the top of this file
    return json.loads(data)
def parse_json(raw_data):
    ''' this version for module return data only

    Parses real JSON first; on failure falls back to "Baby JSON"
    (space-separated key=value tokens), coercing 'changed'/'failed'
    values to booleans and 'rc' to an int. '''
    orig_data = raw_data
    # ignore stuff like tcgetattr spewage or other warnings
    data = filter_leading_non_json_lines(raw_data)
    try:
        return json.loads(data)
    except:
        # not JSON, but try "Baby JSON" which allows many of our modules to not
        # require JSON and makes writing modules in bash much simpler
        results = {}
        try:
            tokens = shlex.split(data)
        except:
            print("failed to parse json: " + data)
            raise
        for t in tokens:
            if t.find("=") == -1:
                raise errors.AnsibleError("failed to parse: %s" % orig_data)
            (key, value) = t.split("=", 1)
            # BUG FIX: this previously read "if key == 'changed' or 'failed':",
            # which is always truthy ('failed' is a non-empty string), so EVERY
            # key's value got coerced to a boolean when it looked like one.
            if key in ('changed', 'failed'):
                if value.lower() in ['true', '1']:
                    value = True
                elif value.lower() in ['false', '0']:
                    value = False
            if key == 'rc':
                value = int(value)
            results[key] = value
        if not results:
            return {"failed": True, "parsed": False, "msg": orig_data}
        return results
def smush_braces(data):
    ''' smush Jinaj2 braces so unresolved templates like {{ foo }} don't get parsed weird by key=value code '''
    # strip every space immediately inside the braces, however many there are
    while '{{ ' in data:
        data = data.replace('{{ ', '{{')
    while ' }}' in data:
        data = data.replace(' }}', '}}')
    return data
def smush_ds(data):
    # things like key={{ foo }} are not handled by shlex.split well, so preprocess any YAML we load
    # so we do not have to call smush elsewhere
    # (type() checks deliberately exclude subclasses, mirroring YAML loader output)
    if type(data) == list:
        return [smush_ds(element) for element in data]
    if type(data) == dict:
        for (key, value) in data.items():
            data[key] = smush_ds(value)
        return data
    if isinstance(data, basestring):
        return smush_braces(data)
    return data
def parse_yaml(data):
    ''' convert a yaml string to a data structure '''
    # safe_load avoids arbitrary object construction; smush_ds collapses
    # '{{ foo }}' to '{{foo}}' so later shlex splitting keeps templates whole
    return smush_ds(yaml.safe_load(data))
def process_common_errors(msg, probline, column):
    ''' Append targeted hint text to a YAML error message for common
    quoting mistakes found on the offending line (probline/column). '''
    replaced = probline.replace(" ","")

    # case 1: "key: {{ var }}" — YAML sees a second '{' and chokes
    if replaced.find(":{{") != -1 and replaced.find("}}") != -1:
        msg = msg + """
This one looks easy to fix. YAML thought it was looking for the start of a
hash/dictionary and was confused to see a second "{". Most likely this was
meant to be an ansible template evaluation instead, so we have to give the
parser a small hint that we wanted a string instead. The solution here is to
just quote the entire value.

For instance, if the original line was:

    app_path: {{ base_path }}/foo

It should be written as:

    app_path: "{{ base_path }}/foo"
"""
        return msg

    # case 2: an extra unquoted colon right at the error column
    elif len(probline) and len(probline) >= column and probline[column] == ":" and probline.count(':') > 1:
        msg = msg + """
This one looks easy to fix. There seems to be an extra unquoted colon in the line
and this is confusing the parser. It was only expecting to find one free
colon. The solution is just add some quotes around the colon, or quote the
entire line after the first colon.

For instance, if the original line was:

    copy: src=file.txt dest=/path/filename:with_colon.txt

It can be written as:

    copy: src=file.txt dest='/path/filename:with_colon.txt'

Or:

    copy: 'src=file.txt dest=/path/filename:with_colon.txt'
"""
        return msg
    else:
        # case 3: mismatched or unbalanced quotes in the value part
        parts = probline.split(":")
        if len(parts) > 1:
            middle = parts[1].strip()
            match = False
            unbalanced = False
            if middle.startswith("'") and not middle.endswith("'"):
                match = True
            elif middle.startswith('"') and not middle.endswith('"'):
                match = True
            # BUG FIX: the last clause previously re-tested single quotes
            # (probline.count("'") > 2 twice); it must test double quotes so
            # lines with several '"' characters are flagged as unbalanced.
            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
                unbalanced = True
            if match:
                msg = msg + """
This one looks easy to fix. It seems that there is a value started
with a quote, and the YAML parser is expecting to see the line ended
with the same kind of quote. For instance:

    when: "ok" in result.stdout

Could be written as:

    when: '"ok" in result.stdout'

or equivalently:

    when: "'ok' in result.stdout"
"""
                return msg
            if unbalanced:
                msg = msg + """
We could be wrong, but this one looks like it might be an issue with
unbalanced quotes. If starting a value with a quote, make sure the
line ends with the same set of quotes. For instance this arbitrary
example:

    foo: "bad" "wolf"

Could be written as:

    foo: '"bad" "wolf"'
"""
                return msg
    return msg
def process_yaml_error(exc, data, path=None):
    ''' Turn a yaml parser exception into a friendly
    AnsibleYAMLValidationFailed error with context and hints.
    Always raises. '''
    if hasattr(exc, 'problem_mark'):
        mark = exc.problem_mark
        if mark.line -1 >= 0:
            before_probline = data.split("\n")[mark.line-1]
        else:
            before_probline = ''
        probline = data.split("\n")[mark.line]
        arrow = " " * mark.column + "^"
        msg = """Syntax Error while loading YAML script, %s
Note: The error may actually appear before this position: line %s, column %s

%s
%s
%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)

        unquoted_var = None
        if '{{' in probline and '}}' in probline:
            if '"{{' not in probline or "'{{" not in probline:
                unquoted_var = True

        # BUG FIX: process_common_errors was previously invoked before this
        # branch AND inside both branches, so its hint text was appended to
        # the message two or three times. Call it exactly once per path.
        if not unquoted_var:
            msg = process_common_errors(msg, probline, mark.column)
        else:
            msg = msg + """
We could be wrong, but this one looks like it might be an issue with
missing quotes. Always quote template expression brackets when they
start a value. For instance:

    with_items:
      - {{ foo }}

Should be written as:

    with_items:
      - "{{ foo }}"
"""
            msg = process_common_errors(msg, probline, mark.column)
    else:
        # No problem markers means we have to throw a generic
        # "stuff messed up" type message. Sry bud.
        if path:
            msg = "Could not parse YAML. Check over %s again." % path
        else:
            msg = "Could not parse YAML."
    raise errors.AnsibleYAMLValidationFailed(msg)
def parse_yaml_from_file(path):
    ''' convert a yaml file to a data structure '''
    # NOTE: file() and "except X, exc" are Python 2-only syntax.
    try:
        data = file(path).read()
        return parse_yaml(data)
    except IOError:
        raise errors.AnsibleError("file not found: %s" % path)
    except yaml.YAMLError, exc:
        # process_yaml_error always raises AnsibleYAMLValidationFailed,
        # so this function never falls through to an implicit None here
        process_yaml_error(exc, data, path)
def parse_kv(args):
    ''' convert a string of key/value items to a dict '''
    options = {}
    if args is not None:
        # attempting to split a unicode here does bad things
        # (Py2: encode to UTF-8 bytes for shlex, decode tokens back)
        args = args.encode('utf-8')
        vargs = [x.decode('utf-8') for x in shlex.split(args, posix=True)]
        #vargs = shlex.split(str(args), posix=True)
        for x in vargs:
            # tokens without an '=' are silently dropped
            if x.find("=") != -1:
                k, v = x.split("=",1)
                options[k]=v
    return options
def merge_hash(a, b):
    ''' recursively merges hash b into a
    keys from b take precedence over keys from a '''
    # NOTE: iteritems() is Python 2 only. Neither input is mutated:
    # the result starts as a deep copy of a.
    result = copy.deepcopy(a)

    # next, iterate over b keys and values
    for k, v in b.iteritems():
        # if there's already such key in a
        # and that key contains dict
        if k in result and isinstance(result[k], dict):
            # merge those dicts recursively
            # (a[k] is re-deep-copied inside the recursive call, so using
            # a[k] rather than result[k] here yields the same result)
            result[k] = merge_hash(a[k], v)
        else:
            # otherwise, just copy a value from b to a
            result[k] = v

    return result
def md5s(data):
    ''' Return MD5 hex digest of data. '''
    hasher = _md5()
    try:
        hasher.update(data)
    except UnicodeEncodeError:
        # Python 2 unicode input: hash its UTF-8 encoding instead
        hasher.update(data.encode('utf-8'))
    return hasher.hexdigest()
def md5(filename):
    ''' Return MD5 hex digest of local file, or None if file is not present. '''
    if not os.path.exists(filename):
        return None
    hasher = _md5()
    # read in 64 KiB chunks so huge files do not land in memory at once
    infile = open(filename, 'rb')
    chunk = infile.read(64 * 1024)
    while chunk:
        hasher.update(chunk)
        chunk = infile.read(64 * 1024)
    infile.close()
    return hasher.hexdigest()
def default(value, function):
    ''' syntactic sugar around lazy evaluation of defaults '''
    # only call the factory when no value was supplied
    if value is not None:
        return value
    return function()
def _gitinfo():
    ''' returns a string containing git branch, commit id and commit date
    (empty string / None when not running from a git checkout) '''
    result = None
    repo_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '.git')

    if os.path.exists(repo_path):
        # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
        if os.path.isfile(repo_path):
            try:
                gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                # There is a posibility the .git file to have an absolute path.
                if os.path.isabs(gitdir):
                    repo_path = gitdir
                else:
                    repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
            except (IOError, AttributeError):
                return ''
        # HEAD looks like "ref: refs/heads/<branch>"; keep the last component
        f = open(os.path.join(repo_path, "HEAD"))
        branch = f.readline().split('/')[-1].rstrip("\n")
        f.close()
        branch_path = os.path.join(repo_path, "refs", "heads", branch)
        if os.path.exists(branch_path):
            f = open(branch_path)
            commit = f.readline()[:10]
            f.close()
            # the ref file's mtime stands in for the commit date
            date = time.localtime(os.stat(branch_path).st_mtime)
            if time.daylight == 0:
                offset = time.timezone
            else:
                offset = time.altzone
            result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
                time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
    else:
        result = ''
    return result
def version(prog):
    ''' return "<prog> <ansible version>", plus git checkout info when available '''
    pieces = ["{0} {1}".format(prog, __version__)]
    gitinfo = _gitinfo()
    if gitinfo:
        pieces.append(gitinfo)
    return " ".join(pieces)
def getch():
    ''' read in a single character '''
    fd = sys.stdin.fileno()
    # save terminal settings, switch to raw mode for the single read,
    # and ALWAYS restore (finally) so the terminal is never left raw
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
####################################################################
# option handling code for /usr/bin/ansible and ansible-playbook
# below this line
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    def format_help(self, formatter=None):
        # sort in place so --help output is alphabetical by option string
        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        # BUG FIX: the caller-supplied formatter was previously discarded
        # (the delegation hard-coded formatter=None); forward it instead.
        return optparse.OptionParser.format_help(self, formatter=formatter)
def increment_debug(option, opt, value, parser):
    ''' optparse callback: every -v flag bumps the module-level VERBOSITY '''
    global VERBOSITY
    VERBOSITY += 1
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
    async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
    ''' create an options parser for any ansible script

    The boolean flags gate optional option groups (output, sudo/run-as,
    async polling, connection type, host subsetting, --check, --diff)
    so each CLI tool only exposes what it supports. '''

    parser = SortedOptParser(usage, version=version("%prog"))
    parser.add_option('-v','--verbose', default=False, action="callback",
        callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
    parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
        help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
    parser.add_option('-i', '--inventory-file', dest='inventory',
        help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
        default=constants.DEFAULT_HOST_LIST)
    parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
        help='ask for SSH password')
    parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
        help='use this file to authenticate the connection')
    parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
        help='ask for sudo password')
    parser.add_option('--list-hosts', dest='listhosts', action='store_true',
        help='outputs a list of matching hosts; does not execute anything else')
    parser.add_option('-M', '--module-path', dest='module_path',
        help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
        default=None)

    if subset_opts:
        parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
            help='further limit selected hosts to an additional pattern')

    parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
        dest='timeout',
        help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)

    if output_opts:
        parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
            help='condense output')
        parser.add_option('-t', '--tree', dest='tree', default=None,
            help='log output to this directory')

    if runas_opts:
        parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true",
            dest='sudo', help="run operations with sudo (nopasswd)")
        parser.add_option('-U', '--sudo-user', dest='sudo_user', help='desired sudo user (default=root)',
            default=None)   # Can't default to root because we need to detect when this option was given
        parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER,
            dest='remote_user',
            help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)

    if connect_opts:
        parser.add_option('-c', '--connection', dest='connection',
            default=C.DEFAULT_TRANSPORT,
            help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)

    if async_opts:
        parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
            dest='poll_interval',
            help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
            help='run asynchronously, failing after X seconds (default=N/A)')

    if check_opts:
        parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
            help="don't make any changes; instead, try to predict some of the changes that may occur"
        )

    if diff_opts:
        parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
            help="when changing (small) files and templates, show the differences in those files; works great with --check"
        )

    return parser
def ask_passwords(ask_pass=False, ask_sudo_pass=False):
    ''' interactively prompt for the SSH and/or sudo passwords;
    returns (sshpass, sudopass), either of which may be None '''
    sshpass = None
    sudopass = None
    sudo_prompt = "sudo password: "

    if ask_pass:
        sshpass = getpass.getpass(prompt="SSH password: ")
        # hint in the sudo prompt that an empty answer reuses the SSH password
        sudo_prompt = "sudo password [defaults to SSH password]: "

    if ask_sudo_pass:
        sudopass = getpass.getpass(prompt=sudo_prompt)
        if ask_pass and sudopass == '':
            sudopass = sshpass

    return (sshpass, sudopass)
def do_encrypt(result, encrypt, salt_size=None, salt=None):
    ''' hash 'result' with the named passlib algorithm (e.g. "sha256_crypt");
    requires passlib, otherwise raises AnsibleError '''
    if PASSLIB_AVAILABLE:
        try:
            crypt = getattr(passlib.hash, encrypt)
        except:
            raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)

        # salt_size wins over an explicit salt when both are supplied
        if salt_size:
            result = crypt.encrypt(result, salt_size=salt_size)
        elif salt:
            result = crypt.encrypt(result, salt=salt)
        else:
            result = crypt.encrypt(result)
    else:
        raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")

    return result
def last_non_blank_line(buf):
    ''' return the last line of buf that contains any characters '''
    for line in reversed(buf.splitlines()):
        if len(line) > 0:
            return line
    # shouldn't occur unless there's no output
    return ""
def filter_leading_non_json_lines(buf):
    '''
    used to avoid random output from SSH at the top of JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

    need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
    filter only leading lines since multiline JSON is valid.
    '''
    kept = []
    seen_payload = False
    for line in buf.splitlines():
        if not seen_payload:
            # first line that looks like JSON or key=value output starts the payload
            seen_payload = "=" in line or line.startswith('{') or line.startswith('[')
        if seen_payload:
            kept.append(line + '\n')
    return ''.join(kept)
def boolean(value):
    ''' coerce a truthy string ("true"/"t"/"y"/"1"/"yes", any case) to True;
    everything else becomes False '''
    return str(value).lower() in ("true", "t", "y", "1", "yes")
def make_sudo_cmd(sudo_user, executable, cmd):
    """
    helper function for connection plugins to create sudo commands

    Returns (full shell command, password prompt string, success marker).
    NOTE: xrange is Python 2 only.
    """
    # Rather than detect if sudo wants a password this time, -k makes
    # sudo always ask for a password if one is required.
    # Passing a quoted compound command to sudo (or sudo -s)
    # directly doesn't work, so we shellquote it with pipes.quote()
    # and pass the quoted string to the user's shell.  We loop reading
    # output until we see the randomly-generated sudo prompt set with
    # the -p option.
    # randbits makes both the prompt and success marker unguessable per-call
    randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
    prompt = '[sudo via ansible, key=%s] password: ' % randbits
    success_key = 'SUDO-SUCCESS-%s' % randbits
    sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
        C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS,
        prompt, sudo_user, executable or '$SHELL', pipes.quote('echo %s; %s' % (success_key, cmd)))
    return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt, success_key)
# types returned untouched by to_unicode (Python 2 unicode and None)
_TO_UNICODE_TYPES = (unicode, type(None))

def to_unicode(value):
    ''' decode a byte string as UTF-8; unicode and None pass through '''
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    return value.decode("utf-8")
def get_diff(diff):
    ''' Render a diff dict (from runner/playbook callbacks) as a unified
    diff string, honoring the skip markers for binary/oversized files. '''
    # called by --diff usage in playbook and runner via callbacks
    # include names in diffs 'before' and 'after' and do diff -U 10
    try:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            ret = []
            if 'dst_binary' in diff:
                ret.append("diff skipped: destination file appears to be binary\n")
            if 'src_binary' in diff:
                ret.append("diff skipped: source file appears to be binary\n")
            if 'dst_larger' in diff:
                ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
            if 'src_larger' in diff:
                ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
            if 'before' in diff and 'after' in diff:
                if 'before_header' in diff:
                    before_header = "before: %s" % diff['before_header']
                else:
                    before_header = 'before'
                if 'after_header' in diff:
                    after_header = "after: %s" % diff['after_header']
                else:
                    after_header = 'after'
                # 10 lines of context, matching the comment above
                differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
                for line in list(differ):
                    ret.append(line)
            return u"".join(ret)
    except UnicodeDecodeError:
        return ">> the files are different, but the diff library cannot compare unicode strings"
def is_list_of_strings(items):
    ''' True when every element of items is a (byte or unicode) string '''
    return all(isinstance(item, basestring) for item in items)
def safe_eval(str, locals=None, include_exceptions=False):
    '''
    this is intended for allowing things like:
    with_items: a_list_variable
    where Jinja2 would return a string
    but we do not want to allow it to call functions (outside of Jinja2, where
    the env is constrained)

    NOTE: the parameter shadows the builtin str; the regex guards below are a
    heuristic filter, NOT a real sandbox — eval() still runs on whatever passes.
    "except Exception, e" is Python 2-only syntax.
    '''
    # FIXME: is there a more native way to do this?

    # these two helpers are defined but never used in this function
    def is_set(var):
        return not var.startswith("$") and not '{{' in var

    def is_unset(var):
        return var.startswith("$") or '{{' in var

    # do not allow method calls to modules
    if not isinstance(str, basestring):
        # already templated to a datastructure, perhaps?
        if include_exceptions:
            return (str, None)
        return str
    if re.search(r'\w\.\w+\(', str):
        if include_exceptions:
            return (str, None)
        return str
    # do not allow imports
    if re.search(r'import \w+', str):
        if include_exceptions:
            return (str, None)
        return str
    try:
        result = None
        if not locals:
            result = eval(str)
        else:
            result = eval(str, None, locals)
        if include_exceptions:
            return (result, None)
        else:
            return result
    except Exception, e:
        # evaluation failed: hand back the original string unchanged
        if include_exceptions:
            return (str, e)
        return str
def listify_lookup_plugin_terms(terms, basedir, inject):
    ''' Normalize a with_* lookup argument into a list: template bare
    variable names, eval Jinja2-produced list/dict reprs, and wrap a
    plain string in a one-element list. '''

    if isinstance(terms, basestring):
        # someone did:
        # with_items: alist
        # OR
        # with_items: {{ alist }}

        stripped = terms.strip()
        if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/"):
            # if not already a list, get ready to evaluate with Jinja2
            # not sure why the "/" is in above code :)
            try:
                new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
                if isinstance(new_terms, basestring) and new_terms.find("{{") != -1:
                    # templating did not resolve; keep the original string
                    pass
                else:
                    terms = new_terms
            except:
                pass

        if '{' in terms or '[' in terms:
            # Jinja2 already evaluated a variable to a list.
            # Jinja2-ified list needs to be converted back to a real type
            # TODO: something a bit less heavy than eval
            return safe_eval(terms)

        if isinstance(terms, basestring):
            terms = [ terms ]

    return terms
def deprecated(msg, version, removed=False):
    ''' used to print out a deprecation message.

    With removed=True an AnsibleError is raised instead of a warning;
    otherwise the message is shown at most once (tracked in the
    module-level 'deprecations' dict) unless warnings are disabled. '''

    if not removed and not C.DEPRECATION_WARNINGS:
        return

    if not removed:
        if version:
            new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
        else:
            new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
        new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
    else:
        # hard failure: the feature is gone, not merely deprecated
        raise errors.AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)

    wrapped = textwrap.wrap(new_msg, 79)
    new_msg = "\n".join(wrapped) + "\n"

    if new_msg not in deprecations:
        display(new_msg, color='purple', stderr=True)
        deprecations[new_msg] = 1
def warning(msg):
    ''' display a [WARNING] banner on stderr, at most once per unique message '''
    new_msg = "\n[WARNING]: %s" % msg
    wrapped = textwrap.wrap(new_msg, 79)
    new_msg = "\n".join(wrapped) + "\n"
    # de-duplicate via the module-level 'warns' registry
    if new_msg not in warns:
        display(new_msg, color='bright purple', stderr=True)
        warns[new_msg] = 1
def combine_vars(a, b):
    ''' combine two variable dicts; with hash_behaviour=merge they are
    deep-merged, otherwise b's keys simply replace a's '''
    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)
    else:
        # NOTE: list-concatenation of items() is Python 2 only;
        # b's items come last so they win on duplicate keys
        return dict(a.items() + b.items())
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
    '''Return a random password string of length containing only chars.'''
    # Rejection sampling: draw one random byte at a time from the OS CSPRNG
    # and keep it only if it is in the allowed alphabet.
    # NOTE(review): on Python 2 os.urandom(1) is a 1-char str so the 'in chars'
    # test works; on Python 3 it would be bytes — confirm this module is Py2-only.
    password = []
    while len(password) < length:
        new_char = os.urandom(1)
        if new_char in chars:
            password.append(new_char)

    return ''.join(password)
| gpl-3.0 |
BT-rmartin/odoo | addons/mrp_repair/__init__.py | 380 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_repair
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
plus1s/shadowsocks-py-mu | shadowsocks/common.py | 1 | 9463 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import hashlib
import hmac
# Number of leading bytes of the HMAC-SHA1 digest used as the one-time-auth
# tag (see onetimeauth_gen / onetimeauth_verify below).
ONETIMEAUTH_BYTES = 10
# NOTE(review): 12 = ONETIMEAUTH_CHUNK_DATA_LEN + ONETIMEAUTH_BYTES --
# presumably the per-chunk overhead; confirm against the protocol users.
ONETIMEAUTH_CHUNK_BYTES = 12
ONETIMEAUTH_CHUNK_DATA_LEN = 2
# Index consts for transfer statistics
# (U/D are presumably upload/download counters -- verify at call sites.)
U = 0
D = 1
def sha1_hmac(secret, data):
    """Return the raw HMAC-SHA1 digest of *data* keyed with *secret*."""
    mac = hmac.new(secret, data, hashlib.sha1)
    return mac.digest()
def onetimeauth_verify(_hash, data, key):
    """Check *_hash* against the expected truncated HMAC-SHA1 tag of *data*."""
    expected = sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
    return _hash == expected
def onetimeauth_gen(data, key):
    """Produce the truncated HMAC-SHA1 authentication tag for *data*."""
    digest = sha1_hmac(key, data)
    return digest[:ONETIMEAUTH_BYTES]
def compat_ord(s):
    """ord() variant: ints (Python 3 bytes items) pass through unchanged."""
    return s if type(s) == int else _ord(s)
def compat_chr(d):
    """chr() variant that yields a single byte on Python 3."""
    if bytes != str:
        return bytes([d])
    return _chr(d)
# Keep references to the real builtins before shadowing them below.
_ord = ord
_chr = chr
# Module-wide ord/chr that behave uniformly on Python 2 and 3 (indexing
# bytes yields int on py3 but str on py2).
ord = compat_ord
chr = compat_chr
def to_bytes(s):
    """UTF-8-encode *s* on Python 3 when it is a str; no-op otherwise."""
    if bytes == str:
        # Python 2: str already is bytes.
        return s
    return s.encode('utf-8') if type(s) == str else s
def to_str(s):
    """UTF-8-decode *s* on Python 3 when it is bytes; no-op otherwise."""
    if bytes == str:
        # Python 2: nothing to do.
        return s
    return s.decode('utf-8') if type(s) == bytes else s
def inet_ntop(family, ipstr):
    """Packed binary address -> presentation text; fallback used by
    patch_socket() on platforms whose socket module lacks inet_ntop."""
    if family == socket.AF_INET:
        return to_bytes(socket.inet_ntoa(ipstr))
    elif family == socket.AF_INET6:
        import re
        # Render each 16-bit group as hex without leading zeros, then
        # collapse the first run of ':' separators into '::'.
        # NOTE(review): an all-zero group lstrip()s to '', relying entirely
        # on the '::+' collapse below -- verify edge cases such as '::'.
        v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
                          for i, j in zip(ipstr[::2], ipstr[1::2]))
        v6addr = re.sub('::+', '::', v6addr, count=1)
        return to_bytes(v6addr)
def inet_pton(family, addr):
    """Presentation text -> packed binary address; fallback used by
    patch_socket() on platforms whose socket module lacks inet_pton.

    Raises RuntimeError for an unknown address family.
    """
    addr = to_str(addr)
    if family == socket.AF_INET:
        return socket.inet_aton(addr)
    elif family == socket.AF_INET6:
        if '.' in addr:  # a v4-mapped addr, e.g. ::ffff:1.2.3.4
            v4addr = addr[addr.rindex(':') + 1:]
            v4addr = socket.inet_aton(v4addr)
            # list() is required on Python 3, where map() returns an
            # iterator that has no insert(); on Python 2 it is a no-op.
            v4addr = list(map(lambda x: ('%02X' % ord(x)), v4addr))
            v4addr.insert(2, ':')
            newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
            return inet_pton(family, newaddr)
        dbyts = [0] * 8  # 8 groups
        grps = addr.split(':')
        for i, v in enumerate(grps):
            if v:
                dbyts[i] = int(v, 16)
            else:
                # Empty group marks '::': fill the remaining groups from
                # the right-hand side, then stop.
                for j, w in enumerate(grps[::-1]):
                    if w:
                        dbyts[7 - j] = int(w, 16)
                    else:
                        break
                break
        return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
    else:
        raise RuntimeError("What family?")
def is_ip(address):
    """Return the address family of *address* when it parses as an IPv4 or
    IPv6 address, otherwise False."""
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            # Decode inside the try: UnicodeDecodeError is a ValueError
            # subclass and must fall through to the next family / False.
            candidate = address
            if type(candidate) != str:
                candidate = candidate.decode('utf8')
            inet_pton(family, candidate)
            return family
        except (TypeError, ValueError, OSError, IOError):
            pass
    return False
def patch_socket():
    """Install the pure-Python inet_pton/inet_ntop fallbacks on platforms
    whose socket module does not provide them."""
    for name, fallback in (('inet_pton', inet_pton),
                           ('inet_ntop', inet_ntop)):
        if not hasattr(socket, name):
            setattr(socket, name, fallback)
patch_socket()
# Address-type codes carried in the request header (presumably the SOCKS5
# ATYP values -- 0x01 IPv4, 0x03 domain name, 0x04 IPv6).
ADDRTYPE_IPV4 = 0x01
ADDRTYPE_IPV6 = 0x04
ADDRTYPE_HOST = 0x03
# High bit flags one-time-auth; the low nibble holds the address type
# (see the '& ADDRTYPE_MASK' tests in parse_header).
ADDRTYPE_AUTH = 0x10
ADDRTYPE_MASK = 0xF
def pack_addr(address):
    """Pack a host into wire format: one address-type byte followed by the
    binary IPv4/IPv6 address, or a length-prefixed hostname."""
    address_str = to_str(address)
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            r = socket.inet_pton(family, address_str)
            if family == socket.AF_INET6:
                return b'\x04' + r
            else:
                return b'\x01' + r
        except (TypeError, ValueError, OSError, IOError):
            pass
    # Not an IP literal: encode as hostname with a single length byte.
    # 'chr' is compat_chr here, so the length byte is bytes on py2 and py3.
    if len(address) > 255:
        address = address[:255]  # TODO
    return b'\x03' + chr(len(address)) + address
def parse_header(data):
    """Parse a request header from *data*.

    Returns (addrtype, dest_addr_bytes, dest_port, header_length), or None
    when the header is truncated or the address type is unknown.
    """
    addrtype = ord(data[0])
    dest_addr = None
    dest_port = None
    header_length = 0
    # The low nibble selects the address encoding; high bits may carry
    # flags such as ADDRTYPE_AUTH.
    if addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV4:
        # 1 type byte + 4 address bytes + 2 port bytes
        if len(data) >= 7:
            dest_addr = socket.inet_ntoa(data[1:5])
            dest_port = struct.unpack('>H', data[5:7])[0]
            header_length = 7
        else:
            logging.warn('header is too short')
    elif addrtype & ADDRTYPE_MASK == ADDRTYPE_HOST:
        # 1 type byte + 1 length byte + hostname + 2 port bytes
        if len(data) > 2:
            addrlen = ord(data[1])
            if len(data) >= 4 + addrlen:
                dest_addr = data[2:2 + addrlen]
                dest_port = struct.unpack('>H', data[2 + addrlen:4 +
                                                     addrlen])[0]
                header_length = 4 + addrlen
            else:
                logging.warn('header is too short')
        else:
            logging.warn('header is too short')
    elif addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV6:
        # 1 type byte + 16 address bytes + 2 port bytes
        if len(data) >= 19:
            dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
            dest_port = struct.unpack('>H', data[17:19])[0]
            header_length = 19
        else:
            logging.warn('header is too short')
    else:
        logging.warn('unsupported addrtype %d, maybe wrong password or '
                     'encryption method' % addrtype)
    if dest_addr is None:
        return None
    return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
    """A set of CIDR networks supporting membership tests via ``in``.

    Accepts a comma-separated string or an iterable of entries such as
    '127.0.0.0/24', '::1' or '192.0.2.0'.  An entry without an explicit
    prefix length spans all of its trailing zero bits (e.g. 192.0.2.0
    behaves like 192.0.2.0/23 -- see test_ip_network).
    """

    # Bits per address for each family; False (non-IP) maps to 0.
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        self._network_list_v4 = []
        self._network_list_v6 = []
        if type(addrs) == str:
            addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Parse one CIDR entry and store (network, host-bit-count)."""
        # '==' instead of the original 'is': identity comparison against
        # literals is implementation-dependent (and a SyntaxWarning on
        # modern CPython).
        if addr == "":
            return
        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family is socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family is socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # No explicit prefix: consume the trailing zero bits.
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            # Message fixed: the implicit prefix length is the number of
            # remaining network bits, not the full address length.
            logging.warn("You didn't specify CIDR routing prefix size for "
                         "%s, implicit treated as %s/%d"
                         % (addr, addr, addr_len - prefix_size))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            # prefix_size counts host bits, i.e. addr_len - prefix length.
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family is socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        """True when *addr* (an IP string) falls inside any stored network."""
        addr_family = is_ip(addr)
        if addr_family is socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family is socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False
def test_inet_conv():
    """Round-trip an IPv4 and an IPv6 address through the fallback
    inet_pton/inet_ntop pair."""
    ipv4 = b'8.8.4.4'
    b = inet_pton(socket.AF_INET, ipv4)
    assert inet_ntop(socket.AF_INET, b) == ipv4
    ipv6 = b'2404:6800:4005:805::1011'
    b = inet_pton(socket.AF_INET6, ipv6)
    assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
    """parse_header must decode hostname, IPv4 and IPv6 headers into
    (addrtype, addr, port, header_length)."""
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (3, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (1, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
    """pack_addr must emit type-tagged IPv4 / IPv6 / hostname encodings."""
    assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
    assert pack_addr(b'2404:6800:4005:805::1011') == \
        b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
    assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
    """Exercise IPNetwork membership for v4/v6 CIDRs, bare hosts, and the
    implicit prefix rule for addresses given without '/len'."""
    ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
    assert '127.0.0.1' in ip_network
    assert '127.0.1.1' not in ip_network
    assert ':ff:ffff' in ip_network
    assert '::ffff:1' not in ip_network
    assert '::1' in ip_network
    assert '::2' not in ip_network
    assert '192.168.1.1' in ip_network
    assert '192.168.1.2' not in ip_network
    assert '192.0.2.1' in ip_network
    assert '192.0.3.1' in ip_network  # 192.0.2.0 is treated as 192.0.2.0/23
    assert 'www.google.com' not in ip_network
if __name__ == '__main__':
    # Run the module self-tests when executed directly.
    test_inet_conv()
    test_parse_header()
    test_pack_header()
    test_ip_network()
| apache-2.0 |
acdha/django-inlines | tests/test_cases/test_forms.py | 1 | 2649 | from django import forms
from django_inlines import registry
from django_inlines.forms import InlineField
from test_app.inlines import BasicInline
from .test_common import InlinesTestCase
__all__ = ('InlineFormTestCase',)
class InlineForm(forms.Form):
content = InlineField()
class InlineFormTestCase(InlinesTestCase):
def setUp(self):
registry.register('echo', BasicInline)
def test_valid_form(self):
form = InlineForm({
'content':
u'{{ echo arg1 arg2 kwarg1=kwarg1 kwarg2=kwarg2 }}'})
self.assertTrue(form.is_valid())
def test_syntax_errors(self):
content = u'''
{{ }}
Text Token
{{ echo2 }}
{{ echo arg1=arg1 arg2 kwarg1=kwarg1 kwarg2=kwarg2 }}
'''
form = InlineForm({'content': content})
self.assertFalse(form.is_valid())
self.assertEqual([
u'Syntax error on line 2. Empty inline found.',
u'Syntax error on line 6. Inline `echo2` is not registered.',
u'Syntax error on line 8. Inline `echo arg1=arg1 arg2 '
u'kwarg1=kwarg1 kwarg2=kwarg2`, non-keyword argument found '
u'after keyword argument.'],
form.errors['content'])
def test_inline_errors(self):
content = u'''{{ echo arg1 arg3 kwarg1=kwarg1 kwarg2=kwarg2 }}
'''
form = InlineForm({'content': content})
self.assertFalse(form.is_valid())
self.assertEqual([
u'Inline error on line 1. Inline `echo arg1 arg3 kwarg1=kwarg1 '
u'kwarg2=kwarg2`, argument `arg2` (pos 2): `arg3` is not a valid '
u'choice.'],
form.errors['content'])
def test_inline_and_syntax_errors(self):
content = u'''
{{ }}
Text Token
{{ echo2 }}
{{ echo arg1 arg3 kwarg1=kwarg1 kwarg2=kwarg2 }}
{{ echo arg1=arg1 arg2 kwarg1=kwarg1 kwarg2=kwarg2 }}
'''
form = InlineForm({'content': content})
self.assertFalse(form.is_valid())
self.assertEqual([
u'Syntax error on line 2. Empty inline found.',
u'Syntax error on line 6. Inline `echo2` is not registered.',
u'Inline error on line 7. Inline `echo arg1 arg3 kwarg1=kwarg1 '
u'kwarg2=kwarg2`, argument `arg2` (pos 2): `arg3` is not a valid '
u'choice.',
u'Syntax error on line 8. Inline `echo arg1=arg1 arg2 '
u'kwarg1=kwarg1 kwarg2=kwarg2`, non-keyword argument found '
u'after keyword argument.', ],
form.errors['content'])
| bsd-3-clause |
Pluto-tv/chromium-crosswalk | third_party/closure_linter/closure_linter/errorrules.py | 124 | 2276 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linter error rules class for Closure Linter."""
__author__ = 'robbyw@google.com (Robert Walker)'
import gflags as flags
from closure_linter import errors
# Command-line flags controlling which lint errors get reported.
FLAGS = flags.FLAGS
flags.DEFINE_boolean('jsdoc', True,
                     'Whether to report errors for missing JsDoc.')
flags.DEFINE_list('disable', None,
                  'Disable specific error. Usage Ex.: gjslint --disable 1,'
                  '0011 foo.js.')
flags.DEFINE_integer('max_line_length', 80, 'Maximum line length allowed '
                     'without warning.', lower_bound=1)
# Lazily-populated cache of error numbers disabled via --disable;
# built on first call to ShouldReportError().
disabled_error_nums = None
def GetMaxLineLength():
  """Return the configured maximum allowed line length.

  Returns:
    The --max_line_length flag value; longer lines warrant a warning.
  """
  max_length = FLAGS.max_line_length
  return max_length
def ShouldReportError(error):
  """Whether the given error should be reported.

  Returns:
    True for all errors except missing documentation errors and disabled
    errors. For missing documentation, it returns the value of the
    jsdoc flag.
  """
  global disabled_error_nums
  # Build the disabled-error cache lazily, on the first call only.
  if disabled_error_nums is None:
    disabled_error_nums = []
    if FLAGS.disable:
      for error_str in FLAGS.disable:
        error_num = 0
        # Non-numeric --disable entries are recorded as 0, which
        # presumably matches no real error code -- effectively ignored.
        try:
          error_num = int(error_str)
        except ValueError:
          pass
        disabled_error_nums.append(error_num)
  return ((FLAGS.jsdoc or error not in (
      errors.MISSING_PARAMETER_DOCUMENTATION,
      errors.MISSING_RETURN_DOCUMENTATION,
      errors.MISSING_MEMBER_DOCUMENTATION,
      errors.MISSING_PRIVATE,
      errors.MISSING_JSDOC_TAG_THIS)) and
          (not FLAGS.disable or error not in disabled_error_nums))
| bsd-3-clause |
tangp3/gpdb | gpMgmt/bin/gppylib/test/behave/mgmt_utils/steps/logger.py | 19 | 4142 | import os
import glob
from gppylib.test.behave_utils.utils import execute_sql_singleton
# The behave steps below inspect the master's pg_log directory, so
# MASTER_DATA_DIRECTORY must be set before the suite runs.
master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if master_data_dir is None:
    raise Exception('Please set MASTER_DATA_DIRECTORY in environment')
def gp_fts_log_in_master_log_count(mdd):
    """Count completed FTS probe cycles in the newest master log file."""
    pattern = 'FTS: probe result processing is complete'
    return gp_in_master_log_count(mdd, pattern)
def gp_in_master_log_count(mdd, pattern):
    """Count lines containing *pattern* in the newest gpdb-*.csv log under
    <mdd>/pg_log.

    Raises an Exception when no log file matches the glob.
    """
    pg_log_glob = os.path.join(mdd, 'pg_log', 'gpdb-20??-??-*.csv')
    files = glob.glob(pg_log_glob)
    files.sort()
    if not files:
        raise Exception('pg_log not found with the following pattern on master: %s' % pg_log_glob)
    # 'with' guarantees the descriptor is closed; the original leaked it.
    with open(files[-1]) as fd:
        output = fd.read()
    counter = 0
    for line in output.splitlines():
        if pattern in line:
            counter = counter + 1
    return counter
def gp_fts_log_in_segment_log_count():
    """Count 'FTS: Probe Request' messages in segment logs via gp_toolkit."""
    COUNT_SQL = """
    SELECT count(*) from gp_toolkit.__gp_log_segment_ext
    WHERE logmessage like '%FTS: Probe Request%';
    """
    return execute_sql_singleton('template1', COUNT_SQL)
# Log-line prefix emitted by the master when it logs dispatched plan sizes.
QUERY_PLAN_SIZE_STRING = 'Query plan size to dispatch:'
@then(u'the count of query plan logs in pg_log is stored')
def impl(context):
    # Snapshot the current count so later steps can detect changes.
    context.master_plan_size_count = gp_in_master_log_count(master_data_dir, QUERY_PLAN_SIZE_STRING)
@then(u'the count of query plan logs is not changed')
def impl(context):
    # Plan-size logging should be off: count must equal the stored snapshot.
    count = gp_in_master_log_count(master_data_dir, QUERY_PLAN_SIZE_STRING)
    if count != context.master_plan_size_count:
        raise Exception("'%s' is still being logged in pg_log when it should be off counts (%d, %d)" % (QUERY_PLAN_SIZE_STRING, count, context.master_plan_size_count))
    print "IVAN query plan logs is unchanged: %d %d" % (count, context.master_plan_size_count)
@then(u'the count of query plan logs is increased')
def impl(context):
    # Plan-size logging should be on: count must exceed the stored snapshot.
    count = gp_in_master_log_count(master_data_dir, QUERY_PLAN_SIZE_STRING)
    if count <= context.master_plan_size_count:
        raise Exception("'%s' is not being logged in pg_log when it should be on counts (%d, %d)" % (QUERY_PLAN_SIZE_STRING, count, context.master_plan_size_count))
    print "IVAN query plan logs is increased: %d %d" % (count, context.master_plan_size_count)
@then('the count of verbose logs in pg_log is stored')
def impl(context):
    # Snapshot FTS log counts on master and segments for later comparison.
    context.master_fts_log_count = gp_fts_log_in_master_log_count(master_data_dir)
    context.segment_fts_log_count = gp_fts_log_in_segment_log_count()
@then('the count of verbose fts logs is not changed')
def impl(context):
    # With verbose FTS logging off, neither master nor segment counts may grow.
    master_fts_log_count = gp_fts_log_in_master_log_count(master_data_dir)
    segment_fts_log_count = gp_fts_log_in_segment_log_count()
    if master_fts_log_count != context.master_fts_log_count:
        raise Exception("Number of FTS logs on master has changed when logging is turned off: orig count %d new count %d" % (context.master_fts_log_count, master_fts_log_count))
    if segment_fts_log_count != context.segment_fts_log_count:
        raise Exception("Number of FTS logs on segments has changed when logging is turned off: orig count %d new count %d" % (context.segment_fts_log_count, segment_fts_log_count))
    # Refresh the stored baselines for subsequent steps.
    context.master_fts_log_count = master_fts_log_count
    context.segment_fts_log_count = segment_fts_log_count
@then('the count of verbose fts logs is increased on all segments')
def impl(context):
    # With verbose FTS logging on, both master and segment counts must grow.
    master_fts_log_count = gp_fts_log_in_master_log_count(master_data_dir)
    segment_fts_log_count = gp_fts_log_in_segment_log_count()
    if master_fts_log_count <= context.master_fts_log_count:
        raise Exception("Number of FTS logs on master has not increased changed when logging is turned on: orig count %d new count %d" % (context.master_fts_log_count, master_fts_log_count))
    if segment_fts_log_count <= context.segment_fts_log_count:
        raise Exception("Number of FTS logs on segments has not increased when logging is turned on: orig count %d new count %d" % (context.segment_fts_log_count, segment_fts_log_count))
    # Refresh the stored baselines for subsequent steps.
    context.master_fts_log_count = master_fts_log_count
    context.segment_fts_log_count = segment_fts_log_count
| apache-2.0 |
eljost/pysisyphus | tests/test_tsopt/test_augment_coordinates.py | 1 | 1797 | #!/usr/bin/env python3
import pytest
from pysisyphus.calculators import Gaussian16
from pysisyphus.calculators.PySCF import PySCF
from pysisyphus.helpers import geom_loader
from pysisyphus.intcoords.augment_bonds import augment_bonds
from pysisyphus.testing import using
from pysisyphus.tsoptimizers.RSIRFOptimizer import RSIRFOptimizer
@using("pyscf")
@pytest.mark.parametrize(
"augment, ref_cycle",
[
(True, 6),
# 57 cycles are needed when 45 < VALID_BEND_DEG < 170
# (False, 57),
(False, 6),
],
)
def test_augment_coordinates_silyl(augment, ref_cycle):
geom = geom_loader("lib:baker_ts/18_silyene_insertion.xyz", coord_type="redund")
opt_kwargs = {
"thresh": "baker",
"max_cycles": 100,
"dump": True,
"trust_radius": 0.3,
"trust_max": 0.3,
"augment_bonds": augment,
}
calc_kwargs = {
"charge": 0,
"mult": 1,
"pal": 4,
}
calc = PySCF(basis="321g", **calc_kwargs)
geom.set_calculator(calc)
opt = RSIRFOptimizer(geom, **opt_kwargs)
opt.run()
assert opt.is_converged
assert opt.cur_cycle == ref_cycle
ref_en = -367.20778
assert opt.geometry.energy == pytest.approx(ref_en)
@using("gaussian16")
@pytest.mark.parametrize(
"augment, ref_cycle",
[
(True, 28),
(False, 40),
],
)
def test_augment_biaryl_bare(augment, ref_cycle):
geom = geom_loader("lib:biaryl_bare_pm6_splined_hei.xyz", coord_type="redund")
calc = Gaussian16("PM6", pal=4)
geom.set_calculator(calc)
opt_kwargs = {
"thresh": "gau_tight",
"augment_bonds": augment,
}
opt = RSIRFOptimizer(geom, **opt_kwargs)
opt.run()
assert opt.is_converged
assert opt.cur_cycle == ref_cycle
| gpl-3.0 |
wolfchase/ardets | languages/ca.py | 72 | 23910 | # -*- coding: utf-8 -*-
{
'!langcode!': 'ca',
'!langname!': 'Català',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualizi" és una expressió opcional com "camp1=\'nou_valor\'". No es poden actualitzar o eliminar resultats de un JOIN',
'%(nrows)s records found': '%(nrows)s registres trobats',
'%s %%{position}': '%s %%{posició}',
'%s %%{row} deleted': '%s %%{fila} %%{eliminada}',
'%s %%{row} updated': '%s %%{fila} %%{actualitzada}',
'%s selected': '%s %%{seleccionat}',
'%Y-%m-%d': '%d/%m/%Y',
'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'(something like "it-it")': '(similar a "això-això")',
'@markmin\x01An error occured, please [[reload %s]] the page': 'Hi ha hagut un error, si us plau [[recarregui %s]] la pàgina',
'@markmin\x01Number of entries: **%s**': "Nombre d'entrades: **%s**",
'A new version of web2py is available': 'Hi ha una nova versió de wep2py disponible',
'A new version of web2py is available: %s': 'Hi ha una nova versió de wep2py disponible: %s',
'About': 'Sobre',
'about': 'sobre',
'About application': "Sobre l'aplicació",
'Access Control': "Control d'Accés",
'Add': 'Afegir',
'Add Record': 'Afegeix registre',
'additional code for your application': '`codi addicional per a la seva aplicació',
'admin disabled because no admin password': 'admin inhabilitat per falta de contrasenya',
'admin disabled because not supported on google app engine': 'admin inhabilitat, no és suportat en GAE',
'admin disabled because unable to access password file': 'admin inhabilitat, impossible accedir al fitxer con la contrasenya',
'Admin is disabled because insecure channel': 'Admin inhabilitat, el canal no és segur',
'Admin is disabled because unsecure channel': 'Admin inhabilitat, el canal no és segur',
'Administrative interface': 'Interfície administrativa',
'Administrative Interface': 'Interfície Administrativa',
'administrative interface': 'interfície administrativa',
'Administrator Password:': 'Contrasenya del Administrador:',
'Ajax Recipes': 'Receptes AJAX',
'An error occured, please %s the page': 'Hi ha hagut un error, per favor %s la pàgina',
'And': 'I',
'and rename it (required):': 'i renombri-la (requerit):',
'and rename it:': " i renombri'l:",
'appadmin': 'appadmin',
'appadmin is disabled because insecure channel': 'admin inhabilitat, el canal no és segur',
'application "%s" uninstalled': 'aplicació "%s" desinstal·lada',
'application compiled': 'aplicació compilada',
'application is compiled and cannot be designed': 'la aplicació està compilada i no pot ser modificada',
'Apply changes': 'Aplicar canvis',
'Appointment': 'Nomenament',
'Are you sure you want to delete file "%s"?': 'Està segur que vol eliminar el arxiu "%s"?',
'Are you sure you want to delete this object?': 'Està segur que vol esborrar aquest objecte?',
'Are you sure you want to uninstall application "%s"': '¿Està segur que vol desinstalar la aplicació "%s"',
'Are you sure you want to uninstall application "%s"?': '¿Està segur que vol desinstalar la aplicació "%s"?',
'at': 'a',
'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCIÓ: Inici de sessió requereix una connexió segura (HTTPS) o localhost.',
'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.',
'ATTENTION: you cannot edit the running application!': 'ATENCIO: no pot modificar la aplicació que està ejecutant-se!',
'Authentication': 'Autenticació',
'Authentication failed at client DB!': '¡La autenticació ha fallat en la BDD client!',
'Authentication failed at main DB!': '¡La autenticació ha fallat en la BDD principal!',
'Available Databases and Tables': 'Bases de dades i taules disponibles',
'Back': 'Endarrera',
'Buy this book': 'Compra aquest lllibre',
'Cache': 'Caché',
'cache': 'caché',
'Cache Cleared': 'Caché Netejada',
'Cache Keys': 'Claus de la Caché',
'cache, errors and sessions cleaned': 'caché, errors i sessions eliminats',
'Cannot be empty': 'No pot estar buit',
'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se pot compilar: hi ha errors en la seva aplicació. Depuri, corregeixi errors i torni a intentar-ho.',
'cannot upload file "%(filename)s"': 'no és possible pujar fitxer "%(filename)s"',
'Change Password': 'Canviï la Contrasenya',
'Change password': 'Canviï la contrasenya',
'change password': 'canviï la contrasenya',
'Changelog': 'Changelog',
'check all': 'marcar tots',
'Check to delete': 'Marqui per a eliminar',
'choose one': 'escolliu un',
'clean': 'neteja',
'Clear': 'Netejar',
'Clear CACHE?': 'Netejar Memòrica Cau?',
'Clear DISK': 'Netejar DISC',
'Clear RAM': 'Netejar RAM',
'Click on the link %(link)s to reset your password': "Cliqui en l'enllaç %(link)s per a reiniciar la seva contrasenya",
'click to check for upgrades': 'feu clic per buscar actualitzacions',
'client': 'cliente',
'Client IP': 'IP del Client',
'Close': 'Tancar',
'Comma-separated export including columns not shown; fields from other tables are exported as raw values for faster export': 'Comma-separated export including columns not shown; fields from other tables are exported as raw values for faster export',
'Comma-separated export of visible columns. Fields from other tables are exported as they appear on-screen but this may be slow for many rows': 'Comma-separated export of visible columns. Fields from other tables are exported as they appear on-screen but this may be slow for many rows',
'Community': 'Comunitat',
'compile': 'compilar',
'compiled application removed': 'aplicació compilada eliminada',
'Components and Plugins': 'Components i Plugins',
'contains': 'conté',
'Controller': 'Controlador',
'Controllers': 'Controladors',
'controllers': 'controladors',
'Copyright': 'Copyright',
'Correo electrónico invàlid': 'Correu electrònic invàlid',
'create file with filename:': 'crear el fitxer amb el nom:',
'Create new application': 'Crear una nova aplicació',
'create new application:': 'crear una nova aplicació:',
'Create New Page': 'Crear Pàgina Nova',
'Create Page from Slug': 'Create Page from Slug',
'Created By': 'Creat Per',
'Created On': 'Creat a',
'CSV': 'CSV',
'CSV (hidden cols)': 'CSV (columnas ocultes)',
'Current request': 'Sol·licitud en curs',
'Current response': 'Resposta en curs',
'Current session': 'Sessió en curs',
'currently saved or': 'actualment guardat o',
'customize me!': "¡Adapta'm!",
'data uploaded': 'dades pujades',
'Database': 'Base de dades',
'Database %s select': 'selecció a base de dades %s',
'database administration': 'administració de base de dades',
'Database Administration (appadmin)': 'Administració de Base de Dades (appadmin)',
'Date and Time': 'Data i Hora',
'DB': 'BDD',
'db': 'bdd',
'DB Model': 'Model BDD',
'defines tables': 'defineix taules',
'Delete': 'Eliminar',
'delete': 'eliminar',
'delete all checked': 'eliminar marcats',
'Delete:': 'Eliminar:',
'Demo': 'Demostració',
'Deploy on Google App Engine': 'Desplegament a Google App Engine',
'Deployment Recipes': 'Receptes de desplegament',
'Description': 'Descripció',
'design': 'diseny',
'DESIGN': 'DISENY',
'Design for': 'Diseny per a',
'detecting': 'detectant',
'DISK': 'DISC',
'Disk Cache Keys': 'Claus de Caché en Disc',
'Disk Cleared': 'Disc netejat',
'Documentation': 'Documentació',
"Don't know what to do?": 'No sap què fer?',
'done!': '¡fet!',
'Download': 'Descàrregues',
'E-mail': 'Correu electrònic',
'edit': 'editar',
'EDIT': 'EDITAR',
'Edit': 'Editar',
'Edit application': 'Editar aplicació',
'edit controller': 'editar controlador',
'Edit current record': 'Editar el registre actual',
'Edit Menu': 'Editar Menu',
'Edit Page': 'Editar Pàgina',
'Edit Page Media': 'Edit Page Media',
'Edit Profile': 'Editar Perfil',
'edit profile': 'editar perfil',
'Edit This App': 'Editi aquesta App',
'Editing file': 'Editant fitxer',
'Editing file "%s"': 'Editant fitxer "%s"',
'El fitxer ha de ser PDF': 'El fitxer ha de ser PDF',
'El fitxer ha de ser PDF o XML': 'El fitxer ha de ser PDF o XML',
'Email': 'Email',
'Email and SMS': 'Correu electrònic i SMS',
'Email sent': 'Correu electrònic enviat',
'End of impersonation': 'Fi de suplantació',
'enter a number between %(min)g and %(max)g': 'introdueixi un número entre %(min)g i %(max)g',
'Enter a valid email address': 'Entri una adreça email vàlida',
'enter a value': 'entri un valor',
'Enter a value': 'Entri un valor',
'Enter an integer between %(min)g and %(max)g': 'Entri un numero enter entre %(min)g i %(max)g',
'enter an integer between %(min)g and %(max)g': 'entri numero enter entre %(min)g i %(max)g',
'enter date and time as %(format)s': 'entri data i hora com %(format)s',
'Enter from %(min)g to %(max)g characters': 'Entri des de %(min)g a %(max)g caràcters',
'Enter valid filename': 'Entri nom de fitxer vàlid',
'Error logs for "%(app)s"': 'Bitàcora de errors a "%(app)s"',
'errors': 'errors',
'Errors': 'Errors',
'Errors in form, please check it out.': 'Hi ha errors en el formulari, per favor comprovi-ho.',
'export as csv file': 'exportar com fitxer CSV',
'Export:': 'Exportar:',
'exposes': 'exposa',
'extends': 'extén',
'failed to reload module': 'la recàrrega del mòdul ha fallat',
'FAQ': 'FAQ',
'file': 'fitxer',
'file "%(filename)s" created': 'fitxer "%(filename)s" creat',
'file "%(filename)s" deleted': 'fitxer "%(filename)s" eliminat',
'file "%(filename)s" uploaded': 'fitxer "%(filename)s" pujat',
'file "%(filename)s" was not deleted': 'fitxer "%(filename)s" no fou eliminat',
'file "%s" of %s restored': 'fitxer "%s" de %s restaurat',
'file ## download': 'file ',
'file changed on disk': 'fitxer modificat en el disco',
'file does not exist': 'fitxer no existeix',
'file saved on %(time)s': 'fitxer guardat a %(time)s',
'file saved on %s': 'fitxer guardat a %s',
'First name': 'Nom',
'Forgot username?': 'Ha oblidat el nom de usuari?',
'Forms and Validators': 'Formularis i validadors',
'Free Applications': 'Aplicacions Lliures',
'Functions with no doctests will result in [passed] tests.': 'Funcions sense doctests equivalen a pruebas [aceptades].',
'Group %(group_id)s created': 'Grupo %(group_id)s creat',
'Group ID': 'ID de Grup',
'Group uniquely assigned to user %(id)s': 'Grup assignat únicament al usuari %(id)s',
'Groups': 'Grups',
'Hello': 'Hola',
'Hello World': 'Hola Món',
'help': 'ajuda',
'Home': 'Inici',
'Hosted by': 'Hosted by',
'How did you get here?': 'Com has arribat aquí?',
'HTML': 'HTML',
'HTML export of visible columns': 'HTML export de columnes visibles',
'htmledit': 'htmledit',
'Impersonate': 'Suplantar',
'import': 'importar',
'Import/Export': 'Importar/Exportar',
'in': 'a',
'includes': 'inclou',
'Index': 'Índex',
'insert new': 'inserti nou',
'insert new %s': 'inserti nou %s',
'Installed applications': 'Aplicacions instalades',
'Insufficient privileges': 'Privilegis insuficients',
'internal error': 'error intern',
'Internal State': 'Estat Intern',
'Introduction': 'Introducció',
'Invalid action': 'Acció invàlida',
'Invalid email': 'Correo electrónico invàlid',
'invalid expression': 'expressió invàlida',
'Invalid login': 'Inici de sessió invàlida',
'invalid password': 'contrasenya invàlida',
'Invalid Query': 'Consulta invàlida',
'invalid request': 'sol·licitud invàlida',
'Invalid reset password': 'Reinici de contrasenya invàlid',
'invalid ticket': 'tiquet invàlid',
'Is Active': 'Està Actiu',
'Key': 'Clau',
'language file "%(filename)s" created/updated': 'fitxer de llenguatge "%(filename)s" creat/actualitzat',
'Language files (static strings) updated': 'Fitxers de llenguatge (cadenes estàtiques) actualitzats',
'languages': 'llenguatges',
'Languages': 'Llenguatges',
'languages updated': 'llenguatges actualitzats',
'Last name': 'Cognom',
'Last saved on:': 'Guardat a:',
'Layout': 'Diseny de pàgina',
'Layout Plugins': 'Plugins de disseny',
'Layouts': 'Dissenys de pàgines',
'License for': 'Llicència per a',
'Live Chat': 'Xat en viu',
'loading...': 'carregant...',
'Log In': 'Log In',
'Log Out': 'Log Out',
'Logged in': 'Sessió iniciada',
'Logged out': 'Sessió finalitzada',
'Login': 'Inici de sessió',
'login': 'inici de sessió',
'Login disabled by administrator': 'Inici de sessió inhabilitat pel administrador',
'Login to the Administrative Interface': 'Inici de sessió per a la Interfície Administrativa',
'logout': 'fi de sessió',
'Logout': 'Fi de sessió',
'Lost Password': 'Contrasenya perdida',
'Lost password?': 'Ha oblidat la contrasenya?',
'lost password?': '¿ha oblidat la contrasenya?',
'Main Menu': 'Menú principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gestionar la Caché',
'Menu Model': 'Model "menu"',
'merge': 'combinar',
'Models': 'Models',
'models': 'models',
'Modified By': 'Modificat Per',
'Modified On': 'Modificat A',
'Modules': 'Mòduls',
'modules': 'mòduls',
'must be YYYY-MM-DD HH:MM:SS!': '¡debe ser DD/MM/YYYY HH:MM:SS!',
'must be YYYY-MM-DD!': '¡debe ser DD/MM/YYYY!',
'My Sites': 'Els Meus Llocs',
'Name': 'Nombre',
'New': 'Nuevo',
'New %(entity)s': 'Nou %(entity)s',
'new application "%s" created': 'nova aplicació "%s" creada',
'New password': 'Contrasenya nova',
'New Record': 'Registre nou',
'new record inserted': 'nou registre insertat',
'New Search': 'Cerca nova',
'next %s rows': 'següents %s files',
'next 100 rows': '100 files següents',
'NO': 'NO',
'No databases in this application': 'No hi ha bases de dades en esta aplicació',
'No records found': "No s'han trobat registres",
'Not authorized': 'No autoritzat',
'not in': 'no a',
'Object or table name': 'Nom del objecte o taula',
'Old password': 'Contrasenya anterior',
'Online examples': 'Ejemples en línia',
'Or': 'O',
'or import from csv file': 'o importar desde fitxer CSV',
'or provide application url:': 'o proveeix URL de la aplicació:',
'Origin': 'Origen',
'Original/Translation': 'Original/Traducció',
'Other Plugins': 'Altres Plugins',
'Other Recipes': 'Altres Receptes',
'Overview': 'Resum',
'pack all': 'empaquetar tot',
'pack compiled': 'empaquetar compilats',
'Password': 'Contrasenya',
'Password changed': 'Contrasenya cambiada',
"Password fields don't match": 'Els camps de contrasenya no coincideixen',
'Password reset': 'Reinici de contrasenya',
'Peeking at file': 'Visualitzant fitxer',
'Permission': 'Permís',
'Permissions': 'Permisos',
'Phone': 'Telèfon',
'please input your password again': 'si us plau, entri un altre cop la seva contrasenya',
'Plugins': 'Plugins',
'Powered by': 'Aquest lloc utilitza',
'Preface': 'Prefaci',
'Presentar Factures': 'Presentar Factures',
'Presentar factures': 'Presentar factures',
'previous %s rows': '%s files prèvies',
'previous 100 rows': '100 files anteriors',
'Profile': 'Perfil',
'Profile updated': 'Perfil actualitzat',
'pygraphviz library not found': 'pygraphviz library not found',
'Python': 'Python',
'Query Not Supported: %s': 'Consulta No Suportada: %s',
'Query:': 'Consulta:',
'Quick Examples': 'Exemple Ràpids',
'RAM': 'RAM',
'RAM Cache Keys': 'Claus de la Caché en RAM',
'Ram Cleared': 'Ram Netjeda',
'Recipes': 'Receptes',
'Record': 'Registre',
'Record %(id)s created': 'Registre %(id)s creat',
'Record Created': 'Registre Creat',
'record does not exist': 'el registre no existe',
'Record ID': 'ID de Registre',
'Record id': 'Id de registre',
'Ref APB': 'Ref APB',
'register': "registri's",
'Register': "Registri's",
'Registration identifier': 'Identificador de Registre',
'Registration key': 'Clau de registre',
'Registration successful': 'Registre amb èxit',
'reload': 'recarregar',
'Remember me (for 30 days)': "Recordi'm (durant 30 dies)",
'remove compiled': 'eliminar compilades',
'Request reset password': 'Sol·licitud de restabliment de contrasenya',
'Reset password': 'Reiniciar contrasenya',
'Reset Password key': 'Restaurar Clau de la Contrasenya',
'Resolve Conflict file': 'Resolgui el Conflicte de fitxer',
'restore': 'restaurar',
'Retrieve username': 'Recuperar nom de usuari',
'revert': 'revertir',
'Role': 'Rol',
'Roles': 'Rols',
'Rows in Table': 'Files a la taula',
'Rows selected': 'Files seleccionades',
'save': 'guardar',
'Save model as...': 'Save model as...',
'Saved file hash:': 'Hash del fitxer guardat:',
'Search': 'Buscar',
'Search Pages': 'Search Pages',
'Semantic': 'Semàntica',
'Services': 'Serveis',
'session expired': 'sessió expirada',
'shell': 'terminal',
'Sign Up': 'Sign Up',
'site': 'lloc',
'Size of cache:': 'Mida de la Caché:',
'Slug': 'Slug',
'some files could not be removed': 'algunos archivos no pudieron ser removidos',
'Spreadsheet-optimised export of tab-separated content including hidden columns. May be slow': 'Spreadsheet-optimised export of tab-separated content including hidden columns. May be slow',
'Spreadsheet-optimised export of tab-separated content, visible columns only. May be slow.': 'Spreadsheet-optimised export of tab-separated content, visible columns only. May be slow.',
'start': 'inici',
'Start building a new search': 'Start building a new search',
'starts with': 'comença per',
'state': 'estat',
'static': 'estàtics',
'Static files': 'Fitxers estàtics',
'Statistics': 'Estadístiques',
'Stylesheet': "Fulla d'estil",
'Submit': 'Enviar',
'submit': 'enviar',
'Success!': 'Correcte!',
'Support': 'Suport',
'Sure you want to delete this object?': '¿Està segur que vol eliminar aquest objecte?',
'Table': 'taula',
'Table name': 'Nom de la taula',
'test': 'provar',
'Testing application': 'Provant aplicació',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" és una condición com "db.tabla1.campo1==\'valor\'". Algo com "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.',
'the application logic, each URL path is mapped in one exposed function in the controller': 'la lògica de la aplicació, cada ruta URL es mapeja en una funció exposada en el controlador',
'The Core': 'El Nucli',
'the data representation, define database tables and sets': 'la representació de dades, defineix taules i conjunts de base de dades',
'The output of the file is a dictionary that was rendered by the view %s': 'El resultat de aquesta funció és un diccionari que és desplegat per la vista %s',
'the presentations layer, views are also known as templates': 'la capa de presentació, les vistes també són anomenades plantilles',
'The Views': 'Les Vistes',
'There are no controllers': 'No hi ha controladors',
'There are no models': 'No hi ha models',
'There are no modules': 'No hi ha mòduls',
'There are no static files': 'No hi ha fitxers estàtics',
'There are no translators, only default language is supported': 'No hi ha traductors, només el llenguatge per defecte és suportat',
'There are no views': 'No hi ha vistes',
'these files are served without processing, your images go here': 'aquests fitxers són servits sense processar, les seves imatges van aquí',
'This App': 'Aquesta Aplicació',
'This email already has an account': 'Aquest correu electrònic ja té un compte',
'This is a copy of the scaffolding application': 'Aquesta és una còpia de la aplicació de bastiment',
'This is the %(filename)s template': 'Aquesta és la plantilla %(filename)s',
'Ticket': 'Tiquet',
'Time in Cache (h:m:s)': 'Temps en Caché (h:m:s)',
'Timestamp': 'Marca de temps',
'Title': 'Títol',
'to previous version.': 'a la versió prèvia.',
'To emulate a breakpoint programatically, write:': 'Emular un punto de ruptura programàticament, escribir:',
'to use the debugger!': 'usar el depurador!',
'toggle breakpoint': 'alternar punt de ruptura',
'Toggle comment': 'Alternar comentari',
'Toggle Fullscreen': 'Alternar pantalla completa',
'too short': 'massa curt',
'Traceback': 'Traceback',
'translation strings for the application': 'cadenes de caracters de traducció per a la aplicació',
'try': 'intenti',
'try something like': 'intenti algo com',
'TSV (Excel compatible)': 'TSV (compatible Excel)',
'TSV (Excel compatible, hidden cols)': 'TSV (compatible Excel, columnes ocultes)',
'TSV (Spreadsheets)': 'TSV (Fulls de càlcul)',
'TSV (Spreadsheets, hidden cols)': 'TSV (Fulls de càlcul, columnes amagades)',
'Twitter': 'Twitter',
'Unable to check for upgrades': 'No és possible verificar la existencia de actualitzacions',
'unable to create application "%s"': 'no és possible crear la aplicació "%s"',
'unable to delete file "%(filename)s"': 'no és possible eliminar el fitxer "%(filename)s"',
'Unable to download': 'No és possible la descàrrega',
'Unable to download app': 'No és possible descarregar la aplicació',
'unable to parse csv file': 'no és possible analitzar el fitxer CSV',
'unable to uninstall "%s"': 'no és possible instalar "%s"',
'uncheck all': 'desmarcar tots',
'uninstall': 'desinstalar',
'unknown': 'desconocido',
'update': 'actualitzar',
'update all languages': 'actualitzar tots els llenguatges',
'Update:': 'Actualizi:',
'upload application:': 'pujar aplicació:',
'Upload existing application': 'Puji aquesta aplicació',
'upload file:': 'puji fitxer:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, i ~(...) para NOT, para crear consultas més complexes.',
'User': 'Usuari',
'User %(id)s is impersonating %(other_id)s': 'El usuari %(id)s està suplantant %(other_id)s',
'User %(id)s Logged-in': 'El usuari %(id)s inicià la sessió',
'User %(id)s Logged-out': 'El usuari %(id)s finalitzà la sessió',
'User %(id)s Password changed': 'Contrasenya del usuari %(id)s canviada',
'User %(id)s Password reset': 'Contrasenya del usuari %(id)s reiniciada',
'User %(id)s Profile updated': 'Actualitzat el perfil del usuari %(id)s',
'User %(id)s Registered': 'Usuari %(id)s Registrat',
'User %(id)s Username retrieved': 'Se ha recuperat el nom de usuari del usuari %(id)s',
'User %(username)s Logged-in': 'El usuari %(username)s inicià la sessió',
"User '%(username)s' Logged-in": "El usuari '%(username)s' inicià la sessió",
"User '%(username)s' Logged-out": "El usuari '%(username)s' finalitzà la sessió",
'User Id': 'Id de Usuari',
'User ID': 'ID de Usuari',
'User Logged-out': 'El usuari finalitzà la sessió',
'Username': 'Nom de usuari',
'Username retrieve': 'Recuperar nom de usuari',
'Users': 'Usuaris',
'Value already in database or empty': 'El valor ya existeix en la base de dades o està buit',
'value already in database or empty': 'el valor ya existeix en la base de dades o està buit',
'value not allowed': 'valor no permès',
'Value not in database': 'El valor no està a la base de dades',
'value not in database': 'el valor no està a la base de dades',
'Verify Password': 'Verificar Contrasenya',
'Version': 'Versió',
'versioning': 'versions',
'Videos': 'Videos',
'View': 'Vista',
'view': 'vista',
'View %(entity)s': 'Veure %(entity)s',
'View Page': 'View Page',
'Views': 'Vistes',
'views': 'vistes',
'web2py is up to date': 'web2py està actualitzat',
'web2py Recent Tweets': 'Tweets Recents de web2py',
'Welcome': 'Benvingut',
'Welcome %s': 'Benvingut %s',
'Welcome to web2py': 'Benvingut a web2py',
'Welcome to web2py!': '¡Benvingut a web2py!',
'Which called the function %s located in the file %s': 'La qual va cridar la funció %s localitzada en el fitxer %s',
'Wiki Page': 'Wiki Page',
'Working...': 'Treballant ...',
'XML': 'XML',
'XML export of columns shown': 'XML export of columns shown',
'YES': 'SÍ',
'You are successfully running web2py': 'Vostè està executant web2py amb èxit',
'You can modify this application and adapt it to your needs': 'Vostè pot modificar aquesta aplicació i adaptar-la a les seves necessitats',
'You visited the url %s': 'Vostè va visitar la url %s',
'Your username is: %(username)s': 'El seu nom de usuari és: %(username)s',
}
| agpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/operations/__init__.py | 5 | 4760 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_gateways_operations import ApplicationGatewaysOperations
from .application_security_groups_operations import ApplicationSecurityGroupsOperations
from .available_endpoint_services_operations import AvailableEndpointServicesOperations
from .express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from .express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from .express_route_circuits_operations import ExpressRouteCircuitsOperations
from .express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from .load_balancers_operations import LoadBalancersOperations
from .load_balancer_backend_address_pools_operations import LoadBalancerBackendAddressPoolsOperations
from .load_balancer_frontend_ip_configurations_operations import LoadBalancerFrontendIPConfigurationsOperations
from .inbound_nat_rules_operations import InboundNatRulesOperations
from .load_balancer_load_balancing_rules_operations import LoadBalancerLoadBalancingRulesOperations
from .load_balancer_network_interfaces_operations import LoadBalancerNetworkInterfacesOperations
from .load_balancer_probes_operations import LoadBalancerProbesOperations
from .network_interfaces_operations import NetworkInterfacesOperations
from .network_interface_ip_configurations_operations import NetworkInterfaceIPConfigurationsOperations
from .network_interface_load_balancers_operations import NetworkInterfaceLoadBalancersOperations
from .network_security_groups_operations import NetworkSecurityGroupsOperations
from .security_rules_operations import SecurityRulesOperations
from .default_security_rules_operations import DefaultSecurityRulesOperations
from .network_watchers_operations import NetworkWatchersOperations
from .packet_captures_operations import PacketCapturesOperations
from .connection_monitors_operations import ConnectionMonitorsOperations
from .operations import Operations
from .public_ip_addresses_operations import PublicIPAddressesOperations
from .route_filters_operations import RouteFiltersOperations
from .route_filter_rules_operations import RouteFilterRulesOperations
from .route_tables_operations import RouteTablesOperations
from .routes_operations import RoutesOperations
from .bgp_service_communities_operations import BgpServiceCommunitiesOperations
from .usages_operations import UsagesOperations
from .virtual_networks_operations import VirtualNetworksOperations
from .subnets_operations import SubnetsOperations
from .virtual_network_peerings_operations import VirtualNetworkPeeringsOperations
from .virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from .virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from .local_network_gateways_operations import LocalNetworkGatewaysOperations
# Public API of this AutoRest-generated package: one *Operations class per
# network resource group of the 2017-11-01 Azure Network API version.
# NOTE: this file is code-generated; manual edits will be lost on regeneration.
__all__ = [
    'ApplicationGatewaysOperations',
    'ApplicationSecurityGroupsOperations',
    'AvailableEndpointServicesOperations',
    'ExpressRouteCircuitAuthorizationsOperations',
    'ExpressRouteCircuitPeeringsOperations',
    'ExpressRouteCircuitsOperations',
    'ExpressRouteServiceProvidersOperations',
    'LoadBalancersOperations',
    'LoadBalancerBackendAddressPoolsOperations',
    'LoadBalancerFrontendIPConfigurationsOperations',
    'InboundNatRulesOperations',
    'LoadBalancerLoadBalancingRulesOperations',
    'LoadBalancerNetworkInterfacesOperations',
    'LoadBalancerProbesOperations',
    'NetworkInterfacesOperations',
    'NetworkInterfaceIPConfigurationsOperations',
    'NetworkInterfaceLoadBalancersOperations',
    'NetworkSecurityGroupsOperations',
    'SecurityRulesOperations',
    'DefaultSecurityRulesOperations',
    'NetworkWatchersOperations',
    'PacketCapturesOperations',
    'ConnectionMonitorsOperations',
    'Operations',
    'PublicIPAddressesOperations',
    'RouteFiltersOperations',
    'RouteFilterRulesOperations',
    'RouteTablesOperations',
    'RoutesOperations',
    'BgpServiceCommunitiesOperations',
    'UsagesOperations',
    'VirtualNetworksOperations',
    'SubnetsOperations',
    'VirtualNetworkPeeringsOperations',
    'VirtualNetworkGatewaysOperations',
    'VirtualNetworkGatewayConnectionsOperations',
    'LocalNetworkGatewaysOperations',
]
| mit |
rsmz/copyright | test/test_app.py | 1 | 3137 | import os
import shlex
import shutil
import unittest
from copyright import App, diffdir
# Fixture tree layout: everything lives under DATA_DIR, with separate
# subdirectories for pristine inputs, scratch output and expected results.
DATA_DIR = 'test/data'
INPUT_DIR = os.path.join(DATA_DIR, 'input')
TMP_DIR = os.path.join(DATA_DIR, 'tmp')
TRUTH_DIR = os.path.join(DATA_DIR, 'truth')


def datadir(suffix):
    """Return the path of *suffix* under the top-level test data directory."""
    return os.path.join(DATA_DIR, suffix)


def inputdir(suffix):
    """Return the path of *suffix* under the pristine input tree."""
    return os.path.join(INPUT_DIR, suffix)


def tmpdir(suffix):
    """Return the path of *suffix* under the scratch output tree."""
    return os.path.join(TMP_DIR, suffix)


def truthdir(suffix):
    """Return the path of *suffix* under the expected-results tree."""
    return os.path.join(TRUTH_DIR, suffix)
class TestApp(unittest.TestCase):
    """End-to-end tests for the copyright App CLI.

    Each test copies the pristine input fixture tree into a scratch
    directory, runs App.main() with a particular argument string, and
    diffs the result against the matching 'truth' tree.
    """

    def diff_dir(self, dir1, dir2):
        """Assert that the two directory trees have identical contents."""
        diffs = diffdir(dir1, dir2)
        self.assertEqual([], diffs, 'Files differ: {0}'.format(diffs))

    def diff_file_dir(self, dir1, dir2, name):
        """Assert that the file *name* is identical in both directories."""
        self.diff_file(os.path.join(dir1, name), os.path.join(dir2, name))

    def diff_file(self, name1, name2):
        """Assert that two files have identical line-by-line content."""
        with open(name1) as f1:
            with open(name2) as f2:
                lines1, lines2 = f1.readlines(), f2.readlines()
                msg = '{0} and {1} differ.'
                self.assertEqual(lines1, lines2, msg.format(name1, name2))

    def run_test_force_lang(self, lang):
        """Shared body for forced-language tests: stamp the extensionless
        'empty' fixture with ``-L <lang>`` and compare against
        truth/force/<lang>."""
        dir1 = truthdir('force/' + lang)
        dir2 = tmpdir(lang)
        self.setup(dir2)
        temp = '-L {lang} -a Foo -p Bar -P 4 -s App -y 2016 {d}/empty'
        args = temp.format(lang=lang, d=dir2).split()
        App.main(args)
        file = 'empty'
        self.diff_file_dir(dir1, dir2, file)

    # NOTE: lowercase setup/teardown are deliberate helpers, NOT the
    # unittest setUp/tearDown hooks -- each test calls setup() itself with
    # the scratch directory it wants populated.
    def setup(self, dst):
        """Reset *dst* to a fresh copy of the input fixture tree."""
        self.teardown(dst)
        shutil.copytree(INPUT_DIR, dst)

    def teardown(self, dst):
        """Remove the scratch directory, ignoring missing-path errors."""
        shutil.rmtree(dst, True)

    def test_back_single(self):
        """--back --single: notice appended at the end, single-line style."""
        tree = 'back_single'
        dir1 = truthdir(tree)
        dir2 = tmpdir(tree)
        self.setup(dir2)
        temp = '-a Foo -p Bar -P 1 -s App -y 2016 --back --single {d}'
        args = temp.format(d=dir2).split()
        App.main(args)
        self.diff_dir(dir1, dir2)

    def test_force_lang_c(self):
        self.run_test_force_lang('c')

    def test_force_lang_sh(self):
        self.run_test_force_lang('sh')

    def test_front_block_include(self):
        """--include with glob patterns limits which files get stamped."""
        tree = 'front_block_include'
        dir1 = truthdir(tree)
        dir2 = tmpdir(tree)
        self.setup(dir2)
        temp = '''-a Foo -p Bar -P 1 -s App -y 2016 --include *.py,f*.h,script? {d}'''
        args = temp.format(d=dir2).split()
        App.main(args)
        self.diff_dir(dir1, dir2)

    def test_config_exclude(self):
        """Options come from a JSON config file; -a overrides its author."""
        tree = 'config_exclude'
        dir1 = truthdir(tree)
        dir2 = tmpdir(tree)
        self.setup(dir2)
        cfg = datadir(tree + '.json')
        temp = '''-a "Over Ride" -c {cfg} {d}'''
        # shlex.split (not str.split) so the quoted author survives intact.
        args = shlex.split(temp.format(cfg=cfg, d=dir2))
        App.main(args)
        self.diff_dir(dir1, dir2)

    def test_no_recurse(self):
        """-R: only the top-level directory is processed, no recursion."""
        tree = 'no_recurse'
        dir1 = truthdir(tree)
        dir2 = tmpdir(tree)
        self.setup(dir2)
        temp = '''-a Foo -s 'Best app.' -p MyApp -y 2016 -R {d}'''
        args = shlex.split(temp.format(d=dir2))
        App.main(args)
        self.diff_dir(dir1, dir2)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| gpl-3.0 |
PlayUAV/MissionPlanner | Lib/site-packages/numpy/matlib.py | 90 | 9494 | import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
# need * as we're copying the numpy namespace
from numpy import *
# matlib mirrors numpy's version and re-exports the full numpy namespace,
# then appends the helpers that exist only in this module.
__version__ = np.__version__
__all__ = np.__all__[:] # copy numpy namespace
__all__ += ['rand', 'randn', 'repmat']
def empty(shape, dtype=None, order='C'):
    """
    Return a new matrix of the given shape and type, without initializing
    entries.

    Parameters
    ----------
    shape : int or tuple of int
        Shape of the empty matrix.
    dtype : data-type, optional
        Desired output data-type.
    order : {'C', 'F'}, optional
        Row-major ('C') or column-major ('F') memory layout.

    Returns
    -------
    out : matrix
        Matrix of uninitialized (arbitrary) data.

    Notes
    -----
    Unlike `zeros`, the contents are whatever happened to be in memory, so
    every element must be assigned before use.
    """
    # Allocate raw storage directly as a ``matrix`` instance; nothing is
    # written into it, which is what makes this faster than zeros().
    return ndarray.__new__(matrix, shape, dtype, order=order)
def ones(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with ones.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar or 1-length shape ``N`` yields a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type, default float64.
    order : {'C', 'F'}, optional
        Memory layout, default 'C'.

    Returns
    -------
    out : matrix
        Matrix of ones.
    """
    # Allocate uninitialized storage, then overwrite every element with 1.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(1)
    return out
def zeros(shape, dtype=None, order='C'):
    """
    Return a matrix of the given shape and type, filled with zeros.

    Parameters
    ----------
    shape : int or sequence of ints
        Shape of the matrix; a scalar or 1-length shape ``N`` yields a
        single-row matrix of shape ``(1, N)``.
    dtype : data-type, optional
        Desired data-type, default float.
    order : {'C', 'F'}, optional
        Memory layout, default 'C'.

    Returns
    -------
    out : matrix
        Zero matrix.
    """
    # Allocate uninitialized storage, then overwrite every element with 0.
    out = ndarray.__new__(matrix, shape, dtype, order=order)
    out.fill(0)
    return out
def identity(n, dtype=None):
    """
    Return the ``n`` x ``n`` identity matrix.

    Parameters
    ----------
    n : int
        Size of the returned square matrix.
    dtype : data-type, optional
        Data-type of the output, defaults to float.

    Returns
    -------
    out : matrix
        Matrix with ones on the main diagonal and zeros elsewhere.
    """
    # A flat pattern of one 1 followed by n zeros lands the ones exactly on
    # the main diagonal when cyclically repeated into an n x n buffer.
    pattern = array([1] + n * [0], dtype=dtype)
    out = empty((n, n), dtype=dtype)
    out.flat = pattern
    return out
def eye(n, M=None, k=0, dtype=float):
    """
    Return an ``n`` x ``M`` matrix with ones on the *k*-th diagonal and
    zeros elsewhere.

    Parameters
    ----------
    n : int
        Number of rows.
    M : int, optional
        Number of columns; defaults to `n`.
    k : int, optional
        Diagonal index: 0 is the main diagonal, positive is above it,
        negative below.
    dtype : dtype, optional
        Data-type of the returned matrix.

    Returns
    -------
    I : matrix
        The requested (shifted-)identity matrix.
    """
    # Build the ndarray with numpy's eye, then view it as a matrix.
    diagonal = np.eye(n, M, k, dtype)
    return asmatrix(diagonal)
def rand(*args):
    """
    Return a matrix of random values with given shape.

    Create a matrix of the given shape and propagate it with
    random samples from a uniform distribution over ``[0, 1)``.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output.
        If given as N integers, each integer specifies the size of one
        dimension.
        If given as a tuple, this tuple gives the complete shape.
        If the first argument is a tuple, any further arguments are ignored.
        With no arguments a single random scalar is drawn, returned as a
        1 x 1 matrix.

    Returns
    -------
    out : matrix
        The matrix of random values with shape given by `\\*args`.

    See Also
    --------
    randn, numpy.random.rand
    """
    # BUGFIX: the original indexed args[0] unconditionally, so rand() with
    # no arguments raised IndexError even though np.random.rand() is valid.
    # A single tuple argument carries the whole shape; unpack it so the
    # call forms rand(2, 3) and rand((2, 3)) behave identically.
    if args and isinstance(args[0], tuple):
        args = args[0]
    return asmatrix(np.random.rand(*args))
def randn(*args):
    """
    Return a random matrix with data from the "standard normal" distribution.

    `randn` generates a matrix filled with random floats sampled from a
    univariate "normal" (Gaussian) distribution of mean 0 and variance 1.

    Parameters
    ----------
    \\*args : Arguments
        Shape of the output.
        If given as N integers, each integer specifies the size of one
        dimension. If given as a tuple, this tuple gives the complete shape.
        If the first argument is a tuple, any further arguments are ignored.
        With no arguments a single sample is drawn, returned as a
        1 x 1 matrix.

    Returns
    -------
    Z : matrix of floats
        A matrix of floating-point samples drawn from the standard normal
        distribution.

    See Also
    --------
    rand, numpy.random.randn

    Notes
    -----
    For random samples from :math:`N(\\mu, \\sigma^2)`, use:
    ``sigma * np.matlib.randn(...) + mu``
    """
    # BUGFIX: the original indexed args[0] unconditionally, so randn() with
    # no arguments raised IndexError even though np.random.randn() is valid.
    if args and isinstance(args[0], tuple):
        args = args[0]
    return asmatrix(np.random.randn(*args))
def repmat(a, m, n):
    """
    Repeat a 0-D to 2-D array or matrix MxN times.

    Parameters
    ----------
    a : array_like
        The array or matrix to be repeated.
    m, n : int
        The number of times `a` is repeated along the first and second axes.

    Returns
    -------
    out : ndarray
        `a` tiled into an ``(m * rows(a), n * cols(a))`` result, preserving
        the input's array subclass (e.g. matrix in, matrix out).
    """
    a = asanyarray(a)
    # Treat scalars as 1x1 and 1-D arrays as a single row.
    if a.ndim == 0:
        origrows, origcols = 1, 1
    elif a.ndim == 1:
        origrows, origcols = 1, a.shape[0]
    else:
        origrows, origcols = a.shape
    rows = origrows * m
    cols = origcols * n
    # Flatten, stack m vertical copies, then repeat each row block n times
    # horizontally via a row-wise repeat followed by the final reshape.
    stacked = a.reshape(1, a.size).repeat(m, 0).reshape(rows, origcols).repeat(n, 0)
    return stacked.reshape(rows, cols)
| gpl-3.0 |
glorizen/nupic | nupic/regions/PictureSensorExplorers/rotate.py | 17 | 3479 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines RotatePictureExplorer, an explorer for
PictureSensor.
"""
from nupic.regions.PictureSensor import PictureSensor
class RotatePictureExplorer(PictureSensor.PictureExplorer):
    # Presents each category through a deterministic sweep of rotation
    # angles: the pattern stays centered (no translation) and rotates from
    # maxAngularPosn downward at a fixed angular velocity.

    @classmethod
    def queryRelevantParams(klass):
        """
        Returns a sequence of parameter names that are relevant to
        the operation of the explorer.

        May be extended or overridden by sub-classes as appropriate.
        """
        return ( 'numRepetitions',
                 'minAngularPosn', 'maxAngularPosn',
                 'minAngularVelocity', 'maxAngularVelocity',
               )

    def notifyParamUpdate(self, params):
        """
        A callback that will be invoked if/when any of the explorer's
        relevant parameters have their values changed.

        @param params: a dict containing the new values of all parameters
                       that are relevant to the explorer's operation
                       (as specified by a call to queryRelevantParams()).

        @raises NotImplementedError: if min and max angular velocities
                       differ; only a single fixed velocity is supported.
        """
        # Parameter checks
        if params['minAngularVelocity'] != params['maxAngularVelocity']:
            raise NotImplementedError("'rotate' explorer currently supports " \
                "only a fixed angular velocity; i.e., 'minAngularVelocity' (%d) " \
                "must be identical to 'maxAngularVelocity' (%d)" \
                % (params['minAngularVelocity'], params['maxAngularVelocity']))
        super(RotatePictureExplorer, self).notifyParamUpdate(params)

    def initSequence(self, state, params):
        # The first presentation uses the same deterministic schedule as
        # every later iteration.
        self._presentNextRotation(state, params)

    def updateSequence(self, state, params):
        self._presentNextRotation(state, params)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Internal helper method(s)

    def _presentNextRotation(self, state, params):
        """
        Compute the appropriate category and rotational angle
        deterministically based on the current iteration count.
        """
        # These don't change: the pattern never translates, only rotates.
        state['posnX'] = 0
        state['posnY'] = 0
        state['velocityX'] = 0
        state['velocityY'] = 0
        state['angularVelocity'] = params['minAngularVelocity']
        # These do change.  Each category is shown for sequenceLength
        # angular steps, repeated numRepetitions times.
        # NOTE(review): the '/' divisions below assume Python 2 integer
        # (floor) division semantics -- confirm before porting to Python 3.
        sequenceLength = 1 + int((params['maxAngularPosn'] - params['minAngularPosn'])
                                 / params['minAngularVelocity'])
        state['catIndex'] = self._getIterCount() / (sequenceLength * params['numRepetitions'])
        seqIndex = self._getIterCount() % (sequenceLength * params['numRepetitions'])
        # Sweep from the maximum angle downward, one velocity step per
        # iteration within the repetition window.
        state['angularPosn'] = params['maxAngularPosn'] \
                               - state['angularVelocity'] * seqIndex
| agpl-3.0 |
ramitalat/odoo | addons/account/wizard/account_move_line_reconcile_select.py | 385 | 2362 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_move_line_reconcile_select(osv.osv_memory):
    # Transient wizard model: lets the user pick a reconcilable account,
    # then opens the move-line list filtered to that account's
    # unreconciled, posted entries.
    _name = "account.move.line.reconcile.select"
    _description = "Move line reconcile select"
    _columns = {
        # Only accounts flagged as reconcilable may be selected.
        'account_id': fields.many2one('account.account', 'Account', \
            domain = [('reconcile', '=', 1)], required=True),
    }
    def action_open_window(self, cr, uid, ids, context=None):
        """
        This function Open account move line window for reconcile on given account id
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: account move line reconcile select’s ID or list of IDs
        @return: dictionary of Open account move line window for reconcile on given account id
        """
        data = self.read(cr, uid, ids, context=context)[0]
        # Act-window descriptor: list of move lines on the chosen account
        # that are not yet reconciled and not in draft state.
        return {
            'domain': "[('account_id','=',%d),('reconcile_id','=',False),('state','<>','draft')]" % data['account_id'],
            'name': _('Reconciliation'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'view_id': False,
            'res_model': 'account.move.line',
            'type': 'ir.actions.act_window'
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
0x0FFF/gpdb | gpMgmt/bin/gpgenfsmap.py | 21 | 8809 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2011. All Rights Reserved.
'''
gpgenfsmap.py Generates a mapping of Servers, Filesystems, GPDB Filespaces and Storage Pools
Options:
-h, -?, --help Print this usage.
-p --port Port to use to connect to DB. Defaults to $PGPORT
-u --username Username to connect to DB. Defaaults to $PGUSER
-n --host Hostname to connect to DB. Defaults to $PGHOST
-w --password Password to connect to DB.
-v, --verbose Enable verbose Logging. Adds messages to stdout that will may break programs that parse the fsmap output.
-q, --quiet Disable most log messages. This mode is on by default.
'''
import os, sys, subprocess
try:
from optparse import Option, OptionParser
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib.commands.base import WorkerPool, Command, REMOTE
from gppylib import gphostcache
from gppylib.commands import unix
from gppylib.db import dbconn
from gppylib.db import catalog
from pygresql import pg # Database interaction
from gppylib import gplog # Greenplum logging facility
from gppylib.gpcoverage import GpCoverage
from getpass import getpass
except ImportError, e:
sys.exit('Cannot import modules. Please check that you have sourced greenplum_path.sh. Detail: ' + str(e))
#Data Structures to be populated
class FSMap:
    """In-memory map of servers, filesystems and GPDB filespaces."""

    def __init__(self):
        self.servers = []     # Server objects, in discovery order
        self.filespaces = []  # filespace records added via addFilespace()

    def addServer(self, name):
        """Create a Server entry for *name*, remember it, and return it."""
        server = Server(name)
        self.servers.append(server)
        return server

    def getServer(self, name):
        """Return the first server named *name*, or None if unknown."""
        for candidate in self.servers:
            if candidate.name == name:
                return candidate
        return None

    def getAllServers(self):
        """Return the list of all known servers."""
        return self.servers

    def addFilespace(self, filespace):
        """Record one filespace entry."""
        self.filespaces.append(filespace)

    def toString(self):
        """Concatenate the string form of every known server."""
        return "".join(server.toString() for server in self.servers)
class Server:
    """One host plus the filesystems discovered on it."""

    def __init__(self, name):
        self.name = name
        self.filesystems = []  # Filesystem objects, in discovery order

    def addFilesystem(self, name):
        """Create a Filesystem entry for *name*, remember it, return it."""
        fs = Filesystem(name)
        self.filesystems.append(fs)
        return fs

    def getFilesystem(self, name):
        """Return the first filesystem named *name*, or None if unknown."""
        for fs in self.filesystems:
            if fs.name == name:
                return fs
        return None

    def toString(self):
        """Render one 'host:filesystem:detail' line per filesystem detail."""
        rendered = ""
        for fs in self.filesystems:
            for detail in fs.toString():
                rendered += "%s:%s:%s\n" % (self.name, fs.name, detail)
        return rendered
#Global Variables
systemFSMap = None
#Keyed on servername, each value is a list of devices & directories
serverFSDict = None
# hostname -> space-separated string of data directories on that host
# (populated by genServerFsList from the catalog query)
serverFSMap = dict()
# data directory -> [hostname, device, mount point]
# (populated by findFsDetails from parsed 'df -P' output)
fsDetailsMap = dict()
# shared database connection, established lazily by dbConnect()
dbConn = None
# gplog logger; initialized in main() before anything else runs
logger = None
def runPoolCommand(host, commandStr, pool):
    """Run `commandStr` remotely on `host` via the worker pool.

    Logs details on any failure (non-zero rc, halted, or incomplete run),
    but in all cases returns the stdout captured from the last completed
    item.
    """
    cmd = Command(host, commandStr, REMOTE, host)
    pool.addCommand(cmd)
    pool.join()
    output = ""
    for item in pool.getCompletedItems():
        failed = item.results.rc or item.results.halt or not item.results.completed
        if failed:
            logger.info("Error running command on host: " + host + " Command: " + commandStr)
            logger.info(item.results.stderr.strip())
            logger.info(item.results.stdout.strip())
        output = item.results.stdout
    return output
def dbConnect(options):
    """Establish the shared global database connection (no-op if already open).

    Connects to template1 using the host/port/username from `options`.
    Bug fix: the documented -w/--password flag was parsed but silently
    ignored (getpass was imported yet never used); when the flag is set we
    now prompt for the password as the usage text promises.  Exits the
    process on connection failure.
    """
    global dbConn
    if dbConn is not None:
        # Reuse the existing connection.
        return
    try:
        user = options.username
        host = options.host
        port = options.port
        db = 'template1'
        password = None
        if options.password:
            # Honor -w/--password: prompt interactively.
            password = getpass("Password: ")
        dburl = dbconn.DbURL(username=user, hostname=host,
                             port=port, dbname=db, password=password)
        logger.info("Connecting to DB at: %s " % (str(dburl)))
        conn = dbconn.connect(dburl)
        logger.info("Connected to DB: %s" % (str(conn)))
        dbConn = conn
    except Exception as e:
        logger.error("Error Connecting to Database: %s" % (str(e)))
        sys.exit(1)
def runQuery(query):
    """Execute `query` on the global connection and return the result rows.

    Exits the process if dbConnect() has not been called yet or if the
    query itself fails.  (Idiom fixes: `is None` instead of `== None`,
    and the Python-3-compatible `except ... as e` form.)
    """
    global dbConn
    if dbConn is None:
        logger.error("Error: Not connected to database.")
        sys.exit(1)
    try:
        rows = catalog.basicSQLExec(dbConn, query)
    except Exception as e:
        logger.error("Error: Failed to run query: %s" % (str(e)))
        sys.exit(1)
    return rows
def parseargs(args):
    """Parse command-line options and return the populated options object.

    Host/port/username default to the PG* environment variables when set,
    otherwise to the local host / current user / 5432.  Prints usage and
    exits for --help; exits with an error for a non-numeric port.
    """
    global logger
    pguser = os.environ.get("PGUSER") or unix.getUserName()
    pghost = os.environ.get("PGHOST") or unix.getLocalHostname()
    pgport = os.environ.get("PGPORT") or 5432
    parser = OptParser(option_class=OptChecker)
    parser.remove_option('-h')
    parser.add_option('-?', '--help', '-h', action='store_true', default=False)
    parser.add_option('-n', '--host', default=pghost)
    parser.add_option('-p', '--port', default=pgport)
    parser.add_option('-u', '--username', default=pguser)
    parser.add_option('-w', '--password', default=False, action='store_true')
    parser.add_option('-v', '--verbose', default=False, action='store_true')
    parser.add_option('-q', '--quiet', default=True, action='store_true')
    (options, args) = parser.parse_args()
    if options.help:
        # print() form works identically for a single argument on Python 2.
        print(__doc__)
        sys.exit(1)
    try:
        options.port = int(options.port)
    except (ValueError, TypeError):
        # Narrowed from a bare except: only conversion failures are expected,
        # and a bare except would also have swallowed SystemExit et al.
        logger.error("Invalid PORT: '%s'" % options.port)
        sys.exit(1)
    if options.verbose:
        gplog.enable_verbose_logging()
    elif options.quiet:
        gplog.quiet_stdout_logging()
    return options
def findFsDetails():
    """Run 'df -P' over ssh on every host in serverFSMap (in parallel) and
    populate fsDetailsMap with [hostname, device, mount point] per data
    directory.  Errors are reported but processing is best-effort."""
    global serverFSMap
    try:
        #find the mount points in parallel
        pool = WorkerPool()
        for hname in serverFSMap.keys():
            # NOTE(review): str.strip() returns a new string; this call
            # discards its result, leaving hname unchanged.
            hname.strip()
            subCmd = "df -P %s" %(serverFSMap[hname])
            cmdStr = 'ssh -o PasswordAuthentication=no %s "%s"' % (hname, subCmd)
            pool.addCommand( Command(hname, cmdStr, REMOTE, hname) )
        pool.join()
        items = pool.getCompletedItems()
        for i in items:
            if i.results.rc == 0:
                df_with_header = i.results.stdout.strip()
                df_list = df_with_header.splitlines()
                # Drop the "Filesystem 1024-blocks ..." header row.
                df_list.pop(0)
                fsList = serverFSMap[i.remoteHost].split()
                if len(df_list) != len(fsList):
                    # df returned a different number of rows than the
                    # directories we asked about; skip this host.
                    print "Mismatch"
                    continue
                for df_vals in df_list:
                    df_val = df_vals.split()
                    # POSIX 'df -P' columns: [0]=device ... [5]=mount point.
                    fsDetailsMap[fsList.pop(0).strip()] = [i.remoteHost, df_val[0], df_val[5]]
            else:
                print("Failure in talking to host %s" %(i.remoteHost))
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
    except Exception, e:
        # Report and still tear the pool down; callers keep going.
        print e.__str__()
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
    except KeyboardInterrupt:
        # KeyboardInterrupt derives from BaseException (not Exception), so it
        # bypasses the clause above and lands here; exit non-zero after teardown.
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
        sys.exit(1)
    except:
        # Catch-all for any remaining BaseException: tear down and return.
        pool.join()
        pool.haltWork()
        pool.joinWorkers()
def genFSMap():
results = runQuery("SELECT pgfs.oid as oid, fsedbid as seg_dbid, fselocation as datadir, hostname \
FROM pg_filespace pgfs, pg_filespace_entry pgfse, gp_segment_configuration gpsec \
WHERE pgfse.fsefsoid=pgfs.oid AND pgfse.fsedbid=gpsec.dbid ORDER BY seg_dbid;")
if len(results) == 0:
logger.error( "No data directories found. Exiting.")
sys.exit(1)
logger.debug( "Results from query:" + str(results) )
#result columns will be: [0] filespace name, [1] dbid, [2] datadir, [3] hostname
for result in results:
filespaceOid= result[0]
datadir=result[2].strip()
hostname=result[3]
if fsDetailsMap.has_key(datadir):
print fsDetailsMap[datadir][0], fsDetailsMap[datadir][2], filespaceOid
def genServerFsList():
    """Fill serverFSMap with hostname -> space-separated filespace locations,
    as aggregated by the catalog query.  Exits when no segments exist."""
    results = runQuery("select hostname, array_to_string(array_agg(fselocation), ' ') as fs from pg_filespace_entry a , gp_segment_configuration b where a.fsedbid = b.dbid group by hostname")
    if not results:
        logger.error("Error: gp_segment_configuration empty")
        sys.exit(1)
    for row in results:
        if len(row) == 2:
            hostname, locations = row
            serverFSMap[hostname] = locations
def main(argv):
    """Entry point: connect to the DB, gather filespace and filesystem
    details, then print the generated map and exit successfully."""
    global systemFSMap, logger
    logger = gplog.get_default_logger()
    options = parseargs(argv)
    dbConnect(options)
    genServerFsList()
    findFsDetails()
    genFSMap()
    sys.exit(0)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
ukanga/SickRage | lib/github/Authorization.py | 72 | 7574 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.AuthorizationApplication
class Authorization(github.GithubObject.CompletableGithubObject):
    """
    This class represents Authorizations as returned for example by http://developer.github.com/v3/todo
    """

    # Each property below follows PyGithub's lazy-completion pattern:
    # _completeIfNotSet() re-fetches the full object from the API when this
    # instance was built from a partial response, after which the cached
    # attribute wrapper's .value is returned.

    @property
    def app(self):
        """
        :type: :class:`github.AuthorizationApplication.AuthorizationApplication`
        """
        self._completeIfNotSet(self._app)
        return self._app.value

    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value

    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value

    @property
    def note(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._note)
        return self._note.value

    @property
    def note_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._note_url)
        return self._note_url.value

    @property
    def scopes(self):
        """
        :type: list of string
        """
        self._completeIfNotSet(self._scopes)
        return self._scopes.value

    @property
    def token(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._token)
        return self._token.value

    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    def delete(self):
        """
        :calls: `DELETE /authorizations/:id <http://developer.github.com/v3/oauth>`_
        :rtype: None
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.url
        )

    def edit(self, scopes=github.GithubObject.NotSet, add_scopes=github.GithubObject.NotSet, remove_scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet):
        """
        :calls: `PATCH /authorizations/:id <http://developer.github.com/v3/oauth>`_
        :param scopes: list of string
        :param add_scopes: list of string
        :param remove_scopes: list of string
        :param note: string
        :param note_url: string
        :rtype: None
        """
        # NotSet is a sentinel distinguishing "not provided" from explicit
        # None/empty values; only provided parameters are sent to the API.
        assert scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in scopes), scopes
        assert add_scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in add_scopes), add_scopes
        assert remove_scopes is github.GithubObject.NotSet or all(isinstance(element, (str, unicode)) for element in remove_scopes), remove_scopes
        assert note is github.GithubObject.NotSet or isinstance(note, (str, unicode)), note
        assert note_url is github.GithubObject.NotSet or isinstance(note_url, (str, unicode)), note_url
        post_parameters = dict()
        if scopes is not github.GithubObject.NotSet:
            post_parameters["scopes"] = scopes
        if add_scopes is not github.GithubObject.NotSet:
            post_parameters["add_scopes"] = add_scopes
        if remove_scopes is not github.GithubObject.NotSet:
            post_parameters["remove_scopes"] = remove_scopes
        if note is not github.GithubObject.NotSet:
            post_parameters["note"] = note
        if note_url is not github.GithubObject.NotSet:
            post_parameters["note_url"] = note_url
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.url,
            input=post_parameters
        )
        # Refresh this object's attributes from the server's response.
        self._useAttributes(data)

    def _initAttributes(self):
        """Reset every attribute to the NotSet sentinel (pre-population state)."""
        self._app = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._note = github.GithubObject.NotSet
        self._note_url = github.GithubObject.NotSet
        self._scopes = github.GithubObject.NotSet
        self._token = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        """Copy raw API response fields into typed attribute wrappers."""
        if "app" in attributes:  # pragma no branch
            self._app = self._makeClassAttribute(github.AuthorizationApplication.AuthorizationApplication, attributes["app"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "note" in attributes:  # pragma no branch
            self._note = self._makeStringAttribute(attributes["note"])
        if "note_url" in attributes:  # pragma no branch
            self._note_url = self._makeStringAttribute(attributes["note_url"])
        if "scopes" in attributes:  # pragma no branch
            self._scopes = self._makeListOfStringsAttribute(attributes["scopes"])
        if "token" in attributes:  # pragma no branch
            self._token = self._makeStringAttribute(attributes["token"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 |
BorgERP/borg-erp-6of3 | l10n_coa/l10n_ve/__init__.py | 975 | 1058 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
domob1812/crowncoin | share/qt/extract_strings_qt.py | 4 | 1854 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
# Output C++ file that receives the extracted translatable strings.
OUT_CPP="qt/crownstrings.cpp"
# msgid of the empty string (the PO header entry), skipped on output.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.

    Each element of a tuple is the list of quoted string lines making up
    that entry (first the 'msgid '/'msgstr ' remainder, then any bare
    '"..."' continuation lines).
    """
    messages = []
    current_id = []
    current_str = []
    state = None  # None until the first entry, then 'msgid' or 'msgstr'
    for raw in text.split('\n'):
        line = raw.rstrip('\r')
        if line.startswith('msgid '):
            if state == 'msgstr':
                # Previous entry is complete; record it before starting anew.
                messages.append((current_id, current_str))
            state = 'msgid'
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            state = 'msgstr'
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Continuation line: extend whichever part is being read.
            if state == 'msgid':
                current_id.append(line)
            elif state == 'msgstr':
                current_str.append(line)
    if state == 'msgstr':
        # Flush the final entry (files don't end with a new 'msgid').
        messages.append((current_id, current_str))
    return messages
# Script body: run xgettext over the files named on the command line and
# emit the extracted strings as a Qt stringdefs array in OUT_CPP.
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
# Capture xgettext's PO-format output on stdout.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *crown_strings[] = {\n')
# Sort by msgid so the generated file is deterministic across runs.
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the PO header entry (empty msgid).
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("crown-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| mit |
pdufour/sqlalchemy | test/ext/test_automap.py | 25 | 9295 | from sqlalchemy.testing import fixtures
from ..orm._fixtures import FixtureTest
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import relationship, interfaces, configure_mappers
from sqlalchemy.ext.automap import generate_relationship
from sqlalchemy.testing.mock import Mock
from sqlalchemy import String, Integer, ForeignKey
from sqlalchemy import testing
from sqlalchemy.testing.schema import Table, Column
class AutomapTest(fixtures.MappedTest):
    """Exercises sqlalchemy.ext.automap against the standard ORM fixture
    schema (users/addresses/orders/items/nodes/keywords)."""

    @classmethod
    def define_tables(cls, metadata):
        # Reuse the shared ORM fixture tables rather than defining our own.
        FixtureTest.define_tables(metadata)

    def test_relationship_o2m_default(self):
        # Default naming: collection side gets "<table>_collection",
        # scalar side gets the referred table name.
        Base = automap_base(metadata=self.metadata)
        Base.prepare()
        User = Base.classes.users
        Address = Base.classes.addresses
        a1 = Address(email_address='e1')
        u1 = User(name='u1', addresses_collection=[a1])
        assert a1.users is u1

    def test_relationship_explicit_override_o2m(self):
        # A relationship declared explicitly on the class wins over the
        # automap-generated one (here: same attribute, set collection).
        Base = automap_base(metadata=self.metadata)
        prop = relationship("addresses", collection_class=set)

        class User(Base):
            __tablename__ = 'users'
            addresses_collection = prop
        Base.prepare()
        assert User.addresses_collection.property is prop
        Address = Base.classes.addresses
        a1 = Address(email_address='e1')
        u1 = User(name='u1', addresses_collection=set([a1]))
        assert a1.user is u1

    def test_relationship_explicit_override_m2o(self):
        # Same as above, but overriding the many-to-one (scalar) side.
        Base = automap_base(metadata=self.metadata)
        prop = relationship("users")

        class Address(Base):
            __tablename__ = 'addresses'
            users = prop
        Base.prepare()
        User = Base.classes.users
        assert Address.users.property is prop
        a1 = Address(email_address='e1')
        u1 = User(name='u1', address_collection=[a1])
        assert a1.users is u1

    def test_relationship_self_referential(self):
        # Self-referential FK on 'nodes' yields a parent/children pair.
        Base = automap_base(metadata=self.metadata)
        Base.prepare()
        Node = Base.classes.nodes
        n1 = Node()
        n2 = Node()
        n1.nodes_collection.append(n2)
        assert n2.nodes is n1

    def test_naming_schemes(self):
        # All three naming hooks (class, scalar rel, collection rel) are
        # honored when passed to prepare().
        Base = automap_base(metadata=self.metadata)

        def classname_for_table(base, tablename, table):
            return str("cls_" + tablename)

        def name_for_scalar_relationship(
                base, local_cls, referred_cls, constraint):
            return "scalar_" + referred_cls.__name__

        def name_for_collection_relationship(
                base, local_cls, referred_cls, constraint):
            return "coll_" + referred_cls.__name__

        Base.prepare(
            classname_for_table=classname_for_table,
            name_for_scalar_relationship=name_for_scalar_relationship,
            name_for_collection_relationship=name_for_collection_relationship
        )
        User = Base.classes.cls_users
        Address = Base.classes.cls_addresses
        u1 = User()
        a1 = Address()
        u1.coll_cls_addresses.append(a1)
        assert a1.scalar_cls_users is u1

    def test_relationship_m2m(self):
        # The order_items secondary table produces a many-to-many.
        Base = automap_base(metadata=self.metadata)
        Base.prepare()
        Order, Item = Base.classes.orders, Base.classes['items']
        o1 = Order()
        i1 = Item()
        o1.items_collection.append(i1)
        assert o1 in i1.orders_collection

    def test_relationship_explicit_override_forwards_m2m(self):
        # Explicit m2m on one side; automap generates the backref side.
        Base = automap_base(metadata=self.metadata)

        class Order(Base):
            __tablename__ = 'orders'
            items_collection = relationship(
                "items",
                secondary="order_items",
                collection_class=set)
        Base.prepare()
        Item = Base.classes['items']
        o1 = Order()
        i1 = Item()
        o1.items_collection.add(i1)
        # it is 'order_collection' because the class name is
        # "Order" !
        assert isinstance(i1.order_collection, list)
        assert o1 in i1.order_collection

    def test_relationship_pass_params(self):
        # The generate_relationship hook is invoked per relationship with
        # the expected direction/attribute-name arguments.
        Base = automap_base(metadata=self.metadata)
        mock = Mock()

        def _gen_relationship(
                base, direction, return_fn, attrname,
                local_cls, referred_cls, **kw):
            mock(base, direction, attrname)
            return generate_relationship(
                base, direction, return_fn,
                attrname, local_cls, referred_cls, **kw)

        Base.prepare(generate_relationship=_gen_relationship)
        assert set(tuple(c[1]) for c in mock.mock_calls).issuperset([
            (Base, interfaces.MANYTOONE, "nodes"),
            (Base, interfaces.MANYTOMANY, "keywords_collection"),
            (Base, interfaces.MANYTOMANY, "items_collection"),
            (Base, interfaces.MANYTOONE, "users"),
            (Base, interfaces.ONETOMANY, "addresses_collection"),
        ])
class CascadeTest(fixtures.MappedTest):
    """Verify the cascade rules automap derives from FK nullability and
    ON DELETE settings."""

    @classmethod
    def define_tables(cls, metadata):
        # Parent table 'a' plus four children covering the FK variations:
        #   b: nullable FK                   -> save-update only
        #   c: NOT NULL FK                   -> delete + delete-orphan
        #   d: NOT NULL, ON DELETE CASCADE   -> delete-orphan + passive deletes
        #   e: nullable, ON DELETE SET NULL  -> passive deletes only
        Table(
            "a", metadata,
            Column('id', Integer, primary_key=True)
        )
        Table(
            "b", metadata,
            Column('id', Integer, primary_key=True),
            Column('aid', ForeignKey('a.id'), nullable=True)
        )
        Table(
            "c", metadata,
            Column('id', Integer, primary_key=True),
            Column('aid', ForeignKey('a.id'), nullable=False)
        )
        Table(
            "d", metadata,
            Column('id', Integer, primary_key=True),
            Column(
                'aid', ForeignKey('a.id', ondelete="cascade"), nullable=False)
        )
        Table(
            "e", metadata,
            Column('id', Integer, primary_key=True),
            Column(
                'aid', ForeignKey('a.id', ondelete="set null"),
                nullable=True)
        )

    def test_o2m_relationship_cascade(self):
        Base = automap_base(metadata=self.metadata)
        Base.prepare()
        configure_mappers()

        def flags(attr):
            # (delete, delete_orphan, passive_deletes, save_update)
            prop = attr.property
            return (bool(prop.cascade.delete),
                    bool(prop.cascade.delete_orphan),
                    bool(prop.passive_deletes),
                    bool(prop.cascade.save_update))

        parent = Base.classes.a
        assert flags(parent.b_collection) == (False, False, False, True)
        assert flags(parent.c_collection) == (True, True, False, True)
        assert flags(parent.d_collection) == (True, True, True, True)
        assert flags(parent.e_collection) == (False, False, True, True)
class AutomapInhTest(fixtures.MappedTest):
    """Exercises automap together with explicitly-declared single-table and
    joined-table inheritance hierarchies (reflected from the database)."""

    @classmethod
    def define_tables(cls, metadata):
        # 'single' hosts a single-table hierarchy discriminated on 'type';
        # 'joined_base'/'joined_inh' host a joined-table hierarchy.
        Table(
            'single', metadata,
            Column('id', Integer, primary_key=True),
            Column('type', String(10)),
            test_needs_fk=True
        )
        Table(
            'joined_base', metadata,
            Column('id', Integer, primary_key=True),
            Column('type', String(10)),
            test_needs_fk=True
        )
        Table(
            'joined_inh', metadata,
            Column(
                'id', Integer,
                ForeignKey('joined_base.id'), primary_key=True),
            test_needs_fk=True
        )
        FixtureTest.define_tables(metadata)

    def test_single_inheritance_reflect(self):
        # Subclasses of an explicitly-declared base keep their mapper
        # inheritance after prepare(reflect=True).
        Base = automap_base()

        class Single(Base):
            __tablename__ = 'single'
            type = Column(String)
            __mapper_args__ = {
                "polymorphic_identity": "u0",
                "polymorphic_on": type}

        class SubUser1(Single):
            __mapper_args__ = {"polymorphic_identity": "u1"}

        class SubUser2(Single):
            __mapper_args__ = {"polymorphic_identity": "u2"}

        Base.prepare(engine=testing.db, reflect=True)
        assert SubUser2.__mapper__.inherits is Single.__mapper__

    def test_joined_inheritance_reflect(self):
        # Joined-table subclass maps to its own table; automap must not
        # generate a relationship across the inheritance FK.
        Base = automap_base()

        class Joined(Base):
            __tablename__ = 'joined_base'
            type = Column(String)
            __mapper_args__ = {
                "polymorphic_identity": "u0",
                "polymorphic_on": type}

        class SubJoined(Joined):
            __tablename__ = 'joined_inh'
            __mapper_args__ = {"polymorphic_identity": "u1"}

        Base.prepare(engine=testing.db, reflect=True)
        assert SubJoined.__mapper__.inherits is Joined.__mapper__
        assert not Joined.__mapper__.relationships
        assert not SubJoined.__mapper__.relationships

    def test_conditional_relationship(self):
        # A generate_relationship hook returning None suppresses all
        # relationships; prepare() must tolerate that.
        Base = automap_base()

        def _gen_relationship(*arg, **kw):
            return None

        Base.prepare(
            engine=testing.db, reflect=True,
            generate_relationship=_gen_relationship)
| mit |
ModdedPA/android_external_chromium_org | tools/code_coverage/coverage_posix.py | 24 | 48901 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate and process code coverage.
TODO(jrg): rename this from coverage_posix.py to coverage_all.py!
Written for and tested on Mac, Linux, and Windows. To use this script
to generate coverage numbers, please run from within a gyp-generated
project.
All platforms, to set up coverage:
cd ...../chromium ; src/tools/gyp/gyp_dogfood -Dcoverage=1 src/build/all.gyp
Run coverage on...
Mac:
( cd src/chrome ; xcodebuild -configuration Debug -target coverage )
Linux:
( cd src/chrome ; hammer coverage )
# In particular, don't try and run 'coverage' from src/build
--directory=DIR: specify directory that contains gcda files, and where
a "coverage" directory will be created containing the output html.
Example name: ..../chromium/src/xcodebuild/Debug.
If not specified (e.g. buildbot) we will try and figure it out based on
other options (e.g. --target and --build-dir; see below).
--genhtml: generate html output. If not specified only lcov is generated.
--all_unittests: if present, run all files named *_unittests that we
can find.
--fast_test: make the tests run real fast (just for testing)
--strict: if a test fails, we continue happily. --strict will cause
us to die immediately.
--trim=False: by default we trim away tests known to be problematic on
specific platforms. If set to false we do NOT trim out tests.
--xvfb=True: By default we use Xvfb to make sure DISPLAY is valid
(Linux only). if set to False, do not use Xvfb. TODO(jrg): convert
this script from the compile stage of a builder to a
RunPythonCommandInBuildDir() command to avoid the need for this
step.
--timeout=SECS: if a subprocess doesn't have output within SECS,
assume it's a hang. Kill it and give up.
--bundles=BUNDLEFILE: a file containing a python list of coverage
bundles to be eval'd. Example contents of the bundlefile:
['../base/base.gyp:base_unittests']
This is used as part of the coverage bot.
If no other bundlefile-finding args are used (--target,
--build-dir), this is assumed to be an absolute path.
If those args are used, find BUNDLEFILE in a way consistent with
other scripts launched by buildbot. Example of another script
launched by buildbot:
http://src.chromium.org/viewvc/chrome/trunk/tools/buildbot/scripts/slave/runtest.py
--target=NAME: specify the build target (e.g. 'Debug' or 'Release').
This is used by buildbot scripts to help us find the output directory.
Must be used with --build-dir.
--build-dir=DIR: According to buildbot comments, this is the name of
the directory within the buildbot working directory in which the
solution, Debug, and Release directories are found.
It's usually "src/build", but on mac it's $DIR/../xcodebuild and on
Linux it's $DIR/out.
This is used by buildbot scripts to help us find the output directory.
Must be used with --target.
--no_exclusions: Do NOT use the exclusion list. This script keeps a
list of tests known to be problematic under coverage. For example,
ProcessUtilTest.SpawnChild will crash inside __gcov_fork() when
using the MacOS 10.6 SDK. Use of --no_exclusions prevents the use
of this exclusion list.
--dont-clear-coverage-data: Normally we clear coverage data from
previous runs. If this arg is used we do NOT clear the coverage
data.
Strings after all options are considered tests to run. Test names
have all text before a ':' stripped to help with gyp compatibility.
For example, ../base/base.gyp:base_unittests is interpreted as a test
named "base_unittests".
"""
import glob
import logging
import optparse
import os
import Queue
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
"""Global list of child PIDs to kill when we die."""
gChildPIDs = []
"""Exclusion list. Format is
{ platform: { testname: (exclusion1, exclusion2, ... ), ... } }
Platform is a match for sys.platform and can be a list.
Matching code does an 'if sys.platform in (the key):'.
Similarly, matching does an 'if testname in thefulltestname:'
The Chromium convention has traditionally been to place the
exclusion list in a distinct file. Unlike valgrind (which has
frequent changes when things break and are fixed), the expectation
here is that exclusions remain relatively constant (e.g. OS bugs).
If that changes, revisit the decision to place inclusions in this
script.
Details:
ProcessUtilTest.SpawnChild: chokes in __gcov_fork on 10.6
IPCFuzzingTest.MsgBadPayloadArgs: ditto
PanelBrowserNavigatorTest.NavigateFromCrashedPanel: Fails on coverage bot.
WebGLConformanceTests.conformance_attribs_gl_enable_vertex_attrib: Fails
with timeout (45000 ms) exceeded error. crbug.com/143248
WebGLConformanceTests.conformance_attribs_gl_disabled_vertex_attrib:
ditto.
WebGLConformanceTests.conformance_attribs_gl_vertex_attrib_zero_issues:
ditto.
WebGLConformanceTests.conformance_attribs_gl_vertex_attrib: ditto.
WebGLConformanceTests.conformance_attribs_gl_vertexattribpointer_offsets:
ditto.
WebGLConformanceTests.conformance_attribs_gl_vertexattribpointer: ditto.
WebGLConformanceTests.conformance_buffers_buffer_bind_test: After
disabling WebGLConformanceTests specified above, this test fails when run
on local machine.
WebGLConformanceTests.conformance_buffers_buffer_data_array_buffer: ditto.
WebGLConformanceTests.conformance_buffers_index_validation_copies_indices:
ditto.
WebGLConformanceTests.
conformance_buffers_index_validation_crash_with_buffer_sub_data: ditto.
WebGLConformanceTests.
conformance_buffers_index_validation_verifies_too_many_indices: ditto.
WebGLConformanceTests.
conformance_buffers_index_validation_with_resized_buffer: ditto.
WebGLConformanceTests.conformance_canvas_buffer_offscreen_test: ditto.
WebGLConformanceTests.conformance_canvas_buffer_preserve_test: ditto.
WebGLConformanceTests.conformance_canvas_canvas_test: ditto.
WebGLConformanceTests.conformance_canvas_canvas_zero_size: ditto.
WebGLConformanceTests.
conformance_canvas_drawingbuffer_static_canvas_test: ditto.
WebGLConformanceTests.conformance_canvas_drawingbuffer_test: ditto.
PageCycler*.*: Fails on coverage bot with "Missing test directory
/....../slave/coverage-dbg-linux/build/src/data/page_cycler/moz" error.
*FrameRateCompositingTest.*: Fails with
"FATAL:chrome_content_browser_client.cc(893)] Check failed:
command_line->HasSwitch(switches::kEnableStatsTable)."
*FrameRateNoVsyncCanvasInternalTest.*: ditto.
*FrameRateGpuCanvasInternalTest.*: ditto.
IndexedDBTest.Perf: Fails with 'Timeout reached in WaitUntilCookieValue'
error.
TwoClientPasswordsSyncTest.DeleteAll: Fails on coverage bot.
MigrationTwoClientTest.MigrationHellWithoutNigori: Fails with timeout
(45000 ms) exceeded error.
TwoClientSessionsSyncTest.DeleteActiveSession: ditto.
MultipleClientSessionsSyncTest.EncryptedAndChanged: ditto.
MigrationSingleClientTest.AllTypesIndividuallyTriggerNotification: ditto.
*OldPanelResizeBrowserTest.*: crbug.com/143247
*OldPanelDragBrowserTest.*: ditto.
*OldPanelBrowserTest.*: ditto.
*OldPanelAndDesktopNotificationTest.*: ditto.
*OldDockedPanelBrowserTest.*: ditto.
*OldDetachedPanelBrowserTest.*: ditto.
PanelDragBrowserTest.AttachWithSqueeze: ditto.
*PanelBrowserTest.*: ditto.
*DockedPanelBrowserTest.*: ditto.
*DetachedPanelBrowserTest.*: ditto.
AutomatedUITest.TheOneAndOnlyTest: crbug.com/143419
AutomatedUITestBase.DragOut: ditto
"""
gTestExclusions = {
'darwin2': { 'base_unittests': ('ProcessUtilTest.SpawnChild',),
'ipc_tests': ('IPCFuzzingTest.MsgBadPayloadArgs',), },
'linux2': {
'gpu_tests':
('WebGLConformanceTests.conformance_attribs_gl_enable_vertex_attrib',
'WebGLConformanceTests.'
'conformance_attribs_gl_disabled_vertex_attrib',
'WebGLConformanceTests.'
'conformance_attribs_gl_vertex_attrib_zero_issues',
'WebGLConformanceTests.conformance_attribs_gl_vertex_attrib',
'WebGLConformanceTests.'
'conformance_attribs_gl_vertexattribpointer_offsets',
'WebGLConformanceTests.conformance_attribs_gl_vertexattribpointer',
'WebGLConformanceTests.conformance_buffers_buffer_bind_test',
'WebGLConformanceTests.'
'conformance_buffers_buffer_data_array_buffer',
'WebGLConformanceTests.'
'conformance_buffers_index_validation_copies_indices',
'WebGLConformanceTests.'
'conformance_buffers_index_validation_crash_with_buffer_sub_data',
'WebGLConformanceTests.'
'conformance_buffers_index_validation_verifies_too_many_indices',
'WebGLConformanceTests.'
'conformance_buffers_index_validation_with_resized_buffer',
'WebGLConformanceTests.conformance_canvas_buffer_offscreen_test',
'WebGLConformanceTests.conformance_canvas_buffer_preserve_test',
'WebGLConformanceTests.conformance_canvas_canvas_test',
'WebGLConformanceTests.conformance_canvas_canvas_zero_size',
'WebGLConformanceTests.'
'conformance_canvas_drawingbuffer_static_canvas_test',
'WebGLConformanceTests.conformance_canvas_drawingbuffer_test',),
'performance_ui_tests':
('*PageCycler*.*',
'*FrameRateCompositingTest.*',
'*FrameRateNoVsyncCanvasInternalTest.*',
'*FrameRateGpuCanvasInternalTest.*',
'IndexedDBTest.Perf',),
'sync_integration_tests':
('TwoClientPasswordsSyncTest.DeleteAll',
'MigrationTwoClientTest.MigrationHellWithoutNigori',
'TwoClientSessionsSyncTest.DeleteActiveSession',
'MultipleClientSessionsSyncTest.EncryptedAndChanged',
'MigrationSingleClientTest.'
'AllTypesIndividuallyTriggerNotification',),
'interactive_ui_tests':
('*OldPanelResizeBrowserTest.*',
'*OldPanelDragBrowserTest.*',
'*OldPanelBrowserTest.*',
'*OldPanelAndDesktopNotificationTest.*',
'*OldDockedPanelBrowserTest.*',
'*OldDetachedPanelBrowserTest.*',
'PanelDragBrowserTest.AttachWithSqueeze',
'*PanelBrowserTest.*',
'*DockedPanelBrowserTest.*',
'*DetachedPanelBrowserTest.*',),
'automated_ui_tests':
('AutomatedUITest.TheOneAndOnlyTest',
'AutomatedUITestBase.DragOut',), },
}
"""Since random tests are failing/hanging on coverage bot, we are enabling
tests feature by feature. crbug.com/159748
"""
gTestInclusions = {
'linux2': {
'browser_tests':
(# 'src/chrome/browser/downloads'
'SavePageBrowserTest.*',
'SavePageAsMHTMLBrowserTest.*',
'DownloadQueryTest.*',
'DownloadDangerPromptTest.*',
'DownloadTest.*',
# 'src/chrome/browser/net'
'CookiePolicyBrowserTest.*',
'FtpBrowserTest.*',
'LoadTimingObserverTest.*',
'PredictorBrowserTest.*',
'ProxyBrowserTest.*',
# 'src/chrome/browser/extensions'
'Extension*.*',
'WindowOpenPanelDisabledTest.*',
'WindowOpenPanelTest.*',
'WebstoreStandalone*.*',
'CommandLineWebstoreInstall.*',
'WebViewTest.*',
'RequirementsCheckerBrowserTest.*',
'ProcessManagementTest.*',
'PlatformAppBrowserTest.*',
'PlatformAppDevToolsBrowserTest.*',
'LazyBackgroundPageApiTest.*',
'IsolatedAppTest.*',
'PanelMessagingTest.*',
'GeolocationApiTest.*',
'ClipboardApiTest.*',
'ExecuteScriptApiTest.*',
'CalculatorBrowserTest.*',
'ChromeAppAPITest.*',
'AppApiTest.*',
'BlockedAppApiTest.*',
'AppBackgroundPageApiTest.*',
'WebNavigationApiTest.*',
'UsbApiTest.*',
'TabCaptureApiTest.*',
'SystemInfo*.*',
'SyncFileSystemApiTest.*',
'SocketApiTest.*',
'SerialApiTest.*',
'RecordApiTest.*',
'PushMessagingApiTest.*',
'ProxySettingsApiTest.*',
'ExperimentalApiTest.*',
'OmniboxApiTest.*',
'OffscreenTabsApiTest.*',
'NotificationApiTest.*',
'MediaGalleriesPrivateApiTest.*',
'PlatformAppMediaGalleriesBrowserTest.*',
'GetAuthTokenFunctionTest.*',
'LaunchWebAuthFlowFunctionTest.*',
'FileSystemApiTest.*',
'ScriptBadgeApiTest.*',
'PageAsBrowserActionApiTest.*',
'PageActionApiTest.*',
'BrowserActionApiTest.*',
'DownloadExtensionTest.*',
'DnsApiTest.*',
'DeclarativeApiTest.*',
'BluetoothApiTest.*',
'AllUrlsApiTest.*',
# 'src/chrome/browser/nacl_host'
'nacl_host.*',
# 'src/chrome/browser/automation'
'AutomationMiscBrowserTest.*',
# 'src/chrome/browser/autofill'
'FormStructureBrowserTest.*',
'AutofillPopupViewBrowserTest.*',
'AutofillTest.*',
# 'src/chrome/browser/autocomplete'
'AutocompleteBrowserTest.*',
# 'src/chrome/browser/captive_portal'
'CaptivePortalBrowserTest.*',
# 'src/chrome/browser/geolocation'
'GeolocationAccessTokenStoreTest.*',
'GeolocationBrowserTest.*',
# 'src/chrome/browser/nacl_host'
'NaClGdbTest.*',
# 'src/chrome/browser/devtools'
'DevToolsSanityTest.*',
'DevToolsExtensionTest.*',
'DevToolsExperimentalExtensionTest.*',
'WorkerDevToolsSanityTest.*',
# 'src/chrome/browser/first_run'
'FirstRunBrowserTest.*',
# 'src/chrome/browser/importer'
'ToolbarImporterUtilsTest.*',
# 'src/chrome/browser/page_cycler'
'PageCyclerBrowserTest.*',
'PageCyclerCachedBrowserTest.*',
# 'src/chrome/browser/performance_monitor'
'PerformanceMonitorBrowserTest.*',
'PerformanceMonitorUncleanExitBrowserTest.*',
'PerformanceMonitorSessionRestoreBrowserTest.*',
# 'src/chrome/browser/prerender'
'PrerenderBrowserTest.*',
'PrerenderBrowserTestWithNaCl.*',
'PrerenderBrowserTestWithExtensions.*',
'PrefetchBrowserTest.*',
'PrefetchBrowserTestNoPrefetching.*', ),
},
}
def TerminateSignalHandler(sig, stack):
  """When killed, try and kill our child processes.

  Installed for SIGINT/SIGTERM in main().  Resets the handler to the
  default first so a second signal kills us immediately, then forwards
  the signal to every child recorded in the module-global gChildPIDs.
  """
  signal.signal(sig, signal.SIG_DFL)
  for pid in gChildPIDs:
    # 'kill' is only exported by the os module on POSIX; on Windows fall
    # back to taskkill.exe.
    if 'kill' in os.__all__:  # POSIX
      os.kill(pid, sig)
    else:
      subprocess.call(['taskkill.exe', '/PID', str(pid)])
  sys.exit(0)
class RunTooLongException(Exception):
  """Raised when a command runs too long without producing any output."""
class BadUserInput(Exception):
  """Raised when arguments supplied by the user are incorrectly formatted."""
class RunProgramThread(threading.Thread):
  """A thread to run a subprocess.

  We want to print the output of our subprocess in real time, but also
  want a timeout if there has been no output for a certain amount of
  time. Normal techniques (e.g. loop in select()) aren't cross
  platform enough. the function seems simple: "print output of child, kill it
  if there is no output by timeout. But it was tricky to get this right
  in a x-platform way (see warnings about deadlock on the python
  subprocess doc page).
  """
  # Constants in our queue: the child produced output (PROGRESS) or has
  # exited (DONE).  The parent thread blocks on the queue with a timeout.
  PROGRESS = 0
  DONE = 1

  def __init__(self, cmd):
    """Args:
      cmd: command list suitable for subprocess.Popen.
    """
    super(RunProgramThread, self).__init__()
    self._cmd = cmd
    self._process = None   # Popen object once the child is started.
    self._queue = Queue.Queue()  # PROGRESS/DONE events for the parent.
    self._retcode = None   # Cached child return code.

  def run(self):
    # Thread entry point; dispatch to the platform-specific runner.
    if sys.platform in ('win32', 'cygwin'):
      return self._run_windows()
    else:
      self._run_posix()

  def _run_windows(self):
    # We need to save stdout to a temporary file because of a bug on the
    # windows implementation of python which can deadlock while waiting
    # for the IO to complete while writing to the PIPE and the pipe waiting
    # on us and us waiting on the child process.
    stdout_file = tempfile.TemporaryFile()
    try:
      self._process = subprocess.Popen(self._cmd,
                                       stdin=subprocess.PIPE,
                                       stdout=stdout_file,
                                       stderr=subprocess.STDOUT)
      gChildPIDs.append(self._process.pid)
      try:
        # To make sure that the buildbot don't kill us if we run too long
        # without any activity on the console output, we look for progress in
        # the length of the temporary file and we print what was accumulated so
        # far to the output console to make the buildbot know we are making some
        # progress.
        previous_tell = 0
        # We will poll the process until we get a non-None return code.
        self._retcode = None
        while self._retcode is None:
          self._retcode = self._process.poll()
          current_tell = stdout_file.tell()
          if current_tell > previous_tell:
            # Report progress to our main thread so we don't timeout.
            self._queue.put(RunProgramThread.PROGRESS)
            # And print what was accumulated to far.
            stdout_file.seek(previous_tell)
            # Trailing comma: Python 2 print without a newline.
            print stdout_file.read(current_tell - previous_tell),
            previous_tell = current_tell
          # Don't be selfish, let other threads do stuff while we wait for
          # the process to complete.
          time.sleep(0.5)
        # OK, the child process has exited, let's print its output to our
        # console to create debugging logs in case they get to be needed.
        stdout_file.flush()
        stdout_file.seek(previous_tell)
        print stdout_file.read(stdout_file.tell() - previous_tell)
      except IOError, e:
        # Log and continue: the child may already be gone; cleanup happens
        # in the finally/below regardless.
        logging.exception('%s', e)
        pass
    finally:
      stdout_file.close()
    # If we get here the process is done.
    gChildPIDs.remove(self._process.pid)
    self._queue.put(RunProgramThread.DONE)

  def _run_posix(self):
    """No deadlock problem so use the simple answer. The windows solution
    appears to add extra buffering which we don't want on other platforms."""
    self._process = subprocess.Popen(self._cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
    gChildPIDs.append(self._process.pid)
    try:
      while True:
        line = self._process.stdout.readline()
        if not line:  # EOF
          break
        # Trailing comma: the line already ends with '\n'.
        print line,
        self._queue.put(RunProgramThread.PROGRESS, True)
    except IOError:
      pass
    # If we get here the process is done.
    gChildPIDs.remove(self._process.pid)
    self._queue.put(RunProgramThread.DONE)

  def stop(self):
    # Alias for kill(); kept for callers using Thread-like naming.
    self.kill()

  def kill(self):
    """Kill our running process if needed.  Wait for kill to complete.

    Should be called in the PARENT thread; we do not self-kill.
    Returns the return code of the process.
    Safe to call even if the process is dead.
    """
    if not self._process:
      return self.retcode()
    if 'kill' in os.__all__:  # POSIX
      os.kill(self._process.pid, signal.SIGKILL)
    else:
      subprocess.call(['taskkill.exe', '/PID', str(self._process.pid)])
    return self.retcode()

  def retcode(self):
    """Return the return value of the subprocess.

    Waits for process to die but does NOT kill it explicitly.
    """
    if self._retcode == None:  # must be none, not 0/False
      self._retcode = self._process.wait()
    return self._retcode

  def RunUntilCompletion(self, timeout):
    """Run thread until completion or timeout (in seconds).

    Start the thread.  Let it run until completion, or until we've
    spent TIMEOUT without seeing output.  On timeout throw
    RunTooLongException.
    """
    self.start()
    while True:
      try:
        x = self._queue.get(True, timeout)
        if x == RunProgramThread.DONE:
          return self.retcode()
        # Otherwise x is PROGRESS; loop and re-arm the timeout.
      except Queue.Empty, e:  # timed out
        logging.info('TIMEOUT (%d seconds exceeded with no output): killing' %
                     timeout)
        self.kill()
        raise RunTooLongException()
class Coverage(object):
  """Doitall class for code coverage."""

  def __init__(self, options, args):
    """Args:
      options: parsed optparse options (see CoverageOptionParser).
      args: remaining positional args; treated as test names by FindTests.

    Exits fatally (via helpers) if the build directory or required
    coverage tools cannot be found.
    """
    super(Coverage, self).__init__()
    logging.basicConfig(level=logging.DEBUG)
    self.directory = options.directory
    self.options = options
    self.args = args
    self.ConfirmDirectory()
    self.directory_parent = os.path.dirname(self.directory)
    self.output_directory = os.path.join(self.directory, 'coverage')
    if not os.path.exists(self.output_directory):
      os.mkdir(self.output_directory)
    # The "final" lcov-format file
    self.coverage_info_file = os.path.join(self.directory, 'coverage.info')
    # If needed, an intermediate VSTS-format file
    self.vsts_output = os.path.join(self.directory, 'coverage.vsts')
    # Needed for Windows.
    self.src_root = options.src_root
    self.FindPrograms()
    self.ConfirmPlatformAndPaths()
    self.tests = []  # This can be a list of strings, lists or both.
    self.xvfb_pid = 0
    self.test_files = []  # List of files with test specifications.
    self.test_filters = {}  # Mapping from testname->--gtest_filter arg.
    logging.info('self.directory: ' + self.directory)
    logging.info('self.directory_parent: ' + self.directory_parent)
def FindInPath(self, program):
"""Find program in our path. Return abs path to it, or None."""
if not 'PATH' in os.environ:
logging.fatal('No PATH environment variable?')
sys.exit(1)
paths = os.environ['PATH'].split(os.pathsep)
for path in paths:
fullpath = os.path.join(path, program)
if os.path.exists(fullpath):
return fullpath
return None
  def FindPrograms(self):
    """Find programs we may want to run.

    POSIX: use the lcov/mcov/genhtml binaries checked into third_party.
    Windows: locate the Visual Studio performance tools on PATH (after a
    buildbot-specific PATH hack) and exit fatally if any are missing.
    Sets self.programs for later existence checks.
    """
    if self.IsPosix():
      self.lcov_directory = os.path.join(sys.path[0],
                                         '../../third_party/lcov/bin')
      self.lcov = os.path.join(self.lcov_directory, 'lcov')
      self.mcov = os.path.join(self.lcov_directory, 'mcov')
      self.genhtml = os.path.join(self.lcov_directory, 'genhtml')
      self.programs = [self.lcov, self.mcov, self.genhtml]
    else:
      # Hack to get the buildbot working.
      os.environ['PATH'] += r';c:\coverage\coverage_analyzer'
      os.environ['PATH'] += r';c:\coverage\performance_tools'
      # (end hack)
      commands = ['vsperfcmd.exe', 'vsinstr.exe', 'coverage_analyzer.exe']
      self.perf = self.FindInPath('vsperfcmd.exe')
      self.instrument = self.FindInPath('vsinstr.exe')
      self.analyzer = self.FindInPath('coverage_analyzer.exe')
      if not self.perf or not self.instrument or not self.analyzer:
        logging.fatal('Could not find Win performance commands.')
        logging.fatal('Commands needed in PATH: ' + str(commands))
        sys.exit(1)
      self.programs = [self.perf, self.instrument, self.analyzer]
def PlatformBuildPrefix(self):
"""Return a platform specific build directory prefix.
This prefix is prepended to the build target (Debug, Release) to
identify output as relative to the build directory.
These values are specific to Chromium's use of gyp.
"""
if self.IsMac():
return '../xcodebuild'
if self.IsWindows():
return ''
else: # Linux
return '../out' # assumes make, unlike runtest.py
  def ConfirmDirectory(self):
    """Confirm correctness of self.directory.

    If it exists, happiness.  If not, try and figure it out in a
    manner similar to FindBundlesFile().  The 'figure it out' case
    happens with buildbot where the directory isn't specified
    explicitly.  Exits fatally if neither --directory nor the
    (--target, --build-dir) pair can produce an existing directory.
    """
    if (not self.directory and
        not (self.options.target and self.options.build_dir)):
      logging.fatal('Must use --directory or (--target and --build-dir)')
      sys.exit(1)
    if not self.directory:
      # Derive the directory from buildbot-style options.
      self.directory = os.path.join(self.options.build_dir,
                                    self.PlatformBuildPrefix(),
                                    self.options.target)
    if os.path.exists(self.directory):
      logging.info('Directory: ' + self.directory)
      return
    else:
      logging.fatal('Directory ' +
                    self.directory + ' doesn\'t exist')
      sys.exit(1)
  def FindBundlesFile(self):
    """Find the bundlesfile.

    The 'bundles' file can be either absolute path, or (if we are run
    from buildbot) we need to find it based on other hints (--target,
    --build-dir, etc).  Rewrites self.options.bundles in place and
    exits fatally if the resulting path does not exist.
    """
    # If no bundle file, no problem!
    if not self.options.bundles:
      return
    # If true, we're buildbot.  Form a path.
    # Else assume absolute.
    if self.options.target and self.options.build_dir:
      fullpath = os.path.join(self.options.build_dir,
                              self.PlatformBuildPrefix(),
                              self.options.target,
                              self.options.bundles)
      self.options.bundles = fullpath
    if os.path.exists(self.options.bundles):
      logging.info('BundlesFile: ' + self.options.bundles)
      return
    else:
      logging.fatal('bundlefile ' +
                    self.options.bundles + ' doesn\'t exist')
      sys.exit(1)
def FindTests(self):
"""Find unit tests to run; set self.tests to this list.
Assume all non-option items in the arg list are tests to be run.
"""
# Before we begin, find the bundles file if not an absolute path.
self.FindBundlesFile()
# Small tests: can be run in the "chromium" directory.
# If asked, run all we can find.
if self.options.all_unittests:
self.tests += glob.glob(os.path.join(self.directory, '*_unittests'))
self.tests += glob.glob(os.path.join(self.directory, '*unit_tests'))
elif self.options.all_browsertests:
# Run all tests in browser_tests and content_browsertests.
self.tests += glob.glob(os.path.join(self.directory, 'browser_tests'))
self.tests += glob.glob(os.path.join(self.directory,
'content_browsertests'))
# Tests can come in as args directly, indirectly (through a file
# of test lists) or as a file of bundles.
all_testnames = self.args[:] # Copy since we might modify
for test_file in self.options.test_files:
f = open(test_file)
for line in f:
line = re.sub(r"#.*$", "", line)
line = re.sub(r"\s*", "", line)
if re.match("\s*$"):
continue
all_testnames.append(line)
f.close()
tests_from_bundles = None
if self.options.bundles:
try:
tests_from_bundles = eval(open(self.options.bundles).read())
except IOError:
logging.fatal('IO error in bundle file ' +
self.options.bundles + ' (doesn\'t exist?)')
except (NameError, SyntaxError):
logging.fatal('Parse or syntax error in bundle file ' +
self.options.bundles)
if hasattr(tests_from_bundles, '__iter__'):
all_testnames += tests_from_bundles
else:
logging.fatal('Fatal error with bundle file; could not get list from' +
self.options.bundles)
sys.exit(1)
# If told explicit tests, run those (after stripping the name as
# appropriate)
for testname in all_testnames:
mo = re.search(r"(.*)\[(.*)\]$", testname)
gtest_filter = None
if mo:
gtest_filter = mo.group(2)
testname = mo.group(1)
if ':' in testname:
testname = testname.split(':')[1]
# We need 'pyautolib' to run pyauto tests and 'pyautolib' itself is not an
# executable. So skip this test from adding into coverage_bundles.py.
if testname == 'pyautolib':
continue
self.tests += [os.path.join(self.directory, testname)]
if gtest_filter:
self.test_filters[testname] = gtest_filter
# Add 'src/test/functional/pyauto_functional.py' to self.tests.
# This file with '-v --suite=CODE_COVERAGE' arguments runs all pyauto tests.
# Pyauto tests are failing randomly on coverage bots. So excluding them.
# self.tests += [['src/chrome/test/functional/pyauto_functional.py',
# '-v',
# '--suite=CODE_COVERAGE']]
# Medium tests?
# Not sure all of these work yet (e.g. page_cycler_tests)
# self.tests += glob.glob(os.path.join(self.directory, '*_tests'))
# If needed, append .exe to tests since vsinstr.exe likes it that
# way.
if self.IsWindows():
for ind in range(len(self.tests)):
test = self.tests[ind]
test_exe = test + '.exe'
if not test.endswith('.exe') and os.path.exists(test_exe):
self.tests[ind] = test_exe
  def TrimTests(self):
    """Trim specific tests for each platform."""
    if self.IsWindows():
      return
    # TODO(jrg): remove when not needed
    # Keep only tests whose path contains 'unit_tests'.  NOTE: the
    # unconditional 'return' after this filter makes the IsLinux()/IsMac()
    # branches below unreachable dead code; they are retained on purpose
    # pending the TODO above.
    inclusion = ['unit_tests']
    keep = []
    for test in self.tests:
      for i in inclusion:
        if i in test:
          keep.append(test)
    self.tests = keep
    logging.info('After trimming tests we have ' + ' '.join(self.tests))
    return
    if self.IsLinux():
      # self.tests = filter(lambda t: t.endswith('base_unittests'), self.tests)
      return
    if self.IsMac():
      exclusion = ['automated_ui_tests']
      punted = []
      for test in self.tests:
        for e in exclusion:
          if test.endswith(e):
            punted.append(test)
      self.tests = filter(lambda t: t not in punted, self.tests)
      if punted:
        logging.info('Tests trimmed out: ' + str(punted))
def ConfirmPlatformAndPaths(self):
"""Confirm OS and paths (e.g. lcov)."""
for program in self.programs:
if not os.path.exists(program):
logging.fatal('Program missing: ' + program)
sys.exit(1)
  def Run(self, cmdlist, ignore_error=False, ignore_retcode=None,
          explanation=None):
    """Run the command list; exit fatally on error.

    Args:
      cmdlist: a list of commands (e.g. to pass to subprocess.call)
      ignore_error: if True log an error; if False then exit.
      ignore_retcode: if retcode is non-zero, exit unless we ignore.
      explanation: optional text appended to the 'errors ignored' warning.
    Returns: process return code.
    Throws: RunTooLongException if the process does not produce output
      within TIMEOUT seconds; timeout is specified as a command line
      option to the Coverage class and is set on init.
    """
    logging.info('Running ' + str(cmdlist))
    t = RunProgramThread(cmdlist)
    retcode = t.RunUntilCompletion(self.options.timeout)
    if retcode:
      if ignore_error or retcode == ignore_retcode:
        logging.warning('COVERAGE: %s unhappy but errors ignored  %s' %
                        (str(cmdlist), explanation or ''))
      else:
        logging.fatal('COVERAGE:  %s failed; return code: %d' %
                      (str(cmdlist), retcode))
        sys.exit(retcode)
    return retcode
def IsPosix(self):
"""Return True if we are POSIX."""
return self.IsMac() or self.IsLinux()
def IsMac(self):
return sys.platform == 'darwin'
def IsLinux(self):
return sys.platform.startswith('linux')
def IsWindows(self):
"""Return True if we are Windows."""
return sys.platform in ('win32', 'cygwin')
  def ClearData(self):
    """Clear old gcda files and old coverage info files.

    No-op when --dont-clear-coverage-data was given.  On POSIX, resets
    the gcov counters via 'lcov --zerocounters' and removes the archived
    per-mode coverage directory matching this run's flavor.
    """
    if self.options.dont_clear_coverage_data:
      print 'Clearing of coverage data NOT performed.'
      return
    print 'Clearing coverage data from previous runs.'
    if os.path.exists(self.coverage_info_file):
      os.remove(self.coverage_info_file)
    if self.IsPosix():
      subprocess.call([self.lcov,
                       '--directory', self.directory_parent,
                       '--zerocounters'])
      shutil.rmtree(os.path.join(self.directory, 'coverage'))
      if self.options.all_unittests:
        if os.path.exists(os.path.join(self.directory, 'unittests_coverage')):
          shutil.rmtree(os.path.join(self.directory, 'unittests_coverage'))
      elif self.options.all_browsertests:
        if os.path.exists(os.path.join(self.directory,
                                       'browsertests_coverage')):
          shutil.rmtree(os.path.join(self.directory, 'browsertests_coverage'))
      else:
        if os.path.exists(os.path.join(self.directory, 'total_coverage')):
          shutil.rmtree(os.path.join(self.directory, 'total_coverage'))
  def BeforeRunOneTest(self, testname):
    """Do things before running each test.

    Windows only: shut down any stale perf counters, instrument every
    test binary for coverage, then start fresh counters writing to
    self.vsts_output.  No-op elsewhere.
    """
    if not self.IsWindows():
      return
    # Stop old counters if needed
    cmdlist = [self.perf, '-shutdown']
    self.Run(cmdlist, ignore_error=True)
    # Instrument binaries
    for fulltest in self.tests:
      if os.path.exists(fulltest):
        # See http://support.microsoft.com/kb/939818 for details on args
        cmdlist = [self.instrument, '/d:ignorecverr', '/COVERAGE', fulltest]
        # Return code 4 means "already instrumented" and is harmless.
        self.Run(cmdlist, ignore_retcode=4,
                 explanation='OK with a multiple-instrument')
    # Start new counters
    cmdlist = [self.perf, '-start:coverage', '-output:' + self.vsts_output]
    self.Run(cmdlist)
def BeforeRunAllTests(self):
"""Called right before we run all tests."""
if self.IsLinux() and self.options.xvfb:
self.StartXvfb()
  def GtestFilter(self, fulltest, excl=None):
    """Return a --gtest_filter=BLAH for this test.

    Combines four sources: built-in FLAKY/FAILS/DISABLED/MAYBE
    exclusions, the platform exclusion dict, the platform inclusion
    dict, and any per-test "POS1:POS2-NEG1:NEG2" filter recorded in
    self.test_filters.

    Args:
      fulltest: full name of test executable
      excl: the exclusions dict.  Only set in a unit test;
        else uses gTestExclusions.
    Returns:
      String of the form '--gtest_filter=BLAH', or None.
    Raises:
      BadUserInput: if a user-supplied filter contains more than one '-'.
    """
    positive_gfilter_list = []
    negative_gfilter_list = []
    # Exclude all flaky, failing, disabled and maybe tests;
    # they don't count for code coverage.
    negative_gfilter_list += ('*.FLAKY_*', '*.FAILS_*',
                              '*.DISABLED_*', '*.MAYBE_*')
    if not self.options.no_exclusions:
      exclusions = excl or gTestExclusions
      excldict = exclusions.get(sys.platform)
      if excldict:
        for test in excldict.keys():
          # example: if base_unittests in ../blah/blah/base_unittests.exe
          if test in fulltest:
            negative_gfilter_list += excldict[test]
    inclusions = gTestInclusions
    include_dict = inclusions.get(sys.platform)
    if include_dict:
      for test in include_dict.keys():
        if test in fulltest:
          positive_gfilter_list += include_dict[test]
    fulltest_basename = os.path.basename(fulltest)
    if fulltest_basename in self.test_filters:
      # Split a "positives-negatives" filter into its two halves.
      specific_test_filters = self.test_filters[fulltest_basename].split('-')
      if len(specific_test_filters) > 2:
        logging.error('Multiple "-" symbols in filter list: %s' %
                      self.test_filters[fulltest_basename])
        raise BadUserInput()
      if len(specific_test_filters) == 2:
        # Remove trailing ':'
        specific_test_filters[0] = specific_test_filters[0][:-1]
      if specific_test_filters[0]:  # Test for no positive filters.
        positive_gfilter_list += specific_test_filters[0].split(':')
      if len(specific_test_filters) > 1:
        negative_gfilter_list += specific_test_filters[1].split(':')
    if not positive_gfilter_list and not negative_gfilter_list:
      return None
    result = '--gtest_filter='
    if positive_gfilter_list:
      result += ':'.join(positive_gfilter_list)
    if negative_gfilter_list:
      if positive_gfilter_list:
        result += ':'
      result += '-' + ':'.join(negative_gfilter_list)
    return result
  def RunTests(self):
    """Run all unit tests and generate appropriate lcov files.

    Each entry of self.tests is either a test binary path (str) or a
    ready-made command list.  Failures are fatal only with --strict.
    """
    self.BeforeRunAllTests()
    for fulltest in self.tests:
      if type(fulltest) is str:
        if not os.path.exists(fulltest):
          logging.info(fulltest + ' does not exist')
          if self.options.strict:
            sys.exit(2)
        else:
          logging.info('%s path exists' % fulltest)
        cmdlist = [fulltest, '--gtest_print_time']
        # If asked, make this REAL fast for testing.
        if self.options.fast_test:
          logging.info('Running as a FAST test for testing')
          # cmdlist.append('--gtest_filter=RenderWidgetHost*')
          # cmdlist.append('--gtest_filter=CommandLine*')
          cmdlist.append('--gtest_filter=C*')
        # Possibly add a test-specific --gtest_filter.
        # NOTE: 'filter' shadows the builtin of the same name (local scope only).
        filter = self.GtestFilter(fulltest)
        if filter:
          cmdlist.append(filter)
      elif type(fulltest) is list:
        cmdlist = fulltest
      self.BeforeRunOneTest(fulltest)
      logging.info('Running test ' + str(cmdlist))
      try:
        # NOTE(review): with ignore_retcode=True only a return code of 1
        # compares equal (1 == True); ignore_error=True may have been the
        # intent -- confirm before changing.
        retcode = self.Run(cmdlist, ignore_retcode=True)
      except SystemExit:  # e.g. sys.exit() was called somewhere in here
        raise
      except:  # can't "except WindowsError" since script runs on non-Windows
        logging.info('EXCEPTION while running a unit test')
        logging.info(traceback.format_exc())
        retcode = 999
      self.AfterRunOneTest(fulltest)
      if retcode:
        logging.info('COVERAGE: test %s failed; return code: %d.' %
                     (fulltest, retcode))
        if self.options.strict:
          logging.fatal('Test failure is fatal.')
          sys.exit(retcode)
    self.AfterRunAllTests()
  def AfterRunOneTest(self, testname):
    """Do things right after running each test.

    Windows only: stop the perf counters, collect the generated
    '.coverage' file into self.vsts_output, and convert it to lcov.
    No-op elsewhere (POSIX converts once, in AfterRunAllTests).
    """
    if not self.IsWindows():
      return
    # Stop counters
    cmdlist = [self.perf, '-shutdown']
    self.Run(cmdlist)
    full_output = self.vsts_output + '.coverage'
    shutil.move(full_output, self.vsts_output)
    # generate lcov!
    self.GenerateLcovWindows(testname)
def AfterRunAllTests(self):
"""Do things right after running ALL tests."""
# On POSIX we can do it all at once without running out of memory.
# This contrasts with Windows where we must do it after each test.
if self.IsPosix():
self.GenerateLcovPosix()
# Only on Linux do we have the Xvfb step.
if self.IsLinux() and self.options.xvfb:
self.StopXvfb()
  def StartXvfb(self):
    """Start Xvfb and set an appropriate DISPLAY environment.  Linux only.

    Copied from http://src.chromium.org/viewvc/chrome/trunk/tools/buildbot/
    scripts/slave/slave_utils.py?view=markup
    with some simplifications (e.g. no need to use xdisplaycheck, save
    pid in var not file, etc)
    """
    logging.info('Xvfb: starting')
    proc = subprocess.Popen(["Xvfb", ":9", "-screen", "0", "1024x768x24",
                             "-ac"],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    self.xvfb_pid = proc.pid
    if not self.xvfb_pid:
      logging.info('Could not start Xvfb')
      return
    os.environ['DISPLAY'] = ":9"
    # Now confirm, giving a chance for it to start if needed.
    logging.info('Xvfb: confirming')
    # Poll up to 10 times (~5s); loop variable itself is unused.
    for test in range(10):
      proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
      # retcode is the encoded wait status from os.waitpid; 0 means the
      # xdpyinfo probe exited cleanly, i.e. the display is up.
      pid, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.5)
    if retcode != 0:
      logging.info('Warning: could not confirm Xvfb happiness')
    else:
      logging.info('Xvfb: OK')
def StopXvfb(self):
"""Stop Xvfb if needed. Linux only."""
if self.xvfb_pid:
logging.info('Xvfb: killing')
try:
os.kill(self.xvfb_pid, signal.SIGKILL)
except:
pass
del os.environ['DISPLAY']
self.xvfb_pid = 0
def CopyCoverageFileToDestination(self, coverage_folder):
coverage_dir = os.path.join(self.directory, coverage_folder)
if not os.path.exists(coverage_dir):
os.makedirs(coverage_dir)
shutil.copyfile(self.coverage_info_file, os.path.join(coverage_dir,
'coverage.info'))
  def GenerateLcovPosix(self):
    """Convert profile data to lcov on Mac or Linux.

    Runs mcov over the build tree to produce self.coverage_info_file,
    then archives a copy per run flavor.  Exits fatally if the output
    file is missing afterwards.
    """
    start_dir = os.getcwd()
    logging.info('GenerateLcovPosix: start_dir=' + start_dir)
    if self.IsLinux():
      # With Linux/make (e.g. the coverage_run target), the current
      # directory for this command is .../build/src/chrome but we need
      # to be in .../build/src for the relative path of source files
      # to be correct.  However, when run from buildbot, the current
      # directory is .../build.  Accommodate.
      # On Mac source files are compiled with abs paths so this isn't
      # a problem.
      # This is a bit of a hack.  The best answer is to require this
      # script be run in a specific directory for all cases (from
      # Makefile or from buildbot).
      if start_dir.endswith('chrome'):
        logging.info('coverage_posix.py: doing a "cd .." '
                     'to accomodate Linux/make PWD')
        os.chdir('..')
      elif start_dir.endswith('build'):
        logging.info('coverage_posix.py: doing a "cd src" '
                     'to accomodate buildbot PWD')
        os.chdir('src')
      else:
        logging.info('coverage_posix.py: NOT changing directory.')
    elif self.IsMac():
      pass
    command = [self.mcov,
               '--directory',
               os.path.join(start_dir, self.directory_parent),
               '--output',
               os.path.join(start_dir, self.coverage_info_file)]
    logging.info('Assembly command: ' + ' '.join(command))
    retcode = subprocess.call(command)
    if retcode:
      logging.fatal('COVERAGE: %s failed; return code: %d' %
                    (command[0], retcode))
      if self.options.strict:
        sys.exit(retcode)
    if self.IsLinux():
      # Undo the chdir hack above.
      os.chdir(start_dir)
    # Copy the unittests coverage information to a different folder.
    if self.options.all_unittests:
      self.CopyCoverageFileToDestination('unittests_coverage')
    elif self.options.all_browsertests:
      # Save browsertests only coverage information.
      self.CopyCoverageFileToDestination('browsertests_coverage')
    else:
      # Save the overall coverage information.
      self.CopyCoverageFileToDestination('total_coverage')
    if not os.path.exists(self.coverage_info_file):
      logging.fatal('%s was not created.  Coverage run failed.' %
                    self.coverage_info_file)
      sys.exit(1)
def GenerateLcovWindows(self, testname=None):
"""Convert VSTS format to lcov. Appends coverage data to sum file."""
lcov_file = self.vsts_output + '.lcov'
if os.path.exists(lcov_file):
os.remove(lcov_file)
# generates the file (self.vsts_output + ".lcov")
cmdlist = [self.analyzer,
'-sym_path=' + self.directory,
'-src_root=' + self.src_root,
'-noxml',
self.vsts_output]
self.Run(cmdlist)
if not os.path.exists(lcov_file):
logging.fatal('Output file %s not created' % lcov_file)
sys.exit(1)
logging.info('Appending lcov for test %s to %s' %
(testname, self.coverage_info_file))
size_before = 0
if os.path.exists(self.coverage_info_file):
size_before = os.stat(self.coverage_info_file).st_size
src = open(lcov_file, 'r')
dst = open(self.coverage_info_file, 'a')
dst.write(src.read())
src.close()
dst.close()
size_after = os.stat(self.coverage_info_file).st_size
logging.info('Lcov file growth for %s: %d --> %d' %
(self.coverage_info_file, size_before, size_after))
  def GenerateHtml(self):
    """Convert lcov to html with genhtml into self.output_directory.

    Failure is fatal only with --strict.
    """
    # TODO(jrg): This isn't happy when run with unit_tests since V8 has a
    # different "base" so V8 includes can't be found in ".".  Fix.
    command = [self.genhtml,
               self.coverage_info_file,
               '--output-directory',
               self.output_directory]
    print >>sys.stderr, 'html generation command: ' + ' '.join(command)
    retcode = subprocess.call(command)
    if retcode:
      logging.fatal('COVERAGE: %s failed; return code: %d' %
                    (command[0], retcode))
      if self.options.strict:
        sys.exit(retcode)
def CoverageOptionParser():
  """Return an optparse.OptionParser() suitable for Coverage object creation."""
  parser = optparse.OptionParser()
  parser.add_option('-d',
                    '--directory',
                    dest='directory',
                    default=None,
                    help='Directory of unit test files')
  # NOTE(review): the boolean-style flags below (-a, -b, -g, -f, -s, -t,
  # -x, --no_exclusions) are declared without action='store_true', so
  # optparse uses the default 'store' action and each flag consumes the
  # following command-line token as its (truthy string) value.  Confirm
  # whether store_true was intended before relying on them.
  parser.add_option('-a',
                    '--all_unittests',
                    dest='all_unittests',
                    default=False,
                    help='Run all tests we can find (*_unittests)')
  parser.add_option('-b',
                    '--all_browsertests',
                    dest='all_browsertests',
                    default=False,
                    help='Run all tests in browser_tests '
                         'and content_browsertests')
  parser.add_option('-g',
                    '--genhtml',
                    dest='genhtml',
                    default=False,
                    help='Generate html from lcov output')
  parser.add_option('-f',
                    '--fast_test',
                    dest='fast_test',
                    default=False,
                    help='Make the tests run REAL fast by doing little.')
  parser.add_option('-s',
                    '--strict',
                    dest='strict',
                    default=False,
                    help='Be strict and die on test failure.')
  parser.add_option('-S',
                    '--src_root',
                    dest='src_root',
                    default='.',
                    help='Source root (only used on Windows)')
  parser.add_option('-t',
                    '--trim',
                    dest='trim',
                    default=True,
                    help='Trim out tests?  Default True.')
  parser.add_option('-x',
                    '--xvfb',
                    dest='xvfb',
                    default=True,
                    help='Use Xvfb for tests?  Default True.')
  # NOTE(review): type="int" only coerces values given on the command
  # line; the default stays the float 300.0.
  parser.add_option('-T',
                    '--timeout',
                    dest='timeout',
                    default=5.0 * 60.0,
                    type="int",
                    help='Timeout before bailing if a subprocess has no output.'
                         '  Default is 5min  (Buildbot is 10min.)')
  parser.add_option('-B',
                    '--bundles',
                    dest='bundles',
                    default=None,
                    help='Filename of bundles for coverage.')
  parser.add_option('--build-dir',
                    dest='build_dir',
                    default=None,
                    help=('Working directory for buildbot build.'
                          'used for finding bundlefile.'))
  parser.add_option('--target',
                    dest='target',
                    default=None,
                    help=('Buildbot build target; '
                          'used for finding bundlefile (e.g. Debug)'))
  parser.add_option('--no_exclusions',
                    dest='no_exclusions',
                    default=None,
                    help=('Disable the exclusion list.'))
  parser.add_option('--dont-clear-coverage-data',
                    dest='dont_clear_coverage_data',
                    default=False,
                    action='store_true',
                    help=('Turn off clearing of cov data from a prev run'))
  parser.add_option('-F',
                    '--test-file',
                    dest="test_files",
                    default=[],
                    action='append',
                    help=('Specify a file from which tests to be run will ' +
                          'be extracted'))
  return parser
def main():
  """Parse options, run the requested coverage pass, and return 0."""
  # Print out the args to help someone do it by hand if needed
  print >>sys.stderr, sys.argv
  # Try and clean up nice if we're killed by buildbot, Ctrl-C, ...
  signal.signal(signal.SIGINT, TerminateSignalHandler)
  signal.signal(signal.SIGTERM, TerminateSignalHandler)
  parser = CoverageOptionParser()
  (options, args) = parser.parse_args()
  # The two "run everything" modes are mutually exclusive.
  if options.all_unittests and options.all_browsertests:
    print 'Error! Can not have all_unittests and all_browsertests together!'
    sys.exit(1)
  coverage = Coverage(options, args)
  coverage.ClearData()
  coverage.FindTests()
  if options.trim:
    coverage.TrimTests()
  coverage.RunTests()
  if options.genhtml:
    coverage.GenerateHtml()
  return 0
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
w1ll1am23/home-assistant | tests/components/plex/test_device_handling.py | 4 | 4613 | """Tests for handling the device registry."""
from homeassistant.components.media_player.const import DOMAIN as MP_DOMAIN
from homeassistant.components.plex.const import DOMAIN
async def test_cleanup_orphaned_devices(hass, entry, setup_plex_server):
    """Test cleaning up orphaned devices on startup."""
    test_device_id = {(DOMAIN, "temporary_device_123")}

    device_registry = await hass.helpers.device_registry.async_get_registry()
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    # Pre-create a device and attach one entity to it.
    test_device = device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers=test_device_id,
    )
    assert test_device is not None
    test_entity = entity_registry.async_get_or_create(
        MP_DOMAIN, DOMAIN, "entity_unique_id_123", device_id=test_device.id
    )
    assert test_entity is not None

    # Ensure device is not removed with an entity
    await setup_plex_server()
    device = device_registry.async_get_device(identifiers=test_device_id)
    assert device is not None
    await hass.config_entries.async_unload(entry.entry_id)

    # Ensure device is removed without an entity
    entity_registry.async_remove(test_entity.entity_id)
    await setup_plex_server()
    device = device_registry.async_get_device(identifiers=test_device_id)
    assert device is None
async def test_migrate_transient_devices(
    hass, entry, setup_plex_server, requests_mock, player_plexweb_resources
):
    """Test cleaning up transient devices on startup."""
    plexweb_device_id = {(DOMAIN, "plexweb_id")}
    non_plexweb_device_id = {(DOMAIN, "1234567890123456-com-plexapp-android")}
    plex_client_service_device_id = {(DOMAIN, "plex.tv-clients")}

    device_registry = await hass.helpers.device_registry.async_get_registry()
    entity_registry = await hass.helpers.entity_registry.async_get_registry()

    # Pre-create devices and entities to test device migration
    plexweb_device = device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers=plexweb_device_id,
        model="Plex Web",
    )
    # plexweb_entity = entity_registry.async_get_or_create(MP_DOMAIN, DOMAIN, "unique_id_123:plexweb_id", suggested_object_id="plex_plex_web_chrome", device_id=plexweb_device.id)
    entity_registry.async_get_or_create(
        MP_DOMAIN,
        DOMAIN,
        "unique_id_123:plexweb_id",
        suggested_object_id="plex_plex_web_chrome",
        device_id=plexweb_device.id,
    )
    # A non-Plex-Web device must NOT be migrated to the service device.
    non_plexweb_device = device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers=non_plexweb_device_id,
        model="Plex for Android (TV)",
    )
    entity_registry.async_get_or_create(
        MP_DOMAIN,
        DOMAIN,
        "unique_id_123:1234567890123456-com-plexapp-android",
        suggested_object_id="plex_plex_for_android_tv_shield_android_tv",
        device_id=non_plexweb_device.id,
    )

    # Ensure the Plex Web client is available
    requests_mock.get("/resources", text=player_plexweb_resources)

    # Precondition: one entity on each pre-created device, and no
    # "plex.tv-clients" service device yet.
    plexweb_device = device_registry.async_get_device(identifiers=plexweb_device_id)
    non_plexweb_device = device_registry.async_get_device(
        identifiers=non_plexweb_device_id
    )
    plex_service_device = device_registry.async_get_device(
        identifiers=plex_client_service_device_id
    )
    assert (
        len(
            hass.helpers.entity_registry.async_entries_for_device(
                entity_registry, device_id=plexweb_device.id
            )
        )
        == 1
    )
    assert (
        len(
            hass.helpers.entity_registry.async_entries_for_device(
                entity_registry, device_id=non_plexweb_device.id
            )
        )
        == 1
    )
    assert plex_service_device is None

    # Ensure Plex Web entity is migrated to a service
    await setup_plex_server()

    # Postcondition: the Plex Web entity moved to the service device; the
    # non-Plex-Web entity stayed where it was.
    plex_service_device = device_registry.async_get_device(
        identifiers=plex_client_service_device_id
    )
    assert (
        len(
            hass.helpers.entity_registry.async_entries_for_device(
                entity_registry, device_id=plexweb_device.id
            )
        )
        == 0
    )
    assert (
        len(
            hass.helpers.entity_registry.async_entries_for_device(
                entity_registry, device_id=non_plexweb_device.id
            )
        )
        == 1
    )
    assert (
        len(
            hass.helpers.entity_registry.async_entries_for_device(
                entity_registry, device_id=plex_service_device.id
            )
        )
        == 1
    )
| apache-2.0 |
j0holo/library-management-system | app.py | 1 | 2905 | import os
from flask import Flask, render_template, request
from flask import redirect, url_for, flash
from models import *
app = Flask(__name__)
app.config.from_object(__name__)
# Development-only configuration.  The commented-out DB settings are not
# used here: the database is configured directly via db.init() in the
# __main__ block at the bottom of this file.
app.config.update(dict(
    SECRET_KEY='development key',
    # USER='development',
    # PASSWORD='devpassword',
    # DATABASE='devdatabase',
    # CHARSET='utf8',
    # HOST=os.getenv('DB_HOST', 'localhost'),
    DEBUG=True
))
@app.before_request
def before_request():
    """Open the database connection before each request is handled."""
    db.connect()
@app.after_request
def after_request(response):
    """Close the database connection after each request.

    The response object must be returned unchanged so Flask can send it.
    """
    db.close()
    return response
@app.route('/admin/publisher')
def view_publishers():
    """Render the admin overview page listing every publisher."""
    all_publishers = Publisher.select_all()
    return render_template('admin_publisher.html', publishers=all_publishers)
@app.route('/admin/publisher/update/<int:publisher_id>',
           methods=['GET', 'POST'])
def update_publisher(publisher_id):
    """Show and process the edit form for one publisher.

    GET renders the form after verifying the publisher exists; POST applies
    the submitted name/city.  ``Publisher.update_selected`` is treated as
    returning a truthy value on success, ``None`` when the row does not
    exist, and another falsy value on invalid input.
    """
    if request.method == 'POST':
        publisher = Publisher.update_selected(
            publisher_id,
            request.form['name'],
            request.form['city']
        )
        if publisher:
            return redirect(url_for('view_publishers'))
        elif publisher is None:
            flash("Publisher %d does not exist" % publisher_id)
            return redirect(url_for('view_publishers'))
        else:
            # NOTE(review): "266" looks like a typo for a 255-character
            # column limit -- confirm against the model definition.
            flash("Input should be less than 266 characters")
            return render_template('update_publisher.html',
                                   publisher_id=publisher_id)
    else:
        try:
            # Fetch only to verify existence; the original bound the row to
            # a local that was never used (the template gets just the id).
            Publisher.get(Publisher.id == publisher_id)
        except Publisher.DoesNotExist:
            flash("Publisher %d does not exist" % publisher_id)
            return redirect(url_for('view_publishers'))
        return render_template('update_publisher.html',
                               publisher_id=publisher_id)
@app.route('/admin/publisher/add', methods=['GET', 'POST'])
def add_new_publisher():
    """Display the add-publisher form and create a publisher on submit."""
    if request.method == 'POST':
        name = request.form['name']
        city = request.form['city']
        if name and city:
            Publisher.add_publisher(name, city)
            return redirect(url_for('view_publishers'))
        # Fall through to the form with an error message.
        flash("Both fields are required")
    return render_template('add_publisher.html')
@app.route('/admin/publisher/delete/<int:publisher_id>')
def delete_publisher(publisher_id):
    """Delete a publisher and redirect back to the overview page."""
    was_deleted = Publisher.delete_selected(publisher_id)
    if was_deleted:
        flash("Publisher %d has been deleted" % publisher_id)
    else:
        flash("Publisher %d does not exist" % publisher_id)
    return redirect(url_for('view_publishers'))
if __name__ == '__main__':
    # Development entry point: configure the shared db object (imported
    # from models) and start Flask's built-in server.
    db.init(host=os.getenv('DB_HOST', 'localhost'),
            user='development',
            password='devpassword',
            database='devdatabase',
            charset='utf8')
    app.run()
| mit |
pannal/Subliminal.bundle | Contents/Libraries/Shared/babelfish/converters/countryname.py | 89 | 1077 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from . import CountryReverseConverter, CaseInsensitiveDict
from ..country import COUNTRY_MATRIX
from ..exceptions import CountryConvertError, CountryReverseError
class CountryNameConverter(CountryReverseConverter):
    """Convert between country alpha2 codes and English country names.

    Name lookups are case-insensitive (backed by a CaseInsensitiveDict).
    """
    def __init__(self):
        self.codes = set()
        self.to_name = {}
        self.from_name = CaseInsensitiveDict()
        for record in COUNTRY_MATRIX:
            code, name = record.alpha2, record.name
            self.codes.add(name)
            self.to_name[code] = name
            self.from_name[name] = code

    def convert(self, alpha2):
        if alpha2 in self.to_name:
            return self.to_name[alpha2]
        raise CountryConvertError(alpha2)

    def reverse(self, name):
        if name in self.from_name:
            return self.from_name[name]
        raise CountryReverseError(name)
| mit |
simpeg/simpegpf | simpegPF/MagAnalytics.py | 1 | 9270 | from scipy.constants import mu_0
from SimPEG import *
from SimPEG.Utils import kron3, speye, sdiag
import matplotlib.pyplot as plt
def spheremodel(mesh, x0, y0, z0, r):
    """
    Generate model indices for a sphere.

    - (x0, y0, z0): center location of the sphere
    - r: radius of the sphere
    - returns a boolean array over cell centers (True inside the sphere)
    """
    dx = mesh.gridCC[:, 0] - x0
    dy = mesh.gridCC[:, 1] - y0
    dz = mesh.gridCC[:, 2] - z0
    return np.sqrt(dx**2 + dy**2 + dz**2) < r
def MagSphereAnaFun(x, y, z, R, x0, y0, z0, mu1, mu2, H0, flag='total'):
"""
test
Analytic function for Magnetics problem. The set up here is
magnetic sphere in whole-space assuming that the inducing field is oriented in the x-direction.
* (x0,y0,z0)
* (x0, y0, z0 ): is the center location of sphere
* r: is the radius of the sphere
.. math::
\mathbf{H}_0 = H_0\hat{x}
"""
if (~np.size(x)==np.size(y)==np.size(z)):
print "Specify same size of x, y, z"
return
dim = x.shape
x = Utils.mkvc(x)
y = Utils.mkvc(y)
z = Utils.mkvc(z)
ind = np.sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2 ) < R
r = Utils.mkvc(np.sqrt((x-x0)**2+(y-y0)**2+(z-z0)**2 ))
Bx = np.zeros(x.size)
By = np.zeros(x.size)
Bz = np.zeros(x.size)
# Inside of the sphere
rf2 = 3*mu1/(mu2+2*mu1)
if flag is 'total' and any(ind):
Bx[ind] = mu2*H0*(rf2)
elif (flag == 'secondary'):
Bx[ind] = mu2*H0*(rf2)-mu1*H0
By[ind] = 0.
Bz[ind] = 0.
# Outside of the sphere
rf1 = (mu2-mu1)/(mu2+2*mu1)
if (flag == 'total'):
Bx[~ind] = mu1*(H0+H0/r[~ind]**5*(R**3)*rf1*(2*(x[~ind]-x0)**2-(y[~ind]-y0)**2-(z[~ind]-z0)**2))
elif (flag == 'secondary'):
Bx[~ind] = mu1*(H0/r[~ind]**5*(R**3)*rf1*(2*(x[~ind]-x0)**2-(y[~ind]-y0)**2-(z[~ind]-z0)**2))
By[~ind] = mu1*(H0/r[~ind]**5*(R**3)*rf1*(3*(x[~ind]-x0)*(y[~ind]-y0)))
Bz[~ind] = mu1*(H0/r[~ind]**5*(R**3)*rf1*(3*(x[~ind]-x0)*(z[~ind]-z0)))
return np.reshape(Bx, x.shape, order='F'), np.reshape(By, x.shape, order='F'), np.reshape(Bz, x.shape, order='F')
def CongruousMagBC(mesh, Bo, chi):
    """
    Computing boundary condition using Congrous sphere method.
    This is designed for secondary field formulation.

    >> Input

    * mesh: Mesh class
    * Bo: np.array([Box, Boy, Boz]): Primary magnetic flux
    * chi: susceptibility at cell volume

    Returns the dipole flux sampled on the boundary faces, stacked as
    np.r_[Bbcx, Bbcy, Bbcz], plus a scalar constant.

    .. math::

        \\vec{B}(r) = \\frac{\mu_0}{4\pi} \\frac{m}{ \| \\vec{r} - \\vec{r}_0\|^3}[3\hat{m}\cdot\hat{r}-\hat{m}]
    """
    # Cells belonging to the susceptible body.
    ind = chi > 0.
    V = mesh.vol[ind].sum()

    gamma = 1/V*(chi*mesh.vol).sum() # like a mass!

    # Direction cosines of the inducing field.
    Bot = np.sqrt(sum(Bo**2))
    mx = Bo[0]/Bot
    my = Bo[1]/Bot
    mz = Bo[2]/Bot

    # Dipole moment of the equivalent (congruous) sphere.
    mom = 1/mu_0*Bot*gamma*V/(1+gamma/3)
    # Susceptibility-weighted centroid of the body.
    xc = sum(chi[ind]*mesh.gridCC[:,0][ind])/sum(chi[ind])
    yc = sum(chi[ind]*mesh.gridCC[:,1][ind])/sum(chi[ind])
    zc = sum(chi[ind]*mesh.gridCC[:,2][ind])/sum(chi[ind])

    # Logical indices of the six boundary face sets (down/up in x, y, z).
    indxd, indxu, indyd, indyu, indzd, indzu = mesh.faceBoundaryInd

    const = mu_0/(4*np.pi)*mom
    # Distance from the dipole location to each face center in a face grid.
    rfun = lambda x: np.sqrt((x[:,0]-xc)**2 + (x[:,1]-yc)**2 + (x[:,2]-zc)**2)

    # m-hat dot r-hat on the x-boundary faces, then the dipole Bx there.
    mdotrx = (mx*(mesh.gridFx[(indxd|indxu),0]-xc)/rfun(mesh.gridFx[(indxd|indxu),:]) +
              my*(mesh.gridFx[(indxd|indxu),1]-yc)/rfun(mesh.gridFx[(indxd|indxu),:]) +
              mz*(mesh.gridFx[(indxd|indxu),2]-zc)/rfun(mesh.gridFx[(indxd|indxu),:]))
    Bbcx = const/(rfun(mesh.gridFx[(indxd|indxu),:])**3)*(3*mdotrx*(mesh.gridFx[(indxd|indxu),0]-xc)/rfun(mesh.gridFx[(indxd|indxu),:])-mx)

    # Same for the y-boundary faces.
    mdotry = (mx*(mesh.gridFy[(indyd|indyu),0]-xc)/rfun(mesh.gridFy[(indyd|indyu),:]) +
              my*(mesh.gridFy[(indyd|indyu),1]-yc)/rfun(mesh.gridFy[(indyd|indyu),:]) +
              mz*(mesh.gridFy[(indyd|indyu),2]-zc)/rfun(mesh.gridFy[(indyd|indyu),:]))
    Bbcy = const/(rfun(mesh.gridFy[(indyd|indyu),:])**3)*(3*mdotry*(mesh.gridFy[(indyd|indyu),1]-yc)/rfun(mesh.gridFy[(indyd|indyu),:])-my)

    # Same for the z-boundary faces.
    mdotrz = (mx*(mesh.gridFz[(indzd|indzu),0]-xc)/rfun(mesh.gridFz[(indzd|indzu),:]) +
              my*(mesh.gridFz[(indzd|indzu),1]-yc)/rfun(mesh.gridFz[(indzd|indzu),:]) +
              mz*(mesh.gridFz[(indzd|indzu),2]-zc)/rfun(mesh.gridFz[(indzd|indzu),:]))
    Bbcz = const/(rfun(mesh.gridFz[(indzd|indzu),:])**3)*(3*mdotrz*(mesh.gridFz[(indzd|indzu),2]-zc)/rfun(mesh.gridFz[(indzd|indzu),:])-mz)

    return np.r_[Bbcx, Bbcy, Bbcz], (1/gamma-1/(3+gamma))*1/V
def MagSphereAnaFunA(x, y, z, R, xc, yc, zc, chi, Bo, flag):
"""
Computing boundary condition using Congrous sphere method.
This is designed for secondary field formulation.
>> Input
mesh: Mesh class
Bo: np.array([Box, Boy, Boz]): Primary magnetic flux
Chi: susceptibility at cell volume
.. math::
\\vec{B}(r) = \\frac{\mu_0}{4\pi}\\frac{m}{\| \\vec{r}-\\vec{r}_0\|^3}[3\hat{m}\cdot\hat{r}-\hat{m}]
"""
if (~np.size(x)==np.size(y)==np.size(z)):
print "Specify same size of x, y, z"
return
dim = x.shape
x = Utils.mkvc(x)
y = Utils.mkvc(y)
z = Utils.mkvc(z)
Bot = np.sqrt(sum(Bo**2))
mx = Bo[0]/Bot
my = Bo[1]/Bot
mz = Bo[2]/Bot
ind = np.sqrt((x-xc)**2+(y-yc)**2+(z-zc)**2 ) < R
Bx = np.zeros(x.size)
By = np.zeros(x.size)
Bz = np.zeros(x.size)
# Inside of the sphere
rf2 = 3/(chi+3)*(1+chi)
if (flag == 'total'):
Bx[ind] = Bo[0]*(rf2)
By[ind] = Bo[1]*(rf2)
Bz[ind] = Bo[2]*(rf2)
elif (flag == 'secondary'):
Bx[ind] = Bo[0]*(rf2)-Bo[0]
By[ind] = Bo[1]*(rf2)-Bo[1]
Bz[ind] = Bo[2]*(rf2)-Bo[2]
r = Utils.mkvc(np.sqrt((x-xc)**2+(y-yc)**2+(z-zc)**2 ))
V = 4*np.pi*R**3/3
mom = Bot/mu_0*chi/(1+chi/3)*V
const = mu_0/(4*np.pi)*mom
mdotr = (mx*(x[~ind]-xc)/r[~ind] + my*(y[~ind]-yc)/r[~ind] + mz*(z[~ind]-zc)/r[~ind])
Bx[~ind] = const/(r[~ind]**3)*(3*mdotr*(x[~ind]-xc)/r[~ind]-mx)
By[~ind] = const/(r[~ind]**3)*(3*mdotr*(y[~ind]-yc)/r[~ind]-my)
Bz[~ind] = const/(r[~ind]**3)*(3*mdotr*(z[~ind]-zc)/r[~ind]-mz)
return Bx, By, Bz
def IDTtoxyz(Inc, Dec, Btot):
    """
    Convert inclination (Inc) and declination (Dec) -- both in degrees --
    plus total intensity (Btot) of the earth field into x, y, z components.
    """
    inc = Inc/180.*np.pi
    dec = Dec/180.*np.pi
    horizontal = Btot*np.cos(inc)
    Bx = horizontal*np.sin(dec)
    By = horizontal*np.cos(dec)
    Bz = -Btot*np.sin(inc)
    return np.r_[Bx, By, Bz]
def MagSphereFreeSpace(x, y, z, R, xc, yc, zc, chi, Bo):
"""
Computing boundary condition using Congrous sphere method.
This is designed for secondary field formulation.
>> Input
mesh: Mesh class
Bo: np.array([Box, Boy, Boz]): Primary magnetic flux
Chi: susceptibility at cell volume
.. math::
\\vec{B}(r) = \\frac{\mu_0}{4\pi}\\frac{m}{\| \\vec{r}-\\vec{r}_0\|^3}[3\hat{m}\cdot\hat{r}-\hat{m}]
"""
if (~np.size(x)==np.size(y)==np.size(z)):
print "Specify same size of x, y, z"
return
x = Utils.mkvc(x)
y = Utils.mkvc(y)
z = Utils.mkvc(z)
nobs = len(x)
Bot = np.sqrt(sum(Bo**2))
mx = np.ones([nobs]) * Bo[0,0] * R**3 / 3. * chi
my = np.ones([nobs]) * Bo[0,1] * R**3 / 3. * chi
mz = np.ones([nobs]) * Bo[0,2] * R**3 / 3. * chi
M = np.c_[mx, my, mz]
rx = (x - xc)
ry = (y - yc)
rz = (zc - z)
rvec = np.c_[rx, ry, rz]
r = np.sqrt((rx)**2+(ry)**2+(rz)**2 )
B = -Utils.sdiag(1./r**3)*M + Utils.sdiag((3 * np.sum(M*rvec,axis=1))/r**5)*rvec
Bx = B[:,0]
By = B[:,1]
Bz = B[:,2]
return Bx, By, Bz
if __name__ == '__main__':
    # Self-check: compare the congruous-sphere boundary condition against
    # the analytic sphere solution on a padded tensor mesh.
    hxind = [(0,25,1.3),(21, 12.5),(0,25,1.3)]
    hyind = [(0,25,1.3),(21, 12.5),(0,25,1.3)]
    hzind = [(0,25,1.3),(20, 12.5),(0,25,1.3)]
    # hx, hy, hz = Utils.meshTensors(hxind, hyind, hzind)
    M3 = Mesh.TensorMesh([hxind, hyind, hzind], "CCC")
    indxd, indxu, indyd, indyu, indzd, indzu = M3.faceBoundaryInd
    mu0 = 4*np.pi*1e-7
    # Background is non-susceptible; a small susceptible sphere at origin.
    chibkg = 0.
    chiblk = 0.01
    chi = np.ones(M3.nC)*chibkg
    sph_ind = spheremodel(M3, 0, 0, 0, 100)
    chi[sph_ind] = chiblk
    mu = (1.+chi)*mu0
    Bbc, const = CongruousMagBC(M3, np.array([1., 0., 0.]), chi)

    flag = 'secondary'
    Box = 1.
    H0 = Box/mu_0
    # Analytic fluxes on each boundary-face set (x, y and z faces).
    Bbcxx, Bbcxy, Bbcxz = MagSphereAnaFun(M3.gridFx[(indxd|indxu),0], M3.gridFx[(indxd|indxu),1], M3.gridFx[(indxd|indxu),2], 100, 0., 0., 0., mu_0, mu_0*(1+chiblk), H0, flag)
    Bbcyx, Bbcyy, Bbcyz = MagSphereAnaFun(M3.gridFy[(indyd|indyu),0], M3.gridFy[(indyd|indyu),1], M3.gridFy[(indyd|indyu),2], 100, 0., 0., 0., mu_0, mu_0*(1+chiblk), H0, flag)
    Bbczx, Bbczy, Bbczz = MagSphereAnaFun(M3.gridFz[(indzd|indzu),0], M3.gridFz[(indzd|indzu),1], M3.gridFz[(indzd|indzu),2], 100, 0., 0., 0., mu_0, mu_0*(1+chiblk), H0, flag)
    # Keep only the face-normal component on each face set.
    Bbc_ana = np.r_[Bbcxx, Bbcyy, Bbczz]

    # fig, ax = plt.subplots(1,1, figsize = (10, 10))
    # ax.plot(Bbc_ana)
    # ax.plot(Bbc)
    # plt.show()

    # Relative misfit between the numeric BC and the analytic solution.
    err = np.linalg.norm(Bbc-Bbc_ana)/np.linalg.norm(Bbc_ana)
    if err < 0.1:
        print 'Mag Boundary computation is valid, err = ', err
    else:
        print 'Mag Boundary computation is wrong!!, err = ', err
    pass
| mit |
ubirch/aws-tools | virtual-env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 482 | 2055 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.10.4'
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Swallow every record: a library must stay silent unless the
            # application configures logging itself.
            pass

logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    pkg_logger = logging.getLogger(__name__)
    pkg_logger.addHandler(stderr_handler)
    pkg_logger.setLevel(level)
    pkg_logger.debug('Added a stderr logging handler to logger: %s' % __name__)
    return stderr_handler
# ... Clean up.
del NullHandler


import warnings
# append=True keeps any warning filters the application installed earlier.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: warning class to silence; defaults to the package's
        ``HTTPWarning`` so that every urllib3 warning is suppressed.
    """
    warnings.simplefilter('ignore', category)
| apache-2.0 |
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/python/test/test_shellcomp.py | 40 | 21795 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.python._shellcomp
"""
import sys
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import _shellcomp, usage, reflect
from twisted.python.usage import Completions, Completer, CompleteFiles
from twisted.python.usage import CompleteList
class ZshScriptTestMeta(type):
    """
    Metaclass of ZshScriptTestMixin.

    For every (cmdName, optionsFQPN) pair in the class's ``generateFor``
    attribute, a ``test_genZshFunction_<cmdName>`` method is synthesized.
    """
    def __new__(cls, name, bases, attrs):
        def makeTest(cmdName, optionsFQPN):
            # One closure per pair so each generated test binds its own
            # cmdName/optionsFQPN (avoids late-binding in the loop below).
            def runTest(self):
                return test_genZshFunction(self, cmdName, optionsFQPN)
            return runTest

        # add test_ methods to the class for each script
        # we are testing.
        if 'generateFor' in attrs:
            for cmdName, optionsFQPN in attrs['generateFor']:
                test = makeTest(cmdName, optionsFQPN)
                attrs['test_genZshFunction_' + cmdName] = test

        return type.__new__(cls, name, bases, attrs)
class ZshScriptTestMixin(object):
"""
Integration test helper to show that C{usage.Options} classes can have zsh
completion functions generated for them without raising errors.
In your subclasses set a class variable like so:
# | cmd name | Fully Qualified Python Name of Options class |
#
generateFor = [('conch', 'twisted.conch.scripts.conch.ClientOptions'),
('twistd', 'twisted.scripts.twistd.ServerOptions'),
]
Each package that contains Twisted scripts should contain one TestCase
subclass which also inherits from this mixin, and contains a C{generateFor}
list appropriate for the scripts in that package.
"""
__metaclass__ = ZshScriptTestMeta
def test_genZshFunction(self, cmdName, optionsFQPN):
"""
Generate completion functions for given twisted command - no errors
should be raised
@type cmdName: C{str}
@param cmdName: The name of the command-line utility e.g. 'twistd'
@type optionsFQPN: C{str}
@param optionsFQPN: The Fully Qualified Python Name of the C{Options}
class to be tested.
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
# some scripts won't import or instantiate because of missing
# dependencies (PyCrypto, etc) so we have to skip them.
try:
o = reflect.namedAny(optionsFQPN)()
except Exception, e:
raise unittest.SkipTest("Couldn't import or instantiate "
"Options class: %s" % (e,))
try:
o.parseOptions(["", "--_shell-completion", "zsh:2"])
except ImportError, e:
# this can happen for commands which don't have all
# the necessary dependencies installed. skip test.
# skip
raise unittest.SkipTest("ImportError calling parseOptions(): %s", (e,))
except SystemExit:
pass # expected
else:
self.fail('SystemExit not raised')
outputFile.seek(0)
# test that we got some output
self.assertEqual(1, len(outputFile.read(1)))
outputFile.seek(0)
outputFile.truncate()
# now, if it has sub commands, we have to test those too
if hasattr(o, 'subCommands'):
for (cmd, short, parser, doc) in o.subCommands:
try:
o.parseOptions([cmd, "", "--_shell-completion",
"zsh:3"])
except ImportError, e:
# this can happen for commands which don't have all
# the necessary dependencies installed. skip test.
raise unittest.SkipTest("ImportError calling parseOptions() "
"on subcommand: %s", (e,))
except SystemExit:
pass # expected
else:
self.fail('SystemExit not raised')
outputFile.seek(0)
# test that we got some output
self.assertEqual(1, len(outputFile.read(1)))
outputFile.seek(0)
outputFile.truncate()
# flushed because we don't want DeprecationWarnings to be printed when
# running these test cases.
self.flushWarnings()
class ZshTestCase(unittest.TestCase):
"""
Tests for zsh completion code
"""
def test_accumulateMetadata(self):
"""
Are `compData' attributes you can place on Options classes
picked up correctly?
"""
opts = FighterAceExtendedOptions()
ag = _shellcomp.ZshArgumentsGenerator(opts, 'ace', 'dummy_value')
descriptions = FighterAceOptions.compData.descriptions.copy()
descriptions.update(FighterAceExtendedOptions.compData.descriptions)
self.assertEqual(ag.descriptions, descriptions)
self.assertEqual(ag.multiUse,
set(FighterAceOptions.compData.multiUse))
self.assertEqual(ag.mutuallyExclusive,
FighterAceOptions.compData.mutuallyExclusive)
optActions = FighterAceOptions.compData.optActions.copy()
optActions.update(FighterAceExtendedOptions.compData.optActions)
self.assertEqual(ag.optActions, optActions)
self.assertEqual(ag.extraActions,
FighterAceOptions.compData.extraActions)
def test_mutuallyExclusiveCornerCase(self):
"""
Exercise a corner-case of ZshArgumentsGenerator.makeExcludesDict()
where the long option name already exists in the `excludes` dict being
built.
"""
class OddFighterAceOptions(FighterAceExtendedOptions):
# since "fokker", etc, are already defined as mutually-
# exclusive on the super-class, defining them again here forces
# the corner-case to be exercised.
optFlags = [['anatra', None,
'Select the Anatra DS as your dogfighter aircraft']]
compData = Completions(
mutuallyExclusive=[['anatra', 'fokker', 'albatros',
'spad', 'bristol']])
opts = OddFighterAceOptions()
ag = _shellcomp.ZshArgumentsGenerator(opts, 'ace', 'dummy_value')
expected = {
'albatros': set(['anatra', 'b', 'bristol', 'f',
'fokker', 's', 'spad']),
'anatra': set(['a', 'albatros', 'b', 'bristol',
'f', 'fokker', 's', 'spad']),
'bristol': set(['a', 'albatros', 'anatra', 'f',
'fokker', 's', 'spad']),
'fokker': set(['a', 'albatros', 'anatra', 'b',
'bristol', 's', 'spad']),
'spad': set(['a', 'albatros', 'anatra', 'b',
'bristol', 'f', 'fokker'])}
self.assertEqual(ag.excludes, expected)
def test_accumulateAdditionalOptions(self):
"""
We pick up options that are only defined by having an
appropriately named method on your Options class,
e.g. def opt_foo(self, foo)
"""
opts = FighterAceExtendedOptions()
ag = _shellcomp.ZshArgumentsGenerator(opts, 'ace', 'dummy_value')
self.assertIn('nocrash', ag.flagNameToDefinition)
self.assertIn('nocrash', ag.allOptionsNameToDefinition)
self.assertIn('difficulty', ag.paramNameToDefinition)
self.assertIn('difficulty', ag.allOptionsNameToDefinition)
def test_verifyZshNames(self):
"""
Using a parameter/flag name that doesn't exist
will raise an error
"""
class TmpOptions(FighterAceExtendedOptions):
# Note typo of detail
compData = Completions(optActions={'detaill' : None})
self.assertRaises(ValueError, _shellcomp.ZshArgumentsGenerator,
TmpOptions(), 'ace', 'dummy_value')
class TmpOptions2(FighterAceExtendedOptions):
# Note that 'foo' and 'bar' are not real option
# names defined in this class
compData = Completions(
mutuallyExclusive=[("foo", "bar")])
self.assertRaises(ValueError, _shellcomp.ZshArgumentsGenerator,
TmpOptions2(), 'ace', 'dummy_value')
def test_zshCode(self):
"""
Generate a completion function, and test the textual output
against a known correct output
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
self.patch(sys, 'argv', ["silly", "", "--_shell-completion", "zsh:2"])
opts = SimpleProgOptions()
self.assertRaises(SystemExit, opts.parseOptions)
self.assertEqual(testOutput1, outputFile.getvalue())
def test_zshCodeWithSubs(self):
"""
Generate a completion function with subcommands,
and test the textual output against a known correct output
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
self.patch(sys, 'argv', ["silly2", "", "--_shell-completion", "zsh:2"])
opts = SimpleProgWithSubcommands()
self.assertRaises(SystemExit, opts.parseOptions)
self.assertEqual(testOutput2, outputFile.getvalue())
def test_incompleteCommandLine(self):
"""
Completion still happens even if a command-line is given
that would normally throw UsageError.
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
opts = FighterAceOptions()
self.assertRaises(SystemExit, opts.parseOptions,
["--fokker", "server", "--unknown-option",
"--unknown-option2",
"--_shell-completion", "zsh:5"])
outputFile.seek(0)
# test that we got some output
self.assertEqual(1, len(outputFile.read(1)))
def test_incompleteCommandLine_case2(self):
"""
Completion still happens even if a command-line is given
that would normally throw UsageError.
The existance of --unknown-option prior to the subcommand
will break subcommand detection... but we complete anyway
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
opts = FighterAceOptions()
self.assertRaises(SystemExit, opts.parseOptions,
["--fokker", "--unknown-option", "server",
"--list-server", "--_shell-completion", "zsh:5"])
outputFile.seek(0)
# test that we got some output
self.assertEqual(1, len(outputFile.read(1)))
outputFile.seek(0)
outputFile.truncate()
def test_incompleteCommandLine_case3(self):
"""
Completion still happens even if a command-line is given
that would normally throw UsageError.
Break subcommand detection in a different way by providing
an invalid subcommand name.
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
opts = FighterAceOptions()
self.assertRaises(SystemExit, opts.parseOptions,
["--fokker", "unknown-subcommand",
"--list-server", "--_shell-completion", "zsh:4"])
outputFile.seek(0)
# test that we got some output
self.assertEqual(1, len(outputFile.read(1)))
def test_skipSubcommandList(self):
"""
Ensure the optimization which skips building the subcommand list
under certain conditions isn't broken.
"""
outputFile = StringIO()
self.patch(usage.Options, '_shellCompFile', outputFile)
opts = FighterAceOptions()
self.assertRaises(SystemExit, opts.parseOptions,
["--alba", "--_shell-completion", "zsh:2"])
outputFile.seek(0)
# test that we got some output
self.assertEqual(1, len(outputFile.read(1)))
def test_poorlyDescribedOptMethod(self):
"""
Test corner case fetching an option description from a method docstring
"""
opts = FighterAceOptions()
argGen = _shellcomp.ZshArgumentsGenerator(opts, 'ace', None)
descr = argGen.getDescription('silly')
# docstring for opt_silly is useless so it should just use the
# option name as the description
self.assertEqual(descr, 'silly')
def test_brokenActions(self):
"""
A C{Completer} with repeat=True may only be used as the
last item in the extraActions list.
"""
class BrokenActions(usage.Options):
compData = usage.Completions(
extraActions=[usage.Completer(repeat=True),
usage.Completer()]
)
outputFile = StringIO()
opts = BrokenActions()
self.patch(opts, '_shellCompFile', outputFile)
self.assertRaises(ValueError, opts.parseOptions,
["", "--_shell-completion", "zsh:2"])
def test_optMethodsDontOverride(self):
"""
opt_* methods on Options classes should not override the
data provided in optFlags or optParameters.
"""
class Options(usage.Options):
optFlags = [['flag', 'f', 'A flag']]
optParameters = [['param', 'p', None, 'A param']]
def opt_flag(self):
""" junk description """
def opt_param(self, param):
""" junk description """
opts = Options()
argGen = _shellcomp.ZshArgumentsGenerator(opts, 'ace', None)
self.assertEqual(argGen.getDescription('flag'), 'A flag')
self.assertEqual(argGen.getDescription('param'), 'A param')
class EscapeTestCase(unittest.TestCase):
    def test_escape(self):
        """
        Verify _shellcomp.escape() function
        """
        esc = _shellcomp.escape

        # A string needing only single-quoting.
        test = "$"
        self.assertEqual(esc(test), "'$'")

        # A string mixing quotes/backslash/backtick forces double-quoting
        # with the zsh-special characters backslash-escaped.
        test = 'A--\'$"\\`--B'
        self.assertEqual(esc(test), '"A--\'\\$\\"\\\\\\`--B"')
class CompleterNotImplementedTestCase(unittest.TestCase):
    """
    Test that using an unknown shell constant with SubcommandAction
    raises NotImplementedError

    The other Completer() subclasses are tested in test_usage.py
    """
    def test_unknownShell(self):
        """
        Using an unknown shellType should raise NotImplementedError
        """
        action = _shellcomp.SubcommandAction()

        self.assertRaises(NotImplementedError, action._shellCode,
                          None, "bad_shell_type")
class FighterAceServerOptions(usage.Options):
    """
    Options for FighterAce 'server' subcommand
    """
    optFlags = [['list-server', None,
                 'List this server with the online FighterAce network']]
    optParameters = [['packets-per-second', None,
                      'Number of update packets to send per second', '20']]
class FighterAceOptions(usage.Options):
"""
Command-line options for an imaginary `Fighter Ace` game
"""
optFlags = [['fokker', 'f',
'Select the Fokker Dr.I as your dogfighter aircraft'],
['albatros', 'a',
'Select the Albatros D-III as your dogfighter aircraft'],
['spad', 's',
'Select the SPAD S.VII as your dogfighter aircraft'],
['bristol', 'b',
'Select the Bristol Scout as your dogfighter aircraft'],
['physics', 'p',
'Enable secret Twisted physics engine'],
['jam', 'j',
'Enable a small chance that your machine guns will jam!'],
['verbose', 'v',
'Verbose logging (may be specified more than once)'],
]
optParameters = [['pilot-name', None, "What's your name, Ace?",
'Manfred von Richthofen'],
['detail', 'd',
'Select the level of rendering detail (1-5)', '3'],
]
subCommands = [['server', None, FighterAceServerOptions,
'Start FighterAce game-server.'],
]
compData = Completions(
descriptions={'physics' : 'Twisted-Physics',
'detail' : 'Rendering detail level'},
multiUse=['verbose'],
mutuallyExclusive=[['fokker', 'albatros', 'spad',
'bristol']],
optActions={'detail' : CompleteList(['1' '2' '3'
'4' '5'])},
extraActions=[CompleteFiles(descr='saved game file to load')]
)
def opt_silly(self):
# A silly option which nobody can explain
""" """
class FighterAceExtendedOptions(FighterAceOptions):
"""
Extend the options and zsh metadata provided by FighterAceOptions.
_shellcomp must accumulate options and metadata from all classes in the
hiearchy so this is important to test.
"""
optFlags = [['no-stalls', None,
'Turn off the ability to stall your aircraft']]
optParameters = [['reality-level', None,
'Select the level of physics reality (1-5)', '5']]
compData = Completions(
descriptions={'no-stalls' : 'Can\'t stall your plane'},
optActions={'reality-level' :
Completer(descr='Physics reality level')}
)
def opt_nocrash(self):
"""
Select that you can't crash your plane
"""
def opt_difficulty(self, difficulty):
"""
How tough are you? (1-10)
"""
def _accuracyAction():
    """Build the CompleteList action used for the --accuracy option."""
    # add tick marks just to exercise quoting
    return CompleteList(['1', '2', '3'], descr='Accuracy\'`?')
class SimpleProgOptions(usage.Options):
"""
Command-line options for a `Silly` imaginary program
"""
optFlags = [['color', 'c', 'Turn on color output'],
['gray', 'g', 'Turn on gray-scale output'],
['verbose', 'v',
'Verbose logging (may be specified more than once)'],
]
optParameters = [['optimization', None, '5',
'Select the level of optimization (1-5)'],
['accuracy', 'a', '3',
'Select the level of accuracy (1-3)'],
]
compData = Completions(
descriptions={'color' : 'Color on',
'optimization' : 'Optimization level'},
multiUse=['verbose'],
mutuallyExclusive=[['color', 'gray']],
optActions={'optimization' : CompleteList(['1', '2', '3', '4', '5'],
descr='Optimization?'),
'accuracy' : _accuracyAction},
extraActions=[CompleteFiles(descr='output file')]
)
def opt_X(self):
"""
usage.Options does not recognize single-letter opt_ methods
"""
class SimpleProgSub1(usage.Options):
    # Subcommand options used by SimpleProgWithSubcommands ('sub1').
    optFlags = [['sub-opt', 's', 'Sub Opt One']]
class SimpleProgSub2(usage.Options):
    # Subcommand options used by SimpleProgWithSubcommands ('sub2').
    optFlags = [['sub-opt', 's', 'Sub Opt Two']]
class SimpleProgWithSubcommands(SimpleProgOptions):
    """Variant of SimpleProgOptions that also declares two subcommands."""
    optFlags = [['some-option'],
                ['other-option', 'o']]

    optParameters = [['some-param'],
                     ['other-param', 'p'],
                     ['another-param', 'P', 'Yet Another Param']]

    subCommands = [ ['sub1', None, SimpleProgSub1, 'Sub Command 1'],
                    ['sub2', None, SimpleProgSub2, 'Sub Command 2']]
testOutput1 = """#compdef silly
_arguments -s -A "-*" \\
':output file (*):_files -g "*"' \\
"(--accuracy)-a[Select the level of accuracy (1-3)]:Accuracy'\`?:(1 2 3)" \\
"(-a)--accuracy=[Select the level of accuracy (1-3)]:Accuracy'\`?:(1 2 3)" \\
'(--color --gray -g)-c[Color on]' \\
'(--gray -c -g)--color[Color on]' \\
'(--color --gray -c)-g[Turn on gray-scale output]' \\
'(--color -c -g)--gray[Turn on gray-scale output]' \\
'--help[Display this help and exit.]' \\
'--optimization=[Optimization level]:Optimization?:(1 2 3 4 5)' \\
'*-v[Verbose logging (may be specified more than once)]' \\
'*--verbose[Verbose logging (may be specified more than once)]' \\
'--version[Display Twisted version and exit.]' \\
&& return 0
"""
# with sub-commands
testOutput2 = """#compdef silly2
_arguments -s -A "-*" \\
'*::subcmd:->subcmd' \\
':output file (*):_files -g "*"' \\
"(--accuracy)-a[Select the level of accuracy (1-3)]:Accuracy'\`?:(1 2 3)" \\
"(-a)--accuracy=[Select the level of accuracy (1-3)]:Accuracy'\`?:(1 2 3)" \\
'(--another-param)-P[another-param]:another-param:_files' \\
'(-P)--another-param=[another-param]:another-param:_files' \\
'(--color --gray -g)-c[Color on]' \\
'(--gray -c -g)--color[Color on]' \\
'(--color --gray -c)-g[Turn on gray-scale output]' \\
'(--color -c -g)--gray[Turn on gray-scale output]' \\
'--help[Display this help and exit.]' \\
'--optimization=[Optimization level]:Optimization?:(1 2 3 4 5)' \\
'(--other-option)-o[other-option]' \\
'(-o)--other-option[other-option]' \\
'(--other-param)-p[other-param]:other-param:_files' \\
'(-p)--other-param=[other-param]:other-param:_files' \\
'--some-option[some-option]' \\
'--some-param=[some-param]:some-param:_files' \\
'*-v[Verbose logging (may be specified more than once)]' \\
'*--verbose[Verbose logging (may be specified more than once)]' \\
'--version[Display Twisted version and exit.]' \\
&& return 0
local _zsh_subcmds_array
_zsh_subcmds_array=(
"sub1:Sub Command 1"
"sub2:Sub Command 2"
)
_describe "sub-command" _zsh_subcmds_array
"""
| gpl-3.0 |
40223138/2015cd_0505 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py | 858 | 2304 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and check that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
    # Single shared warn site: printed once under the 'default' filter or
    # three times under 'always'; exercised by TestWarnings.test_function.
    warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
    # unittest warnings will be printed at most once per type (max one message
    # for the fail* methods, and one for the assert* methods)
    def test_assert(self):
        # The *deprecated* assertEquals alias is used on purpose: this module
        # exists to emit warnings for test_runner to capture -- do not
        # "modernize" these calls.
        self.assertEquals(2+2, 4)
        self.assertEquals(2*2, 4)
        self.assertEquals(2**2, 4)

    def test_fail(self):
        # Deliberate use of the deprecated failUnless alias (see above).
        self.failUnless(1)
        self.failUnless(True)

    def test_other_unittest(self):
        self.assertAlmostEqual(2+2, 4)
        self.assertNotAlmostEqual(4+4, 2)

    # these warnings are normally silenced, but they are printed in unittest
    def test_deprecation(self):
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)
        warnings.warn('dw', DeprecationWarning)

    def test_import(self):
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)
        warnings.warn('iw', ImportWarning)

    # user warnings should always be printed
    def test_warning(self):
        warnings.warn('uw')
        warnings.warn('uw')
        warnings.warn('uw')

    # these warnings come from the same place; they will be printed
    # only once by default or three times if the 'always' filter is used
    def test_function(self):
        warnfun()
        warnfun()
        warnfun()
if __name__ == '__main__':
    # Collect (rather than print) warnings raised outside of the test
    # runner itself so they can be echoed after the run.
    with warnings.catch_warnings(record=True) as ws:
        # if an arg is provided pass it to unittest.main as 'warnings';
        # sys.argv.pop() also removes it so unittest does not try to
        # interpret it as a test name.
        if len(sys.argv) == 2:
            unittest.main(exit=False, warnings=sys.argv.pop())
        else:
            unittest.main(exit=False)
    # print all the warning messages collected while the tests ran
    for w in ws:
        print(w.message)
| agpl-3.0 |
selboo/flask | flask/app.py | 4 | 77447 | # -*- coding: utf-8 -*-
"""
flask.app
~~~~~~~~~
This module implements the central WSGI application object.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock
from datetime import timedelta
from itertools import chain
from functools import update_wrapper
from werkzeug.datastructures import ImmutableDict
from werkzeug.routing import Map, Rule, RequestRedirect, BuildError
from werkzeug.exceptions import HTTPException, InternalServerError, \
MethodNotAllowed, BadRequest
from .helpers import _PackageBoundObject, url_for, get_flashed_messages, \
locked_cached_property, _endpoint_from_view_func, find_package
from . import json
from .wrappers import Request, Response
from .config import ConfigAttribute, Config
from .ctx import RequestContext, AppContext, _AppCtxGlobals
from .globals import _request_ctx_stack, request, session, g
from .sessions import SecureCookieSessionInterface
from .module import blueprint_is_module
from .templating import DispatchingJinjaLoader, Environment, \
_default_template_ctx_processor
from .signals import request_started, request_finished, got_request_exception, \
request_tearing_down, appcontext_tearing_down
from ._compat import reraise, string_types, text_type, integer_types
# a lock used for logger initialization
_logger_lock = Lock()
def _make_timedelta(value):
if not isinstance(value, timedelta):
return timedelta(seconds=value)
return value
def setupmethod(f):
    """Decorator for Flask methods that must run before the first request.

    In debug mode the returned wrapper raises an :exc:`AssertionError`
    when the wrapped setup method is invoked after the application has
    already handled a request; otherwise it simply forwards the call.
    """
    def checked_setup(self, *args, **kwargs):
        # Only complain in debug mode; in production late setup calls
        # are silently allowed, matching historic behavior.
        too_late = self.debug and self._got_first_request
        if not too_late:
            return f(self, *args, **kwargs)
        raise AssertionError('A setup function was called after the '
            'first request was handled. This usually indicates a bug '
            'in the application where a module was not imported '
            'and decorators or other functionality was called too late.\n'
            'To fix this make sure to import all your view modules, '
            'database models and everything related at a central place '
            'before the application starts serving requests.')
    # preserve the wrapped function's metadata (__name__, __doc__, ...)
    return update_wrapper(checked_setup, f)
class Flask(_PackageBoundObject):
"""The flask object implements a WSGI application and acts as the central
object. It is passed the name of the module or package of the
application. Once it is created it will act as a central registry for
the view functions, the URL rules, template configuration and much more.
The name of the package is used to resolve resources from inside the
package or the folder the module is contained in depending on if the
package parameter resolves to an actual python package (a folder with
an `__init__.py` file inside) or a standard module (just a `.py` file).
For more information about resource loading, see :func:`open_resource`.
Usually you create a :class:`Flask` instance in your main module or
in the `__init__.py` file of your package like this::
from flask import Flask
app = Flask(__name__)
.. admonition:: About the First Parameter
The idea of the first parameter is to give Flask an idea what
belongs to your application. This name is used to find resources
on the file system, can be used by extensions to improve debugging
information and a lot more.
So it's important what you provide there. If you are using a single
module, `__name__` is always the correct value. If you however are
using a package, it's usually recommended to hardcode the name of
your package there.
For example if your application is defined in `yourapplication/app.py`
you should create it with one of the two versions below::
app = Flask('yourapplication')
app = Flask(__name__.split('.')[0])
Why is that? The application will work even with `__name__`, thanks
to how resources are looked up. However it will make debugging more
painful. Certain extensions can make assumptions based on the
import name of your application. For example the Flask-SQLAlchemy
extension will look for the code in your application that triggered
an SQL query in debug mode. If the import name is not properly set
up, that debugging information is lost. (For example it would only
pick up SQL queries in `yourapplication.app` and not
`yourapplication.views.frontend`)
.. versionadded:: 0.7
The `static_url_path`, `static_folder`, and `template_folder`
parameters were added.
.. versionadded:: 0.8
The `instance_path` and `instance_relative_config` parameters were
added.
:param import_name: the name of the application package
:param static_url_path: can be used to specify a different path for the
static files on the web. Defaults to the name
of the `static_folder` folder.
:param static_folder: the folder with static files that should be served
at `static_url_path`. Defaults to the ``'static'``
folder in the root path of the application.
:param template_folder: the folder that contains the templates that should
be used by the application. Defaults to
``'templates'`` folder in the root path of the
application.
:param instance_path: An alternative instance path for the application.
By default the folder ``'instance'`` next to the
package or module is assumed to be the instance
path.
:param instance_relative_config: if set to `True` relative filenames
for loading the config are assumed to
be relative to the instance path instead
of the application root.
"""
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class = Response
#: The class that is used for the :data:`~flask.g` instance.
#:
#: Example use cases for a custom class:
#:
#: 1. Store arbitrary attributes on flask.g.
#: 2. Add a property for lazy per-request database connectors.
#: 3. Return None instead of AttributeError on expected attributes.
#: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
#:
#: In Flask 0.9 this property was called `request_globals_class` but it
#: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
#: flask.g object is not application context scoped.
#:
#: .. versionadded:: 0.10
app_ctx_globals_class = _AppCtxGlobals
# Backwards compatibility support
def _get_request_globals_class(self):
return self.app_ctx_globals_class
def _set_request_globals_class(self, value):
from warnings import warn
warn(DeprecationWarning('request_globals_class attribute is now '
'called app_ctx_globals_class'))
self.app_ctx_globals_class = value
request_globals_class = property(_get_request_globals_class,
_set_request_globals_class)
del _get_request_globals_class, _set_request_globals_class
#: The class that is used for the ``config`` attribute of this app.
#: Defaults to :class:`~flask.Config`.
#:
#: Example use cases for a custom class:
#:
#: 1. Default values for certain config options.
#: 2. Access to config values through attributes in addition to keys.
#:
#: .. versionadded:: 1.0
config_class = Config
#: The debug flag. Set this to `True` to enable debugging of the
#: application. In debug mode the debugger will kick in when an unhandled
#: exception occurs and the integrated server will automatically reload
#: the application if changes in the code are detected.
#:
#: This attribute can also be configured from the config with the `DEBUG`
#: configuration key. Defaults to `False`.
debug = ConfigAttribute('DEBUG')
#: The testing flag. Set this to `True` to enable the test mode of
#: Flask extensions (and in the future probably also Flask itself).
#: For example this might activate unittest helpers that have an
#: additional runtime cost which should not be enabled by default.
#:
#: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
#: default it's implicitly enabled.
#:
#: This attribute can also be configured from the config with the
#: `TESTING` configuration key. Defaults to `False`.
testing = ConfigAttribute('TESTING')
#: If a secret key is set, cryptographic components can use this to
#: sign cookies and other things. Set this to a complex random value
#: when you want to use the secure cookie for instance.
#:
#: This attribute can also be configured from the config with the
#: `SECRET_KEY` configuration key. Defaults to `None`.
secret_key = ConfigAttribute('SECRET_KEY')
#: The secure cookie uses this for the name of the session cookie.
#:
#: This attribute can also be configured from the config with the
#: `SESSION_COOKIE_NAME` configuration key. Defaults to ``'session'``
session_cookie_name = ConfigAttribute('SESSION_COOKIE_NAME')
#: A :class:`~datetime.timedelta` which is used to set the expiration
#: date of a permanent session. The default is 31 days which makes a
#: permanent session survive for roughly one month.
#:
#: This attribute can also be configured from the config with the
#: `PERMANENT_SESSION_LIFETIME` configuration key. Defaults to
#: ``timedelta(days=31)``
permanent_session_lifetime = ConfigAttribute('PERMANENT_SESSION_LIFETIME',
get_converter=_make_timedelta)
#: Enable this if you want to use the X-Sendfile feature. Keep in
#: mind that the server has to support this. This only affects files
#: sent with the :func:`send_file` method.
#:
#: .. versionadded:: 0.2
#:
#: This attribute can also be configured from the config with the
#: `USE_X_SENDFILE` configuration key. Defaults to `False`.
use_x_sendfile = ConfigAttribute('USE_X_SENDFILE')
#: The name of the logger to use. By default the logger name is the
#: package name passed to the constructor.
#:
#: .. versionadded:: 0.4
logger_name = ConfigAttribute('LOGGER_NAME')
#: Enable the deprecated module support? This is active by default
#: in 0.7 but will be changed to False in 0.8. With Flask 1.0 modules
#: will be removed in favor of Blueprints
enable_modules = True
#: The logging format used for the debug logger. This is only used when
#: the application is in debug mode, otherwise the attached logging
#: handler does the formatting.
#:
#: .. versionadded:: 0.3
debug_log_format = (
'-' * 80 + '\n' +
'%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
'%(message)s\n' +
'-' * 80
)
#: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`.
#:
#: .. versionadded:: 0.10
json_encoder = json.JSONEncoder
#: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`.
#:
#: .. versionadded:: 0.10
json_decoder = json.JSONDecoder
#: Options that are passed directly to the Jinja2 environment.
jinja_options = ImmutableDict(
extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_']
)
#: Default configuration parameters.
default_config = ImmutableDict({
'DEBUG': False,
'TESTING': False,
'PROPAGATE_EXCEPTIONS': None,
'PRESERVE_CONTEXT_ON_EXCEPTION': None,
'SECRET_KEY': None,
'PERMANENT_SESSION_LIFETIME': timedelta(days=31),
'USE_X_SENDFILE': False,
'LOGGER_NAME': None,
'SERVER_NAME': None,
'APPLICATION_ROOT': None,
'SESSION_COOKIE_NAME': 'session',
'SESSION_COOKIE_DOMAIN': None,
'SESSION_COOKIE_PATH': None,
'SESSION_COOKIE_HTTPONLY': True,
'SESSION_COOKIE_SECURE': False,
'SESSION_REFRESH_EACH_REQUEST': True,
'MAX_CONTENT_LENGTH': None,
'SEND_FILE_MAX_AGE_DEFAULT': 12 * 60 * 60, # 12 hours
'TRAP_BAD_REQUEST_ERRORS': False,
'TRAP_HTTP_EXCEPTIONS': False,
'PREFERRED_URL_SCHEME': 'http',
'JSON_AS_ASCII': True,
'JSON_SORT_KEYS': True,
'JSONIFY_PRETTYPRINT_REGULAR': True,
})
#: The rule object to use for URL rules created. This is used by
#: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
#:
#: .. versionadded:: 0.7
url_rule_class = Rule
#: the test client that is used with when `test_client` is used.
#:
#: .. versionadded:: 0.7
test_client_class = None
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface = SecureCookieSessionInterface()
def __init__(self, import_name, static_path=None, static_url_path=None,
             static_folder='static', template_folder='templates',
             instance_path=None, instance_relative_config=False):
    """Initialize the application instance and all of its per-instance
    registries (view functions, error handlers, request hooks,
    blueprints, the URL map).  See the class docstring for the meaning
    of each parameter; ``static_path`` is a deprecated alias for
    ``static_url_path``.
    """
    _PackageBoundObject.__init__(self, import_name,
                                 template_folder=template_folder)
    # `static_path` is the pre-0.7 name for `static_url_path`: accept it
    # but warn the caller (stacklevel=2 points the warning at the caller).
    if static_path is not None:
        from warnings import warn
        warn(DeprecationWarning('static_path is now called '
                                'static_url_path'), stacklevel=2)
        static_url_path = static_path
    if static_url_path is not None:
        self.static_url_path = static_url_path
    if static_folder is not None:
        self.static_folder = static_folder
    # An explicitly supplied instance path must be absolute; otherwise
    # one is derived from the package/module location.
    if instance_path is None:
        instance_path = self.auto_find_instance_path()
    elif not os.path.isabs(instance_path):
        raise ValueError('If an instance path is provided it must be '
                         'absolute. A relative path was given instead.')

    #: Holds the path to the instance folder.
    #:
    #: .. versionadded:: 0.8
    self.instance_path = instance_path

    #: The configuration dictionary as :class:`Config`.  This behaves
    #: exactly like a regular dictionary but supports additional methods
    #: to load a config from files.
    self.config = self.make_config(instance_relative_config)

    # Prepare the deferred setup of the logger; the logger itself is
    # created lazily by the :attr:`logger` property.
    self._logger = None
    self.logger_name = self.import_name

    #: A dictionary of all view functions registered.  The keys will
    #: be function names which are also used to generate URLs and
    #: the values are the function objects themselves.
    #: To register a view function, use the :meth:`route` decorator.
    self.view_functions = {}

    # support for the now deprecated `error_handlers` attribute.  The
    # :attr:`error_handler_spec` shall be used now.
    self._error_handlers = {}

    #: A dictionary of all registered error handlers.  The key is `None`
    #: for error handlers active on the application, otherwise the key is
    #: the name of the blueprint.  Each key points to another dictionary
    #: where the key is the status code of the http exception.  The
    #: special key `None` points to a list of tuples where the first item
    #: is the class for the instance check and the second the error handler
    #: function.
    #:
    #: To register a error handler, use the :meth:`errorhandler`
    #: decorator.
    self.error_handler_spec = {None: self._error_handlers}

    #: A list of functions that are called when :meth:`url_for` raises a
    #: :exc:`~werkzeug.routing.BuildError`.  Each function registered here
    #: is called with `error`, `endpoint` and `values`.  If a function
    #: returns `None` or raises a `BuildError` the next function is
    #: tried.
    #:
    #: .. versionadded:: 0.9
    self.url_build_error_handlers = []

    #: A dictionary with lists of functions that should be called at the
    #: beginning of the request.  The key of the dictionary is the name of
    #: the blueprint this function is active for, `None` for all requests.
    #: This can for example be used to open database connections or
    #: getting hold of the currently logged in user.  To register a
    #: function here, use the :meth:`before_request` decorator.
    self.before_request_funcs = {}

    #: A lists of functions that should be called at the beginning of the
    #: first request to this instance.  To register a function here, use
    #: the :meth:`before_first_request` decorator.
    #:
    #: .. versionadded:: 0.8
    self.before_first_request_funcs = []

    #: A dictionary with lists of functions that should be called after
    #: each request.  The key of the dictionary is the name of the blueprint
    #: this function is active for, `None` for all requests.  This can for
    #: example be used to open database connections or getting hold of the
    #: currently logged in user.  To register a function here, use the
    #: :meth:`after_request` decorator.
    self.after_request_funcs = {}

    #: A dictionary with lists of functions that are called after
    #: each request, even if an exception has occurred.  The key of the
    #: dictionary is the name of the blueprint this function is active for,
    #: `None` for all requests.  These functions are not allowed to modify
    #: the request, and their return values are ignored.  If an exception
    #: occurred while processing the request, it gets passed to each
    #: teardown_request function.  To register a function here, use the
    #: :meth:`teardown_request` decorator.
    #:
    #: .. versionadded:: 0.7
    self.teardown_request_funcs = {}

    #: A list of functions that are called when the application context
    #: is destroyed.  Since the application context is also torn down
    #: if the request ends this is the place to store code that disconnects
    #: from databases.
    #:
    #: .. versionadded:: 0.9
    self.teardown_appcontext_funcs = []

    #: A dictionary with lists of functions that can be used as URL
    #: value processor functions.  Whenever a URL is built these functions
    #: are called to modify the dictionary of values in place.  The key
    #: `None` here is used for application wide
    #: callbacks, otherwise the key is the name of the blueprint.
    #: Each of these functions has the chance to modify the dictionary
    #:
    #: .. versionadded:: 0.7
    self.url_value_preprocessors = {}

    #: A dictionary with lists of functions that can be used as URL value
    #: preprocessors.  The key `None` here is used for application wide
    #: callbacks, otherwise the key is the name of the blueprint.
    #: Each of these functions has the chance to modify the dictionary
    #: of URL values before they are used as the keyword arguments of the
    #: view function.  For each function registered this one should also
    #: provide a :meth:`url_defaults` function that adds the parameters
    #: automatically again that were removed that way.
    #:
    #: .. versionadded:: 0.7
    self.url_default_functions = {}

    #: A dictionary with list of functions that are called without argument
    #: to populate the template context.  The key of the dictionary is the
    #: name of the blueprint this function is active for, `None` for all
    #: requests.  Each returns a dictionary that the template context is
    #: updated with.  To register a function here, use the
    #: :meth:`context_processor` decorator.
    self.template_context_processors = {
        None: [_default_template_ctx_processor]
    }

    #: all the attached blueprints in a dictionary by name.  Blueprints
    #: can be attached multiple times so this dictionary does not tell
    #: you how often they got attached.
    #:
    #: .. versionadded:: 0.7
    self.blueprints = {}

    #: a place where extensions can store application specific state.  For
    #: example this is where an extension could store database engines and
    #: similar things.  For backwards compatibility extensions should register
    #: themselves like this::
    #:
    #:      if not hasattr(app, 'extensions'):
    #:          app.extensions = {}
    #:      app.extensions['extensionname'] = SomeObject()
    #:
    #: The key must match the name of the extension module.  For example in
    #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
    #: ``'foo'``.
    #:
    #: .. versionadded:: 0.7
    self.extensions = {}

    #: The :class:`~werkzeug.routing.Map` for this instance.  You can use
    #: this to change the routing converters after the class was created
    #: but before any routes are connected.  Example::
    #:
    #:    from werkzeug.routing import BaseConverter
    #:
    #:    class ListConverter(BaseConverter):
    #:        def to_python(self, value):
    #:            return value.split(',')
    #:        def to_url(self, values):
    #:            return ','.join(BaseConverter.to_url(value)
    #:                            for value in values)
    #:
    #:    app = Flask(__name__)
    #:    app.url_map.converters['list'] = ListConverter
    self.url_map = Map()

    # tracks internally if the application already handled at least one
    # request.
    self._got_first_request = False
    self._before_request_lock = Lock()

    # register the static folder for the application.  Do that even
    # if the folder does not exist.  First of all it might be created
    # while the server is running (usually happens during development)
    # but also because google appengine stores static files somewhere
    # else when mapped with the .yml file.
    if self.has_static_folder:
        self.add_url_rule(self.static_url_path + '/<path:filename>',
                          endpoint='static',
                          view_func=self.send_static_file)
def _get_error_handlers(self):
    # Deprecated accessor; the canonical storage is
    # error_handler_spec[None].
    from warnings import warn
    warn(DeprecationWarning('error_handlers is deprecated, use the '
        'new error_handler_spec attribute instead.'), stacklevel=1)
    return self._error_handlers

def _set_error_handlers(self, value):
    # Keep the deprecated attribute and the new spec dict in sync so
    # old code assigning to `error_handlers` still takes effect.
    self._error_handlers = value
    self.error_handler_spec[None] = value

# Deprecated alias property for ``error_handler_spec[None]``; the
# helper functions are deleted so they do not leak into the class API.
error_handlers = property(_get_error_handlers, _set_error_handlers)
del _get_error_handlers, _set_error_handlers
@locked_cached_property
def name(self):
    """The display name of the application.

    Normally this is the import name.  When the application was created
    in an executed script (``__main__``) the name is guessed from the
    script's file name instead, falling back to ``'__main__'`` when no
    file is associated with the interpreter session.

    .. versionadded:: 0.8
    """
    if self.import_name != '__main__':
        return self.import_name
    script = getattr(sys.modules['__main__'], '__file__', None)
    if script is None:
        return '__main__'
    return os.path.splitext(os.path.basename(script))[0]
@property
def propagate_exceptions(self):
    """Whether exceptions should propagate instead of being handled.

    Returns the `PROPAGATE_EXCEPTIONS` configuration value when it is
    set; otherwise propagation defaults to on while testing or
    debugging.

    .. versionadded:: 0.7
    """
    configured = self.config['PROPAGATE_EXCEPTIONS']
    if configured is not None:
        return configured
    return self.testing or self.debug
@property
def preserve_context_on_exception(self):
    """Whether the request context survives an unhandled exception.

    Returns the `PRESERVE_CONTEXT_ON_EXCEPTION` configuration value
    when it is set; otherwise it mirrors the debug flag.

    .. versionadded:: 0.7
    """
    configured = self.config['PRESERVE_CONTEXT_ON_EXCEPTION']
    return self.debug if configured is None else configured
@property
def logger(self):
    """A :class:`logging.Logger` object for this application.  The
    default configuration is to log to stderr if the application is
    in debug mode.  This logger can be used to (surprise) log messages.
    Here some examples::

        app.logger.debug('A value for debugging')
        app.logger.warning('A warning occurred (%d apples)', 42)
        app.logger.error('An error occurred')

    .. versionadded:: 0.3
    """
    # Fast path: reuse the cached logger as long as its name still
    # matches `logger_name` (which may be reassigned at runtime).
    if self._logger and self._logger.name == self.logger_name:
        return self._logger
    # Double-checked locking: re-test under the lock so that only one
    # thread creates the logger when several race past the first check.
    with _logger_lock:
        if self._logger and self._logger.name == self.logger_name:
            return self._logger
        # imported lazily to avoid pulling in logging setup at import time
        from flask.logging import create_logger
        self._logger = rv = create_logger(self)
        return rv
@locked_cached_property
def jinja_env(self):
    """The Jinja2 environment used to load templates."""
    # Built lazily on first access; the decorator caches the result
    # under a lock so the environment is only created once.
    return self.create_jinja_environment()
@property
def got_first_request(self):
    """This attribute is set to `True` if the application started
    handling the first request.

    .. versionadded:: 0.8
    """
    # read-only public view over the internal flag maintained by the
    # request-dispatch machinery
    return self._got_first_request
def make_config(self, instance_relative=False):
    """Create the :attr:`config` object used by the Flask constructor.

    :param instance_relative: when true, configuration files are
        resolved relative to the instance path instead of the
        application root path (this is the constructor's
        `instance_relative_config` flag).

    .. versionadded:: 0.8
    """
    base_path = self.instance_path if instance_relative else self.root_path
    return self.config_class(base_path, self.default_config)
def auto_find_instance_path(self):
    """Compute a default instance path when none was given.

    The result is a folder named ``instance`` next to the main file or
    package; for installed packages (where a prefix is known) it is
    ``<prefix>/var/<app-name>-instance`` instead.

    .. versionadded:: 0.8
    """
    prefix, package_path = find_package(self.import_name)
    if prefix is not None:
        # installed package: keep instance data under the prefix's var dir
        return os.path.join(prefix, 'var', self.name + '-instance')
    return os.path.join(package_path, 'instance')
def open_instance_resource(self, resource, mode='rb'):
    """Opens a resource from the application's instance folder
    (:attr:`instance_path`).  Otherwise works like
    :meth:`open_resource`.  Instance resources can also be opened for
    writing.

    :param resource: the name of the resource.  To access resources within
                     subfolders use forward slashes as separator.
    :param mode: resource file opening mode, default is 'rb'.
    :return: the open file object; the caller is responsible for
             closing it.
    """
    return open(os.path.join(self.instance_path, resource), mode)
def create_jinja_environment(self):
    """Build the Jinja2 environment for this application.

    The environment is configured from :attr:`jinja_options` (with the
    autoescape policy defaulting to :meth:`select_jinja_autoescape`)
    and pre-populated with Flask's template globals and the ``tojson``
    filter.  Override this to customize the environment.

    .. versionadded:: 0.5
    """
    opts = dict(self.jinja_options)
    opts.setdefault('autoescape', self.select_jinja_autoescape)
    env = Environment(self, **opts)
    env.globals.update(
        url_for=url_for,
        get_flashed_messages=get_flashed_messages,
        config=self.config,
        # request, session and g are normally added with the
        # context processor for efficiency reasons but for imported
        # templates we also want the proxies in there.
        request=request,
        session=session,
        g=g
    )
    env.filters['tojson'] = json.tojson_filter
    return env
def create_global_jinja_loader(self):
    """Creates the loader for the Jinja2 environment.  Can be used to
    override just the loader and keeping the rest unchanged.  It's
    discouraged to override this function.  Instead one should override
    the :meth:`jinja_loader` function instead.

    The global loader dispatches between the loaders of the application
    and the individual blueprints.

    :return: a :class:`DispatchingJinjaLoader` bound to this app.

    .. versionadded:: 0.7
    """
    return DispatchingJinjaLoader(self)
def init_jinja_globals(self):
    """Deprecated.  Used to initialize the Jinja2 globals.

    Intentionally a no-op; the hook is kept so that existing
    subclasses overriding it do not break on upgrade.

    .. versionadded:: 0.5
    .. versionchanged:: 0.7
       This method is deprecated with 0.7.  Override
       :meth:`create_jinja_environment` instead.
    """
def select_jinja_autoescape(self, filename):
    """Return `True` if autoescaping should be active for *filename*.

    Only markup-like template extensions are escaped; a missing
    template name (``None``) is never escaped automatically.

    .. versionadded:: 0.5
    """
    return filename is not None and \
        filename.endswith(('.html', '.htm', '.xml', '.xhtml'))
def update_template_context(self, context):
    """Update the template context with some commonly used variables.
    This injects request, session, config and g into the template
    context as well as everything template context processors want
    to inject.  Note that the as of Flask 0.6, the original values
    in the context will not be overridden if a context processor
    decides to return a value with the same key.

    :param context: the context as a dictionary that is updated in place
                    to add extra variables.
    """
    # application-wide processors run first, then (if a request is
    # active) the processors registered for the current blueprint
    funcs = self.template_context_processors[None]
    reqctx = _request_ctx_stack.top
    if reqctx is not None:
        bp = reqctx.request.blueprint
        if bp is not None and bp in self.template_context_processors:
            funcs = chain(funcs, self.template_context_processors[bp])
    # snapshot the caller-supplied values before processors run
    orig_ctx = context.copy()
    for func in funcs:
        context.update(func())
    # make sure the original values win.  This makes it possible to
    # easier add new variables in context processors without breaking
    # existing views.
    context.update(orig_ctx)
def run(self, host=None, port=None, debug=None, **options):
    """Runs the application on a local development server.  If the
    :attr:`debug` flag is set the server will automatically reload
    for code changes and show a debugger in case an exception happened.

    If you want to run the application in debug mode, but disable the
    code execution on the interactive debugger, you can pass
    ``use_evalex=False`` as parameter.  This will keep the debugger's
    traceback screen active, but disable code execution.

    .. admonition:: Keep in Mind

       Flask will suppress any server error with a generic error page
       unless it is in debug mode.  As such to enable just the
       interactive debugger without the code reloading, you have to
       invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
       Setting ``use_debugger`` to `True` without being in debug mode
       won't catch any exceptions because there won't be any to
       catch.

    .. versionchanged:: 0.10
       The default port is now picked from the ``SERVER_NAME`` variable.

    :param host: the hostname to listen on.  Set this to ``'0.0.0.0'`` to
                 have the server available externally as well.  Defaults to
                 ``'127.0.0.1'``.
    :param port: the port of the webserver.  Defaults to ``5000`` or the
                 port defined in the ``SERVER_NAME`` config variable if
                 present.
    :param debug: if given, enable or disable debug mode.
                  See :attr:`debug`.
    :param options: the options to be forwarded to the underlying
                    Werkzeug server.  See
                    :func:`werkzeug.serving.run_simple` for more
                    information.
    """
    from werkzeug.serving import run_simple
    if host is None:
        host = '127.0.0.1'
    if port is None:
        # use the port embedded in SERVER_NAME ("host:port"), else 5000
        server_name = self.config['SERVER_NAME']
        if server_name and ':' in server_name:
            port = int(server_name.rsplit(':', 1)[1])
        else:
            port = 5000
    if debug is not None:
        self.debug = bool(debug)
    # debug mode implies reloader and debugger unless explicitly
    # overridden by the caller via **options
    options.setdefault('use_reloader', self.debug)
    options.setdefault('use_debugger', self.debug)
    try:
        run_simple(host, port, self, **options)
    finally:
        # reset the first request information if the development server
        # resetted normally.  This makes it possible to restart the server
        # without reloader and that stuff from an interactive shell.
        self._got_first_request = False
def test_client(self, use_cookies=True):
    """Creates a test client for this application.  For information
    about unit testing head over to :ref:`testing`.

    Note that if you are testing for assertions or exceptions in your
    application code, you must set ``app.testing = True`` in order for the
    exceptions to propagate to the test client.  Otherwise, the exception
    will be handled by the application (not visible to the test client) and
    the only indication of an AssertionError or other exception will be a
    500 status code response to the test client.  See the :attr:`testing`
    attribute.  For example::

        app.testing = True
        client = app.test_client()

    The test client can be used in a `with` block to defer the closing down
    of the context until the end of the `with` block.  This is useful if
    you want to access the context locals for testing::

        with app.test_client() as c:
            rv = c.get('/?vodka=42')
            assert request.args['vodka'] == '42'

    See :class:`~flask.testing.FlaskClient` for more information.

    .. versionchanged:: 0.4
       added support for `with` block usage for the client.

    .. versionadded:: 0.7
       The `use_cookies` parameter was added as well as the ability
       to override the client to be used by setting the
       :attr:`test_client_class` attribute.
    """
    cls = self.test_client_class
    if cls is None:
        # imported lazily so flask.testing is only pulled in when used
        from flask.testing import FlaskClient as cls
    return cls(self, self.response_class, use_cookies=use_cookies)
def open_session(self, request):
    """Creates or opens a new session.  Default implementation stores all
    session data in a signed cookie.  This requires that the
    :attr:`secret_key` is set.  Instead of overriding this method
    we recommend replacing the :class:`session_interface`.

    :param request: an instance of :attr:`request_class`.
    """
    # delegate to the pluggable session interface so the session
    # mechanism can be swapped without subclassing Flask
    return self.session_interface.open_session(self, request)
def save_session(self, session, response):
    """Saves the session if it needs updates.  For the default
    implementation, check :meth:`open_session`.  Instead of overriding this
    method we recommend replacing the :class:`session_interface`.

    :param session: the session to be saved (a
                    :class:`~werkzeug.contrib.securecookie.SecureCookie`
                    object)
    :param response: an instance of :attr:`response_class`
    """
    # delegate to the pluggable session interface (see open_session)
    return self.session_interface.save_session(self, session, response)
def make_null_session(self):
    """Create a placeholder session for when no real session exists.

    Rather than overriding this method, replace the
    :class:`session_interface`.

    .. versionadded:: 0.7
    """
    # Delegate to the pluggable session interface object.
    interface = self.session_interface
    return interface.make_null_session(self)
def register_module(self, module, **options):
    """Register a module with this application (deprecated).

    The keyword arguments are the same as for the :class:`Module`
    constructor and override the module's own values when given.

    .. versionchanged:: 0.7
       The module system was deprecated in favor for the blueprint
       system.
    """
    assert blueprint_is_module(module), 'register_module requires ' \
        'actual module objects. Please upgrade to blueprints though.'
    # Refuse outright when module support has been switched off.
    if not self.enable_modules:
        raise RuntimeError('Module support was disabled but code '
            'attempted to register a module named %r' % module)
    from warnings import warn
    warn(DeprecationWarning('Modules are deprecated. Upgrade to '
        'using blueprints. Have a look into the documentation for '
        'more information. If this module was registered by a '
        'Flask-Extension upgrade the extension or contact the author '
        'of that extension instead. (Registered %r)' % module),
        stacklevel=2)
    # Modules are thin wrappers around blueprints, so reuse that path.
    self.register_blueprint(module, **options)
@setupmethod
def register_blueprint(self, blueprint, **options):
    """Registers a blueprint on the application.

    .. versionadded:: 0.7
    """
    known = self.blueprints.get(blueprint.name)
    if known is not None:
        # Re-registering the same object is fine; a different object
        # under the same name is a programming error.
        assert known is blueprint, \
            'A blueprint\'s name collision occurred between %r and ' \
            '%r. Both share the same name "%s". Blueprints that ' \
            'are created on the fly need unique names.' % \
            (blueprint, known, blueprint.name)
        first_registration = False
    else:
        self.blueprints[blueprint.name] = blueprint
        first_registration = True
    blueprint.register(self, options, first_registration)
@setupmethod
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
    """Connects a URL rule.  Works exactly like the :meth:`route`
    decorator.  If a view_func is provided it will be registered with the
    endpoint.

    Basically this example::

        @app.route('/')
        def index():
            pass

    Is equivalent to the following::

        def index():
            pass
        app.add_url_rule('/', 'index', index)

    If the view_func is not provided you will need to connect the endpoint
    to a view function like so::

        app.view_functions['index'] = index

    Internally :meth:`route` invokes :meth:`add_url_rule` so if you want
    to customize the behavior via subclassing you only need to change
    this method.

    For more information refer to :ref:`url-route-registrations`.

    .. versionchanged:: 0.2
       `view_func` parameter added.

    .. versionchanged:: 0.6
       `OPTIONS` is added automatically as method.

    :param rule: the URL rule as string
    :param endpoint: the endpoint for the registered URL rule.  Flask
                     itself assumes the name of the view function as
                     endpoint
    :param view_func: the function to call when serving a request to the
                      provided endpoint
    :param options: the options to be forwarded to the underlying
                    :class:`~werkzeug.routing.Rule` object.  A change
                    to Werkzeug is handling of method options.  methods
                    is a list of methods this rule should be limited
                    to (`GET`, `POST` etc.).  By default a rule
                    just listens for `GET` (and implicitly `HEAD`).
                    Starting with Flask 0.6, `OPTIONS` is implicitly
                    added and handled by the standard request handling.
    """
    if endpoint is None:
        # Fall back to the view function's name as the endpoint.
        endpoint = _endpoint_from_view_func(view_func)
    options['endpoint'] = endpoint
    methods = options.pop('methods', None)
    # if the methods are not given and the view_func object knows its
    # methods we can use that instead.  If neither exists, we go with
    # a tuple of only `GET` as default.
    if methods is None:
        methods = getattr(view_func, 'methods', None) or ('GET',)
    if isinstance(methods, string_types):
        # A bare string would otherwise be iterated character by character.
        raise TypeError('Allowed methods have to be iterables of strings, '
                        'for example: @app.route(..., methods=["POST"])')
    methods = set(methods)
    # Methods that should always be added
    required_methods = set(getattr(view_func, 'required_methods', ()))
    # starting with Flask 0.8 the view_func object can disable and
    # force-enable the automatic options handling.
    provide_automatic_options = getattr(view_func,
        'provide_automatic_options', None)
    if provide_automatic_options is None:
        if 'OPTIONS' not in methods:
            # The view does not handle OPTIONS itself, so Flask will.
            provide_automatic_options = True
            required_methods.add('OPTIONS')
        else:
            provide_automatic_options = False
    # Add the required methods now.
    methods |= required_methods
    rule = self.url_rule_class(rule, methods=methods, **options)
    rule.provide_automatic_options = provide_automatic_options
    self.url_map.add(rule)
    if view_func is not None:
        old_func = self.view_functions.get(endpoint)
        if old_func is not None and old_func != view_func:
            # Guard against two different views claiming one endpoint.
            raise AssertionError('View function mapping is overwriting an '
                                 'existing endpoint function: %s' % endpoint)
        self.view_functions[endpoint] = view_func
def route(self, rule, **options):
    """Decorator that registers a view function for the given URL rule.

    Equivalent to calling :meth:`add_url_rule` with the decorated
    function, e.g.::

        @app.route('/')
        def index():
            return 'Hello World'

    For more information refer to :ref:`url-route-registrations`.

    :param rule: the URL rule as string
    :param endpoint: the endpoint for the registered URL rule.  Flask
                     itself assumes the name of the view function as
                     endpoint
    :param options: options forwarded to the underlying
                    :class:`~werkzeug.routing.Rule` object; ``methods``
                    limits the rule to the given HTTP methods (by
                    default `GET`, plus implicit `HEAD` and, since
                    Flask 0.6, `OPTIONS`).
    """
    def register(view):
        # An explicit endpoint may ride along in the options.
        name = options.pop('endpoint', None)
        self.add_url_rule(rule, name, view, **options)
        return view
    return register
@setupmethod
def endpoint(self, endpoint):
    """Decorator registering a function under an endpoint name.

    Example::

        @app.endpoint('example.endpoint')
        def example():
            return "example"

    :param endpoint: the name of the endpoint
    """
    def register(view):
        # Bind the view directly into the endpoint -> function map.
        self.view_functions[endpoint] = view
        return view
    return register
@setupmethod
def errorhandler(self, code_or_exception):
    """Decorator registering an error handler for a status code or an
    exception class.  Examples::

        @app.errorhandler(404)
        def page_not_found(error):
            return 'This page does not exist', 404

        @app.errorhandler(DatabaseError)
        def special_exception_handler(error):
            return 'Database connection failed', 500

    Handlers can also be attached without the decorator via
    :meth:`register_error_handler`; direct assignment into
    :attr:`error_handler_spec` is discouraged because of the nested
    dictionary layout and the special case for exception types.

    .. versionadded:: 0.7
       Arbitrary exception types can now be registered, not only
       subclasses of :class:`~werkzeug.exceptions.HTTPException`.

    :param code: the code as integer for the handler
    """
    def register(handler):
        # ``None`` key = application-wide registration.
        self._register_error_handler(None, code_or_exception, handler)
        return handler
    return register
def register_error_handler(self, code_or_exception, f):
    """Non-decorator counterpart of :meth:`errorhandler`; attaches *f*
    as an application-wide error handler.

    .. versionadded:: 0.7
    """
    # Same machinery as the decorator, application-wide key (None).
    self._register_error_handler(None, code_or_exception, f)
@setupmethod
def _register_error_handler(self, key, code_or_exception, f):
    # Internal helper shared by :meth:`errorhandler` and
    # :meth:`register_error_handler`.  *key* is the blueprint name or
    # ``None`` for application-wide handlers.
    if isinstance(code_or_exception, HTTPException):
        # An exception *instance* registers by its status code.
        code_or_exception = code_or_exception.code
    handlers = self.error_handler_spec.setdefault(key, {})
    if isinstance(code_or_exception, integer_types):
        assert code_or_exception != 500 or key is None, \
            'It is currently not possible to register a 500 internal ' \
            'server error on a per-blueprint level.'
        handlers[code_or_exception] = f
    else:
        # Exception classes go into the catch-all list under ``None``.
        handlers.setdefault(None, []).append((code_or_exception, f))
@setupmethod
def template_filter(self, name=None):
    """Decorator registering a custom Jinja template filter.  Example::

        @app.template_filter()
        def reverse(s):
            return s[::-1]

    :param name: the optional name of the filter, otherwise the
                 function name will be used.
    """
    def register(func):
        self.add_template_filter(func, name=name)
        return func
    return register
@setupmethod
def add_template_filter(self, f, name=None):
    """Register a custom template filter, non-decorator counterpart of
    :meth:`template_filter`.

    :param name: the optional name of the filter, otherwise the
                 function name will be used.
    """
    key = name or f.__name__
    self.jinja_env.filters[key] = f
@setupmethod
def template_test(self, name=None):
    """Decorator registering a custom Jinja template test.  Example::

        @app.template_test()
        def is_prime(n):
            if n == 2:
                return True
            for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
                if n % i == 0:
                    return False
            return True

    .. versionadded:: 0.10

    :param name: the optional name of the test, otherwise the
                 function name will be used.
    """
    def register(func):
        self.add_template_test(func, name=name)
        return func
    return register
@setupmethod
def add_template_test(self, f, name=None):
    """Register a custom template test, non-decorator counterpart of
    :meth:`template_test`.

    .. versionadded:: 0.10

    :param name: the optional name of the test, otherwise the
                 function name will be used.
    """
    key = name or f.__name__
    self.jinja_env.tests[key] = f
@setupmethod
def template_global(self, name=None):
    """Decorator registering a custom template global function.
    Example::

        @app.template_global()
        def double(n):
            return 2 * n

    .. versionadded:: 0.10

    :param name: the optional name of the global function, otherwise the
                 function name will be used.
    """
    def register(func):
        self.add_template_global(func, name=name)
        return func
    return register
@setupmethod
def add_template_global(self, f, name=None):
    """Register a custom template global function, non-decorator
    counterpart of :meth:`template_global`.

    .. versionadded:: 0.10

    :param name: the optional name of the global function, otherwise the
                 function name will be used.
    """
    key = name or f.__name__
    self.jinja_env.globals[key] = f
@setupmethod
def before_request(self, f):
    """Registers a function to run before each request."""
    # Application-wide hooks live under the ``None`` key.
    hooks = self.before_request_funcs.setdefault(None, [])
    hooks.append(f)
    return f
@setupmethod
def before_first_request(self, f):
    """Registers a function to be run before the first request to this
    instance of the application.

    .. versionadded:: 0.8
    """
    hooks = self.before_first_request_funcs
    hooks.append(f)
    return f
@setupmethod
def after_request(self, f):
    """Register a function to run after each request.

    The function receives a :attr:`response_class` object and must
    return a response object, either a new one or the same (see
    :meth:`process_response`).  As of Flask 0.7 it might not run at the
    end of the request when an unhandled exception occurred.
    """
    hooks = self.after_request_funcs.setdefault(None, [])
    hooks.append(f)
    return f
@setupmethod
def teardown_request(self, f):
    """Register a function to run at the end of each request, whether
    or not an exception occurred.  Teardown functions fire when the
    request context is popped, even if no actual request was performed::

        ctx = app.test_request_context()
        ctx.push()
        ...
        ctx.pop()

    In the example above the teardown functions run during
    ``ctx.pop()``, just before the context leaves the stack of active
    contexts — relevant when using such constructs in tests.

    Teardown functions must take every step necessary to avoid failing;
    code that might raise should be wrapped in try/except and errors
    logged.  When a teardown function runs because of an exception it
    receives that error object as its argument.

    .. admonition:: Debug Note

       In debug mode Flask does not tear down a request on an exception
       immediately but keeps it alive for the interactive debugger.
       This is controlled by the ``PRESERVE_CONTEXT_ON_EXCEPTION``
       configuration variable.
    """
    hooks = self.teardown_request_funcs.setdefault(None, [])
    hooks.append(f)
    return f
@setupmethod
def teardown_appcontext(self, f):
    """Register a function to run when the application context ends;
    these typically also run when the request context is popped (a
    request context usually manages an application context)::

        ctx = app.app_context()
        ctx.push()
        ...
        ctx.pop()

    In the example above the teardown functions run during
    ``ctx.pop()``, just before the app context leaves the stack of
    active contexts — relevant when using such constructs in tests.

    When a teardown function runs because of an exception it receives
    that error object as its argument.

    .. versionadded:: 0.9
    """
    hooks = self.teardown_appcontext_funcs
    hooks.append(f)
    return f
@setupmethod
def context_processor(self, f):
    """Registers a template context processor function."""
    # Application-wide processors live under the ``None`` key.
    processors = self.template_context_processors[None]
    processors.append(f)
    return f
@setupmethod
def url_value_preprocessor(self, f):
    """Register *f* as URL value preprocessor for all view functions of
    the application.  It runs before the view functions and may modify
    the url values provided.
    """
    hooks = self.url_value_preprocessors.setdefault(None, [])
    hooks.append(f)
    return f
@setupmethod
def url_defaults(self, f):
    """Register a URL-defaults callback for all view functions of the
    application.  It is called with the endpoint and values and should
    update the values passed in place.
    """
    hooks = self.url_default_functions.setdefault(None, [])
    hooks.append(f)
    return f
def handle_http_exception(self, e):
    """Handle an HTTP exception by invoking the registered error
    handlers, falling back to returning the exception itself as the
    response.

    .. versionadded:: 0.3
    """
    handlers = self.error_handler_spec.get(request.blueprint)
    # Proxy exceptions carry no error code; always return them
    # unchanged as errors.
    if e.code is None:
        return e
    if handlers and e.code in handlers:
        handler = handlers[e.code]
    else:
        # Fall back to the application-wide handler for this code.
        handler = self.error_handler_spec[None].get(e.code)
    return e if handler is None else handler(e)
def trap_http_exception(self, e):
    """Decide whether an HTTP exception should be trapped.

    Returns `True` when ``TRAP_HTTP_EXCEPTIONS`` is enabled, or — with
    ``TRAP_BAD_REQUEST_ERRORS`` enabled — when *e* is a bad-request key
    error; otherwise `False`.  Called for every HTTP exception raised
    by a view function: a `True` result skips the error handler so the
    exception surfaces as a regular traceback, which helps debugging
    implicitly raised HTTP exceptions.

    .. versionadded:: 0.8
    """
    config = self.config
    if config['TRAP_HTTP_EXCEPTIONS']:
        return True
    if config['TRAP_BAD_REQUEST_ERRORS']:
        return isinstance(e, BadRequest)
    return False
def handle_user_exception(self, e):
    """This method is called whenever an exception occurs that should be
    handled.  A special case are
    :class:`~werkzeug.exception.HTTPException`\s which are forwarded by
    this function to the :meth:`handle_http_exception` method.  This
    function will either return a response value or reraise the
    exception with the same traceback.

    .. versionadded:: 0.7
    """
    exc_type, exc_value, tb = sys.exc_info()
    assert exc_value is e
    # ensure not to trash sys.exc_info() at that point in case someone
    # wants the traceback preserved in handle_http_exception.  Of course
    # we cannot prevent users from trashing it themselves in a custom
    # trap_http_exception method so that's their fault then.
    if isinstance(e, HTTPException) and not self.trap_http_exception(e):
        return self.handle_http_exception(e)
    # Blueprint-local handlers take precedence over app-wide ones;
    # each entry pairs an exception type with its handler function.
    blueprint_handlers = ()
    handlers = self.error_handler_spec.get(request.blueprint)
    if handlers is not None:
        blueprint_handlers = handlers.get(None, ())
    app_handlers = self.error_handler_spec[None].get(None, ())
    for typecheck, handler in chain(blueprint_handlers, app_handlers):
        if isinstance(e, typecheck):
            return handler(e)
    # No handler matched: re-raise with the original traceback.
    reraise(exc_type, exc_value, tb)
def handle_exception(self, e):
    """Default exception handling that kicks in when an exception
    occurs that is not caught.  In debug mode the exception will
    be re-raised immediately, otherwise it is logged and the handler
    for a 500 internal server error is used.  If no such handler
    exists, a default 500 internal server error message is displayed.

    .. versionadded:: 0.3
    """
    exc_type, exc_value, tb = sys.exc_info()
    # Notify subscribers (e.g. test helpers) before any handling.
    got_request_exception.send(self, exception=e)
    handler = self.error_handler_spec[None].get(500)
    if self.propagate_exceptions:
        # if we want to repropagate the exception, we can attempt to
        # raise it with the whole traceback in case we can do that
        # (the function was actually called from the except part)
        # otherwise, we just raise the error again
        if exc_value is e:
            reraise(exc_type, exc_value, tb)
        else:
            raise e
    self.log_exception((exc_type, exc_value, tb))
    if handler is None:
        # No registered 500 handler: serve the generic error page.
        return InternalServerError()
    return handler(e)
def log_exception(self, exc_info):
    """Log an exception; invoked by :meth:`handle_exception` when
    debugging is disabled, right before the handler is called.  The
    default implementation logs the exception as an error on the
    :attr:`logger`.

    .. versionadded:: 0.8
    """
    # Lazy %-style args let the logging framework defer formatting.
    self.logger.error('Exception on %s [%s]',
                      request.path, request.method,
                      exc_info=exc_info)
def raise_routing_exception(self, request):
    """Exceptions that are recording during routing are reraised with
    this method.  During debug we are not reraising redirect requests
    for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising
    a different error instead to help debug situations.

    :internal:
    """
    # Outside debug mode, or for safe methods, just re-raise whatever
    # the router recorded (e.g. 404, redirect).
    if not self.debug \
       or not isinstance(request.routing_exception, RequestRedirect) \
       or request.method in ('GET', 'HEAD', 'OPTIONS'):
        raise request.routing_exception
    # Debug + redirect on a body-carrying method: a plain redirect would
    # silently drop form data, so raise a descriptive helper error.
    from .debughelpers import FormDataRoutingRedirect
    raise FormDataRoutingRedirect(request)
def dispatch_request(self):
    """Does the request dispatching.  Matches the URL and returns the
    return value of the view or error handler.  This does not have to
    be a response object.  In order to convert the return value to a
    proper response object, call :func:`make_response`.

    .. versionchanged:: 0.7
       This no longer does the exception handling, this code was
       moved to the new :meth:`full_dispatch_request`.
    """
    req = _request_ctx_stack.top.request
    if req.routing_exception is not None:
        # URL matching failed earlier; surface the recorded error now.
        self.raise_routing_exception(req)
    rule = req.url_rule
    # if we provide automatic options for this URL and the
    # request came with the OPTIONS method, reply automatically
    if getattr(rule, 'provide_automatic_options', False) \
       and req.method == 'OPTIONS':
        return self.make_default_options_response()
    # otherwise dispatch to the handler for that endpoint
    return self.view_functions[rule.endpoint](**req.view_args)
def full_dispatch_request(self):
    """Dispatches the request and on top of that performs request
    pre and postprocessing as well as HTTP exception catching and
    error handling.

    .. versionadded:: 0.7
    """
    self.try_trigger_before_first_request_functions()
    try:
        request_started.send(self)
        # A non-None return from a before_request hook short-circuits
        # the view entirely.
        rv = self.preprocess_request()
        if rv is None:
            rv = self.dispatch_request()
    except Exception as e:
        # User-level handling may convert the exception into a response
        # or re-raise it.
        rv = self.handle_user_exception(e)
    response = self.make_response(rv)
    response = self.process_response(response)
    request_finished.send(self, response=response)
    return response
def try_trigger_before_first_request_functions(self):
    """Run the :attr:`before_first_request_funcs` exactly once per
    application instance (which usually means once per process);
    called before each request.

    :internal:
    """
    # Fast path: once the hooks have run, skip the lock entirely.
    if self._got_first_request:
        return
    with self._before_request_lock:
        # Double-checked locking: another thread may have won the race
        # while we waited for the lock.
        if self._got_first_request:
            return
        for hook in self.before_first_request_funcs:
            hook()
        self._got_first_request = True
def make_default_options_response(self):
    """This method is called to create the default `OPTIONS` response.
    This can be changed through subclassing to change the default
    behavior of `OPTIONS` responses.

    :return: a fresh :attr:`response_class` instance whose ``Allow``
             header lists the HTTP methods valid for the matched URL.

    .. versionadded:: 0.7
    """
    adapter = _request_ctx_stack.top.url_adapter
    if hasattr(adapter, 'allowed_methods'):
        methods = adapter.allowed_methods()
    else:
        # fallback for Werkzeug < 0.7
        methods = []
        try:
            # '--' is never a valid HTTP method, so matching always
            # fails; MethodNotAllowed then reports which methods *would*
            # have been accepted for this URL.
            adapter.match(method='--')
        except MethodNotAllowed as e:
            methods = e.valid_methods
        except HTTPException:
            # FIX: dropped the unused ``as e`` binding that shadowed the
            # variable above.  Any other routing error means the valid
            # methods cannot be determined; leave the list empty.
            pass
    rv = self.response_class()
    rv.allow.update(methods)
    return rv
def should_ignore_error(self, error):
    """Decide whether *error* is hidden from the teardown system.

    A `True` result means teardown handlers will not be passed the
    error.  The base implementation never suppresses errors; subclasses
    may override this.

    .. versionadded:: 0.10
    """
    return False
def make_response(self, rv):
    """Converts the return value from a view function to a real
    response object that is an instance of :attr:`response_class`.

    The following types are allowed for `rv`:

    .. tabularcolumns:: |p{3.5cm}|p{9.5cm}|

    ======================= ===========================================
    :attr:`response_class`  the object is returned unchanged
    :class:`str`            a response object is created with the
                            string as body
    :class:`unicode`        a response object is created with the
                            string encoded to utf-8 as body
    a WSGI function         the function is called as WSGI application
                            and buffered as response object
    :class:`tuple`          A tuple in the form ``(response, status,
                            headers)`` or ``(response, headers)``
                            where `response` is any of the
                            types defined here, `status` is a string
                            or an integer and `headers` is a list or
                            a dictionary with header values.
    ======================= ===========================================

    :param rv: the return value from the view function

    .. versionchanged:: 0.9
       Previously a tuple was interpreted as the arguments for the
       response object.
    """
    status_or_headers = headers = None
    if isinstance(rv, tuple):
        # Pad a 2-tuple to (body, status_or_headers, headers).
        rv, status_or_headers, headers = rv + (None,) * (3 - len(rv))
    if rv is None:
        raise ValueError('View function did not return a response')
    if isinstance(status_or_headers, (dict, list)):
        # The 2-tuple form was (body, headers), not (body, status).
        headers, status_or_headers = status_or_headers, None
    if not isinstance(rv, self.response_class):
        # When we create a response object directly, we let the constructor
        # set the headers and status.  We do this because there can be
        # some extra logic involved when creating these objects with
        # specific values (like default content type selection).
        if isinstance(rv, (text_type, bytes, bytearray)):
            rv = self.response_class(rv, headers=headers, status=status_or_headers)
            headers = status_or_headers = None
        else:
            # Anything else is assumed to be a WSGI callable.
            rv = self.response_class.force_type(rv, request.environ)
    if status_or_headers is not None:
        if isinstance(status_or_headers, string_types):
            rv.status = status_or_headers
        else:
            rv.status_code = status_or_headers
    if headers:
        rv.headers.extend(headers)
    return rv
def create_url_adapter(self, request):
    """Creates a URL adapter for the given request.  The URL adapter
    is created at a point where the request context is not yet set up
    so the request is passed explicitly.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9
       This can now also be called without a request object when the
       URL adapter is created for the application context.
    """
    if request is not None:
        return self.url_map.bind_to_environ(request.environ,
            server_name=self.config['SERVER_NAME'])
    # We need at the very least the server name to be set for this
    # to work.
    if self.config['SERVER_NAME'] is not None:
        return self.url_map.bind(
            self.config['SERVER_NAME'],
            script_name=self.config['APPLICATION_ROOT'] or '/',
            url_scheme=self.config['PREFERRED_URL_SCHEME'])
    # NOTE: implicitly returns None when there is no request and no
    # SERVER_NAME configured — callers must handle a missing adapter.
def inject_url_defaults(self, endpoint, values):
    """Inject the URL defaults for *endpoint* directly into the
    *values* dictionary (in place).  Used internally; automatically
    invoked during URL building.

    .. versionadded:: 0.7
    """
    # App-wide callbacks first, then any blueprint-local ones.
    callbacks = list(self.url_default_functions.get(None, ()))
    if '.' in endpoint:
        blueprint = endpoint.rsplit('.', 1)[0]
        callbacks.extend(self.url_default_functions.get(blueprint, ()))
    for callback in callbacks:
        callback(endpoint, values)
def handle_url_build_error(self, error, endpoint, values):
    """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`.
    """
    exc_type, exc_value, tb = sys.exc_info()
    # Give each registered handler a chance to produce a URL; the first
    # non-None result wins.
    for handler in self.url_build_error_handlers:
        try:
            rv = handler(error, endpoint, values)
            if rv is not None:
                return rv
        except BuildError as error:
            # NOTE: deliberately rebinds ``error`` so a BuildError raised
            # by a handler replaces the original one below.
            pass
    # At this point we want to reraise the exception.  If the error is
    # still the same one we can reraise it with the original traceback,
    # otherwise we raise it from here.
    if error is exc_value:
        reraise(exc_type, exc_value, tb)
    raise error
def preprocess_request(self):
    """Called before the actual request dispatching and will
    call every as :meth:`before_request` decorated function.
    If any of these function returns a value it's handled as
    if it was the return value from the view and further
    request handling is stopped.

    This also triggers the :meth:`url_value_processor` functions before
    the actual :meth:`before_request` functions are called.
    """
    bp = _request_ctx_stack.top.request.blueprint
    # URL value preprocessors run first: app-wide, then blueprint-local.
    funcs = self.url_value_preprocessors.get(None, ())
    if bp is not None and bp in self.url_value_preprocessors:
        funcs = chain(funcs, self.url_value_preprocessors[bp])
    for func in funcs:
        func(request.endpoint, request.view_args)
    # before_request hooks: the first non-None return short-circuits
    # dispatching and is used as the response value.
    funcs = self.before_request_funcs.get(None, ())
    if bp is not None and bp in self.before_request_funcs:
        funcs = chain(funcs, self.before_request_funcs[bp])
    for func in funcs:
        rv = func()
        if rv is not None:
            return rv
def process_response(self, response):
    """Can be overridden in order to modify the response object
    before it's sent to the WSGI server.  By default this will
    call all the :meth:`after_request` decorated functions.

    .. versionchanged:: 0.5
       As of Flask 0.5 the functions registered for after request
       execution are called in reverse order of registration.

    :param response: a :attr:`response_class` object.
    :return: a new response object or the same, has to be an
             instance of :attr:`response_class`.
    """
    ctx = _request_ctx_stack.top
    bp = ctx.request.blueprint
    # Per-request deferred functions first, then blueprint-local, then
    # app-wide hooks — the latter two in reverse registration order.
    funcs = ctx._after_request_functions
    if bp is not None and bp in self.after_request_funcs:
        funcs = chain(funcs, reversed(self.after_request_funcs[bp]))
    if None in self.after_request_funcs:
        funcs = chain(funcs, reversed(self.after_request_funcs[None]))
    for handler in funcs:
        response = handler(response)
    # Persist the session unless it is a null (placeholder) session.
    if not self.session_interface.is_null_session(ctx.session):
        self.save_session(ctx.session, response)
    return response
def do_teardown_request(self, exc=None):
    """Called after the actual request dispatching and will
    call every as :meth:`teardown_request` decorated function.  This is
    not actually called by the :class:`Flask` object itself but is always
    triggered when the request context is popped.  That way we have a
    tighter control over certain resources under testing environments.

    .. versionchanged:: 0.9
       Added the `exc` argument.  Previously this was always using the
       current exception information.
    """
    if exc is None:
        # Default to whatever exception is currently being handled.
        exc = sys.exc_info()[1]
    # App-wide teardown hooks run in reverse registration order,
    # followed by any blueprint-local ones (also reversed).
    funcs = reversed(self.teardown_request_funcs.get(None, ()))
    bp = _request_ctx_stack.top.request.blueprint
    if bp is not None and bp in self.teardown_request_funcs:
        funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))
    for func in funcs:
        func(exc)
    request_tearing_down.send(self, exc=exc)
def do_teardown_appcontext(self, exc=None):
    """Called when an application context is popped; works much like
    :meth:`do_teardown_request` but for the application context.

    .. versionadded:: 0.9
    """
    if exc is None:
        # Default to whatever exception is currently being handled.
        exc = sys.exc_info()[1]
    # Hooks run in reverse registration order.
    for teardown in reversed(self.teardown_appcontext_funcs):
        teardown(exc)
    appcontext_tearing_down.send(self, exc=exc)
def app_context(self):
    """Bind only the application (no request).  While the context is
    active, :data:`flask.current_app` points at this application.  An
    application context is also created automatically when a request
    context is pushed, if necessary.

    Example usage::

        with app.app_context():
            ...

    .. versionadded:: 0.9
    """
    return AppContext(self)
def request_context(self, environ):
    """Create a :class:`~flask.ctx.RequestContext` from *environ* for
    binding with the `with` statement — the request is bound to the
    current context only for the duration of the block::

        with app.request_context(environ):
            do_something_with(request)

    The returned object can also be used without `with`, which is handy
    in the shell; the above is equivalent to::

        ctx = app.request_context(environ)
        ctx.push()
        try:
            do_something_with(request)
        finally:
            ctx.pop()

    .. versionchanged:: 0.3
       Added support for non-with statement usage and `with` statement
       is now passed the ctx object.

    :param environ: a WSGI environment
    """
    return RequestContext(self, environ)
def test_request_context(self, *args, **kwargs):
    """Create a request context from a freshly built test WSGI
    environment (see :class:`werkzeug.test.EnvironBuilder` — the same
    arguments are accepted here).
    """
    from flask.testing import make_test_environ_builder
    builder = make_test_environ_builder(self, *args, **kwargs)
    try:
        return self.request_context(builder.get_environ())
    finally:
        # The builder may hold open resources (e.g. file uploads).
        builder.close()
def wsgi_app(self, environ, start_response):
    """The actual WSGI application.  This is not implemented in
    `__call__` so that middlewares can be applied without losing a
    reference to the class.  So instead of doing this::

        app = MyMiddleware(app)

    It's a better idea to do this instead::

        app.wsgi_app = MyMiddleware(app.wsgi_app)

    Then you still have the original application object around and
    can continue to call methods on it.

    .. versionchanged:: 0.7
       The behavior of the before and after request callbacks was changed
       under error conditions and a new callback was added that will
       always execute at the end of the request, independent on if an
       error occurred or not.  See :ref:`callbacks-and-errors`.

    :param environ: a WSGI environment
    :param start_response: a callable accepting a status code,
                           a list of headers and an optional
                           exception context to start the response
    """
    ctx = self.request_context(environ)
    ctx.push()
    error = None
    try:
        try:
            response = self.full_dispatch_request()
        except Exception as e:
            # Last-resort handling: convert the exception into a
            # 500-style response but remember it for teardown.
            error = e
            response = self.make_response(self.handle_exception(e))
        return response(environ, start_response)
    finally:
        if self.should_ignore_error(error):
            error = None
        # Pop the context, passing the error (if any) to teardown hooks.
        ctx.auto_pop(error)
@property
def modules(self):
    """Deprecated alias for :attr:`blueprints`."""
    from warnings import warn
    warn(DeprecationWarning('Flask.modules is deprecated, use '
                            'Flask.blueprints instead'),
         stacklevel=2)
    return self.blueprints
def __call__(self, environ, start_response):
    """Shortcut for :attr:`wsgi_app` — makes the app itself a WSGI
    callable."""
    application = self.wsgi_app
    return application(environ, start_response)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.name,
)
| bsd-3-clause |
FRBs/FRB | frb/galaxies/defs.py | 1 | 5973 | """ Define allowed quantities for FRB galaxies
Uncertainty is valid for any quantity with '_err' add-on, eg. W1_err
Am also likely to add _flg for each as well
"""
##############################################################
# Redshift
# Quantities describing the galaxy redshift; each may also carry an
# '_err' companion (see module docstring).
valid_z = [
    'z', # Preferred redshift, may be derived from one of several ways
    'z_phot', # Photometric redshift
    'z_spec', # Spectroscopic redshift
    'z_FRB', # FRB redshift
]
##############################################################
# Error Ellipse
# Localization error-ellipse parameters.
# NOTE(review): units of a/b are not stated here -- presumably arcsec;
# confirm against the code that consumes them.
valid_e = [
    'a', # Major axis
    'b', # Minor axis
    'theta', # Rotation of the major axis E from N (deg)
    'cl', # Confidence level of the ellipse
]
##############################################################
# Photometry

# Filters -- the canonical list of photometric band names recognised
# for FRB host galaxies.  Order is significant only in that downstream
# tables are built from it; each instrument contributes its bands below.
valid_filters = []

# SDSS
SDSS_bands = ['u', 'g', 'r', 'i', 'z']
valid_filters.extend(f'SDSS_{band}' for band in SDSS_bands)

# DES
DES_bands = ['g', 'r', 'i', 'z', 'Y']
valid_filters.extend(f'DES_{band}' for band in DES_bands)

# DECaLS
DECaL_bands = ['g', 'r', 'z']
valid_filters.extend(f'DECaL_{band}' for band in DECaL_bands)

# Pan-STARRS
PanSTARRS_bands = ['g', 'r', 'i', 'z', 'y']
valid_filters.extend(f'Pan-STARRS_{band}' for band in PanSTARRS_bands)

# VLT (FORS2)
VLT_bands = ['u', 'g', 'I', 'z']
valid_filters.extend(f'VLT_FORS2_{band}' for band in VLT_bands)

# Gemini GMOS -- south then north; both arms share the same band set.
GMOS_bands = ['u', 'g', 'r', 'i', 'z']
valid_filters.extend(f'GMOS_S_{band}' for band in GMOS_bands)
valid_filters.extend(f'GMOS_N_{band}' for band in GMOS_bands)

# NOT
NOT_bands = ['u', 'g', 'r', 'i', 'z']
valid_filters.extend(f'NOT_{band}' for band in NOT_bands)

# NIRI
NIRI_bands = ['J']
valid_filters.extend(f'NIRI_{band}' for band in NIRI_bands)

# Keck LRIS -- blue and red arms.
LRISb_bands = ['U', 'G', 'V', 'B']
valid_filters.extend(f'LRISb_{band}' for band in LRISb_bands)
LRISr_bands = ['V', 'R', 'I']
valid_filters.extend(f'LRISr_{band}' for band in LRISr_bands)

# VISTA (VIRCAM)
VISTA_bands = ['Y', 'J', 'H', 'Ks']
valid_filters.extend(f'VISTA_{band}' for band in VISTA_bands)

# MMT (MMIRS)
MMIRS_bands = ['J', 'H', 'K']
valid_filters.extend(f'MMIRS_{band}' for band in MMIRS_bands)

# 2MASS -- identifier cannot start with a digit, hence MASS_bands.
MASS_bands = ['J', 'H', 'K']
valid_filters.extend(f'2MASS_{band}' for band in MASS_bands)

# HST WFC3
WFC3_bands = ['F300X', 'F110W', 'F160W', 'F763M']
valid_filters.extend(f'WFC3_{band}' for band in WFC3_bands)

# WISE
WISE_bands = ['W1', 'W2', 'W3', 'W4']
valid_filters.extend(f'WISE_{band}' for band in WISE_bands)

# Spitzer (IRAC channels, micron)
Spitzer_bands = ['3.6', '4.5']
valid_filters.extend(f'Spitzer_{band}' for band in Spitzer_bands)

# One flux and one reference entry per filter, plus Galactic extinction.
valid_flux = [entry + '_flux' for entry in valid_filters]
valid_ref = [entry + '_ref' for entry in valid_filters]
valid_photom = valid_filters + ['EBV']  # Galactic
##############################################################
# Line measurements -- Use linetools naming only!!!
valid_neb_lines = [
    'Halpha', # Halpha flux erg/s/cm^2; pPXF
    'Hbeta', # Hbeta flux erg/s/cm^2; pPXF
    'Hgamma', # Hgamma flux erg/s/cm^2; pPXF
    '[NII] 6548', # [NII] 6548 flux erg/s/cm^2;
    '[NII] 6584', # [NII] 6584 flux erg/s/cm^2; pPXF
    '[OII] 3726', # [OII] flux erg/s/cm^2; pPXF
    '[OII] 3729', # [OII] flux erg/s/cm^2; pPXF
    '[OIII] 4959', # [OIII] 4959 flux erg/s/cm^2;
    '[OIII] 5007', # [OIII] 5007 flux erg/s/cm^2; pPXF
]
# Matching reference entries, one per line name.
valid_neb_ref = [entry+'_ref' for entry in valid_neb_lines]
##############################################################
# Morphology
# Galfit-derived morphological quantities.
valid_morphology = [
    'reff_ang', # Effective radius in arcsec; Galfit
    'reff_kpc', # Effective radius in kpc; Galfit
    'n', # Sersic index; Galfit
    'PA', # Position angle (deg); Galfit
    'b/a', # Ellipticity; Galfit
    'ra', # RA centroid inferred from Galfit
    'dec', # DEC centroid inferred from Galfit
    # Bug fix: a duplicate 'n' entry ("Sersic index from Galfit")
    # previously appeared here; the list now contains each quantity once.
]
##############################################################
# Offsets
# Separations between the FRB localization and the host galaxy.
valid_offsets = [
    'ang_best', # Angular offset in arcsec from localization centroid to galaxy
    'ang_avg', # Angular offset in arcsec averaging over localization
    'physical', # Physical offset in kpc; Uses ang_best
]
##############################################################
# Positional (Astrometric and Source) Errors
valid_positional_error = [
    'ra_astrometric', # error for astrometric tie in RA; arcsec
    'dec_astrometric', # error for astrometric tie in Dec; arcsec
    'ra_source', # RA error for source position (e.g. from source extractor); arcsec
    'dec_source', # Dec error for source position; arcsec
]
##############################################################
# Derived quantities
# Quantities inferred from SED fitting (CIGALE) or spectral fitting (pPXF).
valid_derived_photom = [
    'Mstar', # Stellar mass; linear in Msun CIGALE
    'Mstar_spec', # Stellar mass from pPXF; linear in Msun
    'f_AGN', # Fraction of AGN contribution to light; CIGALE
    'u-r', # Rest-frame; CIGALE
    'Lnu_r', # Specific luminosity (J/s/Hz); CIGALE; cosmology dependent
    'M_r', # Absolute magnitude, r-band rest-frame; CIGALE+
    'age_mass', # Age weighted mass from CIGALE
    'SFR_photom', # SFR in Msun/yr from photometry; CIGALE
    'EBV_photom', # E(B-V) from photometry; CIGALE
    'EBV_spec', # E(B-V) from spectral SED; pPXF
    'Z_photom', # Metallicity from photometry; CIGALE
    'Z_spec', # Metallicity from spectra; pPXF
]
valid_derived_nebular = [
    'AV_nebular', # AV from nebular line analysis (e.g. Ha/Hb)
    'SFR_nebular', # SFR in Msun/yr from nebular emission (e.g. Halpha); pPXF+
]
# Union of all derived quantities.
valid_derived = valid_derived_photom + valid_derived_nebular
| bsd-3-clause |
pombredanne/moksha | moksha.wsgi/moksha/wsgi/middleware/middleware.py | 2 | 13408 | # This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Luke Macken <lmacken@redhat.com>
import os
import moksha.common.utils
import logging
import pkg_resources
import warnings
import types
from collections import defaultdict
from paste.deploy.converters import asbool
from inspect import isclass
from sqlalchemy import create_engine
from moksha.common.exc import MokshaException
from moksha.common.lib.helpers import get_moksha_config_path
from moksha.common.lib.helpers import appconfig
log = logging.getLogger(__name__)
# Names of the setuptools entry-point groups that Moksha scans for
# pluggable applications, widgets, the site root, and menus.
APPS = 'moksha.application'
WIDGETS = 'moksha.widget'
ROOT = 'moksha.root'
MENUS = 'moksha.menu'
class MokshaMiddleware(object):
    """
    A layer of WSGI middleware that is responsible for setting up the moksha
    environment, as well as handling every request/response in the application.
    If a request for an application comes in (/apps/$NAME), it will dispatch to
    the WSGI Application or RootController of that application as defined in
    it's egg-info entry-points.
    This middleware also sets up the `moksha.livewidgets` StackedObjectProxy,
    which acts as a registry for Moksha LiveWidget topic callbacks.
    """
    def __init__(self, application, config):
        log.info('Creating Moksha Middleware')
        self.application = application
        self.config = config
        # Process-wide registries, keyed by entry-point name.
        moksha.common.utils._apps = {}
        moksha.common.utils._widgets = {} # {'widget name': tw.api.Widget}
        moksha.common.utils.menus = {} # {'menu name': moksha.api.menus.MokshaMenu}
        self.engines = {} # {'app name': sqlalchemy.engine.base.Engine}
        # Order matters: paths and configs must be discovered before
        # widgets/applications load, and models need the loaded configs.
        self.load_paths()
        self.load_configs()
        self.load_widgets()
        self.load_applications()
        self.load_models()
        self.load_menus()
        self.load_root()
    def __call__(self, environ, start_response):
        # Per-request setup, then delegate to the wrapped application.
        self.register_livewidgets(environ)
        return self.application(environ, start_response)
    def register_livewidgets(self, environ):
        """ Register the `moksha.livewidgets` dictionary.
        This is a per-request StackedObjectProxy that is used by the
        LiveWidgets to register their own topic callbacks. The Moksha Live
        Socket then handles subscribing widgets to their appropriate topics,
        decoding the incoming JSON data, and dispatching messages to them as
        they arrive.
        """
        # NOTE(review): ``moksha.wsgi.lib.utils`` is not imported in this
        # module (only ``moksha.common.utils`` is) -- presumably it is made
        # importable as an attribute by package side effects; confirm.
        environ['paste.registry'].register(moksha.wsgi.lib.utils.livewidgets, {
            'onopen': [],
            'onclose': [],
            'onerror': [],
            'onerrorframe': [],
            'onconnectedframe': [],
            'onmessageframe': defaultdict(list) # {topic: [js_callback,]}
        })
    def load_paths(self):
        """ Load the names and paths of all moksha applications and widgets.
        We must do this before actually loading the widgets or applications, to
        ensure that we parse and load each of their configuration files
        beforehand.
        """
        for app_entry in pkg_resources.iter_entry_points(APPS):
            # Entry-point names must be unique across all installed apps.
            if app_entry.name in moksha.common.utils._apps:
                raise MokshaException('Duplicate application name: %s' %
                                      app_entry.name)
            app_path = app_entry.dist.location
            moksha.common.utils._apps[app_entry.name] = {
                    'name': app_entry.name,
                    'project_name': app_entry.dist.project_name,
                    'path': app_path,
                    }
        for widget_entry in pkg_resources.iter_entry_points(WIDGETS):
            if widget_entry.name in moksha.common.utils._widgets:
                raise MokshaException('Duplicate widget name: %s' %
                                      widget_entry.name)
            widget_path = widget_entry.dist.location
            moksha.common.utils._widgets[widget_entry.name] = {
                    'name': widget_entry.name,
                    'project_name': widget_entry.dist.project_name,
                    'path': widget_path,
                    }
    def load_applications(self):
        log.info('Loading moksha applications')
        for app_entry in pkg_resources.iter_entry_points(APPS):
            log.info('Loading %s application' % app_entry.name)
            app_class = app_entry.load()
            app_path = app_entry.dist.location
            app_name = getattr(app_class, 'name', app_entry.name)
            # Entry points may expose either a class or an instance;
            # instantiate classes here.
            if isclass(app_class):
                app_class = app_class()
            moksha.common.utils._apps[app_entry.name].update({
                    'name': app_name,
                    'controller': app_class,
                    'path': app_path,
                    'model': None,
                    })
            try:
                # Try to import the 'model' module alongside its 'controllers'
                module = '.'.join(app_class.__module__.split('.')[:-2] +
                                  ['model'])
                model = __import__(module, globals(), locals(),
                                   [app_entry.name])
                moksha.common.utils._apps[app_entry.name]['model'] = model
            except ImportError, e:
                # Having no model module is legitimate; keep 'model': None.
                log.debug("Cannot find application model: %r" % module)
    def load_widgets(self):
        """ Load widgets from entry points. """
        log.info('Loading moksha widgets')
        import tw2.core.widgets
        from moksha.wsgi.widgets.api.live import LiveWidgetMeta
        def is_live(widget):
            # Live widgets are detected by their metaclass.
            return isinstance(widget, LiveWidgetMeta)
        for widget_entry in pkg_resources.iter_entry_points(WIDGETS):
            log.info('Loading %s widget' % widget_entry.name)
            widget_class = widget_entry.load()
            # A plain function entry point acts as a widget factory that
            # receives the middleware config.
            if isinstance(widget_class, types.FunctionType):
                widget_class = widget_class(config=self.config)
            widget_path = widget_entry.dist.location
            moksha.common.utils._widgets[widget_entry.name] = {
                'name': getattr(widget_class, 'name', widget_entry.name),
                'widget': widget_class,
                'path': widget_path,
                'live': is_live(widget_class),
                }
    def load_menus(self):
        log.info('Loading moksha menus')
        for menu_entry in pkg_resources.iter_entry_points(MENUS):
            log.info('Loading %s menu' % menu_entry.name)
            menu_class = menu_entry.load()
            menu_path = menu_entry.dist.location
            moksha.common.utils.menus[menu_entry.name] = menu_class(menu_entry.name)
    def load_configs(self):
        """ Load the configuration files for all applications.
        Here we iterate over all applications, loading their configuration
        files and merging their [DEFAULT] configuration into ours. This
        requires that applications do not have conflicting configuration
        variable names. To mitigate this, applications should use some basic
        variable namespacing, such as `myapp.myvariable = myvalue`.
        We first make sure to load up Moksha's configuration, for the cases
        where it is being run as WSGI middleware in a different environment.
        """
        apps = []
        loaded_configs = []
        conf_d = '/etc/moksha/conf.d/%s/'
        moksha_config_path = get_moksha_config_path()
        if moksha_config_path:
            moksha_config_path = os.path.dirname(moksha_config_path)
            apps = [{'path': moksha_config_path}]
        apps += moksha.common.utils._apps.values()
        for app in apps:
            # Prefer production.ini over development.ini; for each, check the
            # app's own directory first, then /etc/moksha/conf.d/<project>/.
            for configfile in ('production.ini', 'development.ini'):
                for path in (app['path'], conf_d % app.get('project_name')):
                    confpath = os.path.join(path, configfile)
                    if os.path.exists(confpath):
                        conf = appconfig('config:' + confpath)
                        if app.get('name'):
                            moksha.common.utils._apps[app['name']]['config'] = conf
                        if confpath in loaded_configs:
                            continue
                        log.info('Loading configuration: %s' % confpath)
                        # This is leftover from the days of using paste.deploy.appconfig. Is anything
                        # using this?
                        # for entry in conf.global_conf:
                        #     if entry.startswith('_'):
                        #         continue
                        #     if entry in config:
                        #         log.warning('Conflicting variable: %s' % entry)
                        #         continue
                        #     else:
                        #         config[entry] = conf.global_conf[entry]
                        #         log.debug('Set `%s` in global config' % entry)
                        loaded_configs.append(confpath)
                        # Stop at the first config file found for this app.
                        break
    def load_models(self):
        """ Setup the SQLAlchemy database models for all moksha applications.
        This method first looks to see if your application has a
        ``sqlalchemy.url`` set in it's configuration file, and will create a
        SQLAlchemy engine with it. If it does not exist, Moksha will create an
        engine for your application based on the ``app_db`` configuration,
        which defaults to ``sqlite:///$APPNAME.db``.
        It will then bind the engine to your model's
        :class:`sqlalchemy.MetaData`, and initialize all of your tables,
        if they don't already exist.
        """
        for name, app in moksha.common.utils._apps.items():
            sa_url = app.get('config', {}).get('sqlalchemy.url', None)
            app_db = self.config.get('app_db', 'sqlite:///%s.db')
            if sa_url:
                if app['config']['__file__'] == get_moksha_config_path():
                    # Moksha's apps don't specify their own SA url
                    self.engines[name] = create_engine(app_db % name)
                else:
                    # App has specified its own engine url
                    self.engines[name] = create_engine(sa_url)
            # If a `model` module exists in the application, call it's
            # `init_model` method,and bind the engine to it's `metadata`.
            if app.get('model'):
                if not sa_url:
                    self.engines[name] = create_engine(app_db % name)
                log.debug('Creating database engine for %s' % app['name'])
                app['model'].init_model(self.engines[name])
                app['model'].metadata.create_all(bind=self.engines[name])
    def load_root(self):
        """ Load the root controller.
        This allows developers to configure Moksha to directly hit their
        TurboGears controller or WSGI app. You can also have the root of your
        website be a single widget.
        This is an example entry-point in your setup.py/pavement.py::
            [moksha.root]
            root = myproject.controllers.root:RootController
        """
        # NOTE(review): this local is never used after initialization.
        root = None
        for root_entry in pkg_resources.iter_entry_points(ROOT):
            log.info('Loading the root of the project: %r' %
                     root_entry.dist.project_name)
            if root_entry.name == 'root':
                root_class = root_entry.load()
                moksha.common.utils.root = root_class
                # TODO: support setting a root widget
                #if issubclass(root_class, Widget):
                #    widget = root_class(root_class.__name__)
                #    moksha.common.utils._widgets[root_entry.name] = {
                #        'name': getattr(root_class, 'name', widget_entry.name),
                #        'widget': widget,
                #        'path': root_entry.dist.location,
                #        }
                # TODO: handle root wsgi apps
            else:
                # NOTE(review): the '%r' below has no argument supplied, so
                # the offending entry name is never interpolated into the
                # log message.
                log.error('Ignoring [moksha.root] entry %r')
                log.error('Please expose at most 1 object on this entry-point,'
                          ' named "root".')
def make_moksha_middleware(app, config):
    """Wrap *app* in the Moksha middleware stack, driven by *config* flags.

    Raises NotImplementedError for the removed ``moksha.connectors`` and
    ``moksha.csrf_protection`` options; otherwise layers the extension-point
    middleware, the core MokshaMiddleware, and (by default) Paste's
    RegistryManager around the application.
    """
    def enabled(option, default):
        # Boolean config lookup, evaluated lazily in the original order.
        return asbool(config.get(option, default))

    if enabled('moksha.connectors', False):
        raise NotImplementedError(
            "moksha.connectors has moved to fedora-community"
        )
    if enabled('moksha.extensionpoints', True):
        from moksha.wsgi.middleware import MokshaExtensionPointMiddleware
        app = MokshaExtensionPointMiddleware(app, config)
    app = MokshaMiddleware(app, config)
    if enabled('moksha.csrf_protection', False):
        raise NotImplementedError(
            "moksha.csrf_protection has been moved to python-fedora")
    if enabled('moksha.registry', True):
        from paste.registry import RegistryManager
        app = RegistryManager(app)
    return app
| apache-2.0 |
rajatsingla28/electron | script/cpplint.py | 7 | 2665 | #!/usr/bin/env python
import fnmatch
import os
import sys
from lib.util import execute
# Files excluded from cpplint: Objective-C/Windows headers and generated
# message files that do not follow the Chromium C++ style checked here.
IGNORE_FILES = [
  os.path.join('atom', 'browser', 'mac', 'atom_application.h'),
  os.path.join('atom', 'browser', 'mac', 'atom_application_delegate.h'),
  os.path.join('atom', 'browser', 'resources', 'win', 'resource.h'),
  os.path.join('atom', 'browser', 'ui', 'cocoa', 'atom_menu_controller.h'),
  os.path.join('atom', 'browser', 'ui', 'cocoa', 'atom_touch_bar.h'),
  os.path.join('atom', 'browser', 'ui', 'cocoa',
               'touch_bar_forward_declarations.h'),
  os.path.join('atom', 'common', 'api', 'api_messages.h'),
  os.path.join('atom', 'common', 'common_message_generator.cc'),
  os.path.join('atom', 'common', 'common_message_generator.h'),
  os.path.join('brightray', 'browser', 'mac',
               'bry_inspectable_web_contents_view.h'),
  os.path.join('brightray', 'browser', 'mac', 'event_dispatching_window.h'),
  os.path.join('brightray', 'browser', 'mac',
               'notification_center_delegate.h'),
  os.path.join('brightray', 'browser', 'win', 'notification_presenter_win7.h'),
  os.path.join('brightray', 'browser', 'win', 'win32_desktop_notifications',
               'common.h'),
  os.path.join('brightray', 'browser', 'win', 'win32_desktop_notifications',
               'desktop_notification_controller.cc'),
  os.path.join('brightray', 'browser', 'win', 'win32_desktop_notifications',
               'desktop_notification_controller.h'),
  os.path.join('brightray', 'browser', 'win', 'win32_desktop_notifications',
               'toast.h'),
  os.path.join('brightray', 'browser', 'win', 'win32_notification.h')
]

# Repository root (this script lives in <root>/script/).
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def main():
  """Lint atom/ and brightray/ C++ sources, skipping IGNORE_FILES.

  Returns None, so ``sys.exit(main())`` exits with status 0 unless
  ``execute`` raises on lint errors.
  """
  os.chdir(SOURCE_ROOT)
  atom_files = list_files('atom',
                          ['app', 'browser', 'common', 'renderer', 'utility'],
                          ['*.cc', '*.h'])
  # set() subtraction drops ignored files but loses the walk ordering.
  call_cpplint(list(set(atom_files) - set(IGNORE_FILES)))
  brightray_files = list_files('brightray', ['browser', 'common'],
                               ['*.cc', '*.h'])
  call_cpplint(list(set(brightray_files) - set(IGNORE_FILES)))
def list_files(parent, directories, filters):
  """Collect files under ``<parent>/<directory>`` matching any glob pattern.

  Walks each requested directory recursively and returns the joined paths
  in os.walk order, applying the patterns in the order given.
  """
  collected = []
  for directory in directories:
    top = os.path.join(parent, directory)
    for root, _, filenames in os.walk(top):
      for pattern in filters:
        collected.extend(os.path.join(root, name)
                         for name in fnmatch.filter(filenames, pattern))
  return collected
def call_cpplint(files):
  """Run the vendored depot_tools cpplint over *files* with this interpreter."""
  cpplint = os.path.join(SOURCE_ROOT, 'vendor', 'depot_tools', 'cpplint.py')
  execute([sys.executable, cpplint] + files)

if __name__ == '__main__':
  sys.exit(main())
| mit |
tkurfurst/deep-learning | face_generation/Project 5 Submission - Face Generation/problem_unittests.py | 159 | 6094 | from copy import deepcopy
from unittest import mock
import tensorflow as tf
def test_safe(func):
    """
    Isolate tests
    """
    # Decorator: run the wrapped test inside a fresh default TF graph so
    # ops created by one test never leak into another.
    def func_wrapper(*args):
        with tf.Graph().as_default():
            result = func(*args)
        print('Tests Passed')
        return result
    return func_wrapper
def _assert_tensor_shape(tensor, shape, display_name):
    # Check rank and per-dimension sizes; None entries in `shape` are
    # wildcards (unknown/batch dimensions).
    # NOTE(review): in graph mode `tf.assert_rank` returns an op, which is
    # always truthy -- this first assert can never fire; confirm intent.
    assert tf.assert_rank(tensor, len(shape), message='{} has wrong rank'.format(display_name))
    tensor_shape = tensor.get_shape().as_list() if len(shape) else []
    wrong_dimension = [ten_dim for ten_dim, cor_dim in zip(tensor_shape, shape)
                       if cor_dim is not None and ten_dim != cor_dim]
    assert not wrong_dimension, \
        '{} has wrong shape.  Found {}'.format(display_name, tensor_shape)
def _check_input(tensor, shape, display_name, tf_name=None):
    # Verify the tensor is a tf.placeholder with the expected shape and,
    # optionally, the expected graph name (e.g. 'input_real:0').
    assert tensor.op.type == 'Placeholder', \
        '{} is not a Placeholder.'.format(display_name)
    _assert_tensor_shape(tensor, shape, 'Real Input')
    if tf_name:
        assert tensor.name == tf_name, \
            '{} has bad name.  Found name {}'.format(display_name, tensor.name)
class TmpMock():
    """Context manager that temporarily replaces an attribute with a MagicMock.

    On construction the attribute's current value is deep-copied and then
    swapped for a ``mock.MagicMock``; leaving the ``with`` block restores
    the saved value.
    """
    def __init__(self, module, attrib_name):
        self.module = module
        self.attrib_name = attrib_name
        # Deep-copy the current value so it can be restored verbatim later.
        self.original_attrib = deepcopy(getattr(module, attrib_name))
        setattr(module, attrib_name, mock.MagicMock())
    def __enter__(self):
        # Hand the freshly installed mock to the ``with`` body.
        return getattr(self.module, self.attrib_name)
    def __exit__(self, exc_type, exc_value, exc_tb):
        setattr(self.module, self.attrib_name, self.original_attrib)
@test_safe
def test_model_inputs(model_inputs):
    """Check that model_inputs returns correctly shaped placeholders."""
    image_width = 28
    image_height = 28
    image_channels = 3
    z_dim = 100
    input_real, input_z, learn_rate = model_inputs(image_width, image_height, image_channels, z_dim)
    _check_input(input_real, [None, image_width, image_height, image_channels], 'Real Input')
    _check_input(input_z, [None, z_dim], 'Z Input')
    # Learning rate must be a scalar placeholder (rank 0).
    _check_input(learn_rate, [], 'Learning Rate')
@test_safe
def test_discriminator(discriminator, tf_module):
    """Check discriminator output shapes and its use of tf.variable_scope.

    variable_scope is mocked so the test can assert it was entered with
    reuse=False on the first (training) call and reuse=True on the second.
    """
    with TmpMock(tf_module, 'variable_scope') as mock_variable_scope:
        image = tf.placeholder(tf.float32, [None, 28, 28, 3])
        output, logits = discriminator(image)
        _assert_tensor_shape(output, [None, 1], 'Discriminator Training(reuse=false) output')
        _assert_tensor_shape(logits, [None, 1], 'Discriminator Training(reuse=false) Logits')
        assert mock_variable_scope.called,\
            'tf.variable_scope not called in Discriminator Training(reuse=false)'
        assert mock_variable_scope.call_args == mock.call('discriminator', reuse=False), \
            'tf.variable_scope called with wrong arguments in Discriminator Training(reuse=false)'
        mock_variable_scope.reset_mock()
        output_reuse, logits_reuse = discriminator(image, True)
        _assert_tensor_shape(output_reuse, [None, 1], 'Discriminator Inference(reuse=True) output')
        _assert_tensor_shape(logits_reuse, [None, 1], 'Discriminator Inference(reuse=True) Logits')
        assert mock_variable_scope.called, \
            'tf.variable_scope not called in Discriminator Inference(reuse=True)'
        assert mock_variable_scope.call_args == mock.call('discriminator', reuse=True), \
            'tf.variable_scope called with wrong arguments in Discriminator Inference(reuse=True)'
@test_safe
def test_generator(generator, tf_module):
    """Check generator output shape and its use of tf.variable_scope.

    The generator must produce 28x28 images with the requested channel
    count, entering its scope with reuse=False while training and
    reuse=True during inference.
    """
    with TmpMock(tf_module, 'variable_scope') as mock_variable_scope:
        z = tf.placeholder(tf.float32, [None, 100])
        out_channel_dim = 5
        output = generator(z, out_channel_dim)
        _assert_tensor_shape(output, [None, 28, 28, out_channel_dim], 'Generator output (is_train=True)')
        assert mock_variable_scope.called, \
            'tf.variable_scope not called in Generator Training(reuse=false)'
        assert mock_variable_scope.call_args == mock.call('generator', reuse=False), \
            'tf.variable_scope called with wrong arguments in Generator Training(reuse=false)'
        mock_variable_scope.reset_mock()
        output = generator(z, out_channel_dim, False)
        _assert_tensor_shape(output, [None, 28, 28, out_channel_dim], 'Generator output (is_train=False)')
        assert mock_variable_scope.called, \
            'tf.variable_scope not called in Generator Inference(reuse=True)'
        assert mock_variable_scope.call_args == mock.call('generator', reuse=True), \
            'tf.variable_scope called with wrong arguments in Generator Inference(reuse=True)'
@test_safe
def test_model_loss(model_loss):
    """Check that model_loss returns two scalar loss tensors."""
    out_channel_dim = 4
    input_real = tf.placeholder(tf.float32, [None, 28, 28, out_channel_dim])
    input_z = tf.placeholder(tf.float32, [None, 100])
    d_loss, g_loss = model_loss(input_real, input_z, out_channel_dim)
    _assert_tensor_shape(d_loss, [], 'Discriminator Loss')
    # NOTE(review): this re-checks d_loss; presumably g_loss was intended
    # for the 'Generator Loss' assertion.
    _assert_tensor_shape(d_loss, [], 'Generator Loss')
@test_safe
def test_model_opt(model_opt, tf_module):
    """Check that model_opt builds both training ops.

    tf.trainable_variables is mocked to return one variable from each of
    the 'discriminator' and 'generator' scopes so model_opt can split the
    variable lists without building real networks.
    """
    with TmpMock(tf_module, 'trainable_variables') as mock_trainable_variables:
        with tf.variable_scope('discriminator'):
            discriminator_logits = tf.Variable(tf.zeros([3, 3]))
        with tf.variable_scope('generator'):
            generator_logits = tf.Variable(tf.zeros([3, 3]))
        mock_trainable_variables.return_value = [discriminator_logits, generator_logits]
        d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=discriminator_logits,
            labels=[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]))
        g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=generator_logits,
            labels=[[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]))
        learning_rate = 0.001
        beta1 = 0.9
        d_train_opt, g_train_opt = model_opt(d_loss, g_loss, learning_rate, beta1)
        assert mock_trainable_variables.called,\
            'tf.mock_trainable_variables not called'
| mit |
nikdoof/posmaster | posmaster/poscore/models/types.py | 1 | 3205 | from django.conf import settings
from django.db import models
class TypeCategory(models.Model):
    """Top level of the EVE type hierarchy (invCategories)."""
    # Primary key mirrors the CCP static-data category ID.
    id = models.BigIntegerField('Type Category ID', primary_key=True)
    name = models.CharField('Type Category Name', max_length=200)
    class Meta:
        app_label = 'poscore'
    def __unicode__(self):
        return self.name
class TypeGroup(models.Model):
    """Group of item types within a :class:`TypeCategory` (invGroups)."""
    # Primary key mirrors the CCP static-data group ID.
    id = models.BigIntegerField('Type Group ID', primary_key=True)
    category = models.ForeignKey(TypeCategory, related_name='groups')
    name = models.CharField('Type Group Name', max_length=200)
    class Meta:
        app_label = 'poscore'
    def __unicode__(self):
        return self.name
class Type(models.Model):
    """Represents a EVE InvType"""
    # Primary key mirrors the CCP static-data type ID.
    id = models.BigIntegerField('Type ID', primary_key=True)
    group = models.ForeignKey(TypeGroup, related_name='types')
    name = models.CharField('Type Name', max_length=200)
    capacity = models.BigIntegerField('Capacity')
    @property
    def image(self):
        # URL of the type icon on the EVE image server; size comes from
        # settings (default 64px).
        return '%s/Type/%s_%s.png' % (getattr(settings, 'EVE_IMAGESERVER_URL', 'https://image.eveonline.com'), self.pk, getattr(settings, 'EVE_IMAGESERVER_TYPESIZE', 64))
    def render(self, size):
        """Return the render-image URL at *size* px (multiple of 32, <=512)."""
        # NOTE(review): size=0 and negative multiples of 32 pass this check;
        # confirm whether a positive lower bound should be enforced.
        if size % 32:
            raise ValueError('Size isn\'t a multiple of 32')
        if size > 512:
            raise ValueError('Size is too large (max 512px)')
        return '%s/Render/%s_%s.png' % (getattr(settings, 'EVE_IMAGESERVER_URL', 'https://image.eveonline.com'), self.pk, size)
    @property
    def attributes_list(self):
        # (display name, formatted value) pairs for template rendering.
        return [(attr.attribute.display_name or attr.attribute.name, attr.get_value_display()) for attr in self.attributes.all()]
    class Meta:
        app_label = 'poscore'
    def __unicode__(self):
        return self.name
class UnitType(models.Model):
    """Measurement unit for attribute values (eveUnits)."""
    id = models.BigIntegerField('Unit ID', primary_key=True)
    name = models.CharField('Unit Name', max_length=200)
    # Short suffix shown after values, e.g. 'm' or 'GJ'.
    display_name = models.CharField('Display Name', max_length=200)
    class Meta:
        app_label = 'poscore'
    def __unicode__(self):
        return self.name
class AttributeType(models.Model):
    """Definition of a dogma attribute (dgmAttributeTypes)."""
    id = models.BigIntegerField('Attribute ID', primary_key=True)
    name = models.CharField('Attribute Name', max_length=200)
    display_name = models.CharField('Display Name', max_length=200)
    # Unit is optional; '+' disables the reverse relation on UnitType.
    unit = models.ForeignKey(UnitType, related_name='+', null=True)
    class Meta:
        app_label = 'poscore'
    def __unicode__(self):
        return self.name
class TypeAttribute(models.Model):
    """Attribute value attached to a :class:`Type` (dgmTypeAttributes).

    Exactly one of ``valueint``/``valuefloat`` is normally populated.
    """
    type = models.ForeignKey(Type, related_name='attributes')
    attribute = models.ForeignKey(AttributeType, related_name='+')
    valueint = models.BigIntegerField('Int Value', null=True)
    valuefloat = models.FloatField('Float Value', null=True)
    @property
    def value(self):
        """Return the float value if set, otherwise the int value.

        Bug fix: the previous ``self.valuefloat or self.valueint`` wrongly
        fell back to ``valueint`` when the stored float was exactly 0.0;
        compare against ``None`` explicitly instead.
        """
        if self.valuefloat is not None:
            return self.valuefloat
        return self.valueint
    def get_value_display(self):
        """Human-readable value, suffixed with the unit display name if any."""
        if self.attribute.unit:
            # NOTE(review): '%d' truncates fractional float values -- confirm
            # whether they should instead be rendered with their decimals.
            return u'%d%s' % (self.value, self.attribute.unit.display_name)
        return self.value
    class Meta:
        app_label = 'poscore'
    def __unicode__(self):
        return self.attribute.name
alshedivat/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/dataset_serialization_test_base.py | 8 | 25450 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for testing serializable datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import nest
def remove_variants(get_next_op):
  # TODO(b/72408568): Remove this once session.run can get
  # variant tensors.
  """Remove variants from a nest structure, so sess.run will execute."""
  def _strip(element):
    # Variant-dtype tensors cannot be fetched; replace them with ().
    is_variant = isinstance(element, ops.Tensor) and element.dtype == dtypes.variant
    return () if is_variant else element
  return nest.map_structure(_strip, get_next_op)
class DatasetSerializationTestBase(test.TestCase):
"""Base class for testing serializable datasets."""
  def tearDown(self):
    # Remove any checkpoint files written by the test case so state never
    # leaks between tests.
    self._delete_ckpt()
  # TODO(b/72657739): Remove sparse_tensor argument, which is to test the
  # (deprecated) saveable `SparseTensorSliceDataset`, once the API
  # `from_sparse_tensor_slices()`and related tests are deleted.
  def run_core_tests(self, ds_fn1, ds_fn2, num_outputs, sparse_tensors=False):
    """Runs the core tests.
    Args:
      ds_fn1: 0-argument function that returns a Dataset.
      ds_fn2: 0-argument function that returns a Dataset different from
        ds_fn1. If None, verify_restore_in_modified_graph test is not run.
      num_outputs: Total number of outputs expected from this Dataset.
      sparse_tensors: Whether dataset is built from SparseTensor(s).
    Raises:
      AssertionError if any test fails.
    """
    # Each verify_* helper below exercises one save/restore scenario.
    self.verify_unused_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_fully_used_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_exhausted_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_init_before_restore(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_multiple_breaks(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_reset_restored_iterator(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    self.verify_restore_in_empty_graph(
        ds_fn1, num_outputs, sparse_tensors=sparse_tensors)
    # The modified-graph scenario needs a second, different dataset.
    if ds_fn2:
      self.verify_restore_in_modified_graph(
          ds_fn1, ds_fn2, num_outputs, sparse_tensors=sparse_tensors)
  def verify_unused_iterator(self,
                             ds_fn,
                             num_outputs,
                             sparse_tensors=False,
                             verify_exhausted=True):
    """Verifies that saving and restoring an unused iterator works.
    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      sparse_tensors: See `run_core_tests`.
      verify_exhausted: See `gen_outputs`.
    Raises:
      AssertionError if any test fails.
    """
    # Break point 0 == checkpoint taken before any element is produced.
    self.verify_run_with_breaks(
        ds_fn, [0],
        num_outputs,
        sparse_tensors=sparse_tensors,
        verify_exhausted=verify_exhausted)
  def verify_fully_used_iterator(self, ds_fn, num_outputs,
                                 sparse_tensors=False):
    """Verifies that saving and restoring a fully used iterator works.
    Note that this only checks saving and restoring an iterator from which
    `num_outputs` items have been produced but does not check for an
    exhausted iterator, i.e., one from which an OutOfRange error has been
    returned.
    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      sparse_tensors: See `run_core_tests`.
    Raises:
      AssertionError if test fails.
    """
    # Break point at num_outputs == checkpoint after the final element.
    self.verify_run_with_breaks(
        ds_fn, [num_outputs], num_outputs, sparse_tensors=sparse_tensors)
  def verify_exhausted_iterator(self, ds_fn, num_outputs, sparse_tensors=False):
    """Verifies that saving and restoring an exhausted iterator works.
    An exhausted iterator is one which has returned an OutOfRange error.
    Args:
      ds_fn: See `run_core_tests`.
      num_outputs: See `run_core_tests`.
      sparse_tensors: See `run_core_tests`.
    Raises:
      AssertionError if any test fails.
    """
    # Drain the iterator completely (checkpoint is saved at the end).
    self.gen_outputs(
        ds_fn, [],
        num_outputs,
        verify_exhausted=True,
        sparse_tensors=sparse_tensors)
    # Restoring the exhausted iterator must immediately report OutOfRange
    # and yield no further elements.
    actual = self.gen_outputs(
        ds_fn, [],
        0,
        ckpt_saved=True,
        verify_exhausted=True,
        sparse_tensors=sparse_tensors)
    self.assertEqual(len(actual), 0)
def verify_init_before_restore(self,
ds_fn,
num_outputs,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that restoring into an already initialized iterator works.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs),
num_outputs,
init_before_restore=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_multiple_breaks(self,
ds_fn,
num_outputs,
num_breaks=10,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to save/restore at multiple break points.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
num_breaks: The number of break points. These are uniformly spread in
[0, num_outputs] both inclusive.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
self.verify_run_with_breaks(
ds_fn,
self.gen_break_points(num_outputs, num_breaks),
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
def verify_reset_restored_iterator(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to re-initialize a restored iterator.
This is useful when restoring a training checkpoint during validation.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Collect ground truth containing all outputs.
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Skip some items and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Restore from checkpoint and then run init_op.
with ops.Graph().as_default() as g:
saver = self._import_meta_graph()
init_op, get_next_op = self._get_iterator_ops_from_collection(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
self._initialize(init_op, sess)
for _ in range(num_outputs):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_modified_graph(self,
ds_fn1,
ds_fn2,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in a modified graph.
Builds an input pipeline using ds_fn1, runs it for `break_point` steps
and saves a checkpoint. Then builds a new graph using ds_fn2, restores
the checkpoint from ds_fn1 and verifies that the restore is successful.
Args:
ds_fn1: See `run_core_tests`.
ds_fn2: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn1
# in `expected`.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn1, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn1 and save checkpoint.
self.gen_outputs(
ds_fn1, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build graph for ds_fn2 but load checkpoint for ds_fn1.
with ops.Graph().as_default() as g:
_, get_next_op, saver = self._build_graph(
ds_fn2, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_restore_in_empty_graph(self,
ds_fn,
num_outputs,
break_point=None,
sparse_tensors=False,
verify_exhausted=True):
"""Attempts to restore an iterator in an empty graph.
Builds an input pipeline using ds_fn, runs it for `break_point` steps
and saves a checkpoint. Then builds a new empty graph, restores
the checkpoint from ds_fn and verifies that the restore is successful.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
# Skip `break_point` items and store the remaining produced from ds_fn
# in `expected`.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
expected = self.gen_outputs(
ds_fn, [],
num_outputs - break_point,
ckpt_saved=True,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
# Generate `break_point` items from ds_fn and save checkpoint.
self.gen_outputs(
ds_fn, [],
break_point,
sparse_tensors=sparse_tensors,
verify_exhausted=False)
actual = []
# Build an empty graph but load checkpoint for ds_fn.
with ops.Graph().as_default() as g:
get_next_op, saver = self._build_empty_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._restore(saver, sess)
for _ in range(num_outputs - break_point):
actual.append(sess.run(get_next_op))
if verify_exhausted:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
self.match(expected, actual)
def verify_error_on_save(self,
ds_fn,
num_outputs,
error,
break_point=None,
sparse_tensors=False):
"""Attempts to save a non-saveable iterator.
Args:
ds_fn: See `run_core_tests`.
num_outputs: See `run_core_tests`.
error: Declared error when trying to save iterator.
break_point: Break point. Optional. Defaults to num_outputs/2.
sparse_tensors: See `run_core_tests`.
Raises:
AssertionError if any test fails.
"""
break_point = num_outputs // 2 if not break_point else break_point
with ops.Graph().as_default() as g:
init_op, get_next_op, saver = self._build_graph(
ds_fn, sparse_tensors=sparse_tensors)
get_next_op = remove_variants(get_next_op)
with self.session(graph=g) as sess:
self._initialize(init_op, sess)
for _ in range(break_point):
sess.run(get_next_op)
with self.assertRaises(error):
self._save(sess, saver)
def verify_run_with_breaks(self,
ds_fn,
break_points,
num_outputs,
init_before_restore=False,
sparse_tensors=False,
verify_exhausted=True):
"""Verifies that ds_fn() produces the same outputs with and without breaks.
1. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
*without* stopping at break points.
2. Builds a Dataset using `ds_fn` and produces `num_outputs` items from it
with stopping at break points.
Deep matches outputs from 1 and 2.
Args:
ds_fn: See `gen_outputs`.
break_points: See `gen_outputs`.
num_outputs: See `gen_outputs`.
init_before_restore: See `gen_outputs`.
sparse_tensors: See `run_core_tests`.
verify_exhausted: See `gen_outputs`.
Raises:
AssertionError if any test fails.
"""
expected = self.gen_outputs(
ds_fn, [],
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
actual = self.gen_outputs(
ds_fn,
break_points,
num_outputs,
init_before_restore=init_before_restore,
sparse_tensors=sparse_tensors,
verify_exhausted=verify_exhausted)
self.match(expected, actual)
  def gen_outputs(self,
                  ds_fn,
                  break_points,
                  num_outputs,
                  ckpt_saved=False,
                  init_before_restore=False,
                  sparse_tensors=False,
                  verify_exhausted=True,
                  save_checkpoint_at_end=True):
    """Generates elements from input dataset while stopping at break points.
    Produces `num_outputs` outputs and saves the state of the iterator in the
    Saver checkpoint.
    Args:
      ds_fn: 0-argument function that returns the dataset.
      break_points: A list of integers. For each `break_point` in
        `break_points`, we produce outputs till `break_point` number of items
        have been produced and then checkpoint the state. The current graph
        and session are destroyed and a new graph and session are used to
        produce outputs till next checkpoint or till `num_outputs` elements
        have been produced. `break_point` must be <= `num_outputs`.
      num_outputs: The total number of outputs to produce from the iterator.
      ckpt_saved: Whether a checkpoint already exists. If False, we build the
        graph from ds_fn.
      init_before_restore: Whether init should be called before saver.restore.
        This is just so that we can verify that restoring an already initialized
        iterator works.
      sparse_tensors: Whether dataset is built from SparseTensor(s).
      verify_exhausted: Whether to verify that the iterator has been exhausted
        after producing `num_outputs` elements.
      save_checkpoint_at_end: Whether to save a checkpoint after producing all
        outputs. If False, checkpoints are saved each break point but not at the
        end. Note that checkpoints overwrite each other so there is always only
        a single checkpoint available. Defaults to True.
    Returns:
      A list of `num_outputs` items.
    """
    outputs = []
    # Builds a fresh graph (or imports the saved MetaGraph once a
    # checkpoint exists) and returns its iterator ops.
    def get_ops():
      if ckpt_saved:
        saver = self._import_meta_graph()
        init_op, get_next_op = self._get_iterator_ops_from_collection(
            ds_fn, sparse_tensors=sparse_tensors)
      else:
        init_op, get_next_op, saver = self._build_graph(
            ds_fn, sparse_tensors=sparse_tensors)
      return init_op, get_next_op, saver
    # One iteration per segment: [0, bp0], (bp0, bp1], ..., (bp_last, num_outputs].
    # Each segment runs in a brand-new graph and session.
    for i in range(len(break_points) + 1):
      with ops.Graph().as_default() as g:
        init_op, get_next_op, saver = get_ops()
        get_next_op = remove_variants(get_next_op)
        with self.session(graph=g) as sess:
          if ckpt_saved:
            if init_before_restore:
              self._initialize(init_op, sess)
            self._restore(saver, sess)
          else:
            self._initialize(init_op, sess)
          # Number of elements this segment must produce.
          start = break_points[i - 1] if i > 0 else 0
          end = break_points[i] if i < len(break_points) else num_outputs
          num_iters = end - start
          for _ in range(num_iters):
            outputs.append(sess.run(get_next_op))
          if i == len(break_points) and verify_exhausted:
            with self.assertRaises(errors.OutOfRangeError):
              sess.run(get_next_op)
          if save_checkpoint_at_end or i < len(break_points):
            self._save(sess, saver)
            # From now on every later segment restores instead of rebuilding.
            ckpt_saved = True
    return outputs
def match(self, expected, actual):
"""Matches nested structures.
Recursively matches shape and values of `expected` and `actual`.
Handles scalars, numpy arrays and other python sequence containers
e.g. list, dict.
Args:
expected: Nested structure 1.
actual: Nested structure 2.
Raises:
AssertionError if matching fails.
"""
if isinstance(expected, np.ndarray):
expected = expected.tolist()
if isinstance(actual, np.ndarray):
actual = actual.tolist()
self.assertEqual(type(expected), type(actual))
if nest.is_sequence(expected):
self.assertEqual(len(expected), len(actual))
if isinstance(expected, dict):
for key1, key2 in zip(sorted(expected), sorted(actual)):
self.assertEqual(key1, key2)
self.match(expected[key1], actual[key2])
else:
for item1, item2 in zip(expected, actual):
self.match(item1, item2)
else:
self.assertEqual(expected, actual)
def does_not_match(self, expected, actual):
with self.assertRaises(AssertionError):
self.match(expected, actual)
def gen_break_points(self, num_outputs, num_samples=10):
"""Generates `num_samples` breaks points in [0, num_outputs]."""
return np.linspace(0, num_outputs, num_samples, dtype=int)
  def _build_graph(self, ds_fn, sparse_tensors=False):
    """Builds iterator, saveable and saver ops for `ds_fn` in the default graph.

    Returns:
      Tuple of (init_op, get_next, saver).
    """
    iterator = ds_fn().make_initializable_iterator()
    # Register the iterator state so the Saver below checkpoints it.
    saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    init_op = iterator.initializer
    if sparse_tensors:
      # The deprecated sparse path yields (indices, values, dense_shape)
      # components which are re-wrapped into a single SparseTensor.
      get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    else:
      get_next = iterator.get_next()
    # Stash the ops in a collection so they can be recovered after a
    # MetaGraph import (see `_get_iterator_ops_from_collection`).
    self._add_iterator_ops_to_collection(init_op, get_next, ds_fn,
                                         sparse_tensors)
    saver = saver_lib.Saver(allow_empty=True)
    return init_op, get_next, saver
  def _build_empty_graph(self, ds_fn, sparse_tensors=False):
    """Builds a structure-only iterator (no dataset) plus a saver.

    The iterator is created from the output structure of `ds_fn()` alone, so
    the graph contains no input pipeline; state comes entirely from a restore.

    Returns:
      Tuple of (get_next, saver).
    """
    iterator = iterator_ops.Iterator.from_structure(
        self._get_output_types(ds_fn),
        output_shapes=self._get_output_shapes(ds_fn),
        output_classes=self._get_output_classes(ds_fn))
    # Register the iterator state so the Saver below restores into it.
    saveable = contrib_iterator_ops.make_saveable_from_iterator(iterator)
    ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
    if sparse_tensors:
      get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    else:
      get_next = iterator.get_next()
    saver = saver_lib.Saver(allow_empty=True)
    return get_next, saver
  def _add_iterator_ops_to_collection(self,
                                      init_op,
                                      get_next,
                                      ds_fn,
                                      sparse_tensors=False):
    """Serializes (init_op, get_next) into the "iterator_ops" collection.

    The write order here is the implicit wire format that
    `_get_iterator_ops_from_collection` depends on: init_op first, then the
    flattened `get_next` components (3 per SparseTensor, 1 otherwise).
    """
    ops.add_to_collection("iterator_ops", init_op)
    # `get_next` may be a tuple e.g. in TensorSliceDataset. Since Collections
    # do not support tuples we flatten the tensors and restore the shape in
    # `_get_iterator_ops_from_collection`.
    if sparse_tensors:  # specific for deprecated `from_sparse_tensor_slices`.
      ops.add_to_collection("iterator_ops", get_next.indices)
      ops.add_to_collection("iterator_ops", get_next.values)
      ops.add_to_collection("iterator_ops", get_next.dense_shape)
      return
    get_next_list = nest.flatten(get_next)
    for i, output_class in enumerate(
        nest.flatten(self._get_output_classes(ds_fn))):
      if output_class is sparse_tensor.SparseTensor:
        # SparseTensors are stored as their three component tensors.
        ops.add_to_collection("iterator_ops", get_next_list[i].indices)
        ops.add_to_collection("iterator_ops", get_next_list[i].values)
        ops.add_to_collection("iterator_ops", get_next_list[i].dense_shape)
      else:
        ops.add_to_collection("iterator_ops", get_next_list[i])
  def _get_iterator_ops_from_collection(self, ds_fn, sparse_tensors=False):
    """Reassembles (init_op, get_next) from the "iterator_ops" collection.

    Inverse of `_add_iterator_ops_to_collection`: entry 0 is the init op and
    the remaining entries are the flattened `get_next` components.
    """
    all_ops = ops.get_collection("iterator_ops")
    if sparse_tensors:  # specific for deprecated `from_sparse_tensor_slices`.
      init_op, indices, values, dense_shape = all_ops
      return init_op, sparse_tensor.SparseTensor(indices, values, dense_shape)
    get_next_list = []
    # Start at 1: all_ops[0] holds the init op.
    i = 1
    for output_class in nest.flatten(self._get_output_classes(ds_fn)):
      if output_class is sparse_tensor.SparseTensor:
        # SparseTensors occupy three consecutive collection slots.
        indices, values, dense_shape = all_ops[i:i + 3]
        i += 3
        get_next_list.append(
            sparse_tensor.SparseTensor(indices, values, dense_shape))
      else:
        get_next_list.append(all_ops[i])
        i += 1
    return all_ops[0], nest.pack_sequence_as(
        self._get_output_types(ds_fn), get_next_list)
def _get_output_types(self, ds_fn):
with ops.Graph().as_default():
return ds_fn().output_types
def _get_output_shapes(self, ds_fn):
with ops.Graph().as_default():
return ds_fn().output_shapes
def _get_output_classes(self, ds_fn):
with ops.Graph().as_default():
return ds_fn().output_classes
def _ckpt_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _latest_ckpt(self):
return checkpoint_management.latest_checkpoint(self.get_temp_dir())
def _save(self, sess, saver):
saver.save(sess, self._ckpt_path())
def _restore(self, saver, sess):
sess.run(lookup_ops.tables_initializer())
saver.restore(sess, self._latest_ckpt())
  def _initialize(self, init_op, sess):
    """Runs variable and table initializers, then the iterator init op."""
    sess.run(variables.global_variables_initializer())
    sess.run(lookup_ops.tables_initializer())
    sess.run(init_op)
def _import_meta_graph(self):
meta_file_path = self._ckpt_path() + ".meta"
return saver_lib.import_meta_graph(meta_file_path)
def _delete_ckpt(self):
# Remove all checkpoint files.
prefix = self._ckpt_path()
pattern = prefix + "*"
files = gfile.Glob(pattern)
map(gfile.Remove, files)
| apache-2.0 |
Cadair/ginga | ginga/misc/tests/test_Callback.py | 4 | 13315 | #
# Unit Tests for the Callbacks class
#
# Rajul Srivastava (rajul09@gmail.com)
#
import unittest
import logging
import numpy as np
import ginga.misc.Callback as Callback
class TestError(Exception):
    """Exception type reserved for use by tests in this module."""
    pass
class TestCallbacks(unittest.TestCase):
    """Unit tests for ginga.misc.Callback.Callbacks.

    Covers construction, enabling/clearing/deleting callback names,
    add_callback/set_callback registration (with and without extra
    arguments), and make_callback dispatch semantics (return-value
    aggregation and exception swallowing).
    """
    def setUp(self):
        pass
    def test_init(self):
        # A fresh Callbacks object starts with an empty callback registry.
        test_callbacks = Callback.Callbacks()
        assert isinstance(test_callbacks.cb, dict)
        expected = 0
        actual = len(test_callbacks.cb)
        assert expected == actual
        expected = {}
        actual = test_callbacks.cb
        assert expected == actual
    def test_clear_callback_empties_list(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            pass
        test_callbacks.cb["test_name"] = [(test_callbacks, (), {}), ]
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        test_callbacks.clear_callback("test_name")
        expected = 0
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
    def test_clear_callback_nonexistent_name(self):
        # Clearing an unknown name implicitly creates an empty entry.
        test_callbacks = Callback.Callbacks()
        assert "unknown_callback_key" not in test_callbacks.cb
        test_callbacks.clear_callback("unknown_callback_key")
        assert "unknown_callback_key" in test_callbacks.cb
        assert isinstance(test_callbacks.cb["unknown_callback_key"], list)
        expected = 0
        actual = len(test_callbacks.cb["unknown_callback_key"])
        assert expected == actual
    def test_enable_callback_nonexistent_name(self):
        test_callbacks = Callback.Callbacks()
        assert "unknown_callback_key" not in test_callbacks.cb
        test_callbacks.enable_callback("unknown_callback_key")
        assert "unknown_callback_key" in test_callbacks.cb
        assert isinstance(test_callbacks.cb["unknown_callback_key"], list)
        expected = 0
        actual = len(test_callbacks.cb["unknown_callback_key"])
        assert expected == actual
    def test_enable_callback_already_existent_name(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            pass
        test_callbacks.cb["test_name"] = [(test_callbacks, (), {}), ]
        assert "test_name" in test_callbacks.cb
        assert isinstance(test_callbacks.cb["test_name"], list)
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        test_callbacks.enable_callback("test_name")
        # testing that enable_callback() causes no change
        assert "test_name" in test_callbacks.cb
        assert isinstance(test_callbacks.cb["test_name"], list)
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
    def test_has_callback_existent_name(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            pass
        test_callbacks.cb["test_name"] = [(test_callbacks, (), {}), ]
        expected = True
        actual = test_callbacks.has_callback("test_name")
        assert expected == actual
    def test_has_callback_non_existent_name(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            pass
        test_callbacks.cb["test_name"] = [(test_callbacks, (), {}), ]
        expected = False
        actual = test_callbacks.has_callback("non_existent_name")
        assert expected == actual
    def test_has_callback_non_existent_name_empty_dict(self):
        test_callbacks = Callback.Callbacks()
        expected = False
        actual = test_callbacks.has_callback("non_existent_name")
        assert expected == actual
    def test_delete_callback_existent_name(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            pass
        test_callbacks.cb["test_name"] = [(test_callbacks, (), {}), ]
        assert "test_name" in test_callbacks.cb
        test_callbacks.delete_callback("test_name")
        assert "test_name" not in test_callbacks.cb
    def test_delete_callback_non_existent_name(self):
        # Deleting an unregistered name must raise CallbackError.
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            pass
        test_callbacks.cb["test_name"] = [(test_callbacks, (), {}), ]
        self.assertRaises(
            Callback.CallbackError,
            test_callbacks.delete_callback,
            "non_existent_name"
        )
    def test_delete_callback_non_existent_name_empty_dict(self):
        test_callbacks = Callback.Callbacks()
        self.assertRaises(
            Callback.CallbackError,
            test_callbacks.delete_callback,
            "non_existent_name"
        )
    def test_add_callback(self):
        # Callbacks are stored as (function, args, kwargs) tuples in
        # registration order.
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            pass
        test_callbacks.enable_callback("test_name")
        assert "test_name" in test_callbacks.cb
        test_callbacks.add_callback("test_name", test_callback_function)
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        expected = (test_callback_function, (), {})
        actual = test_callbacks.cb["test_name"][0]
        assert expected == actual
        def another_test_callback_function(obj, *args, **kwargs):
            pass
        test_callbacks.add_callback(
            "test_name", another_test_callback_function)
        expected = 2
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        expected = (another_test_callback_function, (), {})
        actual = test_callbacks.cb["test_name"][1]
        assert expected == actual
    def test_add_callback_arguments(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            pass
        test_callbacks.enable_callback("test_name")
        assert "test_name" in test_callbacks.cb
        test_callbacks.add_callback(
            "test_name",
            test_callback_function,
            'test_arg_1',
            'test_arg_2',
            test_keyword_arg1="test",
            test_keyword_arg2="test"
        )
        assert "test_name" in test_callbacks.cb
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        expected = (
            test_callback_function,
            ('test_arg_1', 'test_arg_2'),
            {'test_keyword_arg1': 'test', 'test_keyword_arg2': 'test'}
        )
        actual = test_callbacks.cb["test_name"][0]
        assert expected == actual
    def test_add_callback_exception(self):
        # add_callback requires the name to have been enabled first.
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            pass
        self.assertRaises(
            Callback.CallbackError,
            test_callbacks.add_callback,
            "test_name",
            test_callback_function
        )
    def test_set_callback(self):
        # set_callback, unlike add_callback, auto-enables the name.
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            pass
        test_callbacks.set_callback("test_name", test_callback_function)
        assert "test_name" in test_callbacks.cb
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        expected = (test_callback_function, (), {})
        actual = test_callbacks.cb["test_name"][0]
        assert expected == actual
        def another_test_callback_function(obj, *args, **kwargs):
            pass
        test_callbacks.set_callback(
            "test_name", another_test_callback_function)
        expected = 2
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        expected = (another_test_callback_function, (), {})
        actual = test_callbacks.cb["test_name"][1]
        assert expected == actual
    def test_set_callback_arguments(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            pass
        test_callbacks.set_callback(
            "test_name",
            test_callback_function,
            'test_arg_1',
            'test_arg_2',
            test_keyword_arg1="test",
            test_keyword_arg2="test"
        )
        assert "test_name" in test_callbacks.cb
        expected = 1
        actual = len(test_callbacks.cb["test_name"])
        assert expected == actual
        expected = (
            test_callback_function,
            ('test_arg_1', 'test_arg_2'),
            {'test_keyword_arg1': 'test', 'test_keyword_arg2': 'test'}
        )
        actual = test_callbacks.cb["test_name"][0]
        assert expected == actual
    def test_make_callback_non_existent_name(self):
        test_callbacks = Callback.Callbacks()
        expected = None
        actual = test_callbacks.make_callback("non_existent_event_name")
        assert expected == actual
    def test_make_callback_empty_callback_list(self):
        test_callbacks = Callback.Callbacks()
        test_callbacks.enable_callback("known_name")
        assert "known_name" in test_callbacks.cb
        expected = False
        actual = test_callbacks.make_callback("known_name")
        assert expected == actual
    def test_make_callback_single_callback_true(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            return True
        test_callbacks.set_callback("test_name", test_callback_function)
        expected = True
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def test_make_callback_single_callback_false(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            return False
        test_callbacks.set_callback("test_name", test_callback_function)
        expected = False
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def test_make_callback_multiple_callback_all_true(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            return True
        def another_test_callback_function(obj, *args, **kwargs):
            return True
        test_callbacks.set_callback("test_name", test_callback_function)
        test_callbacks.set_callback(
            "test_name", another_test_callback_function)
        expected = True
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def test_make_callback_multiple_callback_some_true(self):
        # make_callback OR-combines the return values of all callbacks.
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            return False
        def another_test_callback_function(obj, *args, **kwargs):
            return True
        test_callbacks.set_callback("test_name", test_callback_function)
        test_callbacks.set_callback(
            "test_name", another_test_callback_function)
        expected = True
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def test_make_callback_multiple_callback_all_false(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function(obj, *args, **kwargs):
            return False
        def another_test_callback_function(obj, *args, **kwargs):
            return False
        test_callbacks.set_callback("test_name", test_callback_function)
        test_callbacks.set_callback(
            "test_name", another_test_callback_function)
        expected = False
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def test_make_callback_raises_no_exception(self):
        test_callbacks = Callback.Callbacks()
        # This function when used as a callback should raise a TypeError
        # as the callbacks, from the logic in ginga.misc.Callback.Callbacks
        # always take the calling object as the first argument
        def test_callback_function():
            return True
        test_callbacks.set_callback("test_name", test_callback_function)
        # Checking that the callback eats up the TypeError exception
        expected = False
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def test_make_callback_raises_no_exception_completes_all_callbacks(self):
        test_callbacks = Callback.Callbacks()
        def test_callback_function():
            return True
        def another_test_callback_function(obj, *args, **kwargs):
            return True
        test_callbacks.set_callback("test_name", test_callback_function)
        test_callbacks.set_callback(
            "test_name", another_test_callback_function)
        # Checking that the callback eats up the TypeError exception and
        # continues to the other callback and returns True in the end
        expected = True
        actual = test_callbacks.make_callback("test_name")
        assert expected == actual
    def tearDown(self):
        pass
# Allow this test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
# END
| bsd-3-clause |
jayansenaratne/aston | aston/trace/PyMS.py | 3 | 3123 | import numpy as np
from pyms.GCMS.Class import GCMS_data, Scan, IonChromatogram
class AstonPyMS(GCMS_data):
    """
    Adapter to allow pyms routines to run on aston files.

    Note: initialization uses AstonFrame, not scan lists
    as in pyms.
    """
    def __init__(self, data):
        # `data` is an AstonFrame: rows are scans (index holds retention
        # times), columns are m/z values.
        self.data = data

    def __len__(self):
        """Return the number of scans."""
        return self.data.shape[0]

    def get_min_mass(self):
        """Return the smallest m/z value in the data."""
        return min(self.data.columns)

    def get_max_mass(self):
        """Return the largest m/z value in the data."""
        return max(self.data.columns)

    def get_index_at_time(self, time):
        """Return the index of the scan closest to `time`.

        NOTE(review): `time` is multiplied by 60 before comparison with
        `self.data.index`, which implies the argument and the index use
        different time units -- confirm against callers before relying on
        a specific unit here.
        """
        time *= 60.0
        return np.argmin(np.abs(self.data.index - time))

    def get_time_list(self):
        """Return the retention times, scaled by 60, as a plain list."""
        return (self.data.index * 60.0).tolist()

    @property
    def scan_list(self):
        # Public alias for the lazy scan generator below.
        return self._scan_list

    @property
    def _scan_list(self):
        # Lazily wrap each aston scan in a pyms Scan object.
        for scan in self.data.scans():
            yield Scan(scan.x.tolist(), scan.abn.tolist())

    def get_scan_list(self):
        """Return all scans as a list of pyms Scan objects."""
        return list(self._scan_list)

    def get_tic(self):
        """Return the total ion chromatogram as a pyms IonChromatogram."""
        return IonChromatogram(self.data.trace().values.T[0],
                               (self.data.index * 60.0).tolist())

    def trim(self, begin=None, end=None):
        """Trim the data to the scans between `begin` and `end`.

        Each bound may be None (no trimming at that side), an int
        (interpreted directly as a scan index), or a time value which is
        converted to the nearest scan index.
        """
        if begin is None and end is None:
            return
        if begin is None:
            st_idx = 0
        elif isinstance(begin, int):
            st_idx = begin
        else:
            st_idx = self.get_index_at_time(float(begin)) + 1
        # BUG FIX: this second branch previously re-tested `begin` and, in
        # the time-valued case, assigned `st_idx` instead of `en_idx`, so
        # the end bound was never applied (and `end=None` produced an
        # empty slice via `en_idx = 0`).
        if end is None:
            en_idx = self.data.shape[0]
        elif isinstance(end, int):
            en_idx = end
        else:
            en_idx = self.get_index_at_time(float(end)) + 1
        self.data = self.data[st_idx:en_idx]

    def info(self, print_scan_n=False):
        """Print summary statistics about the data to stdout."""
        print(" Data retention time range: %.3f min -- %.3f min" %
              (min(self.data.index), max(self.data.index)))
        tdiffs = np.diff(self.data.index)
        print(" Time step: %.3f s (std=%.3f s)" %
              (np.mean(tdiffs), np.std(tdiffs)))
        print(" Number of scans: %d" % len(self))
        print(" Minimum m/z measured: %.3f" % self.get_min_mass())
        print(" Maximum m/z measured: %.3f" % self.get_max_mass())
        # Count nonzero intensities per scan by clamping values to 0/1 and
        # summing along the m/z axis.
        dfc = self.data.values.copy()
        dfc[dfc.nonzero()] = 1
        dfc = dfc.sum(axis=1)
        print(" Mean number of m/z values per scan: %d" % np.mean(dfc))
        print(" Median number of m/z values per scan: %d" % np.median(dfc))

    def write(self, file_root):
        """Write intensities and m/z values to `<file_root>.I.csv` / `.mz.csv`."""
        f1name, f2name = file_root + '.I.csv', file_root + '.mz.csv'
        with open(f1name, 'w') as f1, open(f2name, 'w') as f2:
            for scan in self._scan_list:
                i_list = scan.get_intensity_list()
                f1.write(','.join('%.4f' % v for v in i_list))
                f1.write('\n')
                m_list = scan.get_mass_list()
                f2.write(','.join('%.4f' % v for v in m_list))
                f2.write('\n')

    def write_intensities_stream(self, file_name):
        """Write every intensity value, one per line, to `file_name`."""
        with open(file_name, 'w') as f:
            for scan in self._scan_list:
                for i in scan.get_intensity_list():
                    f.write('%8.4f\n' % i)
| gpl-3.0 |
FuturesFromYesterday/ardupilot | mk/PX4/Tools/genmsg/scripts/genmsg_check_deps.py | 216 | 2999 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import os
import sys
from genmsg import EXT_MSG, EXT_SRV, MsgContext
from genmsg.gentools import compute_full_type_name
from genmsg.msg_loader import load_msg_from_file, load_srv_from_file
from genmsg.msgs import bare_msg_type, is_builtin, resolve_type
# Command line: <pkg_name> <msg_or_srv_file> [<colon-separated-dependency-list>]
pkg_name = sys.argv[1]
msg_file = sys.argv[2]
deps = sys.argv[3].split(':') if len(sys.argv) > 3 else []
msg_context = MsgContext.create_default()
full_type_name = compute_full_type_name(pkg_name, os.path.basename(msg_file))
# Load the spec and collect its referenced field types based on extension.
if msg_file.endswith(EXT_MSG):
    spec = load_msg_from_file(msg_context, msg_file, full_type_name)
    unresolved_types = spec.types
elif msg_file.endswith(EXT_SRV):
    spec = load_srv_from_file(msg_context, msg_file, full_type_name)
    unresolved_types = spec.request.types + spec.response.types
else:
    print("Processing file: '%s' - unknown file extension" % msg_file, file=sys.stderr)
    sys.exit(1)
package_context = spec.package
# Exit non-zero if any non-builtin referenced type is missing from the
# dependency list passed on the command line (CMake re-run needed).
for unresolved_type in unresolved_types:
    bare_type = bare_msg_type(unresolved_type)
    resolved_type = resolve_type(bare_type, package_context)
    if not is_builtin(resolved_type) and resolved_type not in deps:
        print("The dependencies of the message/service '%s' have changed. Please rerun cmake." % spec.full_name, file=sys.stderr)
        sys.exit(1)
bboalimoe/ndn-cache-policy | docs/sphinx-contrib/napoleon/setup.py | 1 | 1547 | # -*- coding: utf-8 -*-
# Copyright 2014 Rob Ruana
# Licensed under the BSD License, see LICENSE file for details.
"""Sphinx "napoleon" extension."""
import sys
from setuptools import setup, find_packages
# Read the requirement pins (one per line). Use context managers so the
# file handles are closed promptly instead of being leaked until GC
# (the original chained open(...).read() never closed them).
with open('requirements.txt', 'r') as f:
    reqs = f.read().strip().splitlines()
with open('requirements_test.txt', 'r') as f:
    reqs_test = f.read().strip().splitlines()

extra = {}
if sys.version_info[0] >= 3:
    # The source is written for Python 2; have setuptools run 2to3 on
    # the code (and its doctests) when installing under Python 3.
    extra['use_2to3'] = True
    extra['use_2to3_on_doctests'] = True

with open('README.rst', 'r') as f:
    long_description = f.read()

setup(
    name='sphinxcontrib-napoleon',
    version='0.2.8',
    url='https://bitbucket.org/birkenfeld/sphinx-contrib',
    download_url='http://pypi.python.org/pypi/sphinxcontrib-napoleon',
    license='BSD',
    author='Rob Ruana',
    author_email='rob@relentlessidiot.com',
    description=__doc__,
    long_description=long_description,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Documentation',
        'Topic :: Utilities',
    ],
    platforms='any',
    packages=find_packages(),
    include_package_data=True,
    install_requires=reqs,
    test_suite='nose.collector',
    tests_require=reqs_test,
    namespace_packages=['sphinxcontrib'],
    **extra
)
| gpl-3.0 |
caveman-dick/ansible | test/units/playbook/test_attribute.py | 119 | 1823 | # (c) 2015, Marius Gedminas <marius@gedmin.as>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from ansible.playbook.attribute import Attribute
class TestAttribute(unittest.TestCase):
    """Exercise the rich-comparison operators of playbook Attribute objects.

    The assertions below pin down that an Attribute built with a higher
    priority compares as "less than" one with a lower priority, so
    higher-priority attributes sort first.
    """

    def setUp(self):
        # self.first outranks self.second (priority 100 vs. 0).
        self.first = Attribute(priority=100)
        self.second = Attribute(priority=0)

    def test_eq(self):
        self.assertTrue(self.first == self.first)
        self.assertFalse(self.first == self.second)

    def test_ne(self):
        self.assertFalse(self.first != self.first)
        self.assertTrue(self.first != self.second)

    def test_lt(self):
        self.assertFalse(self.first < self.first)
        self.assertTrue(self.first < self.second)
        self.assertFalse(self.second < self.first)

    def test_gt(self):
        self.assertFalse(self.first > self.first)
        self.assertFalse(self.first > self.second)
        self.assertTrue(self.second > self.first)

    def test_le(self):
        self.assertTrue(self.first <= self.first)
        self.assertTrue(self.first <= self.second)
        self.assertFalse(self.second <= self.first)

    def test_ge(self):
        self.assertTrue(self.first >= self.first)
        self.assertFalse(self.first >= self.second)
        self.assertTrue(self.second >= self.first)
| gpl-3.0 |
Johnzero/OE7 | openerp/addons/base/ir/ir_values.py | 23 | 25172 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pickle
from openerp.osv import osv, fields
from openerp.osv.orm import except_orm
# Fields that get_actions() strips from the action definitions it returns:
# they carry large report payloads or raw view XML that UI clients do not
# need when resolving action bindings.
EXCLUDED_FIELDS = set((
    'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
    'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))

#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
    "client_action_multi",   # sidebar wizard action
    "client_print_multi",    # sidebar report printing button
    "client_action_relate",  # sidebar related link
    "tree_but_open",         # double-click on item in tree view
    "tree_but_action",       # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
    """Holds internal model-specific action bindings and user-defined default
    field value definitions. This is a legacy internal model, mixing
    two different concepts, and will likely be updated or replaced in a
    future version by cleaner, separate models. You should not depend
    explicitly on it.

    The purpose of each ``ir.values`` entry depends on its type, defined
    by the ``key`` column:

     * 'default': user-defined default values, used when creating new
       records of this model:
     * 'action': binding of an action to a particular *action slot* of
       this model, making the action easily available in the user
       interface for this model.

    The ``key2`` column acts as a qualifier, further refining the type
    of the entry. The possible values are:

     * for 'default' entries: an optional condition restricting the
       cases where this particular default value will be applicable,
       or ``False`` for no condition
     * for 'action' entries: the ``key2`` qualifier is one of the available
       action slots, defining how this action can be invoked:

         * ``'client_print_multi'`` for report printing actions that will
           be available on views displaying items from this model
         * ``'client_action_multi'`` for assistants (wizards) actions
           that will be available in views displaying objects of this model
         * ``'client_action_relate'`` for links towards related documents
           that should be available in views displaying objects of this model
         * ``'tree_but_open'`` for actions that will be triggered when
           double-clicking an item from this model in a hierarchical tree view

    Each entry is specific to a model (``model`` column), and for ``'actions'``
    type, may even be made specific to a given record of that model when the
    ``res_id`` column contains a record ID (``False`` means it's global for
    all records).

    The content of the entry is defined by the ``value`` column, which may either
    contain an arbitrary value, or a reference string defining the action that
    should be executed.

    .. rubric:: Usage: default values

    The ``'default'`` entries are usually defined manually by the
    users, and set by their UI clients calling :meth:`~.set_default`.
    These default values are then automatically used by the
    ORM every time a new record is about to be created, i.e. when
    :meth:`~openerp.osv.osv.osv.default_get`
    or :meth:`~openerp.osv.osv.osv.create` are called.

    .. rubric:: Usage: action bindings

    Business applications will usually bind their actions during
    installation, and OpenERP UI clients will apply them as defined,
    based on the list of actions included in the result of
    :meth:`~openerp.osv.osv.osv.fields_view_get`,
    or directly returned by explicit calls to :meth:`~.get_actions`.
    """
    _name = 'ir.values'

    def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
        """Function-field getter for ``value_unpickle``: returns the stored
        ``value`` column, unpickled for 'default' entries (action references
        are returned as-is). ``name[:-9]`` strips the '_unpickle' suffix to
        reach the underlying 'value' column."""
        res = {}
        for record in self.browse(cursor, user, ids, context=context):
            value = record[name[:-9]]
            if record.key == 'default' and value:
                # default values are pickled on the fly
                try:
                    value = str(pickle.loads(value))
                except Exception:
                    # leave the raw stored value if it cannot be unpickled
                    pass
            res[record.id] = value
        return res

    def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
        """Function-field setter for ``value_unpickle``: pickles the value
        before storing it when the entry is a 'default'; 'action' references
        are written verbatim."""
        if context is None:
            context = {}
        ctx = context.copy()
        if self.CONCURRENCY_CHECK_FIELD in ctx:
            # drop the concurrency-check marker before the write() below
            del ctx[self.CONCURRENCY_CHECK_FIELD]
        record = self.browse(cursor, user, id, context=context)
        if record.key == 'default':
            # default values are pickled on the fly
            value = pickle.dumps(value)
        self.write(cursor, user, id, {name[:-9]: value}, context=ctx)

    def onchange_object_id(self, cr, uid, ids, object_id, context=None):
        # UI helper: when the helper field 'model_id' changes, copy the
        # technical model name into the 'model' column.
        if not object_id: return {}
        act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
        return {
            'value': {'model': act.model}
        }

    def onchange_action_id(self, cr, uid, ids, action_id, context=None):
        # UI helper: when the helper field 'action_id' changes, store the
        # 'type,id' action reference string in the value_unpickle field.
        if not action_id: return {}
        act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
        return {
            'value': {'value_unpickle': act.type+','+str(act.id)}
        }

    _columns = {
        'name': fields.char('Name', size=128, required=True),
        'model': fields.char('Model Name', size=128, select=True, required=True,
                             help="Model to which this entry applies"),
        # TODO: model_id and action_id should be read-write function fields
        'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
                                    help="Model to which this entry applies - "
                                         "helper field for setting a model, will "
                                         "automatically set the correct model name"),
        'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
                                     help="Action bound to this entry - "
                                          "helper field for binding an action, will "
                                          "automatically set the correct reference"),
        'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
        'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
                                          type='text',
                                          string='Default value or action reference'),
        'key': fields.selection([('action','Action'),('default','Default')],
                                'Type', size=128, select=True, required=True,
                                help="- Action: an action attached to one slot of the given model\n"
                                     "- Default: a default value for a model field"),
        'key2' : fields.char('Qualifier', size=128, select=True,
                             help="For actions, one of the possible action slots: \n"
                                  " - client_action_multi\n"
                                  " - client_print_multi\n"
                                  " - client_action_relate\n"
                                  " - tree_but_open\n"
                                  "For defaults, an optional condition"
                             ,),
        'res_id': fields.integer('Record ID', select=True,
                                 help="Database identifier of the record to which this applies. "
                                      "0 = for all records"),
        'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
                                   help="If set, action binding only applies for this user."),
        'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
                                      help="If set, action binding only applies for this company")
    }
    _defaults = {
        'key': 'action',
        'key2': 'tree_but_open',
    }

    def _auto_init(self, cr, context=None):
        """Create the composite index used by the direct SQL lookups in
        get_defaults()/get_actions(), if it does not exist yet (it cannot
        be declared declaratively via _columns)."""
        super(ir_values, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')

    def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
        """Defines a default value for the given model and field_name. Any previous
        default for the same scope (model, field_name, value, for_all_users, company_id, condition)
        will be replaced and lost in the process.

        Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
        the highest priority default for any given field. Defaults that are more specific
        have a higher priority, in the following order (highest to lowest):

             * specific to user and company
             * specific to user only
             * specific to company only
             * global to everyone

        :param string model: model name
        :param string field_name: field name to which the default applies
        :param value: the default field value to set
        :type value: any serializable Python value
        :param bool for_all_users: whether the default should apply to everybody or only
                                   the user calling the method
        :param int company_id: optional ID of the company to which the default should
                               apply. If omitted, the default will be global. If True
                               is passed, the current user's company will be used.
        :param string condition: optional condition specification that can be used to
                                 restrict the applicability of the default values
                                 (e.g. based on another field's value). This is an
                                 opaque string as far as the API is concerned, but client
                                 stacks typically use single-field conditions in the
                                 form ``'key=stringified_value'``.
                                 (Currently, the condition is trimmed to 200 characters,
                                 so values that share the same first 200 characters always
                                 match)
        :return: id of the newly created ir.values entry
        """
        if isinstance(value, unicode):
            # store unicode values as UTF-8 bytes before pickling
            value = value.encode('utf8')
        if company_id is True:
            # should be company-specific, need to get company id
            user = self.pool.get('res.users').browse(cr, uid, uid)
            company_id = user.company_id.id
        # remove existing defaults for the same scope
        search_criteria = [
            ('key', '=', 'default'),
            ('key2', '=', condition and condition[:200]),
            ('model', '=', model),
            ('name', '=', field_name),
            ('user_id', '=', False if for_all_users else uid),
            ('company_id','=', company_id)
            ]
        self.unlink(cr, uid, self.search(cr, uid, search_criteria))
        return self.create(cr, uid, {
            'name': field_name,
            'value': pickle.dumps(value),
            'model': model,
            'key': 'default',
            'key2': condition and condition[:200],
            'user_id': False if for_all_users else uid,
            'company_id': company_id,
            })

    def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
        """ Return the default value defined for model, field_name, users, company and condition.
            Return ``None`` if no such default exists.
        """
        search_criteria = [
            ('key', '=', 'default'),
            ('key2', '=', condition and condition[:200]),
            ('model', '=', model),
            ('name', '=', field_name),
            ('user_id', '=', False if for_all_users else uid),
            ('company_id','=', company_id)
            ]
        defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
        # values are stored pickled; the text column comes back as unicode,
        # so re-encode to bytes before unpickling
        return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None

    def get_defaults(self, cr, uid, model, condition=False):
        """Returns any default values that are defined for the current model and user,
        (and match ``condition``, if specified), previously registered via
        :meth:`~.set_default`.

        Defaults are global to a model, not field-specific, but an optional
        ``condition`` can be provided to restrict matching default values
        to those that were defined for the same condition (usually based
        on another field's value).

        Default values also have priorities depending on whom they apply
        to: only the highest priority value will be returned for any
        field. See :meth:`~.set_default` for more details.

        :param string model: model name
        :param string condition: optional condition specification that can be used to
                                 restrict the applicability of the default values
                                 (e.g. based on another field's value). This is an
                                 opaque string as far as the API is concerned, but client
                                 stacks typically use single-field conditions in the
                                 form ``'key=stringified_value'``.
                                 (Currently, the condition is trimmed to 200 characters,
                                 so values that share the same first 200 characters always
                                 match)
        :return: list of default values tuples of the form ``(id, field_name, value)``
                 (``id`` is the ID of the default entry, usually irrelevant)
        """
        # use a direct SQL query for performance reasons,
        # this is called very often
        # NB: data placeholders are doubled (%%s) because the string is first
        # %-formatted below to splice in the key2 clause, then executed.
        query = """SELECT v.id, v.name, v.value FROM ir_values v
                      LEFT JOIN res_users u ON (v.user_id = u.id)
                   WHERE v.key = %%s AND v.model = %%s
                      AND (v.user_id = %%s OR v.user_id IS NULL)
                      AND (v.company_id IS NULL OR
                           v.company_id =
                             (SELECT company_id from res_users where id = %%s)
                          )
                   %s
                   ORDER BY v.user_id, u.company_id"""
        params = ('default', model, uid, uid)
        if condition:
            query %= 'AND v.key2 = %s'
            params += (condition[:200],)
        else:
            query %= 'AND v.key2 is NULL'
        cr.execute(query, params)

        # keep only the highest priority default for each field:
        # setdefault keeps the first row seen per field name, and the
        # ORDER BY above is meant to yield the most specific rows first.
        defaults = {}
        for row in cr.dictfetchall():
            defaults.setdefault(row['name'],
                (row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
        return defaults.values()

    def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
        """Binds an the given action to the given model's action slot - for later
        retrieval via :meth:`~.get_actions`. Any existing binding of the same action
        to the same slot is first removed, allowing an update of the action's name.
        See the class description for more details about the various action
        slots: :class:`~ir_values`.

        :param string name: action label, usually displayed by UI client
        :param string action_slot: the action slot to which the action should be
                                   bound to - one of ``client_action_multi``,
                                   ``client_print_multi``, ``client_action_relate``,
                                   ``tree_but_open``.
        :param string model: model name
        :param string action: action reference, in the form ``'model,id'``
        :param int res_id: optional record id - will bind the action only to a
                           specific record of the model, not all records.
        :return: id of the newly created ir.values entry
        """
        assert isinstance(action, basestring) and ',' in action, \
               'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
        assert action_slot in ACTION_SLOTS, \
               'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)

        # remove existing action definition of same slot and value
        search_criteria = [
            ('key', '=', 'action'),
            ('key2', '=', action_slot),
            ('model', '=', model),
            ('res_id', '=', res_id or 0), # int field -> NULL == 0
            ('value', '=', action),
            ]
        self.unlink(cr, uid, self.search(cr, uid, search_criteria))
        return self.create(cr, uid, {
            'key': 'action',
            'key2': action_slot,
            'model': model,
            'res_id': res_id,
            'name': name,
            'value': action,
            })

    def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
        """Retrieves the list of actions bound to the given model's action slot.
        See the class description for more details about the various action
        slots: :class:`~.ir_values`.

        :param string action_slot: the action slot to which the actions should be
                                   bound to - one of ``client_action_multi``,
                                   ``client_print_multi``, ``client_action_relate``,
                                   ``tree_but_open``.
        :param string model: model name
        :param int res_id: optional record id - will bind the action only to a
                           specific record of the model, not all records.
        :return: list of action tuples of the form ``(id, name, action_def)``,
                 where ``id`` is the ID of the default entry, ``name`` is the
                 action label, and ``action_def`` is a dict containing the
                 action definition as obtained by calling
                 :meth:`~openerp.osv.osv.osv.read` on the action record.
        """
        assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
        # use a direct SQL query for performance reasons,
        # this is called very often
        query = """SELECT v.id, v.name, v.value FROM ir_values v
                   WHERE v.key = %s AND v.key2 = %s
                      AND v.model = %s
                      AND (v.res_id = %s
                           OR v.res_id IS NULL
                           OR v.res_id = 0)
                   ORDER BY v.id"""
        cr.execute(query, ('action', action_slot, model, res_id or None))
        results = {}
        for action in cr.dictfetchall():
            if not action['value']:
                continue # skip if undefined
            # value holds an action reference of the form 'model,id'
            action_model,id = action['value'].split(',')
            # strip large payload fields (see EXCLUDED_FIELDS) from the read
            fields = [
                field
                for field in self.pool.get(action_model)._all_columns
                if field not in EXCLUDED_FIELDS]
            # FIXME: needs cleanup
            try:
                action_def = self.pool.get(action_model).read(cr, uid, int(id), fields, context)
                if action_def:
                    if action_model in ('ir.actions.report.xml','ir.actions.act_window',
                                        'ir.actions.wizard'):
                        # enforce the action's group restrictions for this user
                        groups = action_def.get('groups_id')
                        if groups:
                            cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
                                       (tuple(groups), uid))
                            if not cr.fetchone():
                                if action['name'] == 'Menuitem':
                                    raise osv.except_osv('Error!',
                                                         'You do not have the permission to perform this operation!!!')
                                continue
                # keep only the first action registered for each action name
                results[action['name']] = (action['id'], action['name'], action_def)
            except except_orm:
                # skip actions whose definition can no longer be read
                continue
        return sorted(results.values())

    def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
        """Apply map_fn to the various models passed, according to
        legacy way to specify models/records.
        """
        assert isinstance(model_list, (list, tuple)), \
               "model_list should be in the form [model,..] or [(model,res_id), ..]"
        results = []
        for model in model_list:
            res_id = False
            if isinstance(model, (list, tuple)):
                model, res_id = model
            result = map_fn(model, res_id)
            # some of the functions return one result at a time (tuple or id)
            # and some return a list of many of them - care for both
            if merge_results:
                results.extend(result)
            else:
                results.append(result)
        return results

    # Backwards-compatibility adapter layer to retrofit into split API
    def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
        """Deprecated legacy method to set default values and bind actions to models' action slots.
        Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
        (``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).

        :deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
        """
        assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
        if key == 'default':
            def do_set(model,res_id):
                return self.set_default(cr, uid, model, field_name=name, value=value,
                                        for_all_users=(not preserve_user), company_id=company,
                                        condition=key2)
        elif key == 'action':
            def do_set(model,res_id):
                return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
        return self._map_legacy_model_list(models, do_set)

    def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
        """Deprecated legacy method to get the list of default values or actions bound to models' action slots.
        Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
        (``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)

        :deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
        """
        assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
        if key == 'default':
            def do_get(model,res_id):
                return self.get_defaults(cr, uid, model, condition=key2)
        elif key == 'action':
            def do_get(model,res_id):
                return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
        return self._map_legacy_model_list(models, do_get, merge_results=True)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
khwon/iTerm2 | tools/ply/ply-3.4/example/unicalc/calc.py | 165 | 2502 | # -----------------------------------------------------------------------------
# calc.py
#
# A simple calculator with variables. This is from O'Reilly's
# "Lex and Yacc", p. 63.
#
# This example uses unicode strings for tokens, docstrings, and input.
# -----------------------------------------------------------------------------
import sys
sys.path.insert(0,"../..")

# Token names shared between the lexer and the parser.
tokens = (
    'NAME','NUMBER',
    'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
    'LPAREN','RPAREN',
    )

# Tokens
# NOTE: in PLY the t_<NAME> strings and the docstrings of the t_*/p_*
# functions below ARE the token regular expressions / grammar rules --
# they are load-bearing, not documentation.
t_PLUS    = ur'\+'
t_MINUS   = ur'-'
t_TIMES   = ur'\*'
t_DIVIDE  = ur'/'
t_EQUALS  = ur'='
t_LPAREN  = ur'\('
t_RPAREN  = ur'\)'
t_NAME    = ur'[a-zA-Z_][a-zA-Z0-9_]*'

def t_NUMBER(t):
    ur'\d+'
    # Convert the matched digits to an int; report and fall back to 0 if
    # the conversion fails.
    try:
        t.value = int(t.value)
    except ValueError:
        print "Integer value too large", t.value
        t.value = 0
    return t

# Characters silently skipped by the lexer (spaces and tabs).
t_ignore = u" \t"

def t_newline(t):
    ur'\n+'
    # Keep the lexer's line counter accurate for error reporting.
    t.lexer.lineno += t.value.count("\n")

def t_error(t):
    # Report and skip a single illegal character, then keep lexing.
    print "Illegal character '%s'" % t.value[0]
    t.lexer.skip(1)

# Build the lexer
import ply.lex as lex
lex.lex()

# Parsing rules

# Operator precedence, lowest to highest. UMINUS is a fictitious token used
# only (via %prec) to give unary minus higher precedence than binary ops.
precedence = (
    ('left','PLUS','MINUS'),
    ('left','TIMES','DIVIDE'),
    ('right','UMINUS'),
    )

# dictionary of names (the calculator's variable store)
names = { }

def p_statement_assign(p):
    'statement : NAME EQUALS expression'
    names[p[1]] = p[3]

def p_statement_expr(p):
    'statement : expression'
    print p[1]

def p_expression_binop(p):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    # Dispatch on the operator token that was matched.
    if p[2] == u'+'  : p[0] = p[1] + p[3]
    elif p[2] == u'-': p[0] = p[1] - p[3]
    elif p[2] == u'*': p[0] = p[1] * p[3]
    elif p[2] == u'/': p[0] = p[1] / p[3]

def p_expression_uminus(p):
    'expression : MINUS expression %prec UMINUS'
    p[0] = -p[2]

def p_expression_group(p):
    'expression : LPAREN expression RPAREN'
    p[0] = p[2]

def p_expression_number(p):
    'expression : NUMBER'
    p[0] = p[1]

def p_expression_name(p):
    'expression : NAME'
    # Undefined variables are reported and evaluate to 0.
    try:
        p[0] = names[p[1]]
    except LookupError:
        print "Undefined name '%s'" % p[1]
        p[0] = 0

def p_error(p):
    if p:
        print "Syntax error at '%s'" % p.value
    else:
        print "Syntax error at EOF"

import ply.yacc as yacc
yacc.yacc()

# Read-eval-print loop; EOF (Ctrl-D) exits, blank lines are skipped.
while 1:
    try:
        s = raw_input('calc > ')
    except EOFError:
        break
    if not s: continue
    yacc.parse(unicode(s))
| gpl-2.0 |
admetricks/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py | 118 | 37734 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
from webkitpy.layout_tests.models.test_expectations import *
from webkitpy.layout_tests.models.test_configuration import *
try:
from collections import OrderedDict
except ImportError:
# Needed for Python < 2.7
from webkitpy.thirdparty.ordered_dict import OrderedDict
class Base(unittest.TestCase):
    # Shared fixture/helper base class for the expectation-parsing tests.
    # Note that all of these tests are written assuming the configuration
    # being tested is Windows XP, Release build.
    def __init__(self, testFunc):
        # Use a MockHost so no real filesystem or processes are touched;
        # the port under test is 'test-win-xp' (Release).
        host = MockHost()
        self._port = host.port_factory.get('test-win-xp', None)
        self._exp = None  # TestExpectations instance, set by parse_exp()
        unittest.TestCase.__init__(self, testFunc)

    def get_test(self, test_name):
        # FIXME: Remove this routine and just reference test names directly.
        return test_name

    def get_basic_tests(self):
        # Fixed set of test names the expectation text below is parsed against.
        return [self.get_test('failures/expected/text.html'),
                self.get_test('failures/expected/image_checksum.html'),
                self.get_test('failures/expected/crash.html'),
                self.get_test('failures/expected/missing_text.html'),
                self.get_test('failures/expected/image.html'),
                self.get_test('passes/text.html')]

    def get_basic_expectations(self):
        # Raw TestExpectations text reused by most tests in this file.
        return """
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/crash.html [ WontFix ]
Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
Bug(test) failures/expected/image_checksum.html [ WontFix ]
Bug(test) failures/expected/image.html [ WontFix Mac ]
"""

    def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
        # Install the given expectation text (plus optional overrides) on the
        # mock port, then build the TestExpectations object under test.
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = expectations
        if overrides:
            expectations_dict['overrides'] = overrides
        self._port.expectations_dict = lambda: expectations_dict
        expectations_to_lint = expectations_dict if is_lint_mode else None
        self._exp = TestExpectations(self._port, self.get_basic_tests(), expectations_to_lint=expectations_to_lint)

    def assert_exp(self, test, result):
        # Assert that exactly {result} is the expectation set for the test.
        self.assertEqual(self._exp.get_expectations(self.get_test(test)),
                         set([result]))

    def assert_bad_expectations(self, expectations, overrides=None):
        # Assert that lint-mode parsing rejects the given expectation text.
        self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True, overrides=overrides)
class BasicTests(Base):
    """Smoke test: parse the basic expectation set and spot-check results."""

    def test_basic(self):
        self.parse_exp(self.get_basic_expectations())
        # (test path, expected outcome) pairs checked in order. image.html
        # is WontFix only on Mac, so on the Windows port under test it is
        # a plain PASS.
        expected_outcomes = [
            ('failures/expected/text.html', FAIL),
            ('failures/expected/image_checksum.html', PASS),
            ('passes/text.html', PASS),
            ('failures/expected/image.html', PASS),
        ]
        for test_path, outcome in expected_outcomes:
            self.assert_exp(test_path, outcome)
class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations(
self.get_test('failures/expected/text.html')),
set([FAIL, CRASH]))
def test_result_was_expected(self):
# test basics
self.assertEqual(TestExpectations.result_was_expected(PASS, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), True)
self.assertEqual(TestExpectations.result_was_expected(FAIL, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
# test handling of SKIPped tests and results
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=True), True)
self.assertEqual(TestExpectations.result_was_expected(SKIP, set([CRASH]), test_needs_rebaselining=False, test_is_skipped=False), False)
# test handling of MISSING results and the REBASELINE modifier
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=True, test_is_skipped=False), True)
self.assertEqual(TestExpectations.result_was_expected(MISSING, set([PASS]), test_needs_rebaselining=False, test_is_skipped=False), False)
def test_remove_pixel_failures(self):
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([IMAGE])), set([PASS]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([FAIL])), set([FAIL]))
self.assertEqual(TestExpectations.remove_pixel_failures(set([PASS, IMAGE, CRASH])), set([PASS, CRASH]))
def test_suffixes_for_expectations(self):
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([IMAGE])), set(['png']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set([FAIL, IMAGE, CRASH])), set(['txt', 'png', 'wav']))
self.assertEqual(TestExpectations.suffixes_for_expectations(set()), set())
def test_category_expectations(self):
# This test checks unknown tests are not present in the
# expectations and that known test part of a test category is
# present in the expectations.
exp_str = 'Bug(x) failures/expected [ WontFix ]'
self.parse_exp(exp_str)
test_name = 'failures/expected/unknown-test.html'
unknown_test = self.get_test(test_name)
self.assertRaises(KeyError, self._exp.get_expectations,
unknown_test)
self.assert_exp('failures/expected/crash.html', PASS)
def test_get_modifiers(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_modifiers(
self.get_test('passes/text.html')), [])
def test_get_expectations_string(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_expectations_string(
self.get_test('failures/expected/text.html')),
'FAIL')
def test_expectation_to_string(self):
# Normal cases are handled by other tests.
self.parse_exp(self.get_basic_expectations())
self.assertRaises(ValueError, self._exp.expectation_to_string,
-1)
def test_get_test_set(self):
# Handle some corner cases for this routine not covered by other tests.
self.parse_exp(self.get_basic_expectations())
s = self._exp.get_test_set(WONTFIX)
self.assertEqual(s,
set([self.get_test('failures/expected/crash.html'),
self.get_test('failures/expected/image_checksum.html')]))
def test_parse_warning(self):
    """Lint-mode parsing reports unrecognized modifiers and missing
    paths via a ParseError carrying all collected warnings."""
    try:
        filesystem = self._port.host.filesystem
        filesystem.write_text_file(filesystem.join(self._port.layout_tests_dir(), 'disabled-test.html-disabled'), 'content')
        # Look up the disabled test (originally had a stray trailing comma
        # that built and discarded a 1-tuple; the call itself is kept).
        self.get_test('disabled-test.html-disabled')
        self.parse_exp("[ FOO ] failures/expected/text.html [ Failure ]\n"
                       "Bug(rniwa) non-existent-test.html [ Failure ]\n"
                       "Bug(rniwa) disabled-test.html-disabled [ ImageOnlyFailure ]", is_lint_mode=True)
        self.fail("ParseError wasn't raised")
    except ParseError as e:
        # Fixed Python-2-only "except ParseError, e:" to the portable
        # "as" form (valid on 2.6+ and 3.x).
        warnings = ("expectations:1 Unrecognized modifier 'foo' failures/expected/text.html\n"
                    "expectations:2 Path does not exist. non-existent-test.html")
        self.assertEqual(str(e), warnings)
def test_parse_warnings_are_logged_if_not_in_lint_mode(self):
    """Outside lint mode a malformed line logs a warning instead of raising."""
    oc = OutputCapture()
    try:
        oc.capture_output()
        self.parse_exp('-- this should be a syntax error', is_lint_mode=False)
    finally:
        _, _, logs = oc.restore_output()
        # assertNotEquals is a deprecated alias; use assertNotEqual.
        self.assertNotEqual(logs, '')
def test_error_on_different_platform(self):
    """parse_exp uses a Windows port; duplicate Mac lines must still
    raise errors when linting."""
    expectations = ('Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\n'
                    'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]')
    self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True)
def test_error_on_different_build_type(self):
    """parse_exp uses a Release port; duplicate Debug lines must still
    raise errors when linting."""
    expectations = ('Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\n'
                    'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]')
    self.assertRaises(ParseError, self.parse_exp, expectations, is_lint_mode=True)
def test_overrides(self):
    """An override file wins over the base expectations for a test."""
    self.parse_exp('Bug(exp) failures/expected/text.html [ Failure ]',
                   'Bug(override) failures/expected/text.html [ ImageOnlyFailure ]')
    self.assert_exp('failures/expected/text.html', IMAGE)
def test_overrides__directory(self):
    """A directory-level override applies to every test underneath it."""
    self.parse_exp('Bug(exp) failures/expected/text.html [ Failure ]',
                   'Bug(override) failures/expected [ Crash ]')
    self.assert_exp('failures/expected/text.html', CRASH)
    self.assert_exp('failures/expected/image.html', CRASH)
def test_overrides__duplicate(self):
    """Duplicate lines inside the overrides file are rejected."""
    overrides = ('Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n'
                 'Bug(override) failures/expected/text.html [ Crash ]\n')
    self.assert_bad_expectations('Bug(exp) failures/expected/text.html [ Failure ]',
                                 overrides)
def test_pixel_tests_flag(self):
    """matches_an_expected_result() honors the pixel-tests flag."""
    def match(test, result, pixel_tests_enabled):
        # Thin wrapper that keeps the assertions below readable.
        return self._exp.matches_an_expected_result(
            self.get_test(test), result, pixel_tests_enabled)

    self.parse_exp(self.get_basic_expectations())
    self.assertTrue(match('failures/expected/text.html', FAIL, True))
    self.assertTrue(match('failures/expected/text.html', FAIL, False))
    self.assertFalse(match('failures/expected/text.html', CRASH, True))
    self.assertFalse(match('failures/expected/text.html', CRASH, False))
    self.assertTrue(match('failures/expected/image_checksum.html', PASS, True))
    self.assertTrue(match('failures/expected/image_checksum.html', PASS, False))
    self.assertTrue(match('failures/expected/crash.html', PASS, False))
    self.assertTrue(match('passes/text.html', PASS, False))
def test_more_specific_override_resets_skip(self):
    """A more specific line overrides a directory-level Skip."""
    self.parse_exp('Bug(x) failures/expected [ Skip ]\n'
                   'Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n')
    self.assert_exp('failures/expected/text.html', IMAGE)
    skipped = self._exp.get_tests_with_result_type(SKIP)
    text_test = self._port._filesystem.join(self._port.layout_tests_dir(),
                                            'failures/expected/text.html')
    self.assertFalse(text_test in skipped)
class SkippedTests(Base):
    """Tests for the interaction of Skipped files with test expectations."""

    def check(self, expectations, overrides, skips, lint=False):
        """Parse expectations/overrides with the given skipped tests and
        assert the skipped test ends up as BUG_DUMMY SKIP WONTFIX [ Pass ]."""
        port = MockHost().port_factory.get('qt')
        port._filesystem.write_text_file(
            port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = expectations
        if overrides:
            expectations_dict['overrides'] = overrides
        port.expectations_dict = lambda: expectations_dict
        port.skipped_layout_tests = lambda tests: set(skips)
        expectations_to_lint = expectations_dict if lint else None
        exp = TestExpectations(port, ['failures/expected/text.html'],
                               expectations_to_lint=expectations_to_lint)
        # Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ]
        self.assertEqual(exp.get_modifiers('failures/expected/text.html'),
                         [TestExpectationParser.DUMMY_BUG_MODIFIER,
                          TestExpectationParser.SKIP_MODIFIER,
                          TestExpectationParser.WONTFIX_MODIFIER])
        self.assertEqual(exp.get_expectations('failures/expected/text.html'), set([PASS]))

    def test_skipped_tests_work(self):
        self.check(expectations='', overrides=None, skips=['failures/expected/text.html'])

    def test_duplicate_skipped_test_fails_lint(self):
        self.assertRaises(ParseError, self.check,
                          expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
                          overrides=None, skips=['failures/expected/text.html'], lint=True)

    def test_skipped_file_overrides_expectations(self):
        self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
                   overrides=None, skips=['failures/expected/text.html'])

    def test_skipped_dir_overrides_expectations(self):
        self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n',
                   overrides=None, skips=['failures/expected'])

    def test_skipped_file_overrides_overrides(self):
        self.check(expectations='',
                   overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
                   skips=['failures/expected/text.html'])

    def test_skipped_dir_overrides_overrides(self):
        self.check(expectations='',
                   overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
                   skips=['failures/expected'])

    def test_skipped_entry_dont_exist(self):
        """A Skipped entry naming a nonexistent test only logs a warning."""
        port = MockHost().port_factory.get('qt')
        expectations_dict = OrderedDict()
        expectations_dict['expectations'] = ''
        port.expectations_dict = lambda: expectations_dict
        port.skipped_layout_tests = lambda tests: set(['foo/bar/baz.html'])
        capture = OutputCapture()
        capture.capture_output()
        exp = TestExpectations(port)
        _, _, logs = capture.restore_output()
        self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
class ExpectationSyntaxTests(Base):
    """Tests for tokenizing individual expectation lines."""

    def test_unrecognized_expectation(self):
        self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')

    def test_macro(self):
        exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
        self.parse_exp(exp_str)
        self.assert_exp('failures/expected/text.html', FAIL)

    def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
        """Tokenize *line* and check the resulting TestExpectationLine fields.

        Modifier/expectation checks are skipped when warnings are expected,
        since a warned line is not fully parsed."""
        bugs = bugs or []
        modifiers = modifiers or []
        expectations = expectations or []
        warnings = warnings or []
        filename = 'TestExpectations'
        line_number = 1
        expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
        self.assertEqual(expectation_line.warnings, warnings)
        self.assertEqual(expectation_line.name, name)
        self.assertEqual(expectation_line.filename, filename)
        self.assertEqual(expectation_line.line_number, line_number)
        if not warnings:
            self.assertEqual(expectation_line.modifiers, modifiers)
            self.assertEqual(expectation_line.expectations, expectations)

    def test_bare_name(self):
        self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])

    def test_bare_name_and_bugs(self):
        self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
        self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
        self.assert_tokenize_exp('webkit.org/b/12345 webkit.org/b/34567 foo.html', modifiers=['BUGWK12345', 'BUGWK34567', 'SKIP'], expectations=['PASS'])

    def test_comments(self):
        self.assert_tokenize_exp("# comment", name=None, comment="# comment")
        self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])

    def test_config_modifiers(self):
        self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
        self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
        self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])

    def test_unknown_config(self):
        self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])

    def test_unknown_expectation(self):
        self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])

    def test_skip(self):
        self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])

    def test_slow(self):
        self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])

    def test_wontfix(self):
        self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
        self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', modifiers=['WONTFIX'], expectations=['IMAGE'])
        self.assert_tokenize_exp('foo.html [ WontFix Pass Failure ]', modifiers=['WONTFIX'], expectations=['PASS', 'FAIL'])

    def test_blank_line(self):
        self.assert_tokenize_exp('', name=None)

    def test_warnings(self):
        self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
        self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None)
        self.assert_tokenize_exp('webkit.org/b/12345 ]', warnings=['unexpected "]"'], name=None)
        self.assert_tokenize_exp('foo.html webkit.org/b/12345 ]', warnings=['"webkit.org/b/12345" is not at the start of the line.'])
class SemanticTests(Base):
    """Tests for semantic validation of parsed expectation lines."""

    def test_bug_format(self):
        self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)

    def test_bad_bugid(self):
        try:
            self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
            self.fail('should have raised an error about a bad bug identifier')
        except ParseError as exp:
            # Fixed Python-2-only "except ParseError, exp:" to the
            # portable "as" form (valid on 2.6+ and 3.x).
            self.assertEqual(len(exp.warnings), 1)

    def test_missing_bugid(self):
        self.parse_exp('failures/expected/text.html [ Failure ]')
        self.assertFalse(self._exp.has_warnings())

        self._port.warn_if_bug_missing_in_test_expectations = lambda: True

        self.parse_exp('failures/expected/text.html [ Failure ]')
        line = self._exp._model.get_expectation_line('failures/expected/text.html')
        self.assertFalse(line.is_invalid())
        self.assertEqual(line.warnings, ['Test lacks BUG modifier.'])

    def test_skip_and_wontfix(self):
        # Skip is not allowed to have other expectations as well, because those
        # expectations won't be exercised and may become stale .
        self.parse_exp('failures/expected/text.html [ Failure Skip ]')
        self.assertTrue(self._exp.has_warnings())

        self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
        self.assertFalse(self._exp.has_warnings())

        self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
        self.assertFalse(self._exp.has_warnings())

    def test_slow_and_timeout(self):
        # A test cannot be SLOW and expected to TIMEOUT.
        self.assertRaises(ParseError, self.parse_exp,
                          'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True)

    def test_rebaseline(self):
        # Can't lint a file w/ 'REBASELINE' in it.
        self.assertRaises(ParseError, self.parse_exp,
                          'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
                          is_lint_mode=True)

    def test_duplicates(self):
        self.assertRaises(ParseError, self.parse_exp, """
Bug(exp) failures/expected/text.html [ Failure ]
Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)

        self.assertRaises(ParseError, self.parse_exp,
                          self.get_basic_expectations(), overrides="""
Bug(override) failures/expected/text.html [ Failure ]
Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)

    def test_missing_file(self):
        self.parse_exp('Bug(test) missing_file.html [ Failure ]')
        self.assertTrue(self._exp.has_warnings(), 1)
class PrecedenceTests(Base):
    """Tests for precedence among conflicting expectation lines."""

    def test_file_over_directory(self):
        # This tests handling precedence of specific lines over directories
        # and tests expectations covering entire directories.
        exp_str = """
Bug(x) failures/expected/text.html [ Failure ]
Bug(y) failures/expected [ WontFix ]
"""
        self.parse_exp(exp_str)
        self.assert_exp('failures/expected/text.html', FAIL)
        self.assert_exp('failures/expected/crash.html', PASS)

        # Order of the lines must not matter: the specific line wins.
        exp_str = """
Bug(x) failures/expected [ WontFix ]
Bug(y) failures/expected/text.html [ Failure ]
"""
        self.parse_exp(exp_str)
        self.assert_exp('failures/expected/text.html', FAIL)
        self.assert_exp('failures/expected/crash.html', PASS)

    def test_ambiguous(self):
        self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
                                     "Bug(test) [ Win ] passes/text.html [ Failure ]\n")

    def test_more_modifiers(self):
        self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
                                     "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")

    def test_order_in_file(self):
        self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
                                     "Bug(test) [ Release ] : passes/text.html [ Pass ]\n")

    def test_macro_overrides(self):
        self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
                                     "Bug(test) [ XP ] passes/text.html [ Failure ]\n")
class RemoveConfigurationsTest(Base):
    """Tests for removing one configuration from an expectation line."""

    def test_remove(self):
        host = MockHost()
        test_port = host.port_factory.get('test-win-xp', None)
        test_port.test_exists = lambda test: True
        test_port.test_isfile = lambda test: True

        test_config = test_port.test_configuration()
        test_port.expectations_dict = lambda: {"expectations": """Bug(x) [ Linux Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
"""}
        expectations = TestExpectations(test_port, self.get_basic_tests())

        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)

        # XP/Release is removed, so the remaining Win versions are spelled out.
        self.assertEqual("""Bug(x) [ Linux Vista Win7 Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Mac Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)

    def test_remove_line(self):
        host = MockHost()
        test_port = host.port_factory.get('test-win-xp', None)
        test_port.test_exists = lambda test: True
        test_port.test_isfile = lambda test: True

        test_config = test_port.test_configuration()
        test_port.expectations_dict = lambda: {'expectations': """Bug(x) [ Win Release ] failures/expected/foo.html [ Failure ]
Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
"""}
        expectations = TestExpectations(test_port)

        # Removing every Win/Release configuration deletes the whole line.
        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration())
        actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())

        self.assertEqual("""Bug(y) [ Win Debug ] failures/expected/foo.html [ Crash ]
""", actual_expectations)
class RebaseliningTest(Base):
    """Test rebaselining-specific functionality."""

    def assertRemove(self, input_expectations, input_overrides, tests, expected_expectations, expected_overrides):
        """Parse the inputs, strip REBASELINE lines for *tests*, and compare
        both the resulting expectations and overrides texts."""
        self.parse_exp(input_expectations, is_lint_mode=False, overrides=input_overrides)
        actual_expectations = self._exp.remove_rebaselined_tests(tests, 'expectations')
        self.assertEqual(expected_expectations, actual_expectations)
        actual_overrides = self._exp.remove_rebaselined_tests(tests, 'overrides')
        self.assertEqual(expected_overrides, actual_overrides)

    def test_remove(self):
        self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
                          'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
                          'Bug(z) failures/expected/crash.html [ Crash ]\n',
                          'Bug(x0) failures/expected/image.html [ Crash ]\n',
                          ['failures/expected/text.html'],
                          'Bug(y) failures/expected/image.html [ ImageOnlyFailure Rebaseline ]\n'
                          'Bug(z) failures/expected/crash.html [ Crash ]\n',
                          'Bug(x0) failures/expected/image.html [ Crash ]\n')

        # Ensure that we don't modify unrelated lines, even if we could rewrite them.
        # i.e., the second line doesn't get rewritten to "Bug(y) failures/expected/skip.html"
        self.assertRemove('Bug(x) failures/expected/text.html [ Failure Rebaseline ]\n'
                          'Bug(Y) failures/expected/image.html [ Skip ]\n'
                          'Bug(z) failures/expected/crash.html\n',
                          '',
                          ['failures/expected/text.html'],
                          'Bug(Y) failures/expected/image.html [ Skip ]\n'
                          'Bug(z) failures/expected/crash.html\n',
                          '')

    def test_get_rebaselining_failures(self):
        # Make sure we find a test as needing a rebaseline even if it is not marked as a failure.
        self.parse_exp('Bug(x) failures/expected/text.html [ Rebaseline ]\n')
        self.assertEqual(len(self._exp.get_rebaselining_failures()), 1)

        self.parse_exp(self.get_basic_expectations())
        self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
class TestExpectationSerializationTests(unittest.TestCase):
    """Round-trip tests for serializing TestExpectationLine objects."""

    def __init__(self, testFunc):
        host = MockHost()
        test_port = host.port_factory.get('test-win-xp', None)
        self._converter = TestConfigurationConverter(test_port.all_test_configurations(), test_port.configuration_specifier_macros())
        unittest.TestCase.__init__(self, testFunc)

    def _tokenize(self, line):
        return TestExpectationParser._tokenize_line('path', line, 0)

    def assert_round_trip(self, in_string, expected_string=None):
        """Tokenizing then re-serializing a line must reproduce it."""
        expectation = self._tokenize(in_string)
        if expected_string is None:
            expected_string = in_string
        self.assertEqual(expected_string, expectation.to_string(self._converter))

    def assert_list_round_trip(self, in_string, expected_string=None):
        """Parsing then re-serializing a whole file must reproduce it."""
        host = MockHost()
        parser = TestExpectationParser(host.port_factory.get('test-win-xp', None), [], allow_rebaseline_modifier=False)
        expectations = parser.parse('path', in_string)
        if expected_string is None:
            expected_string = in_string
        self.assertEqual(expected_string, TestExpectations.list_to_string(expectations, self._converter))

    def test_unparsed_to_string(self):
        expectation = TestExpectationLine()

        self.assertEqual(expectation.to_string(self._converter), '')
        expectation.comment = ' Qux.'
        self.assertEqual(expectation.to_string(self._converter), '# Qux.')
        expectation.name = 'bar'
        self.assertEqual(expectation.to_string(self._converter), 'bar # Qux.')
        expectation.modifiers = ['foo']
        # FIXME: case should be preserved here but we can't until we drop the old syntax.
        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar # Qux.')
        expectation.expectations = ['bAz']
        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ ] # Qux.')
        expectation.expectations = ['bAz1', 'baZ2']
        self.assertEqual(expectation.to_string(self._converter), '[ FOO ] bar [ BAZ1 BAZ2 ] # Qux.')
        expectation.modifiers = ['foo1', 'foO2']
        self.assertEqual(expectation.to_string(self._converter), '[ FOO1 FOO2 ] bar [ BAZ1 BAZ2 ] # Qux.')
        # A line with warnings serializes back to its original string.
        expectation.warnings.append('Oh the horror.')
        self.assertEqual(expectation.to_string(self._converter), '')
        expectation.original_string = 'Yes it is!'
        self.assertEqual(expectation.to_string(self._converter), 'Yes it is!')

    def test_unparsed_list_to_string(self):
        expectation = TestExpectationLine()
        expectation.comment = 'Qux.'
        expectation.name = 'bar'
        expectation.modifiers = ['foo']
        expectation.expectations = ['bAz1', 'baZ2']
        # FIXME: case should be preserved here but we can't until we drop the old syntax.
        self.assertEqual(TestExpectations.list_to_string([expectation]), '[ FOO ] bar [ BAZ1 BAZ2 ] #Qux.')

    def test_parsed_to_string(self):
        expectation_line = TestExpectationLine()
        expectation_line.parsed_bug_modifiers = ['BUGX']
        expectation_line.name = 'test/name/for/realz.html'
        expectation_line.parsed_expectations = set([IMAGE])
        self.assertEqual(expectation_line.to_string(self._converter), None)
        expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
        self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP Release ] test/name/for/realz.html [ ImageOnlyFailure ]')
        expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
        self.assertEqual(expectation_line.to_string(self._converter), 'Bug(x) [ XP ] test/name/for/realz.html [ ImageOnlyFailure ]')

    def test_serialize_parsed_expectations(self):
        expectation_line = TestExpectationLine()
        expectation_line.parsed_expectations = set([])
        # Invert EXPECTATIONS so we can map parsed values back to strings.
        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), '')
        expectation_line.parsed_expectations = set([FAIL])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'fail')
        expectation_line.parsed_expectations = set([PASS, IMAGE])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass image')
        expectation_line.parsed_expectations = set([FAIL, PASS])
        self.assertEqual(expectation_line._serialize_parsed_expectations(parsed_expectation_to_string), 'pass fail')

    def test_serialize_parsed_modifier_string(self):
        expectation_line = TestExpectationLine()
        expectation_line.parsed_bug_modifiers = ['garden-o-matic']
        expectation_line.parsed_modifiers = ['for', 'the']
        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), 'garden-o-matic for the')
        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic for the win')
        expectation_line.parsed_bug_modifiers = []
        expectation_line.parsed_modifiers = []
        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, []), '')
        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'win')
        expectation_line.parsed_bug_modifiers = ['garden-o-matic', 'total', 'is']
        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')
        expectation_line.parsed_bug_modifiers = []
        expectation_line.parsed_modifiers = ['garden-o-matic', 'total', 'is']
        self.assertEqual(expectation_line._serialize_parsed_modifiers(self._converter, ['win']), 'garden-o-matic is total win')

    def test_format_line(self):
        self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], 'comment'), '[ MODIFIERS ] name [ EXPECTATIONS ] #comment')
        self.assertEqual(TestExpectationLine._format_line(['MODIFIERS'], 'name', ['EXPECTATIONS'], None), '[ MODIFIERS ] name [ EXPECTATIONS ]')

    def test_string_roundtrip(self):
        self.assert_round_trip('')
        self.assert_round_trip('FOO')
        self.assert_round_trip('[')
        self.assert_round_trip('FOO [')
        self.assert_round_trip('FOO ] bar')
        self.assert_round_trip(' FOO [')
        self.assert_round_trip(' [ FOO ] ')
        self.assert_round_trip('[ FOO ] bar [ BAZ ]')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux. ')
        self.assert_round_trip('[ FOO ] ] ] bar BAZ')
        self.assert_round_trip('[ FOO ] ] ] bar [ BAZ ]')
        self.assert_round_trip('FOO ] ] bar ==== BAZ')
        self.assert_round_trip('=')
        self.assert_round_trip('#')
        self.assert_round_trip('# ')
        self.assert_round_trip('# Foo')
        self.assert_round_trip('# Foo')
        self.assert_round_trip('# Foo :')
        self.assert_round_trip('# Foo : =')

    def test_list_roundtrip(self):
        self.assert_list_round_trip('')
        self.assert_list_round_trip('\n')
        self.assert_list_round_trip('\n\n')
        self.assert_list_round_trip('bar')
        self.assert_list_round_trip('bar\n# Qux.')
        self.assert_list_round_trip('bar\n# Qux.\n')

    def test_reconstitute_only_these(self):
        lines = []
        reconstitute_only_these = []

        def add_line(matching_configurations, reconstitute):
            expectation_line = TestExpectationLine()
            expectation_line.original_string = "Nay"
            expectation_line.parsed_bug_modifiers = ['BUGX']
            expectation_line.name = 'Yay'
            expectation_line.parsed_expectations = set([IMAGE])
            expectation_line.matching_configurations = matching_configurations
            lines.append(expectation_line)
            if reconstitute:
                reconstitute_only_these.append(expectation_line)

        add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
        add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
        serialized = TestExpectations.list_to_string(lines, self._converter)
        self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nBug(x) [ XP ] Yay [ ImageOnlyFailure ]")
        # Lines not in reconstitute_only_these fall back to original_string.
        serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
        self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")

    def disabled_test_string_whitespace_stripping(self):
        # FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
        self.assert_round_trip('\n', '')
        self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
        self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
        self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
| bsd-3-clause |
adelina-t/nova | nova/tests/functional/v3/test_flavor_extraspecs.py | 31 | 3237 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.v3 import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class FlavorExtraSpecsSampleJsonTests(api_sample_base.ApiSampleTestBaseV3):
    """API sample tests for the flavor extra-specs extension."""

    ADMIN_API = True
    extension_name = 'flavor-extra-specs'
    # TODO(park): Overriding '_api_version' till all functional tests
    # are merged between v2 and v2.1. After that base class variable
    # itself can be changed to 'v2'
    _api_version = 'v2'

    def _get_flags(self):
        f = super(FlavorExtraSpecsSampleJsonTests, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.flavorextraspecs.'
            'Flavorextraspecs')
        return f

    def _flavor_extra_specs_create(self):
        # Shared setup: POST two extra-spec keys onto flavor 1.
        subs = {'value1': 'value1',
                'value2': 'value2'}
        response = self._do_post('flavors/1/os-extra_specs',
                                 'flavor-extra-specs-create-req', subs)
        self._verify_response('flavor-extra-specs-create-resp',
                              subs, response, 200)

    def test_flavor_extra_specs_get(self):
        subs = {'value1': 'value1'}
        self._flavor_extra_specs_create()
        response = self._do_get('flavors/1/os-extra_specs/key1')
        self._verify_response('flavor-extra-specs-get-resp',
                              subs, response, 200)

    def test_flavor_extra_specs_list(self):
        subs = {'value1': 'value1',
                'value2': 'value2'}
        self._flavor_extra_specs_create()
        response = self._do_get('flavors/1/os-extra_specs')
        self._verify_response('flavor-extra-specs-list-resp',
                              subs, response, 200)

    def test_flavor_extra_specs_create(self):
        self._flavor_extra_specs_create()

    def test_flavor_extra_specs_update(self):
        subs = {'value1': 'new_value1'}
        self._flavor_extra_specs_create()
        response = self._do_put('flavors/1/os-extra_specs/key1',
                                'flavor-extra-specs-update-req', subs)
        self._verify_response('flavor-extra-specs-update-resp',
                              subs, response, 200)

    def test_flavor_extra_specs_delete(self):
        self._flavor_extra_specs_create()
        response = self._do_delete('flavors/1/os-extra_specs/key1')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, '')
| apache-2.0 |
tsokalo/ghn-plc | plc/examples/plc-arq-mac-example.py | 1 | 3889 | # -*- Mode:Python; -*-
# /*
# * Copyright (c) 2010 INRIA
# *
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation;
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# *
# * Authors: Alexander Schloegl <alexander.schloegl@gmx.de>
# */
# Python version of plc-mac-example.cc
import ns.plc
import ns.core
import ns.spectrum
import ns.network
def sendPacket(mac, p, dst):
    """Hand packet *p* to *mac* for transmission to address *dst*."""
    mac.Send(p, dst)
def receivedACK():
    """Callback invoked by the MAC layer when an acknowledgement arrives."""
    # Parenthesized print works identically on Python 2 and 3; the bare
    # Python-2 print statement used before is a syntax error on Python 3.
    print('*** ACK received ***')
def main(dummy_argv):
    """Build a two-node PLC topology with ARQ MACs, transmit one packet,
    and report the MAC-level acknowledgement via receivedACK()."""
    # Enable logging (timestamps plus verbose MAC tracing).
    ns.core.LogComponentEnableAll(ns.core.LOG_PREFIX_TIME)
    ns.core.LogComponentEnable('PLC_Mac', ns.core.LOG_LEVEL_FUNCTION)

    # Enable packet printing.
    ns.network.Packet.EnablePrinting()

    # Define spectrum model: 100 bands between 0 and 10 MHz.
    sm = ns.plc.PLC_SpectrumModelHelper().GetSpectrumModel(0, 10e6, 100)

    # Define transmit power spectral density.
    txPsd = ns.spectrum.SpectrumValue(sm)
    txPsd += 1e-8

    # Create cable type.
    cable = ns.plc.PLC_NAYY50SE_Cable(sm)

    # Create two nodes 1000 m apart.
    n1 = ns.plc.PLC_Node()
    n2 = ns.plc.PLC_Node()
    n1.SetPosition(0, 0, 0)
    n2.SetPosition(1000, 0, 0)
    n1.SetName('Node1')
    n2.SetName('Node2')
    nodes = [n1, n2]

    # Link nodes.
    ns.plc.PLC_Line(cable, n1, n2)

    # Setup channel.
    channelHelper = ns.plc.PLC_ChannelHelper()
    channelHelper.Install(nodes)
    channel = channelHelper.GetChannel()

    # Create outlets.
    o1 = ns.plc.PLC_Outlet(n1)
    o2 = ns.plc.PLC_Outlet(n2)

    # Create PHYs and attach them to the outlets.
    phy1 = ns.plc.PLC_InformationRatePhy()
    phy2 = ns.plc.PLC_InformationRatePhy()
    phy1.CreateInterfaces(o1, txPsd)
    phy2.CreateInterfaces(o2, txPsd)

    # Set background noise.
    noiseFloor = ns.plc.PLC_ColoredNoiseFloor(-140, 38.75, -0.72, sm).GetNoisePsd()
    phy1.SetNoiseFloor(noiseFloor)
    phy2.SetNoiseFloor(noiseFloor)

    # Set modulation and coding schemes for header and payload.
    header_mcs = ns.plc.ModulationAndCodingScheme(ns.plc.BPSK_1_2, 0)
    payload_mcs = ns.plc.ModulationAndCodingScheme(ns.plc.QAM64_RATELESS, 0)
    phy1.SetHeaderModulationAndCodingScheme(header_mcs)
    phy2.SetHeaderModulationAndCodingScheme(header_mcs)
    phy1.SetPayloadModulationAndCodingScheme(payload_mcs)
    phy2.SetPayloadModulationAndCodingScheme(payload_mcs)

    # Aggregate RX-Interfaces to ns3 nodes.
    phy1.GetRxInterface().AggregateObject(ns.network.Node())
    phy2.GetRxInterface().AggregateObject(ns.network.Node())

    # Create ARQ MAC layers and assign addresses.
    mac1 = ns.plc.PLC_ArqMac()
    mac2 = ns.plc.PLC_ArqMac()
    mac1.SetPhy(phy1)
    mac2.SetPhy(phy2)
    mac1.SetAddress(ns.network.Mac48Address('00:00:00:00:00:01'))
    mac2.SetAddress(ns.network.Mac48Address('00:00:00:00:00:02'))

    # Set callback function to be called when ACK is received.
    mac1.SetMacAcknowledgementCallback(receivedACK)

    # Calculate channels.
    channel.InitTransmissionChannels()
    channel.CalcTransmissionChannels()

    # Create packet to send.
    p = ns.network.Packet(1024)

    # Schedule transmission of packet p from mac1 to mac2 at t = 1 s.
    ns.core.Simulator.Schedule(ns.core.Seconds(1), sendPacket, mac1, p,
                               ns.network.Mac48Address('00:00:00:00:00:02'))

    # Start simulation.
    ns.core.Simulator.Run()

    # Cleanup simulation.
    ns.core.Simulator.Destroy()
if __name__ == '__main__':
    # Forward the command-line arguments to the simulation entry point.
    import sys
    main(sys.argv)
| gpl-3.0 |
kejbaly2/invoke | invoke/vendor/yaml3/serializer.py | 293 | 4165 |
__all__ = ['Serializer', 'SerializerError']
from .error import YAMLError
from .events import *
from .nodes import *
class SerializerError(YAMLError):
    """Raised when the serializer is used in an invalid state."""
class Serializer:
    """Turn a YAML node graph into a stream of events.

    This is a mixin: it calls ``self.emit()`` and the resolver methods
    ``descend_resolver()`` / ``ascend_resolver()`` / ``resolve()``, none of
    which are defined here — they come from the emitter/resolver classes it
    is combined with (see PyYAML's ``Dumper``).
    """

    # printf-style template for auto-generated anchor names: id001, id002, ...
    ANCHOR_TEMPLATE = 'id%03d'

    def __init__(self, encoding=None,
            explicit_start=None, explicit_end=None, version=None, tags=None):
        # Stream/document options, forwarded verbatim into the emitted events.
        self.use_encoding = encoding
        self.use_explicit_start = explicit_start
        self.use_explicit_end = explicit_end
        self.use_version = version
        self.use_tags = tags
        # Per-document bookkeeping: nodes already emitted, and node -> anchor
        # name (None means "seen once, no anchor assigned").
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0
        # Tri-state lifecycle flag: None = never opened, False = open,
        # True = closed.  open()/close()/serialize() enforce the transitions.
        self.closed = None

    def open(self):
        """Emit the StreamStartEvent; may only be called once, before close()."""
        if self.closed is None:
            self.emit(StreamStartEvent(encoding=self.use_encoding))
            self.closed = False
        elif self.closed:
            raise SerializerError("serializer is closed")
        else:
            raise SerializerError("serializer is already opened")

    def close(self):
        """Emit the StreamEndEvent; requires a prior open(), then is a no-op."""
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif not self.closed:
            self.emit(StreamEndEvent())
            self.closed = True

    #def __del__(self):
    #    self.close()

    def serialize(self, node):
        """Serialize one document rooted at *node* between open() and close().

        Performs two passes: anchor_node() marks shared nodes so they get
        anchors, then serialize_node() emits the actual event stream.
        """
        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
            version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset per-document state so the next serialize() starts fresh.
        self.serialized_nodes = {}
        self.anchors = {}
        self.last_anchor_id = 0

    def anchor_node(self, node):
        """First pass: record which nodes are referenced more than once.

        First visit stores None; a second visit upgrades the entry to a real
        anchor name, and the node's children are not descended into again.
        """
        if node in self.anchors:
            if self.anchors[node] is None:
                self.anchors[node] = self.generate_anchor(node)
        else:
            self.anchors[node] = None
            if isinstance(node, SequenceNode):
                for item in node.value:
                    self.anchor_node(item)
            elif isinstance(node, MappingNode):
                for key, value in node.value:
                    self.anchor_node(key)
                    self.anchor_node(value)

    def generate_anchor(self, node):
        """Return the next sequential anchor name (id001, id002, ...)."""
        self.last_anchor_id += 1
        return self.ANCHOR_TEMPLATE % self.last_anchor_id

    def serialize_node(self, node, parent, index):
        """Second pass: emit events for *node*.

        A node that was already emitted is replaced by an AliasEvent pointing
        at its anchor.  For the first emission, the ``implicit`` flags tell
        the emitter whether the tag can be omitted (it matches what the
        resolver would infer anyway).
        """
        alias = self.anchors[node]
        if node in self.serialized_nodes:
            self.emit(AliasEvent(alias))
        else:
            self.serialized_nodes[node] = True
            self.descend_resolver(parent, index)
            if isinstance(node, ScalarNode):
                # implicit = (plain-style match, quoted-style match)
                detected_tag = self.resolve(ScalarNode, node.value, (True, False))
                default_tag = self.resolve(ScalarNode, node.value, (False, True))
                implicit = (node.tag == detected_tag), (node.tag == default_tag)
                self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
                    style=node.style))
            elif isinstance(node, SequenceNode):
                implicit = (node.tag
                            == self.resolve(SequenceNode, node.value, True))
                self.emit(SequenceStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                index = 0
                for item in node.value:
                    self.serialize_node(item, node, index)
                    index += 1
                self.emit(SequenceEndEvent())
            elif isinstance(node, MappingNode):
                implicit = (node.tag
                            == self.resolve(MappingNode, node.value, True))
                self.emit(MappingStartEvent(alias, node.tag, implicit,
                    flow_style=node.flow_style))
                for key, value in node.value:
                    self.serialize_node(key, node, None)
                    self.serialize_node(value, node, key)
                self.emit(MappingEndEvent())
            self.ascend_resolver()
| bsd-2-clause |
richardtran415/pymatgen | pymatgen/io/tests/test_cif.py | 5 | 44369 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
import numpy as np
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import DummySpecies, Element, Species
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.electronic_structure.core import Magmom
from pymatgen.io.cif import CifBlock, CifParser, CifWriter
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.util.testing import PymatgenTest
try:
    import pybtex
except ImportError:
    # pybtex is an optional dependency (used for BibTeX-related assertions);
    # tests can check `pybtex is None` to skip when it is unavailable.
    pybtex = None
class CifBlockTest(PymatgenTest):
    """Unit tests for CifBlock string parsing and round-trip serialization."""

    def test_to_string(self):
        """Round-trip Graphite.cif through CifBlock and compare line-by-line."""
        with open(self.TEST_FILES_DIR / "Graphite.cif") as f:
            s = f.read()
        c = CifBlock.from_string(s)
        # Parse the stringified block a second time to verify idempotence.
        cif_str_2 = str(CifBlock.from_string(str(c)))
        cif_str = """data_53781-ICSD
_database_code_ICSD 53781
_audit_creation_date 2003-04-01
_audit_update_record 2013-02-01
_chemical_name_systematic Carbon
_chemical_formula_structural C
_chemical_formula_sum C1
_chemical_name_structure_type Graphite(2H)
_chemical_name_mineral 'Graphite 2H'
_exptl_crystal_density_diffrn 2.22
_publ_section_title 'Structure of graphite'
loop_
_citation_id
_citation_journal_full
_citation_year
_citation_journal_volume
_citation_page_first
_citation_page_last
_citation_journal_id_ASTM
primary 'Physical Review (1,1893-132,1963/141,1966-188,1969)'
1917 10 661 696 PHRVAO
loop_
_publ_author_name
'Hull, A.W.'
_cell_length_a 2.47
_cell_length_b 2.47
_cell_length_c 6.8
_cell_angle_alpha 90.
_cell_angle_beta 90.
_cell_angle_gamma 120.
_cell_volume 35.93
_cell_formula_units_Z 4
_symmetry_space_group_name_H-M 'P 63/m m c'
_symmetry_Int_Tables_number 194
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, x-y, -z+1/2'
2 '-x+y, y, -z+1/2'
3 '-y, -x, -z+1/2'
4 '-x+y, -x, -z+1/2'
5 '-y, x-y, -z+1/2'
6 'x, y, -z+1/2'
7 '-x, -x+y, z+1/2'
8 'x-y, -y, z+1/2'
9 'y, x, z+1/2'
10 'x-y, x, z+1/2'
11 'y, -x+y, z+1/2'
12 '-x, -y, z+1/2'
13 '-x, -x+y, -z'
14 'x-y, -y, -z'
15 'y, x, -z'
16 'x-y, x, -z'
17 'y, -x+y, -z'
18 '-x, -y, -z'
19 'x, x-y, z'
20 '-x+y, y, z'
21 '-y, -x, z'
22 '-x+y, -x, z'
23 '-y, x-y, z'
24 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
C0+ 0
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_symmetry_multiplicity
_atom_site_Wyckoff_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_B_iso_or_equiv
_atom_site_occupancy
_atom_site_attached_hydrogens
C1 C0+ 2 b 0 0 0.25 . 1. 0
C2 C0+ 2 c 0.3333 0.6667 0.25 . 1. 0"""
        # Compare whitespace-insensitively: parsed, reference, and re-parsed.
        for l1, l2, l3 in zip(str(c).split("\n"), cif_str.split("\n"), cif_str_2.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())
            self.assertEqual(l2.strip(), l3.strip())

    def test_double_quotes_and_underscore_data(self):
        """Double-quoted values and values starting with '_' must parse."""
        cif_str = """data_test
_symmetry_space_group_name_H-M "P -3 m 1"
_thing '_annoying_data'"""
        cb = CifBlock.from_string(cif_str)
        self.assertEqual(cb["_symmetry_space_group_name_H-M"], "P -3 m 1")
        self.assertEqual(cb["_thing"], "_annoying_data")
        # Serialization normalizes double quotes to single quotes.
        self.assertEqual(str(cb), cif_str.replace('"', "'"))

    def test_double_quoted_data(self):
        """Quotes nested inside quoted values must be preserved verbatim."""
        cif_str = """data_test
_thing ' '_annoying_data''
_other " "_more_annoying_data""
_more ' "even more" ' """
        cb = CifBlock.from_string(cif_str)
        self.assertEqual(cb["_thing"], " '_annoying_data'")
        self.assertEqual(cb["_other"], ' "_more_annoying_data"')
        self.assertEqual(cb["_more"], ' "even more" ')

    def test_nested_fake_multiline_quotes(self):
        """Semicolon lines inside a multi-line value must not end it early."""
        cif_str = """data_test
_thing
;
long quotes
;
still in the quote
;
actually going to end now
;"""
        cb = CifBlock.from_string(cif_str)
        self.assertEqual(
            cb["_thing"],
            " long quotes ; still in the quote" " ; actually going to end now",
        )

    def test_long_loop(self):
        """Loop rows longer than the line limit must wrap when serialized."""
        data = {
            "_stuff1": ["A" * 30] * 2,
            "_stuff2": ["B" * 30] * 2,
            "_stuff3": ["C" * 30] * 2,
        }
        loops = [["_stuff1", "_stuff2", "_stuff3"]]
        cif_str = """data_test
loop_
 _stuff1
 _stuff2
 _stuff3
  AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA  BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
  CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
  AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA  BBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
  CCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"""
        self.assertEqual(str(CifBlock(data, loops, "test")), cif_str)
class CifIOTest(PymatgenTest):
    """Integration tests for CifParser parsing and CifWriter output.

    NOTE(review): indentation of the multi-line CIF fixtures below was
    reconstructed; the assertions compare with .strip() so leading
    whitespace inside fixtures is not significant.
    """

    def test_CifParser(self):
        """Parse several reference CIFs and check formulas, lattice and bibtex."""
        parser = CifParser(self.TEST_FILES_DIR / "LiFePO4.cif")
        for s in parser.get_structures(True):
            self.assertEqual(s.formula, "Li4 Fe4 P4 O16", "Incorrectly parsed cif.")
        parser = CifParser(self.TEST_FILES_DIR / "V2O3.cif")
        for s in parser.get_structures(True):
            self.assertEqual(s.formula, "V4 O6")
        bibtex_str = """
@article{cifref0,
author = "Andersson, G.",
title = "Studies on vanadium oxides. I. Phase analysis",
journal = "Acta Chemica Scandinavica (1-27,1973-42,1988)",
volume = "8",
year = "1954",
pages = "1599--1606"
}
"""
        self.assertEqual(parser.get_bibtex_string().strip(), bibtex_str.strip())
        parser = CifParser(self.TEST_FILES_DIR / "Li2O.cif")
        prim = parser.get_structures(True)[0]
        self.assertEqual(prim.formula, "Li2 O1")
        conv = parser.get_structures(False)[0]
        self.assertEqual(conv.formula, "Li8 O4")
        # test for disordered structures
        parser = CifParser(self.TEST_FILES_DIR / "Li10GeP2S12.cif")
        for s in parser.get_structures(True):
            self.assertEqual(s.formula, "Li20.2 Ge2.06 P3.94 S24", "Incorrectly parsed cif.")
        cif_str = r"""#\#CIF1.1
##########################################################################
#               Crystallographic Information Format file
#               Produced by PyCifRW module
#
#  This is a CIF file.  CIF has been adopted by the International
#  Union of Crystallography as the standard for data archiving and
#  transmission.
#
#  For information on this file format, follow the CIF links at
#  http://www.iucr.org
##########################################################################
data_FePO4
_symmetry_space_group_name_H-M          'P 1'
_cell_length_a                          10.4117668699
_cell_length_b                          6.06717187997
_cell_length_c                          4.75948953998
loop_ # sometimes this is in a loop (incorrectly)
_cell_angle_alpha
91.0
_cell_angle_beta                        92.0
_cell_angle_gamma                       93.0
_chemical_name_systematic               'Generated by pymatgen'
_symmetry_Int_Tables_number             1
_chemical_formula_structural            FePO4
_chemical_formula_sum                   'Fe4 P4 O16'
_cell_volume                            300.65685512
_cell_formula_units_Z                   4
loop_
  _symmetry_equiv_pos_site_id
  _symmetry_equiv_pos_as_xyz
   1  'x, y, z'
loop_
  _atom_site_type_symbol
  _atom_site_label
  _atom_site_symmetry_multiplicity
  _atom_site_fract_x
  _atom_site_fract_y
  _atom_site_fract_z
  _atom_site_attached_hydrogens
  _atom_site_B_iso_or_equiv
  _atom_site_occupancy
   Fe  Fe1  1  0.218728  0.750000  0.474867  0  .  1
   Fe  JJ2  1  0.281272  0.250000  0.974867  0  .  1
# there's a typo here, parser should read the symbol from the
# _atom_site_type_symbol
   Fe  Fe3  1  0.718728  0.750000  0.025133  0  .  1
   Fe  Fe4  1  0.781272  0.250000  0.525133  0  .  1
   P  P5  1  0.094613  0.250000  0.418243  0  .  1
   P  P6  1  0.405387  0.750000  0.918243  0  .  1
   P  P7  1  0.594613  0.250000  0.081757  0  .  1
   P  P8  1  0.905387  0.750000  0.581757  0  .  1
   O  O9  1  0.043372  0.750000  0.707138  0  .  1
   O  O10  1  0.096642  0.250000  0.741320  0  .  1
   O  O11  1  0.165710  0.046072  0.285384  0  .  1
   O  O12  1  0.165710  0.453928  0.285384  0  .  1
   O  O13  1  0.334290  0.546072  0.785384  0  .  1
   O  O14  1  0.334290  0.953928  0.785384  0  .  1
   O  O15  1  0.403358  0.750000  0.241320  0  .  1
   O  O16  1  0.456628  0.250000  0.207138  0  .  1
   O  O17  1  0.543372  0.750000  0.792862  0  .  1
   O  O18  1  0.596642  0.250000  0.758680  0  .  1
   O  O19  1  0.665710  0.046072  0.214616  0  .  1
   O  O20  1  0.665710  0.453928  0.214616  0  .  1
   O  O21  1  0.834290  0.546072  0.714616  0  .  1
   O  O22  1  0.834290  0.953928  0.714616  0  .  1
   O  O23  1  0.903358  0.750000  0.258680  0  .  1
   O  O24  1  0.956628  0.250000  0.292862  0  .  1
"""
        parser = CifParser.from_string(cif_str)
        struct = parser.get_structures(primitive=False)[0]
        self.assertEqual(struct.formula, "Fe4 P4 O16")
        self.assertAlmostEqual(struct.lattice.a, 10.4117668699)
        self.assertAlmostEqual(struct.lattice.b, 6.06717187997)
        self.assertAlmostEqual(struct.lattice.c, 4.75948953998)
        self.assertAlmostEqual(struct.lattice.alpha, 91)
        self.assertAlmostEqual(struct.lattice.beta, 92)
        self.assertAlmostEqual(struct.lattice.gamma, 93)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "srycoo.cif")
            self.assertEqual(parser.get_structures()[0].formula, "Sr5.6 Y2.4 Co8 O21")
        # Test with a decimal Xyz. This should parse as two atoms in
        # conventional cell if it is correct, one if not.
        parser = CifParser(self.TEST_FILES_DIR / "Fe.cif")
        self.assertEqual(len(parser.get_structures(primitive=False)[0]), 2)
        self.assertFalse(parser.has_errors)

    def test_site_symbol_preference(self):
        """_atom_site_type_symbol wins over the site label when both exist."""
        parser = CifParser(self.TEST_FILES_DIR / "site_type_symbol_test.cif")
        self.assertEqual(parser.get_structures()[0].formula, "Ge0.4 Sb0.4 Te1")

    def test_implicit_hydrogen(self):
        """Implicit hydrogens populate site_properties and emit a warning."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "Senegalite_implicit_hydrogen.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Al8 P4 O32")
                self.assertEqual(sum(s.site_properties["implicit_hydrogens"]), 20)
            self.assertIn(
                "Structure has implicit hydrogens defined, "
                "parsed structure unlikely to be suitable for use "
                "in calculations unless hydrogens added.",
                parser.warnings,
            )
            parser = CifParser(self.TEST_FILES_DIR / "cif_implicit_hydrogens_cod_1011130.cif")
            s = parser.get_structures()[0]
            self.assertIn(
                "Structure has implicit hydrogens defined, "
                "parsed structure unlikely to be suitable for use "
                "in calculations unless hydrogens added.",
                parser.warnings,
            )

    def test_CifParserSpringerPauling(self):
        """Parse problematic Springer Materials / Pauling-file CIFs."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Below are 10 tests for CIFs from the Springer Materials/Pauling file DBs.
            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1928405.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Er1 Mn3.888 Fe2.112 Sn6")
            self.assertTrue(parser.has_errors)
            # Partial occupancy on sites, previously parsed as an ordered structure
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1011081.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Zr0.2 Nb0.8")
            self.assertTrue(parser.has_errors)
            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1615854.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Na2 Al2 Si6 O16")
            self.assertTrue(parser.has_errors)
            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1622133.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Ca0.184 Mg13.016 Fe2.8 Si16 O48")
            self.assertTrue(parser.has_errors)
            # Partial occupancy on sites, previously parsed as an ordered structure
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1908491.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Mn0.48 Zn0.52 Ga2 Se4")
            self.assertTrue(parser.has_errors)
            # Partial occupancy on sites, incorrect label, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1811457.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Ba2 Mg0.6 Zr0.2 Ta1.2 O6")
            self.assertTrue(parser.has_errors)
            # Incomplete powder diffraction data, previously unparsable
            # This CIF file contains the molecular species "NH3" which is
            # parsed as "N" because the label is "N{x}" (x = 1,2,..) and the
            # corresponding symbol is "NH3". Since, the label and symbol are switched
            # in CIFs from Springer Materials/Pauling file DBs, CifParser parses the
            # element as "Nh" (Nihonium).
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1002871.cif")
            self.assertEqual(parser.get_structures(True)[0].formula, "Cu1 Br2 Nh6")
            self.assertEqual(parser.get_structures(True)[1].formula, "Cu1 Br4 Nh6")
            self.assertTrue(parser.has_errors)
            # Incomplete powder diffraction data, previously unparsable
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1704003.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Rb4 Mn2 F12")
            self.assertTrue(parser.has_errors)
            # Unparsable species 'OH/OH2', previously parsed as "O"
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1500382.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Mg6 B2 O6 F1.764")
            self.assertTrue(parser.has_errors)
            # Unparsable species 'OH/OH2', previously parsed as "O"
            parser = CifParser(self.TEST_FILES_DIR / "PF_sd_1601634.cif")
            for s in parser.get_structures():
                self.assertEqual(s.formula, "Zn1.29 Fe0.69 As2 Pb1.02 O8")

    def test_CifParserCod(self):
        """
        Parsing problematic cif files from the COD database
        """
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Symbol in capital letters
            parser = CifParser(self.TEST_FILES_DIR / "Cod_2100513.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Ca4 Nb2.0 Al2 O12")
            # Label in capital letters
            parser = CifParser(self.TEST_FILES_DIR / "Cod_4115344.cif")
            for s in parser.get_structures(True):
                self.assertEqual(s.formula, "Mo4 P2 H60 C60 I4 O4")

    def test_parse_symbol(self):
        """
        Test the _parse_symbol function with several potentially
        problematic examples of symbols and labels.
        """
        test_cases = {
            "MgT": "Mg",
            "MgT1": "Mg",
            "H(46A)": "H",
            "O(M)": "O",
            "N(Am)": "N",
            "H1N2a": "H",
            "CO(1)": "Co",
            "Wat1": "O",
            "MgM2A": "Mg",
            "CaX": "Ca",
            "X1": "X",
            "X": "X",
            "OA1": "O",
            "NaA2": "Na",
            "O-H2": "O",
            "OD2": "O",
            "OW": "O",
            "SiT": "Si",
            "SiTet": "Si",
            "Na-Int": "Na",
            "CaD1": "Ca",
            "KAm": "K",
            "D+1": "D",
            "D": "D",
            "D1-": "D",
            "D4": "D",
            "D0": "D",
            "NH": "Nh",
            "NH2": "Nh",
            "NH3": "Nh",
            "SH": "S",
        }
        # Every element symbol should map to itself, in various decorated forms.
        for e in Element:
            name = e.name
            test_cases[name] = name
            if len(name) == 2:
                # Upper-cased variants only make sense for two-letter symbols.
                test_cases[name.upper()] = name
                test_cases[name.upper() + str(1)] = name
                test_cases[name.upper() + "A"] = name
            test_cases[name + str(1)] = name
            test_cases[name + str(2)] = name
            test_cases[name + str(3)] = name
            test_cases[name + str(1) + "A"] = name
        special = {"Hw": "H", "Ow": "O", "Wat": "O", "wat": "O", "OH": "", "OH2": ""}
        test_cases.update(special)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "LiFePO4.cif")
            for sym, expected_symbol in test_cases.items():
                self.assertEqual(parser._parse_symbol(sym), expected_symbol)

    def test_CifWriter(self):
        """Symmetry-refined CifWriter output matches the expected CIF text."""
        filepath = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(filepath)
        writer = CifWriter(poscar.structure, symprec=0.01)
        ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M   Pnma
_cell_length_a   10.41176687
_cell_length_b   6.06717188
_cell_length_c   4.75948954
_cell_angle_alpha   90.00000000
_cell_angle_beta   90.00000000
_cell_angle_gamma   90.00000000
_symmetry_Int_Tables_number   62
_chemical_formula_structural   FePO4
_chemical_formula_sum   'Fe4 P4 O16'
_cell_volume   300.65685512
_cell_formula_units_Z   4
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
  2  '-x, -y, -z'
  3  '-x+1/2, -y, z+1/2'
  4  'x+1/2, y, -z+1/2'
  5  'x+1/2, -y+1/2, -z+1/2'
  6  '-x+1/2, y+1/2, z+1/2'
  7  '-x, y+1/2, -z'
  8  'x, -y+1/2, z'
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  Fe  Fe0  4  0.21872822  0.75000000  0.47486711  1
  P  P1  4  0.09461309  0.25000000  0.41824327  1
  O  O2  8  0.16570974  0.04607233  0.28538394  1
  O  O3  4  0.04337231  0.75000000  0.70713767  1
  O  O4  4  0.09664244  0.25000000  0.74132035  1"""
        for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())

    def test_symmetrized(self):
        """Symmetrized CIF output round-trips via StructureMatcher.fit."""
        filepath = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(filepath, check_for_POTCAR=False)
        writer = CifWriter(poscar.structure, symprec=0.1)
        # Reference output kept for documentation; comparison is done
        # structurally below because site ordering may vary.
        ans = """# generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M   Pnma
_cell_length_a   10.41176687
_cell_length_b   6.06717188
_cell_length_c   4.75948954
_cell_angle_alpha   90.00000000
_cell_angle_beta   90.00000000
_cell_angle_gamma   90.00000000
_symmetry_Int_Tables_number   62
_chemical_formula_structural   FePO4
_chemical_formula_sum   'Fe4 P4 O16'
_cell_volume   300.65685512
_cell_formula_units_Z   4
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
  2  '-x, -y, -z'
  3  '-x+1/2, -y, z+1/2'
  4  'x+1/2, y, -z+1/2'
  5  'x+1/2, -y+1/2, -z+1/2'
  6  '-x+1/2, y+1/2, z+1/2'
  7  '-x, y+1/2, -z'
  8  'x, -y+1/2, z'
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  Fe  Fe1  4  0.218728  0.250000  0.525133  1
  P  P2  4  0.094613  0.750000  0.581757  1
  O  O3  8  0.165710  0.546072  0.714616  1
  O  O4  4  0.043372  0.250000  0.292862  1
  O  O5  4  0.096642  0.750000  0.258680  1"""
        cif = CifParser.from_string(str(writer))
        m = StructureMatcher()
        self.assertTrue(m.fit(cif.get_structures()[0], poscar.structure))
        # for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
        #     self.assertEqual(l1.strip(), l2.strip())
        ans = """# generated using pymatgen
data_LiFePO4
_symmetry_space_group_name_H-M   Pnma
_cell_length_a   10.41037000
_cell_length_b   6.06577000
_cell_length_c   4.74480000
_cell_angle_alpha   90.00000000
_cell_angle_beta   90.00000000
_cell_angle_gamma   90.00000000
_symmetry_Int_Tables_number   62
_chemical_formula_structural   LiFePO4
_chemical_formula_sum   'Li4 Fe4 P4 O16'
_cell_volume   299.619458734
_cell_formula_units_Z   4
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
  2  '-x, -y, -z'
  3  '-x+1/2, -y, z+1/2'
  4  'x+1/2, y, -z+1/2'
  5  'x+1/2, -y+1/2, -z+1/2'
  6  '-x+1/2, y+1/2, z+1/2'
  7  '-x, y+1/2, -z'
  8  'x, -y+1/2, z'
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  Li  Li1  4  0.000000  0.000000  0.000000  1.0
  Fe  Fe2  4  0.218845  0.750000  0.474910  1.0
  P  P3  4  0.094445  0.250000  0.417920  1.0
  O  O4  8  0.165815  0.044060  0.286540  1.0
  O  O5  4  0.043155  0.750000  0.708460  1.0
  O  O6  4  0.096215  0.250000  0.741480  1.0
"""
        s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
        writer = CifWriter(s, symprec=0.1)
        s2 = CifParser.from_string(str(writer)).get_structures()[0]
        self.assertTrue(m.fit(s, s2))
        s = self.get_structure("Li2O")
        writer = CifWriter(s, symprec=0.1)
        s2 = CifParser.from_string(str(writer)).get_structures()[0]
        self.assertTrue(m.fit(s, s2))
        # test angle tolerance.
        s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
        writer = CifWriter(s, symprec=0.1, angle_tolerance=0)
        d = list(writer.ciffile.data.values())[0]
        self.assertEqual(d["_symmetry_Int_Tables_number"], 14)
        s = Structure.from_file(self.TEST_FILES_DIR / "LiFePO4.cif")
        writer = CifWriter(s, symprec=0.1, angle_tolerance=2)
        d = list(writer.ciffile.data.values())[0]
        self.assertEqual(d["_symmetry_Int_Tables_number"], 62)

    def test_disordered(self):
        """Partially occupied site is written as two co-located entries."""
        si = Element("Si")
        n = Element("N")
        coords = list()
        coords.append(np.array([0, 0, 0]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        lattice = Lattice(
            np.array(
                [
                    [3.8401979337, 0.00, 0.00],
                    [1.9200989668, 3.3257101909, 0.00],
                    [0.00, -2.2171384943, 3.1355090603],
                ]
            )
        )
        struct = Structure(lattice, [si, {si: 0.5, n: 0.5}], coords)
        writer = CifWriter(struct)
        ans = """# generated using pymatgen
data_Si1.5N0.5
_symmetry_space_group_name_H-M   'P 1'
_cell_length_a   3.84019793
_cell_length_b   3.84019899
_cell_length_c   3.84019793
_cell_angle_alpha   119.99999086
_cell_angle_beta   90.00000000
_cell_angle_gamma   60.00000914
_symmetry_Int_Tables_number   1
_chemical_formula_structural   Si1.5N0.5
_chemical_formula_sum   'Si1.5 N0.5'
_cell_volume   40.04479464
_cell_formula_units_Z   1
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  Si  Si0  1  0.00000000  0.00000000  0.00000000  1
  Si  Si1  1  0.75000000  0.50000000  0.75000000  0.5
  N  N2  1  0.75000000  0.50000000  0.75000000  0.5"""
        for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())

    def test_cifwrite_without_refinement(self):
        """refine_struct=False keeps the input cell but still labels symmetry."""
        si2 = Structure.from_file(self.TEST_FILES_DIR / "abinit" / "si.cif")
        writer = CifWriter(si2, symprec=1e-3, significant_figures=10, refine_struct=False)
        s = str(writer)
        assert "Fd-3m" in s
        same_si2 = CifParser.from_string(s).get_structures()[0]
        assert len(si2) == len(same_si2)

    def test_specie_cifwriter(self):
        """Oxidation-state species and dummy species round-trip through CIF."""
        si4 = Species("Si", 4)
        si3 = Species("Si", 3)
        n = DummySpecies("X", -3)
        coords = list()
        coords.append(np.array([0.5, 0.5, 0.5]))
        coords.append(np.array([0.75, 0.5, 0.75]))
        coords.append(np.array([0, 0, 0]))
        lattice = Lattice(
            np.array(
                [
                    [3.8401979337, 0.00, 0.00],
                    [1.9200989668, 3.3257101909, 0.00],
                    [0.00, -2.2171384943, 3.1355090603],
                ]
            )
        )
        struct = Structure(lattice, [n, {si3: 0.5, n: 0.5}, si4], coords)
        writer = CifWriter(struct)
        ans = """# generated using pymatgen
data_X1.5Si1.5
_symmetry_space_group_name_H-M   'P 1'
_cell_length_a   3.84019793
_cell_length_b   3.84019899
_cell_length_c   3.84019793
_cell_angle_alpha   119.99999086
_cell_angle_beta   90.00000000
_cell_angle_gamma   60.00000914
_symmetry_Int_Tables_number   1
_chemical_formula_structural   X1.5Si1.5
_chemical_formula_sum   'X1.5 Si1.5'
_cell_volume   40.04479464
_cell_formula_units_Z   1
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
loop_
 _atom_type_symbol
 _atom_type_oxidation_number
  X3-  -3.0
  Si3+  3.0
  Si4+  4.0
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  X3-  X0  1  0.50000000  0.50000000  0.50000000  1
  X3-  X1  1  0.75000000  0.50000000  0.75000000  0.5
  Si3+  Si2  1  0.75000000  0.50000000  0.75000000  0.5
  Si4+  Si3  1  0.00000000  0.00000000  0.00000000  1
"""
        for l1, l2 in zip(str(writer).split("\n"), ans.split("\n")):
            self.assertEqual(l1.strip(), l2.strip())
        # test that mixed valence works properly
        s2 = Structure.from_str(ans, "cif")
        self.assertEqual(struct.composition, s2.composition)

    def test_primes(self):
        """Primed site labels (e.g. O1') must not break parsing."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "C26H16BeN2O2S2.cif")
            for s in parser.get_structures(False):
                self.assertEqual(s.composition, 8 * Composition("C26H16BeN2O2S2"))

    def test_missing_atom_site_type_with_oxistates(self):
        """Oxidation states are taken from _atom_type_* when type_symbol is absent."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            parser = CifParser(self.TEST_FILES_DIR / "P24Ru4H252C296S24N16.cif")
            c = Composition({"S0+": 24, "Ru0+": 4, "H0+": 252, "C0+": 296, "N0+": 16, "P0+": 24})
            for s in parser.get_structures(False):
                self.assertEqual(s.composition, c)

    def test_no_coords_or_species(self):
        """A CIF whose site loop contains only '?' placeholders must raise."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            string = """#generated using pymatgen
data_Si1.5N1.5
_symmetry_space_group_name_H-M   'P 1'
_cell_length_a   3.84019793
_cell_length_b   3.84019899
_cell_length_c   3.84019793
_cell_angle_alpha   119.99999086
_cell_angle_beta   90.00000000
_cell_angle_gamma   60.00000914
_symmetry_Int_Tables_number   1
_chemical_formula_structural   Si1.5N1.5
_chemical_formula_sum   'Si1.5 N1.5'
_cell_volume   40.0447946443
_cell_formula_units_Z   0
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
loop_
 _atom_type_symbol
 _atom_type_oxidation_number
  Si3+  3.0
  Si4+  4.0
  N3-  -3.0
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  ?  ?  ?  ?  ?  ?  ?
"""
            parser = CifParser.from_string(string)
            self.assertRaises(ValueError, parser.get_structures)

    def test_get_lattice_from_lattice_type(self):
        """Lattice can be built from _symmetry_cell_setting when angles are missing."""
        cif_structure = """#generated using pymatgen
data_FePO4
_symmetry_space_group_name_H-M   Pnma
_cell_length_a   10.41176687
_cell_length_b   6.06717188
_cell_length_c   4.75948954
_chemical_formula_structural   FePO4
_chemical_formula_sum   'Fe4 P4 O16'
_cell_volume   300.65685512
_cell_formula_units_Z   4
_symmetry_cell_setting   Orthorhombic
loop_
 _symmetry_equiv_pos_site_id
 _symmetry_equiv_pos_as_xyz
  1  'x, y, z'
loop_
 _atom_site_type_symbol
 _atom_site_label
 _atom_site_symmetry_multiplicity
 _atom_site_fract_x
 _atom_site_fract_y
 _atom_site_fract_z
 _atom_site_occupancy
  Fe  Fe1  1  0.218728  0.750000  0.474867  1
  Fe  Fe2  1  0.281272  0.250000  0.974867  1
  Fe  Fe3  1  0.718728  0.750000  0.025133  1
  Fe  Fe4  1  0.781272  0.250000  0.525133  1
  P  P5  1  0.094613  0.250000  0.418243  1
  P  P6  1  0.405387  0.750000  0.918243  1
  P  P7  1  0.594613  0.250000  0.081757  1
  P  P8  1  0.905387  0.750000  0.581757  1
  O  O9  1  0.043372  0.750000  0.707138  1
  O  O10  1  0.096642  0.250000  0.741320  1
  O  O11  1  0.165710  0.046072  0.285384  1
  O  O12  1  0.165710  0.453928  0.285384  1
  O  O13  1  0.334290  0.546072  0.785384  1
  O  O14  1  0.334290  0.953928  0.785384  1
  O  O15  1  0.403358  0.750000  0.241320  1
  O  O16  1  0.456628  0.250000  0.207138  1
  O  O17  1  0.543372  0.750000  0.792862  1
  O  O18  1  0.596642  0.250000  0.758680  1
  O  O19  1  0.665710  0.046072  0.214616  1
  O  O20  1  0.665710  0.453928  0.214616  1
  O  O21  1  0.834290  0.546072  0.714616  1
  O  O22  1  0.834290  0.953928  0.714616  1
  O  O23  1  0.903358  0.750000  0.258680  1
  O  O24  1  0.956628  0.250000  0.292862  1
"""
        cp = CifParser.from_string(cif_structure)
        s_test = cp.get_structures(False)[0]
        filepath = self.TEST_FILES_DIR / "POSCAR"
        poscar = Poscar.from_file(filepath)
        s_ref = poscar.structure
        sm = StructureMatcher(stol=0.05, ltol=0.01, angle_tol=0.1)
        self.assertTrue(sm.fit(s_ref, s_test))

    def test_empty(self):
        """Empty loop values ('' and empty ;-delimited blocks) parse and round-trip."""
        # single line
        cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n ''")
        self.assertEqual(cb.data["_tag"][0], "")
        # multi line
        cb = CifBlock.from_string("data_mwe\nloop_\n_tag\n;\n;")
        self.assertEqual(cb.data["_tag"][0], "")
        cb2 = CifBlock.from_string(str(cb))
        self.assertEqual(cb, cb2)

    def test_bad_cif(self):
        """Over-occupied sites raise unless occupancy_tolerance relaxes the check."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            f = self.TEST_FILES_DIR / "bad_occu.cif"
            p = CifParser(f)
            self.assertRaises(ValueError, p.get_structures)
            p = CifParser(f, occupancy_tolerance=2)
            s = p.get_structures()[0]
            self.assertAlmostEqual(s[0].species["Al3+"], 0.5)

    def test_one_line_symm(self):
        """Symmetry operations given on a single line must parse."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            f = self.TEST_FILES_DIR / "OneLineSymmP1.cif"
            p = CifParser(f)
            s = p.get_structures()[0]
            self.assertEqual(s.formula, "Ga4 Pb2 O8")

    def test_no_symmops(self):
        """Files without symmetry operations fall back gracefully."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            f = self.TEST_FILES_DIR / "nosymm.cif"
            p = CifParser(f)
            s = p.get_structures()[0]
            self.assertEqual(s.formula, "H96 C60 O8")

    def test_dot_positions(self):
        """Fractional coordinates written as '.' must parse."""
        f = self.TEST_FILES_DIR / "ICSD59959.cif"
        p = CifParser(f)
        s = p.get_structures()[0]
        self.assertEqual(s.formula, "K1 Mn1 F3")

    def test_replacing_finite_precision_frac_coords(self):
        """Near-ideal fractional coordinates are snapped, with a warning."""
        f = self.TEST_FILES_DIR / "cif_finite_precision_frac_coord_error.cif"
        with warnings.catch_warnings():
            p = CifParser(f)
            s = p.get_structures()[0]
            self.assertEqual(str(s.composition), "N5+24")
            self.assertIn(
                "Some fractional co-ordinates rounded to ideal " "values to avoid issues with finite precision.",
                p.warnings,
            )

    def test_empty_deque(self):
        """Nested data_ blocks inside ;-delimited fields must not confuse the parser."""
        s = """data_1526655
_journal_name_full
_space_group_IT_number           227
_symmetry_space_group_name_Hall  'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M   'F d -3 m :1'
_cell_angle_alpha                90
_cell_angle_beta                 90
_cell_angle_gamma                90
_cell_formula_units_Z            8
_cell_length_a                   5.381
_cell_length_b                   5.381
_cell_length_c                   5.381
_cell_volume                     155.808
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
_iucr_refine_fcf_details
;
data_symmetries
loop_
  _space_group_symop_id
  _space_group_symop_operation_xyz
  1  x,y,z
  2  -x+1/2,y+1/2,-z+1/2
  3  -x,-y,-z
  4  x-1/2,-y-1/2,z-1/2
;"""
        p = CifParser.from_string(s)
        self.assertEqual(p.get_structures()[0].formula, "Si1")
        # A nested data_ block without a site loop afterwards must raise.
        cif = """
data_1526655
_journal_name_full
_space_group_IT_number           227
_symmetry_space_group_name_Hall  'F 4d 2 3 -1d'
_symmetry_space_group_name_H-M   'F d -3 m :1'
_cell_angle_alpha                90
_cell_angle_beta                 90
_cell_angle_gamma                90
_cell_formula_units_Z            8
_cell_length_a                   5.381
_cell_length_b                   5.381
_cell_length_c                   5.381
_cell_volume                     155.808
_iucr_refine_fcf_details
;
data_symmetries
Some arbitrary multiline string
;
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
_atom_site_U_iso_or_equiv
Si1 Si 0 0 0 1 0.0
"""
        p = CifParser.from_string(cif)
        self.assertRaises(ValueError, p.get_structures)
class MagCifTest(PymatgenTest):
def setUp(self):
    """Load the magnetic CIF (mCIF) fixtures used by the tests below."""
    warnings.filterwarnings("ignore")
    self.mcif = CifParser(self.TEST_FILES_DIR / "magnetic.example.NiO.mcif")
    self.mcif_ncl = CifParser(self.TEST_FILES_DIR / "magnetic.ncl.example.GdB4.mcif")
    self.mcif_incom = CifParser(self.TEST_FILES_DIR / "magnetic.incommensurate.example.Cr.mcif")
    self.mcif_disord = CifParser(self.TEST_FILES_DIR / "magnetic.disordered.example.CuMnO2.mcif")
    self.mcif_ncl2 = CifParser(self.TEST_FILES_DIR / "Mn3Ge_IR2.mcif")
def tearDown(self):
    """Restore the default warning filter suppressed in setUp()."""
    warnings.simplefilter("default")
def test_mcif_detection(self):
    """feature_flags must flag all mCIFs, and only Cr as incommensurate."""
    self.assertTrue(self.mcif.feature_flags["magcif"])
    self.assertTrue(self.mcif_ncl.feature_flags["magcif"])
    self.assertTrue(self.mcif_incom.feature_flags["magcif"])
    self.assertTrue(self.mcif_disord.feature_flags["magcif"])
    self.assertFalse(self.mcif.feature_flags["magcif_incommensurate"])
    self.assertFalse(self.mcif_ncl.feature_flags["magcif_incommensurate"])
    self.assertTrue(self.mcif_incom.feature_flags["magcif_incommensurate"])
    self.assertFalse(self.mcif_disord.feature_flags["magcif_incommensurate"])
def test_get_structures(self):
# incommensurate structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_incom.get_structures)
# disordered magnetic structures not currently supported
self.assertRaises(NotImplementedError, self.mcif_disord.get_structures)
# taken from self.mcif_ncl, removing explicit magnetic symmops
# so that MagneticSymmetryGroup() has to be invoked
magcifstr = """
data_5yOhtAoR
_space_group.magn_name_BNS "P 4/m' b' m' "
_cell_length_a 7.1316
_cell_length_b 7.1316
_cell_length_c 4.0505
_cell_angle_alpha 90.00
_cell_angle_beta 90.00
_cell_angle_gamma 90.00
loop_
_atom_site_label
_atom_site_type_symbol
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd1 Gd 0.31746 0.81746 0.00000 1
B1 B 0.00000 0.00000 0.20290 1
B2 B 0.17590 0.03800 0.50000 1
B3 B 0.08670 0.58670 0.50000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd1 5.05 5.05 0.0"""
s = self.mcif.get_structures(primitive=False)[0]
self.assertEqual(s.formula, "Ni32 O32")
self.assertTrue(Magmom.are_collinear(s.site_properties["magmom"]))
# example with non-collinear spin
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
s_ncl_from_msg = CifParser.from_string(magcifstr).get_structures(primitive=False)[0]
self.assertEqual(s_ncl.formula, "Gd4 B16")
self.assertFalse(Magmom.are_collinear(s_ncl.site_properties["magmom"]))
self.assertTrue(s_ncl.matches(s_ncl_from_msg))
def test_write(self):
cw_ref_string = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd0 1 0.31746000 0.81746000 0.00000000 1.0
Gd Gd1 1 0.18254000 0.31746000 0.00000000 1.0
Gd Gd2 1 0.81746000 0.68254000 0.00000000 1.0
Gd Gd3 1 0.68254000 0.18254000 0.00000000 1.0
B B4 1 0.00000000 0.00000000 0.20290000 1.0
B B5 1 0.50000000 0.50000000 0.79710000 1.0
B B6 1 0.00000000 0.00000000 0.79710000 1.0
B B7 1 0.50000000 0.50000000 0.20290000 1.0
B B8 1 0.17590000 0.03800000 0.50000000 1.0
B B9 1 0.96200000 0.17590000 0.50000000 1.0
B B10 1 0.03800000 0.82410000 0.50000000 1.0
B B11 1 0.67590000 0.46200000 0.50000000 1.0
B B12 1 0.32410000 0.53800000 0.50000000 1.0
B B13 1 0.82410000 0.96200000 0.50000000 1.0
B B14 1 0.53800000 0.67590000 0.50000000 1.0
B B15 1 0.46200000 0.32410000 0.50000000 1.0
B B16 1 0.08670000 0.58670000 0.50000000 1.0
B B17 1 0.41330000 0.08670000 0.50000000 1.0
B B18 1 0.58670000 0.91330000 0.50000000 1.0
B B19 1 0.91330000 0.41330000 0.50000000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd0 5.05000000 5.05000000 0.00000000
Gd1 -5.05000000 5.05000000 0.00000000
Gd2 5.05000000 -5.05000000 0.00000000
Gd3 -5.05000000 -5.05000000 0.00000000
"""
s_ncl = self.mcif_ncl.get_structures(primitive=False)[0]
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
# from list-type magmoms
list_magmoms = [list(m) for m in s_ncl.site_properties["magmom"]]
# float magmoms (magnitude only)
float_magmoms = [float(m) for m in s_ncl.site_properties["magmom"]]
s_ncl.add_site_property("magmom", list_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_ref_string)
s_ncl.add_site_property("magmom", float_magmoms)
cw = CifWriter(s_ncl, write_magmoms=True)
cw_ref_string_magnitudes = """# generated using pymatgen
data_GdB4
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 7.13160000
_cell_length_b 7.13160000
_cell_length_c 4.05050000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural GdB4
_chemical_formula_sum 'Gd4 B16'
_cell_volume 206.00729003
_cell_formula_units_Z 4
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Gd Gd0 1 0.31746000 0.81746000 0.00000000 1.0
Gd Gd1 1 0.18254000 0.31746000 0.00000000 1.0
Gd Gd2 1 0.81746000 0.68254000 0.00000000 1.0
Gd Gd3 1 0.68254000 0.18254000 0.00000000 1.0
B B4 1 0.00000000 0.00000000 0.20290000 1.0
B B5 1 0.50000000 0.50000000 0.79710000 1.0
B B6 1 0.00000000 0.00000000 0.79710000 1.0
B B7 1 0.50000000 0.50000000 0.20290000 1.0
B B8 1 0.17590000 0.03800000 0.50000000 1.0
B B9 1 0.96200000 0.17590000 0.50000000 1.0
B B10 1 0.03800000 0.82410000 0.50000000 1.0
B B11 1 0.67590000 0.46200000 0.50000000 1.0
B B12 1 0.32410000 0.53800000 0.50000000 1.0
B B13 1 0.82410000 0.96200000 0.50000000 1.0
B B14 1 0.53800000 0.67590000 0.50000000 1.0
B B15 1 0.46200000 0.32410000 0.50000000 1.0
B B16 1 0.08670000 0.58670000 0.50000000 1.0
B B17 1 0.41330000 0.08670000 0.50000000 1.0
B B18 1 0.58670000 0.91330000 0.50000000 1.0
B B19 1 0.91330000 0.41330000 0.50000000 1.0
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
Gd0 0.00000000 0.00000000 7.14177849
Gd1 0.00000000 0.00000000 7.14177849
Gd2 0.00000000 0.00000000 -7.14177849
Gd3 0.00000000 0.00000000 -7.14177849
"""
self.assertEqual(cw.__str__().strip(), cw_ref_string_magnitudes.strip())
# test we're getting correct magmoms in ncl case
s_ncl2 = self.mcif_ncl2.get_structures()[0]
list_magmoms = [list(m) for m in s_ncl2.site_properties["magmom"]]
self.assertEqual(list_magmoms[0][0], 0.0)
self.assertAlmostEqual(list_magmoms[0][1], 5.9160793408726366)
self.assertAlmostEqual(list_magmoms[1][0], -5.1234749999999991)
self.assertAlmostEqual(list_magmoms[1][1], 2.9580396704363183)
# test creating an structure without oxidation state doesn't raise errors
s_manual = Structure(Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s_manual.add_spin_by_site([1, -1])
cw = CifWriter(s_manual, write_magmoms=True)
# check oxidation state
cw_manual_oxi_string = """# generated using pymatgen
data_CsCl
_symmetry_space_group_name_H-M 'P 1'
_cell_length_a 4.20000000
_cell_length_b 4.20000000
_cell_length_c 4.20000000
_cell_angle_alpha 90.00000000
_cell_angle_beta 90.00000000
_cell_angle_gamma 90.00000000
_symmetry_Int_Tables_number 1
_chemical_formula_structural CsCl
_chemical_formula_sum 'Cs1 Cl1'
_cell_volume 74.08800000
_cell_formula_units_Z 1
loop_
_symmetry_equiv_pos_site_id
_symmetry_equiv_pos_as_xyz
1 'x, y, z'
loop_
_atom_type_symbol
_atom_type_oxidation_number
Cs+ 1.0
Cl+ 1.0
loop_
_atom_site_type_symbol
_atom_site_label
_atom_site_symmetry_multiplicity
_atom_site_fract_x
_atom_site_fract_y
_atom_site_fract_z
_atom_site_occupancy
Cs+ Cs0 1 0.00000000 0.00000000 0.00000000 1
Cl+ Cl1 1 0.50000000 0.50000000 0.50000000 1
loop_
_atom_site_moment_label
_atom_site_moment_crystalaxis_x
_atom_site_moment_crystalaxis_y
_atom_site_moment_crystalaxis_z
"""
s_manual.add_oxidation_state_by_site([1, 1])
cw = CifWriter(s_manual, write_magmoms=True)
self.assertEqual(cw.__str__(), cw_manual_oxi_string)
@unittest.skipIf(pybtex is None, "pybtex not present")
def test_bibtex(self):
ref_bibtex_string = """@article{cifref0,
author = "Blanco, J.A.",
journal = "PHYSICAL REVIEW B",
volume = "73",
year = "2006",
pages = "?--?"
}
"""
self.assertEqual(self.mcif_ncl.get_bibtex_string(), ref_bibtex_string)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| mit |
hsuchie4/TACTIC | src/tactic/protocol/rest_test.py | 6 | 5688 | #!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
import tacticenv
from pyasm.common import Container, jsonloads, Environment, Xml
from pyasm.security import Batch
from pyasm.search import Search, SearchType
from pyasm.unittest import UnittestEnvironment
import unittest
import urllib2
class RestTest(unittest.TestCase):
def test_all(my):
test_env = UnittestEnvironment()
test_env.create()
try:
my._setup()
print
print
print
my._test_accept()
my._test_method()
my._test_custom_handler()
print
print
print
finally:
test_env.delete()
def send_request(my, url, headers, data={} ):
ticket = Environment.get_ticket()
method = headers.get("Method")
if method == 'POST':
data['login_ticket'] = ticket
import urllib
data = urllib.urlencode(data)
request = urllib2.Request(url, data)
else:
url = "%s?login_ticket=%s" % (url, ticket)
request = urllib2.Request(url)
for key,value in headers.items():
request.add_header(key,value)
try:
response = urllib2.urlopen(request)
except Exception, e:
# try again
print "WARNING: ", e
response = urllib2.urlopen(request)
#print response.info().headers
value = response.read()
accept = headers.get("Accept")
if accept == "application/json":
value = jsonloads(value)
return value
def _setup(my):
url = SearchType.create("config/url")
url.set_value("url", "/rest/{code}")
url.set_value('widget', '''
<element>
<display class='tactic.protocol.PythonRestHandler'>
<script_path>rest/test</script_path>
</display>
</element>
''')
url.commit()
url = SearchType.create("config/url")
url.set_value("url", "/rest2")
url.set_value('widget', '''
<element>
<display class='tactic.protocol.TestCustomRestHandler'>
</display>
</element>
''')
url.commit()
url = SearchType.create("config/url")
url.set_value("url", "/rest3/{method}/{data}")
url.set_value('widget', '''
<element>
<display class='tactic.protocol.SObjectRestHandler'>
</display>
</element>
''')
url.commit()
script = SearchType.create("config/custom_script")
script.set_value("folder", "rest")
script.set_value("title", "test")
script.set_value("script", """
from pyasm.common import Xml
accept = kwargs.get("Accept")
method = kwargs.get("Method")
print "kwargs: ", kwargs
code = kwargs.get("code")
if code == "CODE0123":
return "OK"
if method == "POST":
return "Method is POST"
if accept == "application/json":
return [3,2,1]
else:
return Xml('''
<arr>
<int>1</int>
<int>2</int>
<int>3</int>
</arr>
''')
""")
script.commit()
def _test_accept(my):
# try json
url = "http://localhost/tactic/unittest/rest"
headers = {
"Accept": "application/json"
}
ret_val = my.send_request(url, headers)
my.assertEquals( [3,2,1], ret_val)
# try xml
url = "http://localhost/tactic/unittest/rest"
headers = {
"Accept": "application/xml"
}
ret_val = my.send_request(url, headers)
xml = Xml(ret_val)
values = xml.get_values("arr/int")
my.assertEquals( ['1','2','3'], values)
# try json
url = "http://localhost/tactic/unittest/rest/CODE0123"
headers = {
"Accept": "application/json"
}
ret_val = my.send_request(url, headers)
my.assertEquals( "OK", ret_val)
def _test_method(my):
# try json
url = "http://localhost/tactic/unittest/rest"
headers = {
"Accept": "application/json",
"Method": "POST"
}
ret_val = my.send_request(url, headers)
my.assertEquals( "Method is POST", ret_val)
def _test_custom_handler(my):
# try json
url = "http://localhost/tactic/unittest/rest2"
headers = {
"Accept": "application/json",
"Method": "POST"
}
ret_val = my.send_request(url, headers)
my.assertEquals( "Test Custom POST", ret_val)
# try json
url = "http://localhost/tactic/unittest/rest3/expression"
headers = {
"Accept": "application/json",
"Method": "POST"
}
data = {
'expression': '@SOBJECT(unittest/person)'
}
ret_val = my.send_request(url, headers, data)
print ret_val
def _test_update(my):
# try json
url = "http://localhost/tactic/unittest/rest3/person/CODE0123"
headers = {
"Accept": "application/json",
"Method": "PUT"
}
data = {
'description': 'abcdefg'
}
ret_val = my.send_request(url, headears, data)
if __name__ == "__main__":
    # Batch() establishes an admin security context for the tests.
    Batch()
    unittest.main()
| epl-1.0 |
x2Ident/x2Ident_test | mitmproxy/mitmproxy/models/connections.py | 2 | 7079 | from __future__ import absolute_import, print_function, division
import time
import copy
import os
import six
from mitmproxy import stateobject
from netlib import certutils
from netlib import tcp
class ClientConnection(tcp.BaseHandler, stateobject.StateObject):

    """
    A client connection

    Attributes:
        address: Remote address
        ssl_established: True if TLS is established, False otherwise
        clientcert: The TLS client certificate
        timestamp_start: Connection start timestamp
        timestamp_ssl_setup: TLS established timestamp
        timestamp_end: Connection end timestamp
    """

    def __init__(self, client_connection, address, server):
        # Eventually, this object is restored from state. We don't have a
        # connection then.
        if client_connection:
            super(ClientConnection, self).__init__(client_connection, address, server)
        else:
            # Restored-from-state placeholder: no live socket, attributes
            # will be filled in by set_state().
            self.connection = None
            self.server = None
            self.wfile = None
            self.rfile = None
            self.address = None
            self.clientcert = None
            self.ssl_established = None

        self.timestamp_start = time.time()
        self.timestamp_end = None
        self.timestamp_ssl_setup = None
        self.protocol = None

    def __bool__(self):
        # Truthy only while a live, unfinished socket exists.
        return bool(self.connection) and not self.finished

    if six.PY2:
        __nonzero__ = __bool__

    def __repr__(self):
        return "<ClientConnection: {ssl}{address}>".format(
            ssl="[ssl] " if self.ssl_established else "",
            address=repr(self.address)
        )

    @property
    def tls_established(self):
        # Alias kept for naming consistency with ServerConnection.
        return self.ssl_established

    # Attributes serialized by StateObject.get_state()/set_state().
    _stateobject_attributes = dict(
        address=tcp.Address,
        ssl_established=bool,
        clientcert=certutils.SSLCert,
        timestamp_start=float,
        timestamp_ssl_setup=float,
        timestamp_end=float,
    )

    def copy(self):
        # Shallow copy: the underlying socket objects are shared.
        return copy.copy(self)

    def send(self, message):
        # A list of byte chunks is sent as one concatenated write.
        if isinstance(message, list):
            message = b''.join(message)
        self.wfile.write(message)
        self.wfile.flush()

    @classmethod
    def from_state(cls, state):
        # Build a connectionless instance and restore its attributes.
        f = cls(None, tuple(), None)
        f.set_state(state)
        return f

    @classmethod
    def make_dummy(cls, address):
        # Minimal stand-in used when no real client connection exists.
        return cls.from_state(dict(
            address=dict(address=address, use_ipv6=False),
            clientcert=None,
            ssl_established=False,
            timestamp_start=None,
            timestamp_end=None,
            timestamp_ssl_setup=None
        ))

    def convert_to_ssl(self, *args, **kwargs):
        # Record when the TLS handshake completed.
        super(ClientConnection, self).convert_to_ssl(*args, **kwargs)
        self.timestamp_ssl_setup = time.time()

    def finish(self):
        # Record when the connection was closed.
        super(ClientConnection, self).finish()
        self.timestamp_end = time.time()
class ServerConnection(tcp.TCPClient, stateobject.StateObject):

    """
    A server connection

    Attributes:
        address: Remote address. Can be both a domain or an IP address.
        ip_address: Resolved remote IP address.
        source_address: Local IP address
        ssl_established: True if TLS is established, False otherwise
        cert: The certificate presented by the remote during the TLS handshake
        sni: Server Name Indication sent by the proxy during the TLS handshake
        via: The underlying server connection (e.g. the connection to the upstream proxy in upstream proxy mode)
        timestamp_start: Connection start timestamp
        timestamp_tcp_setup: TCP ACK received timestamp
        timestamp_ssl_setup: TLS established timestamp
        timestamp_end: Connection end timestamp
    """

    def __init__(self, address, source_address=None):
        tcp.TCPClient.__init__(self, address, source_address)

        self.via = None
        # Timestamps are filled in lazily by connect()/establish_ssl()/finish().
        self.timestamp_start = None
        self.timestamp_end = None
        self.timestamp_tcp_setup = None
        self.timestamp_ssl_setup = None
        self.protocol = None

    def __bool__(self):
        # Truthy only while a live, unfinished socket exists.
        return bool(self.connection) and not self.finished

    if six.PY2:
        __nonzero__ = __bool__

    def __repr__(self):
        if self.ssl_established and self.sni:
            ssl = "[ssl: {0}] ".format(self.sni)
        elif self.ssl_established:
            ssl = "[ssl] "
        else:
            ssl = ""
        return "<ServerConnection: {ssl}{address}>".format(
            ssl=ssl,
            address=repr(self.address)
        )

    @property
    def tls_established(self):
        # Alias kept for naming consistency with ClientConnection.
        return self.ssl_established

    # Attributes serialized by StateObject.get_state()/set_state().
    # NOTE: "via" is added after the class body (self-referential type).
    _stateobject_attributes = dict(
        address=tcp.Address,
        ip_address=tcp.Address,
        source_address=tcp.Address,
        ssl_established=bool,
        cert=certutils.SSLCert,
        sni=str,
        timestamp_start=float,
        timestamp_tcp_setup=float,
        timestamp_ssl_setup=float,
        timestamp_end=float,
    )

    @classmethod
    def from_state(cls, state):
        # Build a connectionless instance and restore its attributes.
        f = cls(tuple())
        f.set_state(state)
        return f

    @classmethod
    def make_dummy(cls, address):
        # Minimal stand-in used when no real server connection exists.
        return cls.from_state(dict(
            address=dict(address=address, use_ipv6=False),
            ip_address=dict(address=address, use_ipv6=False),
            cert=None,
            sni=None,
            source_address=dict(address=('', 0), use_ipv6=False),
            ssl_established=False,
            timestamp_start=None,
            timestamp_tcp_setup=None,
            timestamp_ssl_setup=None,
            timestamp_end=None,
            via=None
        ))

    def copy(self):
        # Shallow copy: the underlying socket objects are shared.
        return copy.copy(self)

    def connect(self):
        self.timestamp_start = time.time()
        tcp.TCPClient.connect(self)
        self.timestamp_tcp_setup = time.time()

    def send(self, message):
        # A list of byte chunks is sent as one concatenated write.
        if isinstance(message, list):
            message = b''.join(message)
        self.wfile.write(message)
        self.wfile.flush()

    def establish_ssl(self, clientcerts, sni, **kwargs):
        if sni and not isinstance(sni, six.string_types):
            raise ValueError("sni must be str, not " + type(sni).__name__)
        clientcert = None
        if clientcerts:
            if os.path.isfile(clientcerts):
                # A single client certificate file was configured.
                clientcert = clientcerts
            else:
                # Otherwise treat it as a directory of per-host .pem files,
                # keyed by the IDNA-encoded hostname.
                path = os.path.join(
                    clientcerts,
                    self.address.host.encode("idna").decode()) + ".pem"
                if os.path.exists(path):
                    clientcert = path
        self.convert_to_ssl(cert=clientcert, sni=sni, **kwargs)
        self.sni = sni
        self.timestamp_ssl_setup = time.time()

    def finish(self):
        tcp.TCPClient.finish(self)
        self.timestamp_end = time.time()
# Assigned after the class body because the attribute type is the class
# itself: a ServerConnection may be reached "via" another ServerConnection
# (e.g. through an upstream proxy).
ServerConnection._stateobject_attributes["via"] = ServerConnection
| gpl-3.0 |
ttakamura/chainer | examples/ptb/train_ptb.py | 2 | 5101 | #!/usr/bin/env python
"""Sample script of recurrent neural network language model.
This code is ported from following implementation written in Torch.
https://github.com/tomsercu/lstm
"""
import argparse
import math
import sys
import time
import numpy as np
from chainer import cuda, Variable, FunctionSet, optimizers
import chainer.functions as F
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
args = parser.parse_args()

# Training hyper-parameters (values from the original Torch LSTM example).
n_epoch = 39     # number of epochs
n_units = 650    # number of units per layer
batchsize = 20   # minibatch size
bprop_len = 35   # length of truncated BPTT
grad_clip = 5    # gradient norm threshold to clip
# Prepare dataset (preliminary download dataset by ./download.py)
vocab = {}


def load_data(filename):
    """Read *filename* and return its tokens as an int32 array of word ids.

    Newlines are replaced by the '<eos>' marker before whitespace
    splitting.  Unknown words are appended to the module-level ``vocab``
    mapping, so repeated calls share one growing vocabulary.
    """
    global vocab, n_vocab
    text = open(filename).read()
    words = text.replace('\n', '<eos>').strip().split()
    dataset = np.ndarray((len(words),), dtype=np.int32)
    for position, token in enumerate(words):
        # setdefault assigns the next free id exactly when the token is new.
        dataset[position] = vocab.setdefault(token, len(vocab))
    return dataset
train_data = load_data('ptb.train.txt')
valid_data = load_data('ptb.valid.txt')
test_data = load_data('ptb.test.txt')
print '#vocab =', len(vocab)

# Prepare RNNLM model: embedding, two LSTM layers (each LSTM takes a
# 4*n_units pre-activation), and an output projection to the vocabulary.
model = FunctionSet(embed=F.EmbedID(len(vocab), n_units),
                    l1_x=F.Linear(n_units, 4 * n_units),
                    l1_h=F.Linear(n_units, 4 * n_units),
                    l2_x=F.Linear(n_units, 4 * n_units),
                    l2_h=F.Linear(n_units, 4 * n_units),
                    l3=F.Linear(n_units, len(vocab)))
# Uniform [-0.1, 0.1] initialization of all parameters.
for param in model.parameters:
    param[:] = np.random.uniform(-0.1, 0.1, param.shape)
if args.gpu >= 0:
    cuda.init(args.gpu)
    model.to_gpu()
# Neural net architecture
def forward_one_step(x_data, y_data, state, train=True):
    """Run one time step of the 2-layer LSTM language model.

    Returns (new_state, softmax cross-entropy loss of predicting y from x).
    Dropout is active only when train=True.
    """
    if args.gpu >= 0:
        x_data = cuda.to_gpu(x_data)
        y_data = cuda.to_gpu(y_data)
    # volatile inputs skip graph construction during evaluation.
    x = Variable(x_data, volatile=not train)
    t = Variable(y_data, volatile=not train)
    h0 = model.embed(x)
    h1_in = model.l1_x(F.dropout(h0, train=train)) + model.l1_h(state['h1'])
    c1, h1 = F.lstm(state['c1'], h1_in)
    h2_in = model.l2_x(F.dropout(h1, train=train)) + model.l2_h(state['h2'])
    c2, h2 = F.lstm(state['c2'], h2_in)
    y = model.l3(F.dropout(h2, train=train))
    state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
    return state, F.softmax_cross_entropy(y, t)
def make_initial_state(batchsize=batchsize, train=True):
    """Return zeroed LSTM cell/hidden states for both layers."""
    mod = cuda if args.gpu >= 0 else np
    return {name: Variable(mod.zeros((batchsize, n_units), dtype=np.float32),
                           volatile=not train)
            for name in ('c1', 'h1', 'c2', 'h2')}
# Setup optimizer: plain SGD; the learning rate is decayed in the
# training loop after epoch 6.
optimizer = optimizers.SGD(lr=1.)
optimizer.setup(model.collect_parameters())
# Evaluation routine
def evaluate(dataset):
    """Return the perplexity of the model over *dataset* (batch size 1)."""
    sum_log_perp = 0
    state = make_initial_state(batchsize=1, train=False)
    for i in xrange(dataset.size - 1):
        x_batch = dataset[i:i + 1]
        y_batch = dataset[i + 1:i + 2]
        state, loss = forward_one_step(x_batch, y_batch, state, train=False)
        sum_log_perp += float(cuda.to_cpu(loss.data))
    return math.exp(sum_log_perp / (dataset.size - 1))
# Learning loop
whole_len = train_data.shape[0]
jump = whole_len / batchsize
cur_log_perp = 0
epoch = 0
start_at = time.time()
cur_at = start_at
state = make_initial_state()
accum_loss = 0
print 'going to train {} iterations'.format(jump * n_epoch)
for i in xrange(jump * n_epoch):
    # Each minibatch row j reads a different contiguous stretch of the
    # corpus, offset by jump * j; wrap around with % whole_len.
    x_batch = np.array([train_data[(jump * j + i) % whole_len]
                        for j in xrange(batchsize)])
    y_batch = np.array([train_data[(jump * j + i + 1) % whole_len]
                        for j in xrange(batchsize)])
    state, loss_i = forward_one_step(x_batch, y_batch, state)
    accum_loss += loss_i
    cur_log_perp += float(cuda.to_cpu(loss_i.data))

    if (i + 1) % bprop_len == 0:  # Run truncated BPTT
        optimizer.zero_grads()
        accum_loss.backward()
        accum_loss.unchain_backward()  # truncate
        accum_loss = 0

        optimizer.clip_grads(grad_clip)
        optimizer.update()

    if (i + 1) % 10000 == 0:
        now = time.time()
        throuput = 10000. / (now - cur_at)
        perp = math.exp(cur_log_perp / 10000)
        print 'iter {} training perplexity: {:.2f} ({:.2f} iters/sec)'.format(
            i + 1, perp, throuput)
        cur_at = now
        cur_log_perp = 0

    if (i + 1) % jump == 0:
        # One full pass over the corpus: validate and maybe decay the lr.
        epoch += 1
        print 'evaluate'
        now = time.time()
        perp = evaluate(valid_data)
        print 'epoch {} validation perplexity: {:.2f}'.format(epoch, perp)
        cur_at += time.time() - now  # skip time of evaluation

        if epoch >= 6:
            optimizer.lr /= 1.2
            print 'learning rate =', optimizer.lr

    sys.stdout.flush()

# Evaluate on test dataset
print 'test'
test_perp = evaluate(test_data)
print 'test perplexity:', test_perp
laic/gensim | gensim/topic_coherence/indirect_confirmation_measure.py | 17 | 6611 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains functions to compute confirmation on a pair of words or word subsets. The advantage of indirect
confirmation measure is that it computes similarity of words in W' and W* with respect to direct confirmations to all words.
Eg. Suppose x and z are both competing brands of cars, which semantically support each other. However, both brands are
seldom mentioned together in documents in the reference corpus. But their confirmations to other words like “road”
or “speed” do strongly correlate. This would be reflected by an indirect confirmation measure. Thus, indirect confirmation
measures may capture semantic support that direct measures would miss.
The formula used to compute indirect confirmation measure is:
m_{sim}_{(m, \gamma)}(W', W*) = s_{sim}(\vec{V}^{\,}_{m,\gamma}(W'), \vec{V}^{\,}_{m,\gamma}(W*))
where s_sim can be cosine, dice or jaccard similarity and
\vec{V}^{\,}_{m,\gamma}(W') = \Bigg \{{\sum_{w_{i} \in W'}^{ } m(w_{i}, w_{j})^{\gamma}}\Bigg \}_{j = 1,...,|W|}
Here 'm' is the direct confirmation measure used.
"""
import logging
import numpy as np
from gensim.topic_coherence import direct_confirmation_measure
from gensim.matutils import cossim
logger = logging.getLogger(__name__)
def _present(w_prime_star, w, w_backtrack):
"""
Internal helper function to return index of (w_prime_star, w) in w_backtrack.
Return -1 if not present.
"""
index = -1
flag = 0
for arr in w_backtrack:
index += 1
if np.all(w_prime_star == arr[0]) and np.all(w == arr[1]):
flag += 1
break
if not flag:
return -1
return index
def _make_seg(w_prime, w, per_topic_postings, measure, gamma, backtrack, num_docs):
"""
Internal helper function to return context vectors for segmentations.
"""
context_vectors = {}
if isinstance(w_prime, np.ndarray):
for w_j in w:
for w_i in w_prime:
if (w_i, w_j) not in backtrack:
backtrack[(w_i, w_j)] = measure[0]([[(w_i, w_j)]], per_topic_postings, num_docs, measure[1])[0]
if w_j not in context_vectors:
context_vectors[w_j] = backtrack[(w_i, w_j)] ** gamma
else:
context_vectors[w_j] += backtrack[(w_i, w_j)] ** gamma
else:
for w_j in w:
if (w_prime, w_j) not in backtrack:
backtrack[(w_prime, w_j)] = measure[0]([[(w_prime, w_j)]], per_topic_postings, num_docs, measure[1])[0]
context_vectors[w_j] = backtrack[(w_prime, w_j)] ** gamma
return (context_vectors, backtrack)
def cosine_similarity(topics, segmented_topics, per_topic_postings, measure, gamma, num_docs):
    """
    This function calculates the indirect cosine measure. Given context vectors
    u = V(W') and w = V(W*) for the word sets of a pair S_i = (W', W*), the
    indirect cosine measure is computed as the cosine similarity between u and w.
    The formula used is:

    m_{sim}_{(m, \\gamma)}(W', W*) = s_{sim}(\\vec{V}^{\\,}_{m,\\gamma}(W'), \\vec{V}^{\\,}_{m,\\gamma}(W*))

    where each vector \\vec{V}^{\\,}_{m,\\gamma}(W') = \\Bigg \\{{\\sum_{w_{i} \\in W'}^{ } m(w_{i}, w_{j})^{\\gamma}}\\Bigg \\}_{j = 1,...,|W|}

    Args:
    ----
    topics : Topics obtained from the trained topic model.
    segmented_topics : Output from the segmentation module of the segmented topics. Is a list of list of tuples.
    per_topic_postings : Output from the probability_estimation module. Is a dictionary of the posting list of all topics.
    measure : String. Direct confirmation measure to be used. Supported values are "nlr" (normalized log ratio).
    gamma : Gamma value for computing W', W* vectors.
    num_docs : Total number of documents in corresponding corpus.

    Returns:
    -------
    s_cos_sim : array of cosine similarity of the context vectors for each segmentation
    """
    if measure == 'nlr':
        # make normalized log ratio measure tuple
        measure = (direct_confirmation_measure.log_ratio_measure, True)
    else:
        raise ValueError("The direct confirmation measure you entered is not currently supported.")
    backtrack = {}  # Backtracking dictionary for storing measure values of topic id tuples eg. (1, 2).
    """
    For backtracking context vectors, we will create a list called w_backtrack to store (w_prime, w) or
    (w_star, w) tuples and a corresponding list context_vector_backtrack which will create a
    mapping of (w_prime or w_star, w) ---> context_vector.
    """
    w_backtrack = []
    context_vector_backtrack = []
    s_cos_sim = []
    for top_words, s_i in zip(topics, segmented_topics):
        for w_prime, w_star in s_i:
            # Step 1. Check if (w_prime, top_words) tuple is in w_backtrack.
            # Step 2. If yes, reuse the cached context vector.
            w_prime_index = _present(w_prime, top_words, w_backtrack)
            if w_backtrack and w_prime_index != -1:
                w_prime_context_vectors = context_vector_backtrack[w_prime_index]
            else:
                w_prime_context_vectors, backtrack_i = _make_seg(w_prime, top_words, per_topic_postings, measure, gamma, backtrack, num_docs)
                backtrack.update(backtrack_i)
                # Update backtracking lists
                w_backtrack.append((w_prime, top_words))
                context_vector_backtrack.append(w_prime_context_vectors)
            # Step 1. Check if (w_star, top_words) tuple is in w_backtrack.
            # Step 2. If yes, reuse the cached context vector.
            w_star_index = _present(w_star, top_words, w_backtrack)
            if w_backtrack and w_star_index != -1:
                w_star_context_vectors = context_vector_backtrack[w_star_index]
            else:
                w_star_context_vectors, backtrack_i = _make_seg(w_star, top_words, per_topic_postings, measure, gamma, backtrack, num_docs)
                backtrack.update(backtrack_i)
                # Update all backtracking lists
                w_backtrack.append((w_star, top_words))
                context_vector_backtrack.append(w_star_context_vectors)
            # Cosine similarity between the two sparse context vectors.
            s_cos_sim_i = cossim(w_prime_context_vectors.items(), w_star_context_vectors.items())
            s_cos_sim.append(s_cos_sim_i)

    return s_cos_sim
| lgpl-2.1 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/lib2to3/pygram.py | 320 | 1118 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
# The pattern-matching grammar used by 2to3 fixers.
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     "PatternGrammar.txt")
class Symbols(object):

    def __init__(self, grammar):
        """Expose every grammar symbol (nonterminal) as an attribute.

        Each attribute's value is the symbol's type, an int >= 256.
        """
        for symbol_name, symbol_type in grammar.symbol2number.iteritems():
            setattr(self, symbol_name, symbol_type)
# Load the full Python grammar and expose its symbols.
python_grammar = driver.load_grammar(_GRAMMAR_FILE)

python_symbols = Symbols(python_grammar)

# Variant without the "print" keyword, for parsing py3-style sources.
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]

# Grammar for 2to3 fixer patterns.
pattern_grammar = driver.load_grammar(_PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
| bsd-2-clause |
minhphung171093/GreenERP_V8 | openerp/sql_db.py | 168 | 23783 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""
from contextlib import contextmanager
from functools import wraps
import logging
import urlparse
import uuid
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
# PostgreSQL type OIDs that should be delivered to Python as plain strings
# (see the identity typecasters registered below).
types_mapping = {
    'date': (1082,),
    'time': (1083,),
    'datetime': (1114,),
}
def unbuffer(symb, cr):
    """psycopg2 typecast helper: pass NULL through, stringify anything else."""
    return None if symb is None else str(symb)
def undecimalize(symb, cr):
    """psycopg2 typecast helper: map NULL to None, numerics to float."""
    return None if symb is None else float(symb)
# Register identity typecasters so date/time/datetime values arrive as
# strings (the ORM does its own parsing), and a float caster for the
# numeric OIDs (float4=700, float8=701, numeric=1700).
for name, typeoid in types_mapping.items():
    psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
import tools
from tools.func import frame_codeinfo
from datetime import datetime as mdt
from datetime import timedelta
import threading
from inspect import currentframe
import re
# Extract the table name from "... from <table> ..." / "... into <table> ..."
# query fragments, for per-table SQL statistics.
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')

# Global counter of executed SQL queries (shared across cursors).
sql_counter = 0
class Cursor(object):
    """Represents an open transaction to the PostgreSQL DB backend,
    acting as a lightweight wrapper around psycopg2's
    ``cursor`` objects.

    ``Cursor`` is the object behind the ``cr`` variable used all
    over the OpenERP code.

    .. rubric:: Transaction Isolation

    One very important property of database transactions is the
    level of isolation between concurrent transactions.
    The SQL standard defines four levels of transaction isolation,
    ranging from the most strict *Serializable* level, to the least
    strict *Read Uncommitted* level. These levels are defined in
    terms of the phenomena that must not occur between concurrent
    transactions, such as *dirty read*, etc.
    In the context of a generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be caused by simply running multiple
    transactions in parallel. Therefore, the preferred level would
    be the *serializable* level, which ensures that a set of
    transactions is guaranteed to produce the same effect as
    running them one at a time in some order.

    However, most database management systems implement a limited
    serializable isolation in the form of
    `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
    providing most of the same advantages as True Serializability,
    with a fraction of the performance cost.
    With PostgreSQL up to version 9.0, this snapshot isolation was
    the implementation of both the ``REPEATABLE READ`` and
    ``SERIALIZABLE`` levels of the SQL standard.
    As of PostgreSQL 9.1, the previous snapshot isolation implementation
    was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
    level was introduced, providing some additional heuristics to
    detect a concurrent update by parallel transactions, and forcing
    one of them to rollback.

    OpenERP implements its own level of locking protection
    for transactions that are highly likely to provoke concurrent
    updates, such as stock reservations or document sequences updates.
    Therefore we mostly care about the properties of snapshot isolation,
    but we don't really need additional heuristics to trigger transaction
    rollbacks, as we are taking care of triggering instant rollbacks
    ourselves when it matters (and we can save the additional performance
    hit of these heuristics).

    As a result of the above, we have selected ``REPEATABLE READ`` as
    the default transaction isolation level for OpenERP cursors, as
    it will be mapped to the desired ``snapshot isolation`` level for
    all supported PostgreSQL version (8.3 - 9.x).

    Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
    read level to serializable before sending it to the database, so it would
    actually select the new serializable mode on PostgreSQL 9.1. Make
    sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
    the performance hit is a concern for you.

    .. attribute:: cache

        Cache dictionary with a "request" (-ish) lifecycle, only lives as
        long as the cursor itself does and proactively cleared when the
        cursor is closed.

        This cache should *only* be used to store repeatable reads as it
        ignores rollbacks and savepoints, it should not be used to store
        *any* data which may be modified during the life of the cursor.
    """
    IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit

    def check(f):
        """Decorator: raise an OperationalError when a closed cursor is used."""
        @wraps(f)
        def wrapper(self, *args, **kwargs):
            if self._closed:
                msg = 'Unable to use a closed cursor.'
                if self.__closer:
                    msg += ' It was closed at %s, line %s' % self.__closer
                raise psycopg2.OperationalError(msg)
            return f(self, *args, **kwargs)
        return wrapper

    def __init__(self, pool, dbname, dsn, serialized=True):
        """Borrow a connection for ``dsn`` from ``pool`` and open a cursor on it.

        :param serialized: whether to run this cursor at the snapshot
            (repeatable read) isolation level; see the class docstring.
        """
        # per-table statistics for read (FROM) and write (INTO) queries
        self.sql_from_log = {}
        self.sql_into_log = {}

        # default log level determined at cursor creation, could be
        # overridden later for debugging purposes
        self.sql_log = _logger.isEnabledFor(logging.DEBUG)

        self.sql_log_count = 0

        # avoid the call of close() (by __del__) if an exception
        # is raised by any of the following initialisations
        self._closed = True

        self.__pool = pool
        self.dbname = dbname

        # Whether to enable snapshot isolation level for this cursor.
        # see also the docstring of Cursor.
        self._serialized = serialized

        self._cnx = pool.borrow(dsn)
        self._obj = self._cnx.cursor()
        if self.sql_log:
            self.__caller = frame_codeinfo(currentframe(), 2)
        else:
            self.__caller = False
        self._closed = False # real initialisation value
        self.autocommit(False)
        self.__closer = False

        self._default_log_exceptions = True

        self.cache = {}

    def __build_dict(self, row):
        # map one result row onto {column_name: value}
        return {d.name: row[i] for i, d in enumerate(self._obj.description)}

    def dictfetchone(self):
        """Fetch the next row as a dict, or None when the result is exhausted."""
        row = self._obj.fetchone()
        return row and self.__build_dict(row)

    def dictfetchmany(self, size):
        """Fetch up to ``size`` rows, each as a dict."""
        return map(self.__build_dict, self._obj.fetchmany(size))

    def dictfetchall(self):
        """Fetch all remaining rows, each as a dict."""
        return map(self.__build_dict, self._obj.fetchall())

    def __del__(self):
        if not self._closed and not self._cnx.closed:
            # Oops. 'self' has not been closed explicitly.
            # The cursor will be deleted by the garbage collector,
            # but the database connection is not put back into the connection
            # pool, preventing some operation on the database like dropping it.
            # This can also lead to a server overload.
            msg = "Cursor not closed explicitly\n"
            if self.__caller:
                msg += "Cursor was created at %s:%s" % self.__caller
            else:
                msg += "Please enable sql debugging to trace the caller."
            _logger.warning(msg)
            self._close(True)

    @check
    def execute(self, query, params=None, log_exceptions=None):
        """Execute ``query`` with the given ``params`` on the underlying cursor.

        :param params: tuple, list or dict of query parameters (or None)
        :param log_exceptions: per-call override of the cursor-wide
            ``_default_log_exceptions`` flag
        :raise ValueError: when ``params`` has an unsupported type
        """
        if '%d' in query or '%f' in query:
            _logger.warning(query)
            _logger.warning("SQL queries cannot contain %d or %f anymore. Use only %s")
        if params and not isinstance(params, (tuple, list, dict)):
            # psycopg2's parameter binding only accepts these containers
            _logger.error("SQL query parameters should be a tuple, list or dict; got %r", params)
            raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))

        if self.sql_log:
            now = mdt.now()

        try:
            params = params or None
            res = self._obj.execute(query, params)
        # 'except X as e' (instead of the Python-2-only 'except X, e')
        # is valid from Python 2.6 on and required by Python 3
        except psycopg2.ProgrammingError as pe:
            if self._default_log_exceptions if log_exceptions is None else log_exceptions:
                _logger.error("Programming error: %s, in query %s", pe, query)
            raise
        except Exception:
            if self._default_log_exceptions if log_exceptions is None else log_exceptions:
                _logger.exception("bad query: %s", self._obj.query or query)
            raise

        # simple query count is always computed
        self.sql_log_count += 1

        # advanced stats only if sql_log is enabled
        if self.sql_log:
            delay = mdt.now() - now
            delay = delay.seconds * 1E6 + delay.microseconds

            _logger.debug("query: %s", self._obj.query)
            res_from = re_from.match(query.lower())
            if res_from:
                self.sql_from_log.setdefault(res_from.group(1), [0, 0])
                self.sql_from_log[res_from.group(1)][0] += 1
                self.sql_from_log[res_from.group(1)][1] += delay
            res_into = re_into.match(query.lower())
            if res_into:
                self.sql_into_log.setdefault(res_into.group(1), [0, 0])
                self.sql_into_log[res_into.group(1)][0] += 1
                self.sql_into_log[res_into.group(1)][1] += delay
        return res

    def split_for_in_conditions(self, ids):
        """Split a list of identifiers into one or more smaller tuples
        safe for IN conditions, after uniquifying them."""
        return tools.misc.split_every(self.IN_MAX, ids)

    def print_log(self):
        """Dump the accumulated per-table SQL statistics (debug level only)."""
        global sql_counter

        if not self.sql_log:
            return

        def process(kind):
            # 'kind' selects which log to dump: 'from' (reads) or 'into'
            # (writes).  The former parameter name shadowed builtin 'type',
            # and a local 'sum' shadowed the builtin too; both renamed.
            sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
            total = 0
            if sqllogs[kind]:
                sqllogitems = list(sqllogs[kind].items())
                _logger.debug("SQL LOG %s:", kind)
                # Sort tables by query count, ascending.  The original code
                # first sorted by delay and then immediately re-sorted with a
                # Python-2-only cmp= sort on the count, making the first sort
                # dead code; a single key= sort keeps the effective order.
                sqllogitems.sort(key=lambda item: item[1][0])
                for r in sqllogitems:
                    delay = timedelta(microseconds=r[1][1])
                    _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
                    total += r[1][1]
                # reset the statistics once reported (the duplicate clear()
                # that followed the SUM line was redundant and is removed)
                sqllogs[kind].clear()
            total = timedelta(microseconds=total)
            _logger.debug("SUM %s:%s/%d [%d]", kind, total, self.sql_log_count, sql_counter)
        process('from')
        process('into')
        self.sql_log_count = 0
        self.sql_log = False

    @check
    def close(self):
        """Close the cursor and give its connection back to the pool."""
        return self._close(False)

    def _close(self, leak=False):
        """Really close the cursor.

        :param leak: when True, mark the connection as leaked instead of
            returning it to the pool (used by __del__ for forgotten cursors).
        """
        global sql_counter

        if not self._obj:
            return

        # the request-lifecycle cache dies with the cursor
        del self.cache

        if self.sql_log:
            self.__closer = frame_codeinfo(currentframe(), 3)

        # simple query count is always computed
        sql_counter += self.sql_log_count

        # advanced stats only if sql_log is enabled
        self.print_log()

        self._obj.close()

        # This force the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part because browse records keep a reference to the cursor.
        del self._obj
        self._closed = True

        # Clean the underlying connection.
        self._cnx.rollback()

        if leak:
            self._cnx.leaked = True
        else:
            # never keep connections to template databases in the pool:
            # they would prevent dropping/copying those databases
            chosen_template = tools.config['db_template']
            templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
            keep_in_pool = self.dbname not in templates_list
            self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)

    @check
    def autocommit(self, on):
        """Switch the underlying connection to/from autocommit mode."""
        if on:
            isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
        else:
            # If a serializable cursor was requested, we
            # use the appropriate PostgreSQL isolation level
            # that maps to snapshot isolation.
            # For all supported PostgreSQL versions (8.3-9.x),
            # this is currently the ISOLATION_REPEATABLE_READ.
            # See also the docstring of this class.
            # NOTE: up to psycopg 2.4.2, repeatable read
            # is remapped to serializable before being
            # sent to the database, so it is in fact
            # unavailable for use with pg 9.1.
            isolation_level = \
                ISOLATION_LEVEL_REPEATABLE_READ \
                if self._serialized \
                else ISOLATION_LEVEL_READ_COMMITTED
        self._cnx.set_isolation_level(isolation_level)

    @check
    def commit(self):
        """ Perform an SQL `COMMIT`
        """
        return self._cnx.commit()

    @check
    def rollback(self):
        """ Perform an SQL `ROLLBACK`
        """
        return self._cnx.rollback()

    def __enter__(self):
        """ Using the cursor as a contextmanager automatically commits and
            closes it::

                with cr:
                    cr.execute(...)

                # cr is committed if no failure occurred
                # cr is closed in any case
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.commit()
        self.close()

    @contextmanager
    @check
    def savepoint(self):
        """context manager entering in a new savepoint"""
        name = uuid.uuid1().hex
        self.execute('SAVEPOINT "%s"' % name)
        try:
            yield
            self.execute('RELEASE SAVEPOINT "%s"' % name)
        except:
            # deliberately bare: the savepoint must be rolled back on *any*
            # exception, including KeyboardInterrupt, before re-raising
            self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            raise

    @check
    def __getattr__(self, name):
        # delegate everything else (fetchone, rowcount, ...) to psycopg2
        return getattr(self._obj, name)

    @property
    def closed(self):
        # True once close()/_close() has run; checked by the @check decorator
        return self._closed
class TestCursor(Cursor):
    """ A cursor to be used for tests. It keeps the transaction open across
        several requests, and simulates committing, rolling back, and closing.
    """
    def __init__(self, *args, **kwargs):
        super(TestCursor, self).__init__(*args, **kwargs)
        # in order to simulate commit and rollback, the cursor maintains a
        # savepoint at its last commit
        self.execute("SAVEPOINT test_cursor")
        # we use a lock to serialize concurrent requests
        self._lock = threading.RLock()

    def acquire(self):
        # take the serialization lock before a request uses this cursor
        self._lock.acquire()

    def release(self):
        self._lock.release()

    def force_close(self):
        # really close the cursor (test teardown), bypassing the simulated
        # close() below
        super(TestCursor, self).close()

    def close(self):
        # simulated close: discard uncommitted work and hand the lock back,
        # but keep the transaction (and its savepoint) open for the next
        # request
        if not self._closed:
            self.rollback() # for stuff that has not been committed
        self.release()

    def autocommit(self, on):
        # autocommit would break the long-lived test transaction; ignore it
        _logger.debug("TestCursor.autocommit(%r) does nothing", on)

    def commit(self):
        # simulated commit: move the savepoint forward instead of committing
        self.execute("RELEASE SAVEPOINT test_cursor")
        self.execute("SAVEPOINT test_cursor")

    def rollback(self):
        # simulated rollback: return to the last simulated commit point
        self.execute("ROLLBACK TO SAVEPOINT test_cursor")
        self.execute("SAVEPOINT test_cursor")
class PsycoConnection(psycopg2.extensions.connection):
    # Marker subclass passed as ``connection_factory`` to psycopg2.connect()
    # by the pool, so pooled connections can carry extra attributes
    # (e.g. ``_original_dsn``, ``leaked``) and be extended later.
    pass
class ConnectionPool(object):
    """ The pool of connections to database(s)

        Keep a set of connections to pg databases open, and reuse them
        to open cursors for all transactions.

        The connections are *not* automatically closed. Only a close_db()
        can trigger that.
    """

    def locked(fun):
        """Decorator serializing access to the pool's shared state."""
        @wraps(fun)
        def _locked(self, *args, **kwargs):
            self._lock.acquire()
            try:
                return fun(self, *args, **kwargs)
            finally:
                self._lock.release()
        return _locked

    def __init__(self, maxconn=64):
        # list of (connection, in_use) pairs, oldest first
        self._connections = []
        # hard cap on the number of simultaneously open connections
        self._maxconn = max(maxconn, 1)
        self._lock = threading.Lock()

    def __repr__(self):
        used = len([1 for c, u in self._connections[:] if u])
        count = len(self._connections)
        return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)

    def _debug(self, msg, *args):
        # prefix every pool debug message with the pool's repr()
        _logger.debug(('%r ' + msg), self, *args)

    @locked
    def borrow(self, dsn):
        """Return a connection for ``dsn``, reusing a free pooled one if possible.

        :raise PoolError: when the pool is full of in-use connections.
        """
        # free dead and leaked connections
        for i, (cnx, _) in tools.reverse_enumerate(self._connections):
            if cnx.closed:
                self._connections.pop(i)
                self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
                continue
            if getattr(cnx, 'leaked', False):
                delattr(cnx, 'leaked')
                self._connections.pop(i)
                self._connections.append((cnx, False))
                _logger.warning('%r: Free leaked connection to %r', self, cnx.dsn)

        # reuse an existing, free connection to the same dsn if one is usable
        for i, (cnx, used) in enumerate(self._connections):
            if not used and cnx._original_dsn == dsn:
                try:
                    cnx.reset()
                except psycopg2.OperationalError:
                    # unusable connection: close it and leave it in place; the
                    # dead-connection sweep above will drop it on next borrow
                    self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
                    # psycopg2 2.4.4 and earlier do not allow closing a closed connection
                    if not cnx.closed:
                        cnx.close()
                    continue
                self._connections.pop(i)
                self._connections.append((cnx, True))
                self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
                return cnx

        if len(self._connections) >= self._maxconn:
            # try to remove the oldest connection not used
            for i, (cnx, used) in enumerate(self._connections):
                if not used:
                    self._connections.pop(i)
                    if not cnx.closed:
                        cnx.close()
                    self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
                    break
            else:
                # note: this code is called only if the for loop has completed (no break)
                raise PoolError('The Connection Pool Is Full')

        try:
            result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection)
        except psycopg2.Error:
            _logger.exception('Connection to the database failed')
            raise
        result._original_dsn = dsn
        self._connections.append((result, True))
        self._debug('Create new connection')
        return result

    @locked
    def give_back(self, connection, keep_in_pool=True):
        """Mark ``connection`` as free again (or close it when not pooled).

        :raise PoolError: when ``connection`` was not borrowed from this pool.
        """
        self._debug('Give back connection to %r', connection.dsn)
        for i, (cnx, used) in enumerate(self._connections):
            if cnx is connection:
                self._connections.pop(i)
                if keep_in_pool:
                    self._connections.append((cnx, False))
                    self._debug('Put connection to %r in pool', cnx.dsn)
                else:
                    self._debug('Forgot connection to %r', cnx.dsn)
                    cnx.close()
                break
        else:
            # BUGFIX: message used to read "does not below to the pool"
            raise PoolError('This connection does not belong to the pool')

    @locked
    def close_all(self, dsn=None):
        """Close every pooled connection (optionally only those for ``dsn``)."""
        count = 0
        last = None
        for i, (cnx, used) in tools.reverse_enumerate(self._connections):
            if dsn is None or cnx._original_dsn == dsn:
                cnx.close()
                last = self._connections.pop(i)[0]
                count += 1
        _logger.info('%r: Closed %d connections %s', self, count,
                     (dsn and last and 'to %r' % last.dsn) or '')
class Connection(object):
    """A lightweight handle on a postgres database, factory for cursors."""

    def __init__(self, pool, dbname, dsn):
        """Remember the pool and target database; no connection is opened yet."""
        self.dbname = dbname
        self.dsn = dsn
        self.__pool = pool

    def cursor(self, serialized=True):
        """Open a new Cursor on this database (snapshot isolation by default)."""
        cursor_type = 'serialized ' if serialized else ''
        _logger.debug('create %scursor to %r', cursor_type, self.dsn)
        return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)

    def test_cursor(self, serialized=True):
        """Open a new TestCursor (simulated commit/rollback) on this database."""
        cursor_type = 'serialized ' if serialized else ''
        _logger.debug('create test %scursor to %r', cursor_type, self.dsn)
        return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized)

    # serialized_cursor is deprecated - cursors are serialized by default
    serialized_cursor = cursor

    def __nonzero__(self):
        """Check if connection is possible"""
        try:
            _logger.warning("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
            cr = self.cursor()
            cr.close()
            return True
        except Exception:
            return False
def dsn(db_or_uri):
    """parse the given `db_or_uri` and return a 2-tuple (dbname, uri)"""
    if db_or_uri.startswith(('postgresql://', 'postgres://')):
        # already a connection URI: extract the database name from it,
        # falling back to the user name, then the host name
        parts = urlparse.urlsplit(db_or_uri)
        if len(parts.path) > 1:
            db_name = parts.path[1:]
        elif parts.username:
            db_name = parts.username
        else:
            db_name = parts.hostname
        return db_name, db_or_uri

    # plain database name: assemble a libpq DSN from the server-wide config
    pieces = []
    for param in ('host', 'port', 'user', 'password'):
        value = tools.config['db_' + param]
        if value:
            pieces.append('%s=%s ' % (param, value))
    return db_or_uri, '%sdbname=%s' % (''.join(pieces), db_or_uri)
# Process-wide connection pool, lazily created by db_connect().
_Pool = None
def db_connect(to, allow_uri=False):
    """Return a Connection to ``to`` (a database name or, if allowed, a URI),
    lazily creating the process-wide connection pool on first use.
    """
    global _Pool
    if _Pool is None:
        _Pool = ConnectionPool(int(tools.config['db_maxconn']))

    db, uri = dsn(to)
    # dsn() only returns a db different from its argument for URIs
    if db != to and not allow_uri:
        raise ValueError('URI connections not allowed')
    return Connection(_Pool, db, uri)
def close_db(db_name):
    """Close every pooled connection to ``db_name``.

    You might want to call openerp.modules.registry.RegistryManager.delete(db_name)
    along this function.
    """
    global _Pool
    if _Pool is not None:
        _, uri = dsn(db_name)
        _Pool.close_all(uri)
def close_all():
    """Close every connection held by the process-wide pool, if it exists."""
    global _Pool
    if _Pool is not None:
        _Pool.close_all()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
idea4bsd/idea4bsd | python/lib/Lib/site-packages/django/forms/forms.py | 73 | 21332 | """
Form classes
"""
from django.core.exceptions import ValidationError
from django.utils.copycompat import deepcopy
from django.utils.datastructures import SortedDict
from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.safestring import mark_safe
from fields import Field, FileField
from widgets import Media, media_property, TextInput, Textarea
from util import flatatt, ErrorDict, ErrorList
# Public API of this module.
__all__ = ('BaseForm', 'Form')

# Pseudo-field name under which form-wide (non-field) errors are stored
# in the form's ErrorDict.
NON_FIELD_ERRORS = '__all__'
def pretty_name(name):
    """Turn a field name like 'first_name' into the label 'First name'.

    Empty or missing names yield an empty unicode string.
    """
    if name:
        return name.replace('_', ' ').capitalize()
    return u''
def get_declared_fields(bases, attrs, with_base_fields=True):
    """
    Create a list of form field instances from the passed in 'attrs', plus any
    similar fields on the base classes (in 'bases'). This is used by both the
    Form and ModelForm metaclasses.

    If 'with_base_fields' is True, all fields from the bases are used.
    Otherwise, only fields in the 'declared_fields' attribute on the bases are
    used. The distinction is useful in ModelForm subclassing.
    Also integrates any additional media definitions
    """
    # Pull every Field attribute out of the class namespace (pop removes it
    # from 'attrs') and order them by declaration order, which Field records
    # in its 'creation_counter'.  NOTE: popping while iterating items() is
    # safe on Python 2 only, where items() returns a list copy.
    fields = [(field_name, attrs.pop(field_name)) for field_name, obj in attrs.items() if isinstance(obj, Field)]
    fields.sort(key=lambda x: x[1].creation_counter)

    # If this class is subclassing another Form, add that Form's fields.
    # Note that we loop over the bases in *reverse*. This is necessary in
    # order to preserve the correct order of fields.
    if with_base_fields:
        for base in bases[::-1]:
            if hasattr(base, 'base_fields'):
                # list concatenation of dict.items() is Python-2 only
                fields = base.base_fields.items() + fields
    else:
        for base in bases[::-1]:
            if hasattr(base, 'declared_fields'):
                fields = base.declared_fields.items() + fields

    return SortedDict(fields)
class DeclarativeFieldsMetaclass(type):
    """
    Metaclass that converts Field attributes to a dictionary called
    'base_fields', taking into account parent class 'base_fields' as well.
    """
    def __new__(cls, name, bases, attrs):
        # collect declared Field attributes (removing them from the class
        # namespace) before the class object is created
        attrs['base_fields'] = get_declared_fields(bases, attrs)
        new_class = super(DeclarativeFieldsMetaclass,
                     cls).__new__(cls, name, bases, attrs)
        # attach a computed 'media' property unless the class defines its own
        if 'media' not in attrs:
            new_class.media = media_property(new_class)
        return new_class
class BaseForm(StrAndUnicode):
    # This is the main implementation of all the Form logic. Note that this
    # class is different than Form. See the comments by the Form class for more
    # information. Any improvements to the form API should be made to *this*
    # class, not to the Form class.
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False):
        # A form is "bound" (and will validate) as soon as any data or files
        # are supplied, even empty dicts.
        self.is_bound = data is not None or files is not None
        self.data = data or {}
        self.files = files or {}
        self.auto_id = auto_id
        self.prefix = prefix
        self.initial = initial or {}
        self.error_class = error_class
        self.label_suffix = label_suffix
        self.empty_permitted = empty_permitted
        self._errors = None # Stores the errors after clean() has been called.
        self._changed_data = None

        # The base_fields class attribute is the *class-wide* definition of
        # fields. Because a particular *instance* of the class might want to
        # alter self.fields, we create self.fields here by copying base_fields.
        # Instances should always modify self.fields; they should not modify
        # self.base_fields.
        self.fields = deepcopy(self.base_fields)

    def __unicode__(self):
        # default rendering is the table-row based layout
        return self.as_table()

    def __iter__(self):
        # iterating a form yields its fields wrapped as BoundField objects
        for name, field in self.fields.items():
            yield BoundField(self, field, name)

    def __getitem__(self, name):
        "Returns a BoundField with the given name."
        try:
            field = self.fields[name]
        except KeyError:
            raise KeyError('Key %r not found in Form' % name)
        return BoundField(self, field, name)

    def _get_errors(self):
        "Returns an ErrorDict for the data provided for the form"
        if self._errors is None:
            # validation is lazy: triggered on first access of .errors
            self.full_clean()
        return self._errors
    errors = property(_get_errors)

    def is_valid(self):
        """
        Returns True if the form has no errors. Otherwise, False. If errors are
        being ignored, returns False.
        """
        return self.is_bound and not bool(self.errors)

    def add_prefix(self, field_name):
        """
        Returns the field name with a prefix appended, if this Form has a
        prefix set.

        Subclasses may wish to override.
        """
        return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name

    def add_initial_prefix(self, field_name):
        """
        Add an 'initial' prefix for checking dynamic initial values
        """
        return u'initial-%s' % self.add_prefix(field_name)

    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
        "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
        top_errors = self.non_field_errors() # Errors that should be displayed above all fields.
        output, hidden_fields = [], []

        for name, field in self.fields.items():
            html_class_attr = ''
            bf = BoundField(self, field, name)
            bf_errors = self.error_class([conditional_escape(error) for error in bf.errors]) # Escape and cache in local variable.
            if bf.is_hidden:
                # hidden fields render at the end of the form; their errors
                # are promoted into the top-level error list
                if bf_errors:
                    top_errors.extend([u'(Hidden field %s) %s' % (name, force_unicode(e)) for e in bf_errors])
                hidden_fields.append(unicode(bf))
            else:
                # Create a 'class="..."' attribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes

                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_unicode(bf_errors))

                if bf.label:
                    label = conditional_escape(force_unicode(bf.label))
                    # Only add the suffix if the label does not end in
                    # punctuation.
                    if self.label_suffix:
                        if label[-1] not in ':?.!':
                            label += self.label_suffix
                    label = bf.label_tag(label) or ''
                else:
                    label = ''

                if field.help_text:
                    help_text = help_text_html % force_unicode(field.help_text)
                else:
                    help_text = u''

                output.append(normal_row % {
                    'errors': force_unicode(bf_errors),
                    'label': force_unicode(label),
                    'field': unicode(bf),
                    'help_text': help_text,
                    'html_class_attr': html_class_attr
                })

        if top_errors:
            output.insert(0, error_row % force_unicode(top_errors))

        if hidden_fields: # Insert any hidden fields in the last row.
            str_hidden = u''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    # NOTE: html_class_attr here carries the value left over
                    # from the last loop iteration above.
                    last_row = (normal_row % {'errors': '', 'label': '',
                                              'field': '', 'help_text':'',
                                              'html_class_attr': html_class_attr})
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        return mark_safe(u'\n'.join(output))

    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        return self._html_output(
            normal_row = u'<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
            error_row = u'<tr><td colspan="2">%s</td></tr>',
            row_ender = u'</td></tr>',
            help_text_html = u'<br /><span class="helptext">%s</span>',
            errors_on_separate_row = False)

    def as_ul(self):
        "Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
        return self._html_output(
            normal_row = u'<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
            error_row = u'<li>%s</li>',
            row_ender = '</li>',
            help_text_html = u' <span class="helptext">%s</span>',
            errors_on_separate_row = False)

    def as_p(self):
        "Returns this form rendered as HTML <p>s."
        return self._html_output(
            normal_row = u'<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
            error_row = u'%s',
            row_ender = '</p>',
            help_text_html = u' <span class="helptext">%s</span>',
            errors_on_separate_row = True)

    def non_field_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        field -- i.e., from Form.clean(). Returns an empty ErrorList if there
        are none.
        """
        return self.errors.get(NON_FIELD_ERRORS, self.error_class())

    def _raw_value(self, fieldname):
        """
        Returns the raw_value for a particular field name. This is just a
        convenient wrapper around widget.value_from_datadict.
        """
        field = self.fields[fieldname]
        prefix = self.add_prefix(fieldname)
        return field.widget.value_from_datadict(self.data, self.files, prefix)

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self.cleaned_data.
        """
        self._errors = ErrorDict()
        if not self.is_bound: # Stop further processing.
            return
        self.cleaned_data = {}
        # If the form is permitted to be empty, and none of the form data has
        # changed from the initial data, short circuit any validation.
        if self.empty_permitted and not self.has_changed():
            return
        self._clean_fields()
        self._clean_form()
        self._post_clean()
        if self._errors:
            # an invalid form exposes no cleaned_data attribute at all
            del self.cleaned_data

    def _clean_fields(self):
        for name, field in self.fields.items():
            # value_from_datadict() gets the data from the data dictionaries.
            # Each widget type knows how to retrieve its own data, because some
            # widgets split data over several HTML fields.
            value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
            try:
                if isinstance(field, FileField):
                    # FileFields compare against the initial value to support
                    # "keep the existing file" semantics
                    initial = self.initial.get(name, field.initial)
                    value = field.clean(value, initial)
                else:
                    value = field.clean(value)
                self.cleaned_data[name] = value
                # a clean_<fieldname>() hook on the form may replace the value
                if hasattr(self, 'clean_%s' % name):
                    value = getattr(self, 'clean_%s' % name)()
                    self.cleaned_data[name] = value
            except ValidationError, e:
                self._errors[name] = self.error_class(e.messages)
                if name in self.cleaned_data:
                    del self.cleaned_data[name]

    def _clean_form(self):
        try:
            self.cleaned_data = self.clean()
        except ValidationError, e:
            # form-wide errors are filed under the '__all__' pseudo-field
            self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages)

    def _post_clean(self):
        """
        An internal hook for performing additional cleaning after form cleaning
        is complete. Used for model validation in model forms.
        """
        pass

    def clean(self):
        """
        Hook for doing any extra form-wide cleaning after Field.clean() been
        called on every field. Any ValidationError raised by this method will
        not be associated with a particular field; it will have a special-case
        association with the field named '__all__'.
        """
        return self.cleaned_data

    def has_changed(self):
        """
        Returns True if data differs from initial.
        """
        return bool(self.changed_data)

    def _get_changed_data(self):
        if self._changed_data is None:
            self._changed_data = []
            # XXX: For now we're asking the individual widgets whether or not the
            # data has changed. It would probably be more efficient to hash the
            # initial data, store it in a hidden field, and compare a hash of the
            # submitted data, but we'd need a way to easily get the string value
            # for a given field. Right now, that logic is embedded in the render
            # method of each widget.
            for name, field in self.fields.items():
                prefixed_name = self.add_prefix(name)
                data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
                if not field.show_hidden_initial:
                    initial_value = self.initial.get(name, field.initial)
                else:
                    # the initial value was rendered into a hidden input; read
                    # it back from the submitted data
                    initial_prefixed_name = self.add_initial_prefix(name)
                    hidden_widget = field.hidden_widget()
                    initial_value = hidden_widget.value_from_datadict(
                        self.data, self.files, initial_prefixed_name)
                if field.widget._has_changed(initial_value, data_value):
                    self._changed_data.append(name)
        return self._changed_data
    changed_data = property(_get_changed_data)

    def _get_media(self):
        """
        Provide a description of all media required to render the widgets on this form
        """
        media = Media()
        for field in self.fields.values():
            media = media + field.widget.media
        return media
    media = property(_get_media)

    def is_multipart(self):
        """
        Returns True if the form needs to be multipart-encoded, i.e. it has a
        FileInput. Otherwise, False.
        """
        for field in self.fields.values():
            if field.widget.needs_multipart_form:
                return True
        return False

    def hidden_fields(self):
        """
        Returns a list of all the BoundField objects that are hidden fields.
        Useful for manual form layout in templates.
        """
        return [field for field in self if field.is_hidden]

    def visible_fields(self):
        """
        Returns a list of BoundField objects that aren't hidden fields.
        The opposite of the hidden_fields() method.
        """
        return [field for field in self if not field.is_hidden]
class Form(BaseForm):
    "A collection of Fields, plus their associated data."
    # This is a separate class from BaseForm in order to abstract the way
    # self.fields is specified. This class (Form) is the one that does the
    # fancy metaclass stuff purely for the semantic sugar -- it allows one
    # to define a form using declarative syntax.
    # BaseForm itself has no way of designating self.fields.
    __metaclass__ = DeclarativeFieldsMetaclass  # Python 2 metaclass hook
class BoundField(StrAndUnicode):
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ''
def __unicode__(self):
"""Renders this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
def _errors(self):
"""
Returns an ErrorList for this field. Returns an empty ErrorList
if there are none.
"""
return self.form.errors.get(self.name, self.form.error_class())
errors = property(_errors)
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
if not widget:
widget = self.field.widget
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and 'id' not in widget.attrs:
if not only_initial:
attrs['id'] = auto_id
else:
attrs['id'] = self.html_initial_id
if not only_initial:
name = self.html_name
else:
name = self.html_initial_name
return widget.render(name, self.value(), attrs=attrs)
def as_text(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"Returns a string of HTML for representing this as a <textarea>."
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Returns a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
def _data(self):
"""
Returns the data for this BoundField, or None if it wasn't given.
"""
return self.field.widget.value_from_datadict(self.form.data, self.form.files, self.html_name)
data = property(_data)
def value(self):
"""
Returns the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
if not self.form.is_bound:
data = self.form.initial.get(self.name, self.field.initial)
if callable(data):
data = data()
else:
data = self.field.bound_data(
self.data, self.form.initial.get(self.name, self.field.initial)
)
return self.field.prepare_value(data)
def label_tag(self, contents=None, attrs=None):
    """
    Wraps the given contents in a <label>, if the field has an ID attribute.
    Does not HTML-escape the contents. If contents aren't given, uses the
    field's HTML-escaped label.

    If attrs are given, they're used as HTML attributes on the <label> tag.
    """
    contents = contents or conditional_escape(self.label)
    widget = self.field.widget
    id_ = widget.attrs.get('id') or self.auto_id
    if id_:
        # Conditional expression instead of the fragile `x and f(x) or ''`
        # idiom, which silently yields '' whenever f(x) is falsy.
        attrs = flatatt(attrs) if attrs else ''
        contents = u'<label for="%s"%s>%s</label>' % (widget.id_for_label(id_), attrs, unicode(contents))
    return mark_safe(contents)
def css_classes(self, extra_classes=None):
    """
    Returns a string of space-separated CSS classes for this field.
    """
    # A string argument is split on whitespace; an iterable is used as-is.
    if hasattr(extra_classes, 'split'):
        extra_classes = extra_classes.split()
    css = set(extra_classes or [])
    form = self.form
    if self.errors and hasattr(form, 'error_css_class'):
        css.add(form.error_css_class)
    if self.field.required and hasattr(form, 'required_css_class'):
        css.add(form.required_css_class)
    return ' '.join(css)
def _is_hidden(self):
    "Returns True if this BoundField's widget is hidden."
    # Mirror the widget's own is_hidden flag.
    return self.field.widget.is_hidden
is_hidden = property(_is_hidden)
def _auto_id(self):
    """
    Calculates and returns the ID attribute for this BoundField, if the
    associated Form has specified auto_id. Returns an empty string otherwise.
    """
    auto_id = self.form.auto_id
    # auto_id may be a format string (e.g. 'id_%s') or a simple truthy flag.
    if auto_id and '%s' in smart_unicode(auto_id):
        return smart_unicode(auto_id) % self.html_name
    elif auto_id:
        # Truthy but not a format string: use the html name directly.
        return self.html_name
    return ''
auto_id = property(_auto_id)
| apache-2.0 |
frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/OpenGL/GL/ATI/element_array.py | 4 | 1223 | '''OpenGL extension ATI.element_array
This module customises the behaviour of the
OpenGL.raw.GL.ATI.element_array to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a mechanism for an application to create
an array of index data for use in specifying geometric primitives.
This extension is most useful when used in conjunction with the
ATI_vertex_array_object extension. ATI_vertex_array_object
provides an interface for storing vertex array data in persistent,
hardware-addressable memory. In cases where large amounts of
vertex data are in use, the index data used to construct
primitives (typically as passed to the GL through DrawElements)
can impose a significant bandwidth burden. ATI_element_array
allows the application to specify independent arrays of elements,
which can then be cached using ATI_vertex_array_object.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/element_array.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ATI.element_array import *
### END AUTOGENERATED SECTION | bsd-2-clause |
pgmillon/ansible | lib/ansible/modules/network/edgeos/edgeos_config.py | 28 | 10823 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: edgeos_config
version_added: "2.5"
author:
- "Nathaniel Case (@Qalthos)"
- "Sam Doran (@samdoran)"
short_description: Manage EdgeOS configuration on remote device
description:
- This module provides configuration file management of EdgeOS
devices. It provides arguments for managing both the
configuration file and state of the active configuration. All
configuration statements are based on `set` and `delete` commands
in the device configuration.
- "This is a network module and requires the C(connection: network_cli) in order
to work properly."
- For more information please see the L(Network Guide,../network/getting_started/index.html).
notes:
- Tested against EdgeOS 1.9.7
- Setting C(ANSIBLE_PERSISTENT_COMMAND_TIMEOUT) to 30 is recommended since
the save command can take longer than the default of 10 seconds on
some EdgeOS hardware.
options:
lines:
description:
- The ordered set of configuration lines to be managed and
compared with the existing configuration on the remote
device.
src:
description:
- The C(src) argument specifies the path to the source config
file to load. The source config file can either be in
bracket format or set format. The source file can include
Jinja2 template variables.
match:
description:
- The C(match) argument controls the method used to match
against the current active configuration. By default, the
desired config is matched against the active config and the
deltas are loaded. If the C(match) argument is set to C(none)
the active configuration is ignored and the configuration is
always loaded.
default: line
choices: ['line', 'none']
backup:
description:
- The C(backup) argument will backup the current device's active
configuration to the Ansible control host prior to making any
changes. If the C(backup_options) value is not given, the backup
file will be located in the backup folder in the playbook root
directory or role root directory if the playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
default: 'configured by edgeos_config'
config:
description:
- The C(config) argument specifies the base configuration to use
to compare against the desired configuration. If this value
is not specified, the module will automatically retrieve the
current active configuration from the remote device.
save:
description:
- The C(save) argument controls whether or not changes made
to the active configuration are saved to disk. This is
independent of committing the config. When set to C(True), the
active configuration is saved.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
            - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- name: configure the remote device
edgeos_config:
lines:
- set system host-name {{ inventory_hostname }}
- set service lldp
- delete service dhcp-server
- name: backup and load from file
edgeos_config:
src: edgeos.cfg
backup: yes
- name: configurable backup path
edgeos_config:
src: edgeos.cfg
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
commands:
description: The list of configuration commands sent to the device
returned: always
type: list
sample: ['...', '...']
filtered:
description: The list of configuration commands removed to avoid a load failure
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/edgeos_config.2016-07-16@22:28:34
"""
import re
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.edgeos.edgeos import load_config, get_config, run_commands
DEFAULT_COMMENT = 'configured by edgeos_config'
CONFIG_FILTERS = [
re.compile(r'set system login user \S+ authentication encrypted-password')
]
def config_to_commands(config):
    """Convert a configuration blob into a list of `set` commands.

    ``config`` may already be in "set format" (its text starts with
    ``set`` or ``delete``), in which case it is simply split into lines;
    otherwise it is treated as a bracketed config and flattened.
    """
    set_format = config.startswith('set') or config.startswith('delete')
    candidate = NetworkConfig(indent=4, contents=config)
    if not set_format:
        candidate = [c.line for c in candidate.items]
        commands = list()
        # this filters out less specific lines
        for item in candidate:
            for index, entry in enumerate(commands):
                # a longer (more specific) line supersedes its prefix
                if item.startswith(entry):
                    del commands[index]
                    break
            commands.append(item)

        commands = ['set %s' % cmd.replace(' {', '') for cmd in commands]

    else:
        commands = to_native(candidate).split('\n')

    return commands
def get_candidate(module):
    """Build the candidate command list from the ``src`` or ``lines`` args."""
    source = module.params['src'] or module.params['lines']
    if module.params['lines']:
        # lines is a list; join it into a single config blob first.
        source = '\n'.join(source)
    return config_to_commands(source)
def diff_config(commands, config):
    """Compute the commands needed to move ``config`` towards ``commands``.

    :param commands: desired configuration as a list of set/delete lines
    :param config: running configuration as a newline-separated string
    :return: list of commands to send to the device
    :raises ValueError: if a desired line starts with neither `set` nor
        `delete`
    """
    config = [to_native(c).replace("'", '') for c in config.splitlines()]

    updates = list()
    visited = set()
    delete_commands = [line for line in commands if line.startswith('delete')]

    for line in commands:
        item = to_native(line).replace("'", '')

        if not item.startswith('set') and not item.startswith('delete'):
            raise ValueError('line must start with either `set` or `delete`')

        elif item.startswith('set'):

            if item not in config:
                updates.append(line)

            # If there is a corresponding delete command in the desired
            # config, make sure to append the set command even though it
            # already exists in the running config
            else:
                # count=1 so only the leading keyword is rewritten; an
                # unrestricted sub() would also mangle "set"/"delete"
                # occurring inside the command's payload.
                ditem = re.sub(r'set', 'delete', item, count=1)
                for delete_line in delete_commands:
                    if ditem.startswith(delete_line):
                        updates.append(item)
                        # append once even if several delete commands match
                        break

        elif item.startswith('delete'):
            if not config:
                updates.append(line)
            else:
                item = re.sub(r'delete', 'set', item, count=1)
                for entry in config:
                    if entry.startswith(item) and line not in visited:
                        updates.append(line)
                        visited.add(line)

    return list(updates)
def sanitize_config(config, result):
    """Strip lines matching CONFIG_FILTERS in place, recording them in
    result['filtered'] so the caller can warn about them."""
    result['filtered'] = list()
    for regex in CONFIG_FILTERS:
        # Walk backwards so deleting by index never skips an element.
        for index in range(len(config) - 1, -1, -1):
            if regex.search(config[index]):
                result['filtered'].append(config[index])
                del config[index]
def run(module, result):
    """Compute and apply the configuration diff for this module run.

    Mutates ``result`` in place: sets the commands sent, any filtered
    lines, and the changed flag.
    """
    # get the current active config from the node or passed in via
    # the config param
    config = module.params['config'] or get_config(module)

    # create the candidate config object from the arguments
    candidate = get_candidate(module)

    # create loadable config that includes only the configuration updates
    commands = diff_config(candidate, config)
    sanitize_config(commands, result)

    result['commands'] = commands

    # In check mode the diff is computed but never committed.
    commit = not module.check_mode
    comment = module.params['comment']

    if commands:
        load_config(module, commands, commit=commit, comment=comment)

        if result.get('filtered'):
            result['warnings'].append('Some configuration commands were '
                                      'removed, please see the filtered key')

        result['changed'] = True
def main():
    """Ansible module entry point: parse arguments, diff and apply config."""
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    spec = dict(
        src=dict(type='path'),
        lines=dict(type='list'),

        match=dict(default='line', choices=['line', 'none']),

        comment=dict(default=DEFAULT_COMMENT),

        config=dict(),

        backup=dict(type='bool', default=False),
        backup_options=dict(type='dict', options=backup_spec),
        save=dict(type='bool', default=False),
    )

    # src and lines are alternative ways to supply the candidate config.
    mutually_exclusive = [('lines', 'src')]

    module = AnsibleModule(
        argument_spec=spec,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True
    )

    warnings = list()

    result = dict(changed=False, warnings=warnings)

    if module.params['backup']:
        # Capture the running config so Ansible can write the backup file.
        result['__backup__'] = get_config(module=module)

    if any((module.params['src'], module.params['lines'])):
        run(module, result)

    if module.params['save']:
        # Only persist to disk when the saved config differs from running.
        diff = run_commands(module, commands=['configure', 'compare saved'])[1]
        if diff != '[edit]':
            run_commands(module, commands=['save'])
            result['changed'] = True
        run_commands(module, commands=['exit'])

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
bashrc/zeronet-debian | src/plugins/disabled-Bootstrapper/Test/TestBootstrapper.py | 5 | 7777 | import hashlib
import os
import pytest
from Bootstrapper import BootstrapperPlugin
from Bootstrapper.BootstrapperDb import BootstrapperDb
from Peer import Peer
from Crypt import CryptRsa
from util import helper
@pytest.fixture()
def bootstrapper_db(request):
    """Provide a fresh BootstrapperDb, swapped into the plugin's global db."""
    BootstrapperPlugin.db.close()
    BootstrapperPlugin.db = BootstrapperDb()
    BootstrapperPlugin.db.createTables()  # Reset db
    BootstrapperPlugin.db.cur.logging = True

    def cleanup():
        # Close and remove the temporary database file after the test.
        BootstrapperPlugin.db.close()
        os.unlink(BootstrapperPlugin.db.db_path)

    request.addfinalizer(cleanup)
    return BootstrapperPlugin.db
@pytest.mark.usefixtures("resetSettings")
class TestBootstrapper:
    """Integration tests for the Bootstrapper tracker plugin's announce API."""

    def testIp4(self, file_server, bootstrapper_db):
        """IPv4 peer announce/lookup, per-hash deletion and db cleanup."""
        peer = Peer("127.0.0.1", 1544, connection_server=file_server)
        hash1 = hashlib.sha256("site1").digest()
        hash2 = hashlib.sha256("site2").digest()
        hash3 = hashlib.sha256("site3").digest()

        # Verify empty result
        res = peer.request("announce", {
            "hashes": [hash1, hash2],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })

        assert len(res["peers"][0]["ip4"]) == 0  # Empty result

        # Verify added peer on previous request
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1, hash2], delete_missing_hashes=True)

        res = peer.request("announce", {
            "hashes": [hash1, hash2],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert len(res["peers"][1]["ip4"]) == 1

        # hash2 deleted from 1.2.3.4
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1], delete_missing_hashes=True)
        res = peer.request("announce", {
            "hashes": [hash1, hash2],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert len(res["peers"][1]["ip4"]) == 0

        # Announce 3 hash again
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1, hash2, hash3], delete_missing_hashes=True)
        res = peer.request("announce", {
            "hashes": [hash1, hash2, hash3],
            "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert len(res["peers"][1]["ip4"]) == 1
        assert len(res["peers"][2]["ip4"]) == 1

        # Single hash announce
        res = peer.request("announce", {
            "hashes": [hash1], "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
        })
        assert len(res["peers"][0]["ip4"]) == 1

        # Test DB cleanup
        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 1  # 127.0.0.1 never get added to db

        # Delete peers
        bootstrapper_db.execute("DELETE FROM peer WHERE ip4 = '1.2.3.4'")
        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer_to_hash").fetchone()["num"] == 0

        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM hash").fetchone()["num"] == 3  # 3 sites
        assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 0  # 0 peer

    def testPassive(self, file_server, bootstrapper_db):
        """A passively announced peer (no ip4) must not be returned."""
        peer = Peer("127.0.0.1", 1544, connection_server=file_server)
        hash1 = hashlib.sha256("hash1").digest()

        bootstrapper_db.peerAnnounce(ip4=None, port=15441, hashes=[hash1])
        res = peer.request("announce", {
            "hashes": [hash1], "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": []
        })

        assert len(res["peers"][0]["ip4"]) == 0  # Empty result

    def testAddOnion(self, file_server, site, bootstrapper_db, tor_manager):
        """Onion announces require a valid signature for every address."""
        onion1 = tor_manager.addOnion()
        onion2 = tor_manager.addOnion()
        peer = Peer("127.0.0.1", 1544, connection_server=file_server)
        hash1 = hashlib.sha256("site1").digest()
        hash2 = hashlib.sha256("site2").digest()

        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=1234, hashes=[hash1, hash2])
        res = peer.request("announce", {
            "onions": [onion1, onion2],
            "hashes": [hash1, hash2], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert len(res["peers"][0]["ip4"]) == 1
        assert "onion_sign_this" in res

        # Onion address not added yet
        site_peers = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers["onion"]) == 0
        assert "onion_sign_this" in res

        # Sign the nonces
        sign1 = CryptRsa.sign(res["onion_sign_this"], tor_manager.getPrivatekey(onion1))
        sign2 = CryptRsa.sign(res["onion_sign_this"], tor_manager.getPrivatekey(onion2))

        # Bad sign (different address)
        res = peer.request("announce", {
            "onions": [onion1], "onion_sign_this": res["onion_sign_this"],
            "onion_signs": {tor_manager.getPublickey(onion2): sign2},
            "hashes": [hash1], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert "onion_sign_this" in res
        site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers1["onion"]) == 0  # Not added

        # Bad sign (missing one)
        res = peer.request("announce", {
            "onions": [onion1, onion2], "onion_sign_this": res["onion_sign_this"],
            "onion_signs": {tor_manager.getPublickey(onion1): sign1},
            "hashes": [hash1, hash2], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert "onion_sign_this" in res
        site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers1["onion"]) == 0  # Not added

        # Good sign
        res = peer.request("announce", {
            "onions": [onion1, onion2], "onion_sign_this": res["onion_sign_this"],
            "onion_signs": {tor_manager.getPublickey(onion1): sign1, tor_manager.getPublickey(onion2): sign2},
            "hashes": [hash1, hash2], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
        })
        assert "onion_sign_this" not in res

        # Onion addresses added
        site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
        assert len(site_peers1["onion"]) == 1
        site_peers2 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash2)
        assert len(site_peers2["onion"]) == 1

        assert site_peers1["onion"][0] != site_peers2["onion"][0]
        assert helper.unpackOnionAddress(site_peers1["onion"][0])[0] == onion1+".onion"
        assert helper.unpackOnionAddress(site_peers2["onion"][0])[0] == onion2+".onion"

        tor_manager.delOnion(onion1)
        tor_manager.delOnion(onion2)

    def testRequestPeers(self, file_server, site, bootstrapper_db, tor_manager):
        """A site can fetch both ip4 and onion peers from the tracker."""
        site.connection_server = file_server
        hash = hashlib.sha256(site.address).digest()

        # Request peers from tracker
        assert len(site.peers) == 0
        bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=1234, hashes=[hash])
        site.announceTracker("zero", "127.0.0.1:1544")
        assert len(site.peers) == 1

        # Test onion address store
        bootstrapper_db.peerAnnounce(onion="bka4ht2bzxchy44r", port=1234, hashes=[hash], onion_signed=True)
        site.announceTracker("zero", "127.0.0.1:1544")
        assert len(site.peers) == 2
        assert "bka4ht2bzxchy44r.onion:1234" in site.peers
| gpl-2.0 |
MeerkatLabs/gnucash-reporting | gnucash_reports/reports/base.py | 1 | 1888 | """
Definition of a report.
"""
# Registry mapping report-type keys to their implementations.
_reports = dict()


def register_plugin(report, report_type=None):
    """
    Register the plugin class definition into the module.

    :param report: report definition class. Must have a class variable of
        report_type when report_type is not given explicitly.
    :param report_type: the type of report being identified; when falsy, a
        valid value is searched for on the report object itself.
    :return: None
    """
    global _reports
    if report_type:
        key = report_type
    else:
        try:
            key = report.report_type
        except AttributeError:
            # Fall back to the function's name (Python 2 attribute).
            key = report.func_name
    _reports[key] = report
def run_report(type='UNDEFINED_REPORT', name='UNTITLED_REPORT', description=None, definition=None):
    """
    Execute the report as defined by arguments.

    :param type: string containing the report type (parameter shadows the
        builtin ``type``; kept for backward compatibility with callers
        passing ``type=...``)
    :param name: string containing the report name
    :param description: string containing a description
    :param definition: a dictionary containing the report configuration
        parameters
    :return: dict with name/description/type/data keys, or None when no
        report is registered under ``type``
    """
    definition = definition or {}

    _report = _reports.get(type, None)

    if _report:
        payload = _report(**definition)

        return {
            'name': name,
            'description': description,
            'type': type,
            'data': payload
        }

    # print as a function call so the module parses on both Python 2 and 3
    print('Could not find report by name: %s' % type)
    return None
def multi_report(reports=None):
    """
    Report that will calculate multiple reports and store the results.

    :param reports: list of report definitions to execute
    :return: dictionary containing
        reports - results of the reports that were executed
    """
    results = []
    for definition in (reports or []):
        outcome = run_report(**definition)
        # Skip reports that could not be found/executed (None result).
        if outcome:
            results.append(outcome)
    return dict(reports=results)
| mit |
hanlind/nova | nova/virt/vmwareapi/images.py | 1 | 18920 | # Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer and manipulation.
"""
import os
import tarfile
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import strutils
from oslo_utils import units
from oslo_vmware import rw_handles
import six
from nova import exception
from nova.i18n import _, _LI
from nova import image
from nova.objects import fields
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vm_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
NFC_LEASE_UPDATE_PERIOD = 60 # update NFC lease every 60sec.
CHUNK_SIZE = 64 * units.Ki # default chunk size for image transfer
class VMwareImage(object):
    """Value object describing a Glance image from the driver's viewpoint."""

    def __init__(self, image_id,
                 file_size=0,
                 os_type=constants.DEFAULT_OS_TYPE,
                 adapter_type=constants.DEFAULT_ADAPTER_TYPE,
                 disk_type=constants.DEFAULT_DISK_TYPE,
                 container_format=constants.CONTAINER_FORMAT_BARE,
                 file_type=constants.DEFAULT_DISK_FORMAT,
                 linked_clone=None,
                 vsphere_location=None,
                 vif_model=constants.DEFAULT_VIF_MODEL):
        """VMwareImage holds values for use in building VMs.

        image_id (str): uuid of the image
        file_size (int): size of file in bytes
        os_type (str): name of guest os (use vSphere names only)
        adapter_type (str): name of the adapter's type
        disk_type (str): type of disk in thin, thick, etc
        container_format (str): container format (bare or ova)
        file_type (str): vmdk or iso
        linked_clone (bool): use linked clone, or don't
        vsphere_location (str): image location in datastore or None
        vif_model (str): virtual machine network interface
        """
        self.image_id = image_id
        self.file_size = file_size
        self.os_type = os_type
        self.adapter_type = adapter_type
        self.container_format = container_format
        self.disk_type = disk_type
        self.file_type = file_type
        self.vsphere_location = vsphere_location

        # NOTE(vui): This should be removed when we restore the
        # descriptor-based validation.
        if (self.file_type is not None and
                self.file_type not in constants.DISK_FORMATS_ALL):
            raise exception.InvalidDiskFormat(disk_format=self.file_type)

        if linked_clone is not None:
            self.linked_clone = linked_clone
        else:
            # Fall back to the global use_linked_clone configuration option.
            self.linked_clone = CONF.vmware.use_linked_clone
        self.vif_model = vif_model

    @property
    def file_size_in_kb(self):
        # File size converted from bytes to KiB.
        return self.file_size / units.Ki

    @property
    def is_sparse(self):
        # True when the image disk uses the sparse VMDK format.
        return self.disk_type == constants.DISK_TYPE_SPARSE

    @property
    def is_iso(self):
        # True when the image is an ISO rather than a VMDK.
        return self.file_type == constants.DISK_FORMAT_ISO

    @property
    def is_ova(self):
        # True when the image is packaged as an OVA archive.
        return self.container_format == constants.CONTAINER_FORMAT_OVA

    @classmethod
    def from_image(cls, context, image_id, image_meta):
        """Returns VMwareImage, the subset of properties the driver uses.

        :param context - context
        :param image_id - image id of image
        :param image_meta - image metadata object we are working with
        :return: vmware image object
        :rtype: nova.virt.vmwareapi.images.VmwareImage
        """
        properties = image_meta.properties

        # calculate linked_clone flag, allow image properties to override the
        # global property set in the configurations.
        image_linked_clone = properties.get('img_linked_clone',
                                            CONF.vmware.use_linked_clone)

        # catch any string values that need to be interpreted as boolean values
        linked_clone = strutils.bool_from_string(image_linked_clone)

        if image_meta.obj_attr_is_set('container_format'):
            container_format = image_meta.container_format
        else:
            container_format = None

        props = {
            'image_id': image_id,
            'linked_clone': linked_clone,
            'container_format': container_format,
            'vsphere_location': get_vsphere_location(context, image_id)
        }

        if image_meta.obj_attr_is_set('size'):
            props['file_size'] = image_meta.size
        if image_meta.obj_attr_is_set('disk_format'):
            props['file_type'] = image_meta.disk_format
        hw_disk_bus = properties.get('hw_disk_bus')
        if hw_disk_bus:
            # Translate Nova SCSI model names to vSphere adapter types.
            mapping = {
                fields.SCSIModel.LSILOGIC:
                    constants.DEFAULT_ADAPTER_TYPE,
                fields.SCSIModel.LSISAS1068:
                    constants.ADAPTER_TYPE_LSILOGICSAS,
                fields.SCSIModel.BUSLOGIC:
                    constants.ADAPTER_TYPE_BUSLOGIC,
                fields.SCSIModel.VMPVSCSI:
                    constants.ADAPTER_TYPE_PARAVIRTUAL,
            }
            if hw_disk_bus == fields.DiskBus.IDE:
                props['adapter_type'] = constants.ADAPTER_TYPE_IDE
            elif hw_disk_bus == fields.DiskBus.SCSI:
                hw_scsi_model = properties.get('hw_scsi_model')
                props['adapter_type'] = mapping.get(hw_scsi_model)

        # Map image property names onto VMwareImage constructor arguments.
        props_map = {
            'os_distro': 'os_type',
            'hw_disk_type': 'disk_type',
            'hw_vif_model': 'vif_model'
        }

        for k, v in six.iteritems(props_map):
            if properties.obj_attr_is_set(k):
                props[v] = properties.get(k)

        return cls(**props)
def get_vsphere_location(context, image_id):
    """Get image location in vsphere or None."""
    # image_id can be None if the instance is booted using a volume.
    if not image_id:
        return None
    metadata = IMAGE_API.get(context, image_id, include_locations=True)
    for location in metadata.get('locations') or []:
        url = location.get('url')
        if url and url.startswith('vsphere://'):
            return url
    return None
def image_transfer(read_handle, write_handle):
    """Copy all data from read_handle to write_handle in CHUNK_SIZE chunks.

    write_handle may be an NFC lease handle whose progress must be
    refreshed periodically; its update_progress (when present) is polled
    on a background timer while the copy runs.
    """
    # write_handle could be an NFC lease, so we need to periodically
    # update its progress
    update_cb = getattr(write_handle, 'update_progress', lambda: None)
    updater = loopingcall.FixedIntervalLoopingCall(update_cb)
    try:
        updater.start(interval=NFC_LEASE_UPDATE_PERIOD)
        while True:
            data = read_handle.read(CHUNK_SIZE)
            if not data:
                break
            write_handle.write(data)
    finally:
        updater.stop()
        # Close the handles even when the transfer fails so that
        # connections/leases are not leaked on the error path.
        read_handle.close()
        write_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
    """Upload a local ISO file to a datastore path on the hypervisor.

    :param iso_path: local filesystem path of the ISO image
    :param instance: instance the upload is logged against
    :param kwargs: host, port, data_center_name, datastore_name, cookies
        and file_path describing the upload destination
    """
    LOG.debug("Uploading iso %s to datastore", iso_path,
              instance=instance)
    # Open in binary mode: the ISO is binary data; text mode would
    # corrupt it on some platforms and fail outright on Python 3.
    with open(iso_path, 'rb') as iso_file:
        file_size = os.fstat(iso_file.fileno()).st_size
        write_file_handle = rw_handles.FileWriteHandle(
            kwargs.get("host"),
            kwargs.get("port"),
            kwargs.get("data_center_name"),
            kwargs.get("datastore_name"),
            kwargs.get("cookies"),
            kwargs.get("file_path"),
            file_size)

        LOG.debug("Uploading iso of size : %s ", file_size)
        block_size = 0x10000
        data = iso_file.read(block_size)
        while data:
            write_file_handle.write(data)
            data = iso_file.read(block_size)
        write_file_handle.close()

    LOG.debug("Uploaded iso %s to datastore", iso_path,
              instance=instance)
def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
                cookies=None):
    """Download image from the glance image server.

    :param context: request context
    :param instance: instance whose image_ref identifies the image
    :param host: host to write the image file to
    :param port: port on that host
    :param dc_name: datacenter name
    :param ds_name: datastore name
    :param file_path: datastore path to write the image to
    :param cookies: session cookies for the file transfer, if any
    """
    image_ref = instance.image_ref
    LOG.debug("Downloading image file data %(image_ref)s to the "
              "data store %(data_store_name)s",
              {'image_ref': image_ref,
               'data_store_name': ds_name},
              instance=instance)

    metadata = IMAGE_API.get(context, image_ref)
    file_size = int(metadata['size'])
    read_iter = IMAGE_API.download(context, image_ref)
    read_file_handle = rw_handles.ImageReadHandle(read_iter)
    write_file_handle = rw_handles.FileWriteHandle(
        host, port, dc_name, ds_name, cookies, file_path, file_size)
    image_transfer(read_file_handle, write_file_handle)
    LOG.debug("Downloaded image file data %(image_ref)s to "
              "%(upload_name)s on the data store "
              "%(data_store_name)s",
              {'image_ref': image_ref,
               'upload_name': 'n/a' if file_path is None else file_path,
               'data_store_name': 'n/a' if ds_name is None else ds_name},
              instance=instance)
def _build_shadow_vm_config_spec(session, name, size_kb, disk_type, ds_name):
    """Return spec for creating a shadow VM for image disk.

    The VM is never meant to be powered on. When used in importing
    a disk it governs the directory name created for the VM
    and the disk type of the disk image to convert to.

    :param name: Name of the backing
    :param size_kb: Size in KB of the backing
    :param disk_type: VMDK type for the disk
    :param ds_name: Datastore name where the disk is to be provisioned
    :return: Spec for creation
    """
    cf = session.vim.client.factory
    controller_device = cf.create('ns0:VirtualLsiLogicController')
    # NOTE(review): negative device keys appear to be placeholders for
    # devices that do not exist yet -- confirm against the vSphere API docs.
    controller_device.key = -100
    controller_device.busNumber = 0
    controller_device.sharedBus = 'noSharing'
    controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
    controller_spec.operation = 'add'
    controller_spec.device = controller_device

    disk_device = cf.create('ns0:VirtualDisk')
    # for very small disks allocate at least 1KB
    disk_device.capacityInKB = max(1, int(size_kb))
    disk_device.key = -101
    disk_device.unitNumber = 0
    disk_device.controllerKey = -100
    disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
    # Map the requested VMDK type onto the backing's provisioning flags.
    if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
        disk_device_bkng.eagerlyScrub = True
    elif disk_type == constants.DISK_TYPE_THIN:
        disk_device_bkng.thinProvisioned = True
    disk_device_bkng.fileName = '[%s]' % ds_name
    disk_device_bkng.diskMode = 'persistent'
    disk_device.backing = disk_device_bkng
    disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
    disk_spec.operation = 'add'
    disk_spec.fileOperation = 'create'
    disk_spec.device = disk_device

    vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
    vm_file_info.vmPathName = '[%s]' % ds_name

    create_spec = cf.create('ns0:VirtualMachineConfigSpec')
    create_spec.name = name
    create_spec.guestId = constants.DEFAULT_OS_TYPE
    create_spec.numCPUs = 1
    create_spec.memoryMB = 128
    create_spec.deviceChange = [controller_spec, disk_spec]
    create_spec.files = vm_file_info

    return create_spec
def _build_import_spec_for_import_vapp(session, vm_name, datastore_name):
    """Create a VirtualMachineImportSpec wrapping a thin shadow-VM spec."""
    factory = session.vim.client.factory
    import_spec = factory.create('ns0:VirtualMachineImportSpec')
    import_spec.configSpec = _build_shadow_vm_config_spec(
        session, vm_name, 0, constants.DISK_TYPE_THIN, datastore_name)
    return import_spec
def fetch_image_stream_optimized(context, instance, session, vm_name,
                                 ds_name, vm_folder_ref, res_pool_ref):
    """Fetch image from Glance to ESX datastore.

    The image is streamed through the ImportVApp API into a temporary
    shadow VM, which is then unregistered so only its disk remains on
    the datastore.

    :return: capacity of the imported disk in bytes
    """
    image_ref = instance.image_ref
    LOG.debug("Downloading image file data %(image_ref)s to the ESX "
              "as VM named '%(vm_name)s'",
              {'image_ref': image_ref, 'vm_name': vm_name},
              instance=instance)

    metadata = IMAGE_API.get(context, image_ref)
    file_size = int(metadata['size'])

    vm_import_spec = _build_import_spec_for_import_vapp(
        session, vm_name, ds_name)

    read_iter = IMAGE_API.download(context, image_ref)
    read_handle = rw_handles.ImageReadHandle(read_iter)

    write_handle = rw_handles.VmdkWriteHandle(session,
                                              session._host,
                                              session._port,
                                              res_pool_ref,
                                              vm_folder_ref,
                                              vm_import_spec,
                                              file_size)
    image_transfer(read_handle, write_handle)

    imported_vm_ref = write_handle.get_imported_vm()

    LOG.info(_LI("Downloaded image file data %(image_ref)s"),
             {'image_ref': instance.image_ref}, instance=instance)
    vmdk = vm_util.get_vmdk_info(session, imported_vm_ref, vm_name)
    # The shadow VM was only a vehicle for the disk import; unregister it.
    session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
    LOG.info(_LI("The imported VM was unregistered"), instance=instance)
    return vmdk.capacity_in_bytes
def get_vmdk_name_from_ovf(xmlstr):
    """Parse the OVA descriptor to extract the vmdk name.

    Finds the single Disk element in the DiskSection, follows its
    ``fileRef`` into the References/File entries, and returns that File's
    ``href`` attribute (the vmdk file name inside the OVA archive).
    """
    ovf = etree.fromstring(encodeutils.safe_encode(xmlstr))
    nsovf = "{%s}" % ovf.nsmap["ovf"]
    disk = ovf.find("./%sDiskSection/%sDisk" % (nsovf, nsovf))
    file_id = disk.get("%sfileRef" % nsovf)
    # Renamed from 'file' so we do not shadow the builtin of that name.
    file_elem = ovf.find('./%sReferences/%sFile[@%sid="%s"]' % (nsovf, nsovf,
                                                                nsovf, file_id))
    vmdk_name = file_elem.get("%shref" % nsovf)
    return vmdk_name
def fetch_image_ova(context, instance, session, vm_name, ds_name,
                    vm_folder_ref, res_pool_ref):
    """Download the OVA image from the glance image server to the
    Nova compute node.

    The OVA is read as a streaming (non-seekable) tar: the ``.ovf``
    descriptor is expected to appear before the disk member, its Disk
    entry names the vmdk member, and that member is then streamed into an
    ImportVApp upload.  The resulting shadow VM is unregistered so only
    the VMDK remains; returns the disk capacity in bytes.

    Raises ImageUnacceptable if no vmdk member could be extracted.
    """
    image_ref = instance.image_ref
    LOG.debug("Downloading OVA image file %(image_ref)s to the ESX "
              "as VM named '%(vm_name)s'",
              {'image_ref': image_ref, 'vm_name': vm_name},
              instance=instance)
    metadata = IMAGE_API.get(context, image_ref)
    file_size = int(metadata['size'])
    vm_import_spec = _build_import_spec_for_import_vapp(
        session, vm_name, ds_name)
    read_iter = IMAGE_API.download(context, image_ref)
    read_handle = rw_handles.ImageReadHandle(read_iter)
    # mode="r|" = stream mode: members must be consumed in archive order.
    with tarfile.open(mode="r|", fileobj=read_handle) as tar:
        vmdk_name = None
        for tar_info in tar:
            if tar_info and tar_info.name.endswith(".ovf"):
                extracted = tar.extractfile(tar_info)
                xmlstr = extracted.read()
                vmdk_name = get_vmdk_name_from_ovf(xmlstr)
            elif vmdk_name and tar_info.name.startswith(vmdk_name):
                # Actual file name is <vmdk_name>.XXXXXXX
                extracted = tar.extractfile(tar_info)
                write_handle = rw_handles.VmdkWriteHandle(
                    session,
                    session._host,
                    session._port,
                    res_pool_ref,
                    vm_folder_ref,
                    vm_import_spec,
                    file_size)
                image_transfer(extracted, write_handle)
                LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
                         {'image_ref': instance.image_ref}, instance=instance)
                imported_vm_ref = write_handle.get_imported_vm()
                vmdk = vm_util.get_vmdk_info(session,
                                             imported_vm_ref,
                                             vm_name)
                # Drop the shadow VM registration; keep only its VMDK.
                session._call_method(session.vim, "UnregisterVM",
                                     imported_vm_ref)
                LOG.info(_LI("The imported VM was unregistered"),
                         instance=instance)
                return vmdk.capacity_in_bytes
        # Fell through the archive without finding a matching vmdk member.
        raise exception.ImageUnacceptable(
            reason=_("Extracting vmdk from OVA failed."),
            image_id=image_ref)
def upload_image_stream_optimized(context, image_id, instance, session,
                                  vm, vmdk_size):
    """Upload the snapshotted vm disk file to Glance image server.

    Streams the VM's disk through an export (NFC) lease as a
    stream-optimized sparse vmdk, periodically renewing the lease while
    the upload is in flight.
    """
    LOG.debug("Uploading image %s", image_id, instance=instance)
    metadata = IMAGE_API.get(context, image_id)
    read_handle = rw_handles.VmdkReadHandle(session,
                                            session._host,
                                            session._port,
                                            vm,
                                            None,
                                            vmdk_size)
    # Set the image properties. It is important to set the 'size' to 0.
    # Otherwise, the image service client will use the VM's disk capacity
    # which will not be the image size after upload, since it is converted
    # to a stream-optimized sparse disk.
    image_metadata = {'disk_format': constants.DISK_FORMAT_VMDK,
                      'is_public': metadata['is_public'],
                      'name': metadata['name'],
                      'status': 'active',
                      'container_format': constants.CONTAINER_FORMAT_BARE,
                      'size': 0,
                      'properties': {'vmware_image_version': 1,
                                     'vmware_disktype': 'streamOptimized',
                                     'owner_id': instance.project_id}}
    # Keep the NFC lease alive while IMAGE_API.update() consumes the handle.
    updater = loopingcall.FixedIntervalLoopingCall(read_handle.update_progress)
    try:
        updater.start(interval=NFC_LEASE_UPDATE_PERIOD)
        IMAGE_API.update(context, image_id, image_metadata, data=read_handle)
    finally:
        updater.stop()
        read_handle.close()
    LOG.debug("Uploaded image %s to the Glance image server", image_id,
              instance=instance)
| apache-2.0 |
petebachant/pyqtgraph | pyqtgraph/GraphicsScene/exportDialog.py | 31 | 5317 | from ..Qt import QtCore, QtGui, USE_PYSIDE, USE_PYQT5
from .. import exporters as exporters
from .. import functions as fn
from ..graphicsItems.ViewBox import ViewBox
from ..graphicsItems.PlotItem import PlotItem
if USE_PYSIDE:
from . import exportDialogTemplate_pyside as exportDialogTemplate
elif USE_PYQT5:
from . import exportDialogTemplate_pyqt5 as exportDialogTemplate
else:
from . import exportDialogTemplate_pyqt as exportDialogTemplate
class ExportDialog(QtGui.QWidget):
    """Dialog for exporting a GraphicsScene or one of its items.

    Shows a tree of exportable items (scene / PlotItems / ViewBoxes), the
    list of available exporter formats, and the selected exporter's
    parameters.  A dashed yellow rectangle is drawn in the scene to
    indicate the region that will be exported.
    """

    def __init__(self, scene):
        QtGui.QWidget.__init__(self)
        self.setVisible(False)
        self.setWindowTitle("Export")
        # shown becomes True after the first show(); used to center once.
        self.shown = False
        self.currentExporter = None
        self.scene = scene
        # Dashed rectangle overlaid on the scene marking the export region.
        self.selectBox = QtGui.QGraphicsRectItem()
        self.selectBox.setPen(fn.mkPen('y', width=3, style=QtCore.Qt.DashLine))
        self.selectBox.hide()
        self.scene.addItem(self.selectBox)
        self.ui = exportDialogTemplate.Ui_Form()
        self.ui.setupUi(self)
        self.ui.closeBtn.clicked.connect(self.close)
        self.ui.exportBtn.clicked.connect(self.exportClicked)
        self.ui.copyBtn.clicked.connect(self.copyClicked)
        self.ui.itemTree.currentItemChanged.connect(self.exportItemChanged)
        self.ui.formatList.currentItemChanged.connect(self.exportFormatChanged)

    def show(self, item=None):
        """Show the dialog; if *item* is given, pre-select its exportable parent."""
        if item is not None:
            ## Select next exportable parent of the item originally clicked on
            while not isinstance(item, ViewBox) and not isinstance(item, PlotItem) and item is not None:
                item = item.parentItem()
            ## if this is a ViewBox inside a PlotItem, select the parent instead.
            if isinstance(item, ViewBox) and isinstance(item.parentItem(), PlotItem):
                item = item.parentItem()
            self.updateItemList(select=item)
        self.setVisible(True)
        self.activateWindow()
        self.raise_()
        self.selectBox.setVisible(True)
        if not self.shown:
            self.shown = True
            # Center the dialog over the view widget on first display.
            vcenter = self.scene.getViewWidget().geometry().center()
            self.setGeometry(vcenter.x()-self.width()/2, vcenter.y()-self.height()/2, self.width(), self.height())

    def updateItemList(self, select=None):
        """Rebuild the item tree from the scene, optionally selecting *select*."""
        self.ui.itemTree.clear()
        si = QtGui.QTreeWidgetItem(["Entire Scene"])
        si.gitem = self.scene
        self.ui.itemTree.addTopLevelItem(si)
        self.ui.itemTree.setCurrentItem(si)
        si.setExpanded(True)
        for child in self.scene.items():
            if child.parentItem() is None:
                self.updateItemTree(child, si, select=select)

    def updateItemTree(self, item, treeItem, select=None):
        """Recursively add exportable descendants of *item* under *treeItem*."""
        si = None
        # Only ViewBox and PlotItem instances get their own tree entries.
        if isinstance(item, ViewBox):
            si = QtGui.QTreeWidgetItem(['ViewBox'])
        elif isinstance(item, PlotItem):
            si = QtGui.QTreeWidgetItem(['Plot'])
        if si is not None:
            si.gitem = item
            treeItem.addChild(si)
            treeItem = si
            if si.gitem is select:
                self.ui.itemTree.setCurrentItem(si)
        for ch in item.childItems():
            self.updateItemTree(ch, treeItem, select=select)

    def exportItemChanged(self, item, prev):
        """Move the selection rectangle to the newly chosen item's bounds."""
        if item is None:
            return
        if item.gitem is self.scene:
            # Entire scene: use the visible rect of the first attached view.
            newBounds = self.scene.views()[0].viewRect()
        else:
            newBounds = item.gitem.sceneBoundingRect()
        self.selectBox.setRect(newBounds)
        self.selectBox.show()
        self.updateFormatList()

    def updateFormatList(self):
        """Repopulate the exporter-format list, preserving the selection."""
        current = self.ui.formatList.currentItem()
        if current is not None:
            current = str(current.text())
        self.ui.formatList.clear()
        self.exporterClasses = {}
        gotCurrent = False
        for exp in exporters.listExporters():
            self.ui.formatList.addItem(exp.Name)
            self.exporterClasses[exp.Name] = exp
            if exp.Name == current:
                self.ui.formatList.setCurrentRow(self.ui.formatList.count()-1)
                gotCurrent = True
        if not gotCurrent:
            self.ui.formatList.setCurrentRow(0)

    def exportFormatChanged(self, item, prev):
        """Instantiate the chosen exporter and show its parameter tree."""
        if item is None:
            self.currentExporter = None
            self.ui.paramTree.clear()
            return
        expClass = self.exporterClasses[str(item.text())]
        exp = expClass(item=self.ui.itemTree.currentItem().gitem)
        params = exp.parameters()
        if params is None:
            self.ui.paramTree.clear()
        else:
            self.ui.paramTree.setParameters(params)
        self.currentExporter = exp
        self.ui.copyBtn.setEnabled(exp.allowCopy)

    def exportClicked(self):
        # Hide the selection rectangle so it is not captured in the output.
        self.selectBox.hide()
        self.currentExporter.export()

    def copyClicked(self):
        # Same as export, but send the result to the clipboard.
        self.selectBox.hide()
        self.currentExporter.export(copy=True)

    def close(self):
        self.selectBox.setVisible(False)
        self.setVisible(False)

    def closeEvent(self, event):
        # Route window-manager close through close() so the box is hidden too.
        self.close()
        QtGui.QWidget.closeEvent(self, event)
| mit |
etalab/udata | udata/tests/forms/test_model_list_field.py | 2 | 3571 | from werkzeug.datastructures import MultiDict
from udata.forms import ModelForm, fields
from udata.models import db
from udata.tests import TestCase
from udata.utils import faker
class Nested(db.Document):
    # Minimal document used as the reference target in these tests.
    name = db.StringField()
class Fake(db.Document):
    # Document under test: holds a list of references to Nested documents.
    name = db.StringField()
    nested = db.ListField(db.ReferenceField(Nested))
class NestedListField(fields.ModelList, fields.Field):
    # ModelList form field bound to the Nested document class.
    model = Nested
class FakeForm(ModelForm):
    # Form under test, exposing Fake's fields including the model list.
    model_class = Fake
    name = fields.StringField()
    nested = NestedListField()
class ModelListFieldTest(TestCase):
    """Exercise ModelList field parsing from form data and from JSON.

    Covers the accepted input shapes: empty, a single comma-free id, a
    comma-separated id string, a JSON list of ids, and a JSON list of
    ``{'id': ...}`` objects.
    """

    def test_empty_data(self):
        """No input at all populates an empty list."""
        fake = Fake()
        form = FakeForm()
        form.populate_obj(fake)
        self.assertEqual(fake.nested, [])

    def test_with_one_valid_data(self):
        """A single id submitted as form data resolves to one document."""
        nested = Nested.objects.create(name=faker.name())
        fake = Fake()
        form = FakeForm(MultiDict({'nested': str(nested.id)}))
        form.validate()
        self.assertEqual(form.errors, {})
        form.populate_obj(fake)
        self.assertEqual(len(fake.nested), 1)
        self.assertIsInstance(fake.nested[0], Nested)
        self.assertEqual(fake.nested[0], nested)

    def test_with_multiple_valid_data(self):
        """A comma-separated id string resolves to the documents in order."""
        nesteds = [Nested.objects.create(name=faker.name()) for _ in range(3)]
        ids = [str(n.id) for n in nesteds]
        fake = Fake()
        form = FakeForm(MultiDict({'nested': ','.join(ids)}))
        form.validate()
        self.assertEqual(form.errors, {})
        form.populate_obj(fake)
        self.assertEqual(len(fake.nested), len(nesteds))
        self.assertIsInstance(fake.nested[0], Nested)
        self.assertEqual(fake.nested, nesteds)

    def test_with_one_valid_json_id(self):
        """A JSON list holding one plain id string resolves to one document."""
        nested = Nested.objects.create(name=faker.name())
        fake = Fake()
        form = FakeForm.from_json({'nested': [str(nested.id)]})
        form.validate()
        self.assertEqual(form.errors, {})
        form.populate_obj(fake)
        self.assertEqual(len(fake.nested), 1)
        self.assertIsInstance(fake.nested[0], Nested)
        self.assertEqual(fake.nested[0], nested)

    def test_with_one_valid_json_object(self):
        """A JSON list holding one {'id': ...} object resolves to one document."""
        nested = Nested.objects.create(name=faker.name())
        fake = Fake()
        form = FakeForm.from_json({'nested': [{'id': str(nested.id)}]})
        form.validate()
        self.assertEqual(form.errors, {})
        form.populate_obj(fake)
        self.assertEqual(len(fake.nested), 1)
        self.assertIsInstance(fake.nested[0], Nested)
        self.assertEqual(fake.nested[0], nested)

    def test_with_multiple_valid_json_id(self):
        """A JSON list of plain id strings resolves to the documents in order."""
        nested = [Nested.objects.create(name=faker.name()) for _ in range(3)]
        ids = [str(n.id) for n in nested]
        fake = Fake()
        form = FakeForm.from_json({'nested': ids})
        form.validate()
        self.assertEqual(form.errors, {})
        form.populate_obj(fake)
        self.assertEqual(len(fake.nested), len(nested))
        self.assertIsInstance(fake.nested[0], Nested)
        self.assertEqual(fake.nested, nested)

    def test_with_multiple_valid_json_object(self):
        """A JSON list of {'id': ...} objects resolves to the documents in order."""
        nested = [Nested.objects.create(name=faker.name()) for _ in range(3)]
        ids = [{'id': str(n.id)} for n in nested]
        fake = Fake()
        form = FakeForm.from_json({'nested': ids})
        form.validate()
        self.assertEqual(form.errors, {})
        form.populate_obj(fake)
        self.assertEqual(len(fake.nested), len(nested))
        self.assertIsInstance(fake.nested[0], Nested)
        self.assertEqual(fake.nested, nested)
| agpl-3.0 |
reqshark/duktape | src/genhashsizes.py | 16 | 3054 | #!/usr/bin/python
#
# Find a sequence of duk_hobject hash sizes which have a desired 'ratio'
# and are primes. Prime hash sizes ensure that all probe sequence values
# (less than hash size) are relatively prime to hash size, i.e. cover the
# entire hash. Prime data is packed into about 1 byte/prime using a
# prediction-correction model.
#
# Also generates a set of probe steps which are relatively prime to every
# hash size.
import sys
import math
def is_prime(n):
    """Return True if n is prime.

    Trial division by 2 and then by odd numbers up to sqrt(n) -- fast enough
    for the 32-bit range scanned by this script, and avoids the original
    version's full 2..n-1 scan.  Also fixes the original behaviour of
    (incorrectly) reporting 1 as prime; no caller in this script ever
    passes 1, so the fix is behaviour-compatible in practice.
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0:
        return False
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
def next_prime(n):
    """Return the smallest prime strictly greater than n."""
    candidate = n + 1
    while not is_prime(candidate):
        candidate += 1
    return candidate
def generate_sizes(min_size, max_size, step_ratio):
    """Generate a set of hash sizes following a nice ratio.

    step_ratio arrives as a fixed-point value scaled by 1024 and is
    converted to a float multiplier up front.  Returns (sizes, ratios)
    where ratios[i] == sizes[i+1] / sizes[i].

    The candidate variable was renamed from 'next', which shadowed the
    builtin of that name.
    """
    sizes = []
    ratios = []
    curr = next_prime(min_size)
    cand = curr
    sizes.append(curr)
    step_ratio = float(step_ratio) / 1024
    while True:
        if cand > max_size:
            break
        ratio = float(cand) / float(curr)
        if ratio < step_ratio:
            # Candidate prime is too close to the previous size; keep looking.
            cand = next_prime(cand)
            continue
        sys.stdout.write('.'); sys.stdout.flush()
        sizes.append(cand)
        ratios.append(ratio)
        curr = cand
        cand = next_prime(int(cand * step_ratio))
    sys.stdout.write('\n'); sys.stdout.flush()
    return sizes, ratios
def generate_corrections(sizes, step_ratio):
    """Generate a set of correction from a ratio-based predictor.

    Encodes the size list as: the first size verbatim, then one small
    delta per subsequent size (actual minus the fixed-point ratio-based
    prediction) so each entry fits in a byte, terminated by -1.
    """
    corrections = [sizes[0]]
    for prev, curr in zip(sizes, sizes[1:]):
        predicted = int(prev * step_ratio) >> 10
        delta = int(curr - predicted)
        if delta < 0 or delta > 127:
            raise Exception('correction does not fit into 8 bits')
        corrections.append(delta)
    corrections.append(-1)  # negative denotes last end of list
    return corrections
def generate_probes(count, sizes):
    """Return `count` probe steps relatively prime to every hash size.

    The smallest primes not present in the (prime) size list are by
    construction relatively prime to all sizes; each must fit in 8 bits.
    """
    probes = []
    candidate = 2
    while len(probes) < count:
        if is_prime(candidate) and candidate not in sizes:
            if candidate > 255:
                raise Exception('probe step does not fit into 8 bits')
            probes.append(candidate)
        candidate += 1
    return probes
# NB: these must match duk_hobject defines and code
step_ratio = 1177 # approximately (1.15 * (1 << 10)), i.e. ~1.149 fixed-point
min_size = 16
max_size = 2**32 - 1

# Compute the size sequence, its byte-packed corrections, and probe steps,
# then dump them for pasting into the C source.
sizes, ratios = generate_sizes(min_size, max_size, step_ratio)
corrections = generate_corrections(sizes, step_ratio)
probes = generate_probes(32, sizes)

# Python 2 print statements (this script is Python 2 only).
print len(sizes)
print 'SIZES: ' + repr(sizes)
print 'RATIOS: ' + repr(ratios)
print 'CORRECTIONS: ' + repr(corrections)
print 'PROBES: ' + repr(probes)

# highest 32-bit prime; scan downwards from 2**32 - 1
i = 2**32
while True:
    i -= 1
    if is_prime(i):
        print 'highest 32-bit prime is: %d (0x%08x)' % (i, i)
        break
| mit |
edx/ansible | v2/ansible/plugins/action/fail.py | 16 | 1140 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2012, Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        """Return a failed result, using the task's 'msg' arg if provided.

        task_vars defaults to None instead of dict(): a mutable default
        argument would be shared across every invocation.
        """
        if task_vars is None:
            task_vars = dict()
        msg = 'Failed as requested from task'
        if self._task.args and 'msg' in self._task.args:
            msg = self._task.args.get('msg')
        return dict(failed=True, msg=msg)
| gpl-3.0 |
samuto/pysolar | pysolar/util.py | 3 | 23740 | # -*- coding: utf-8 -*-
# Copyright Brandon Stafford
#
# This file is part of Pysolar.
#
# Pysolar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Pysolar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with Pysolar. If not, see <http://www.gnu.org/licenses/>.
"""Additional support functions for solar geometry, astronomy, radiation correlation
:Original author: Simeon Nwaogaidu
:Contact: SimeonObinna.Nwaogaidu AT lahmeyer DOT de
:Additional author: Holger Zebner
:Contact: holger.zebner AT lahmeyer DOT de
:Additional author: Brandon Stafford
"""
from datetime import \
datetime, \
timedelta
import math
from . import solar, constants
# Some default constants (module-wide defaults used by the functions below)
AM_default = 2.0 # Default air mass is 2.0
TL_default = 1.0 # Default Linke turbidity factor is 1.0
SC_default = 1367.0 # Solar constant in W/m^2 is 1367.0. Note that this value could vary by +/-4 W/m^2
TY_default = 365 # Total year number from 1 to 365 days (no leap-day handling)
elevation_default = 0.0 # Default elevation is 0.0 (height above sea level)
# Useful equations for analysis
def get_sunrise_sunset(latitude_deg, longitude_deg, when):
    """This function calculates the astronomical sunrise and sunset times in local time.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree. A geographical term denoting
        the north/south angular location of a place on a sphere.
    longitude_deg : float
        longitude in decimal degree. Longitude shows your location
        in an east-west direction, relative to the Greenwich meridian.
    when : datetime.datetime
        date and time in any valid timezone, answers will be for same day in same timezone.

    Returns
    -------
    sunrise_time_dt : datetime.datetime
        Sunrise time in local time.
    sunset_time_dt : datetime.datetime
        Sunset time in local time.

    References
    ----------
    .. [1] http://www.skypowerinternational.com/pdf/Radiation/7.1415.01.121_cm121_bed-anleitung_engl.pdf
    .. [2] http://pysolar.org/
    """
    # Use `is not None` (identity), not `!= None`, to test for a naive datetime.
    utc_offset = when.utcoffset()
    if utc_offset is not None:
        utc_offset = utc_offset.total_seconds()
    else:
        utc_offset = 0
    day = when.utctimetuple().tm_yday  # Day of the year
    SHA = utc_offset / 3600 * 15.0 - longitude_deg  # Solar hour angle
    TT = math.radians(279.134 + 0.985647 * day)  # Time adjustment angle
    # Equation-of-time style correction, in hours.
    time_adst = (5.0323
                 - 100.976 * math.sin(TT)
                 + 595.275 * math.sin(2 * TT)
                 + 3.6858 * math.sin(3 * TT)
                 - 12.47 * math.sin(4 * TT)
                 - 430.847 * math.cos(TT)
                 + 12.5024 * math.cos(2 * TT)
                 + 18.25 * math.cos(3 * TT)) / 3600
    TON = 12 + SHA / 15.0 - time_adst  # Time of noon
    # Half-day length in hours.
    sunn = ((math.pi / 2
             - math.radians(constants.earth_axis_inclination)
             * math.tan(math.radians(latitude_deg))
             * math.cos(2 * math.pi * day / 365.25))
            * (12 / math.pi))
    same_day = datetime(year = when.year, month = when.month, day = when.day, tzinfo = when.tzinfo)
    sunrise_time = same_day + timedelta(hours = TON - sunn + time_adst)
    sunset_time = same_day + timedelta(hours = TON + sunn - time_adst)
    return sunrise_time, sunset_time
def get_sunrise_time(latitude_deg, longitude_deg, when):
    """Wrapper for get_sunrise_sunset that returns just the sunrise time."""
    sunrise, _sunset = get_sunrise_sunset(latitude_deg, longitude_deg, when)
    return sunrise
def get_sunset_time(latitude_deg, longitude_deg, when):
    """Wrapper for get_sunrise_sunset that returns just the sunset time."""
    _sunrise, sunset = get_sunrise_sunset(latitude_deg, longitude_deg, when)
    return sunset
def mean_earth_sun_distance(when):
    """Mean Earth-Sun distance is the arithmetical mean of the maximum and minimum distances
    between a planet (Earth) and the object about which it revolves (Sun). However,
    the function is used to calculate the Mean earth sun distance.

    Parameters
    ----------
    when : datetime.datetime
        date/time for which to do the calculation

    Returns
    -------
    KD : float
        Mean earth sun distance (dimensionless correction factor)

    References
    ----------
    .. [1] http://sunbird.jrc.it/pvgis/solres/solmod3.htm#clear-sky%20radiation
    .. [2] R. aguiar and et al, "The ESRA user guidebook, vol. 2. database", models and exploitation software-Solar
       radiation models, p.113
    """
    # The day-of-year must be divided by 365 *inside* the sine.  The original
    # code applied "/ 365" outside math.sin(), so the sine argument was always
    # an integer multiple of 2*pi and the function returned ~1.0 year-round.
    return 1 - 0.0335 * math.sin(2 * math.pi * (when.utctimetuple().tm_yday - 94) / 365)
def extraterrestrial_irrad(when, latitude_deg, longitude_deg,SC=SC_default):
    """Equation calculates Extratrestrial radiation. Solar radiation incident outside the earth's
    atmosphere is called extraterrestrial radiation. On average the extraterrestrial irradiance
    is 1367 Watts/meter2 (W/m2). This value varies by + or - 3 percent as the earth orbits the sun.
    The earth's closest approach to the sun occurs around January 4th and it is furthest
    from the sun around July 5th.

    Parameters
    ----------
    when : datetime.datetime
        date/time for which to do the calculation
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    SC : float
        The solar constant, roughly 1366 W/m^2 measured outside the atmosphere.

    Returns
    -------
    EXTR1 : float
        Extraterrestrial irradiation

    References
    ----------
    .. [1] http://solardat.uoregon.edu/SolarRadiationBasics.html
    .. [2] Dr. J. Schumacher and et al,"INSEL LE(Integrated Simulation Environment Language)Block reference",p.68
    """
    day = when.utctimetuple().tm_yday
    # Fourier terms of the day-angle used in the eccentricity correction below.
    ab = math.cos(2 * math.pi * (day - 1.0)/(365.0))
    bc = math.sin(2 * math.pi * (day - 1.0)/(365.0))
    cd = math.cos(2 * (2 * math.pi * (day - 1.0)/(365.0)))
    df = math.sin(2 * (2 * math.pi * (day - 1.0)/(365.0)))
    decl = solar.get_declination(day)
    ha = solar.get_hour_angle(when, longitude_deg)
    # NOTE(review): latitude_deg (degrees) and decl/ha are passed to
    # math.sin/math.cos, which expect radians -- looks like a unit mismatch;
    # confirm against the units returned by solar.get_declination/get_hour_angle.
    ZA = math.sin(latitude_deg) * math.sin(decl) + math.cos(latitude_deg) * math.cos(decl) * math.cos(ha)
    return SC * ZA * (1.00010 + 0.034221 * ab + 0.001280 * bc + 0.000719 * cd + 0.000077 * df)
def declination_degree(when, TY = TY_default ):
    """The declination of the sun is the angle between Earth's equatorial plane and a line
    between the Earth and the sun. It varies between 23.45 degrees and -23.45 degrees,
    hitting zero on the equinoxes and peaking on the solstices.

    Parameters
    ----------
    when : datetime.datetime
        date/time for which to do the calculation
    TY : float
        Total number of days in a year. eg. 365 days per year,(no leap days)

    Returns
    -------
    DEC : float
        The declination of the Sun

    References
    ----------
    .. [1] http://pysolar.org/
    """
    day_of_year = when.utctimetuple().tm_yday
    # Sinusoid crossing zero at day 81 (spring equinox), peaking at the solstices.
    return constants.earth_axis_inclination * math.sin((2 * math.pi / (TY)) * (day_of_year - 81))
def solarelevation_function_clear(latitude_deg, longitude_deg, when,temperature = constants.standard_temperature,
                                  pressure = constants.standard_pressure, elevation = elevation_default):
    """Equation calculates Solar elevation function for clear sky type.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    when : datetime.datetime
        date/time for which to do the calculation
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    elevation : float
        height above a fixed reference point, often the mean sea level.

    Returns
    -------
    SOLALTC : float
        Solar elevation function clear sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status
       and proposed new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    altitude = solar.get_altitude(latitude_deg, longitude_deg,when, elevation, temperature,pressure)
    # Empirical fit in the solar altitude (Younes et al. coefficients).
    # NOTE(review): math.sin/math.cos expect radians; confirm the unit
    # returned by solar.get_altitude matches.
    return (0.038175 + (1.5458 * (math.sin(altitude))) + ((-0.59980) * (0.5 * (1 - math.cos(2 * (altitude))))))
def solarelevation_function_overcast(latitude_deg, longitude_deg, when,
                                     elevation = elevation_default, temperature = constants.standard_temperature,
                                     pressure = constants.standard_pressure):
    """ The function calculates solar elevation function for overcast sky type.
    This associated hourly overcast radiation model is based on the estimation of the
    overcast sky transmittance with the sun directly overhead combined with the application
    of an over sky elavation function to estimate the overcast day global irradiation
    value at any solar elevation.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals

    Returns
    -------
    SOLALTO : float
        Solar elevation function overcast

    References
    ----------
    .. [1] Prof. Peter Tregenza,"Solar radiation and daylight models", p.89.
    .. [2] Also accessible through Google Books: http://tinyurl.com/5kdbwu
       Tariq Muneer, "Solar Radiation and Daylight Models, Second Edition: For the Energy Efficient
       Design of Buildings"
    """
    altitude = solar.get_altitude(latitude_deg, longitude_deg,when, elevation, temperature,pressure)
    # Empirical fit in the solar altitude for overcast skies.
    # NOTE(review): math.sin/math.cos expect radians; confirm the unit
    # returned by solar.get_altitude matches.
    return ((-0.0067133) + (0.78600 * (math.sin(altitude)))) + (0.22401 * (0.5 * (1 - math.cos(2 * altitude))))
def diffuse_transmittance(TL = TL_default):
    """Equation calculates the Diffuse_transmittance and the is the Theoretical Diffuse Irradiance on a horizontal
    surface when the sun is at the zenith.

    Parameters
    ----------
    TL : float
        Linke turbidity factor

    Returns
    -------
    DT : float
        diffuse_transmittance

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
       new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Quadratic fit in the Linke turbidity factor.
    return -21.657 + 41.752 * TL + 0.51905 * TL * TL
def diffuse_underclear(latitude_deg, longitude_deg, when, elevation = elevation_default,
                       temperature = constants.standard_temperature, pressure = constants.standard_pressure, TL=TL_default):
    """Equation calculates diffuse radiation under clear sky conditions.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TL : float
        Linke turbidity factor

    Returns
    -------
    DIFFC : float
        Diffuse Irradiation under clear sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
       new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Use the shared quadratic fit instead of duplicating its coefficients
    # inline (same formula as diffuse_transmittance).
    DT = diffuse_transmittance(TL)
    altitude = solar.get_altitude(latitude_deg, longitude_deg, when, elevation, temperature, pressure)
    # NOTE(review): the altitude enters as a bare multiplicative factor here,
    # not through a sine -- confirm against the reference model.
    return mean_earth_sun_distance(when) * DT * altitude
def diffuse_underovercast(latitude_deg, longitude_deg, when, elevation = elevation_default,
                          temperature = constants.standard_temperature, pressure = constants.standard_pressure,TL=TL_default):
    """Function calculates the diffuse radiation under overcast conditions.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    when : datetime.datetime
        date/time for which to do the calculation
    elevation : float
        height above a fixed reference point, often the mean sea level.
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TL : float
        Linke turbidity factor

    Returns
    -------
    DIFOC : float
        Diffuse Irradiation under overcast

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
       new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # Use the shared quadratic fit instead of duplicating its coefficients
    # inline (same formula as diffuse_transmittance).
    DT = diffuse_transmittance(TL)
    DIFOC = (mean_earth_sun_distance(when) * DT *
             solar.get_altitude(latitude_deg, longitude_deg, when, elevation,
                                temperature, pressure))
    return DIFOC
def direct_underclear(latitude_deg, longitude_deg, when,
                      temperature = constants.standard_temperature, pressure = constants.standard_pressure, TY = TY_default,
                      AM = AM_default, TL = TL_default,elevation = elevation_default):
    """Equation calculates direct radiation under clear sky conditions.

    Parameters
    ----------
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    when : datetime.datetime
        date/time for which to do the calculation
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TY : float
        Total number of days in a year. eg. 365 days per year,(no leap days)
    AM : float
        Air mass: how far light travels through the Earth's atmosphere,
        in multiples of the vertical (AM1) thickness.
    TL : float
        Linke turbidity factor
    elevation : float
        height above a fixed reference point, often the mean sea level.

    Returns
    -------
    DIRC : float
        Direct Irradiation under clear

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
       new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    KD = mean_earth_sun_distance(when)
    DEC = declination_degree(when,TY)
    # NOTE(review): the Beer-Lambert exponent here multiplies air mass and
    # turbidity by the *declination* (DEC); clear-sky beam models normally
    # use a Rayleigh optical-thickness term in that slot -- confirm against
    # the cited reference.  math.sin is also fed the altitude directly;
    # confirm the unit returned by solar.get_altitude.
    DIRC = (1367 * KD * math.exp(-0.8662 * (AM) * (TL) * (DEC)
                                 ) * math.sin(solar.get_altitude(latitude_deg,longitude_deg,
                                                                 when,elevation ,
                                                                 temperature , pressure )))
    return DIRC
def global_irradiance_clear(DIRC, DIFFC, latitude_deg, longitude_deg, when,
                            temperature = constants.standard_temperature, pressure = constants.standard_pressure, TY = TY_default,
                            AM = AM_default, TL = TL_default, elevation = elevation_default):
    """Equation calculates global irradiance under clear sky conditions.

    Parameters
    ----------
    DIRC : float
        Direct Irradiation under clear (recomputed below; kept for
        signature compatibility)
    DIFFC : float
        Diffuse Irradiation under clear sky (recomputed below; kept for
        signature compatibility)
    latitude_deg : float
        latitude in decimal degree.
    longitude_deg : float
        longitude in decimal degree.
    when : datetime.datetime
        date/time for which to do the calculation
    temperature : float
        atmospheric temperature
    pressure : float
        pressure in pascals
    TY : float
        Total number of days in a year. eg. 365 days per year,(no leap days)
    AM : float
        Air mass: how far light travels through the Earth's atmosphere,
        in multiples of the vertical (AM1) thickness.
    TL : float
        Linke turbidity factor
    elevation : float
        height above a fixed reference point, often the mean sea level.

    Returns
    -------
    ghic : float
        Global Irradiation under clear sky

    References
    ----------
    .. [1] S. Younes, R.Claywell and el al,"Quality control of solar radiation data: present status and proposed
       new approaches", energy 30 (2005), pp 1533 - 1549.
    """
    # The original code passed TY/AM/TL/elevation positionally into the
    # temperature/pressure/... slots of direct_underclear() while ALSO
    # passing temperature= by keyword, which raised
    # "TypeError: multiple values for argument 'temperature'" on every call.
    # Pass everything by keyword, forwarding this function's own arguments.
    DIRC = direct_underclear(latitude_deg, longitude_deg, when,
                             temperature=temperature, pressure=pressure,
                             TY=TY, AM=AM, TL=TL, elevation=elevation)
    DIFFC = diffuse_underclear(latitude_deg, longitude_deg, when,
                               elevation=elevation, temperature=temperature,
                               pressure=pressure, TL=TL)
    ghic = (DIRC + DIFFC)
    return ghic
def global_irradiance_overcast(latitude_deg, longitude_deg, when,
                               elevation = elevation_default, temperature = constants.standard_temperature,
                               pressure = constants.standard_pressure):
    """Estimate global irradiance under an overcast sky.

    Under overcast skies the beam component is absent, so global and diffuse
    irradiance are expected to coincide; this value serves as the reference
    for that comparison.

    Parameters
    ----------
    latitude_deg : float
        Latitude in decimal degrees (north/south angular location on a sphere).
    longitude_deg : float
        Longitude in decimal degrees (east/west location relative to the
        Greenwich meridian).
    when : datetime.datetime
        Date/time for which to do the calculation.
    elevation : float
        Height of the location above a fixed reference point, often the mean
        sea level.
    temperature : float
        Atmospheric temperature.
    pressure : float
        Pressure in pascals.

    Returns
    -------
    ghioc : float
        Global irradiation under an overcast sky.

    References
    ----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation
       data: present status and proposed new approaches", Energy 30 (2005),
       pp 1533 - 1549.
    """
    altitude_deg = solar.get_altitude(latitude_deg, longitude_deg, when,
                                      elevation, temperature, pressure)
    # Empirical overcast model: irradiance scales linearly with solar altitude.
    return 572 * altitude_deg
def diffuse_ratio(DIFF_data,ghi_data):
    """Return the diffuse ratio K = diffuse / global horizontal irradiation.

    Parameters
    ----------
    DIFF_data : array_like
        Diffuse horizontal irradiation data.
    ghi_data : array_like
        Global horizontal irradiation data array.

    Returns
    -------
    K : float
        The diffuse ratio.

    References
    ----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation
       data: present status and proposed new approaches", Energy 30 (2005),
       pp 1533 - 1549.
    """
    return DIFF_data / ghi_data
def clear_index(ghi_data, when, latitude_deg, longitude_deg):
    """Return the clearness index KT (measured GHI over extraterrestrial
    irradiance).

    Parameters
    ----------
    ghi_data : array_like
        Global horizontal irradiation data array.
    when : datetime.datetime
        Date/time for which to do the calculation.
    latitude_deg : float
        Latitude in decimal degrees (north/south angular location on a sphere).
    longitude_deg : float
        Longitude in decimal degrees (east/west location relative to the
        Greenwich meridian).

    Returns
    -------
    KT : float
        Clearness index ratio.

    References
    ----------
    .. [1] S. Younes, R. Claywell et al., "Quality control of solar radiation
       data: present status and proposed new approaches", Energy 30 (2005),
       pp 1533 - 1549.
    """
    extraterrestrial = extraterrestrial_irrad(when, latitude_deg, longitude_deg)
    return ghi_data / extraterrestrial
| gpl-3.0 |
aimas/TuniErp-8.0 | openerp/addons/base/res/res_users.py | 1 | 45483 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
import logging
from functools import partial
from itertools import repeat
from lxml import etree
from lxml.builder import E
import openerp
from openerp import SUPERUSER_ID, models
from openerp import tools
import openerp.exceptions
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.http import request
# Module-level logger; used below to record login failures and cache-update errors.
_logger = logging.getLogger(__name__)
# Only users who can modify the user (incl. the user herself) see the real contents of these fields
USER_PRIVATE_FIELDS = ['password']
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class res_groups(osv.osv):
    """Access group: a named set of users to which access rights, rules,
    menus and views are attached.

    The record ("full") name is '<application category> / <group name>' when
    the group belongs to an application category, the plain name otherwise.
    """
    _name = "res.groups"
    _description = "Access Groups"
    _rec_name = 'full_name'
    _order = 'name'

    def _get_full_name(self, cr, uid, ids, field, arg, context=None):
        """Compute 'full_name': '<category> / <name>', or just the group name
        when the group has no application category."""
        res = {}
        for g in self.browse(cr, uid, ids, context):
            if g.category_id:
                res[g.id] = '%s / %s' % (g.category_id.name, g.name)
            else:
                res[g.id] = g.name
        return res

    def _search_group(self, cr, uid, obj, name, args, context=None):
        """Search function for the computed 'full_name' field.

        The operand may be a boolean, a single string or a list of strings;
        strings of the form 'Category / Group' match both parts.
        """
        operand = args[0][2]
        operator = args[0][1]
        lst = True
        if isinstance(operand, bool):
            domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            # BUGFIX: this was written as the chained comparison
            #   operator in expression.NEGATIVE_TERM_OPERATORS == (not operand)
            # which Python evaluates as `a in b and b == c` -- the second leg
            # compares a collection to a bool and is always False, so the AND
            # branch was unreachable.  Parenthesize as in the analogous test
            # on `values` below.
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
                return expression.AND(domains)
            else:
                return expression.OR(domains)
        if isinstance(operand, basestring):
            lst = False
            operand = [operand]
        where = []
        for group in operand:
            values = filter(bool, group.split('/'))
            group_name = values.pop().strip()
            category_name = values and '/'.join(values).strip() or group_name
            group_domain = [('name', operator, lst and [group_name] or group_name)]
            category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
            if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
                # a negative match must also keep groups without any category
                category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
                sub_where = expression.AND([group_domain, category_domain])
            else:
                sub_where = expression.OR([group_domain, category_domain])
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                where = expression.AND([where, sub_where])
            else:
                where = expression.OR([where, sub_where])
        return where

    _columns = {
        'name': fields.char('Name', required=True, translate=True),
        'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
        'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls', copy=True),
        'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
            'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
        'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
        'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
        'comment' : fields.text('Comment', size=250, translate=True),
        'category_id': fields.many2one('ir.module.category', 'Application', select=True),
        'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
    }
    _sql_constraints = [
        ('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
    ]

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Search groups.  'full_name' is a computed field, so ordering on it
        must be done in Python rather than in SQL."""
        # add explicit ordering if search is sorted on full_name
        if order and order.startswith('full_name'):
            # NOTE(review): this branch ignores `count`; a caller combining
            # count=True with a full_name ordering gets a list of ids instead
            # of a count -- confirm before relying on that combination.
            ids = super(res_groups, self).search(cr, uid, args, context=context)
            gs = self.browse(cr, uid, ids, context)
            gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
            gs = gs[offset:offset+limit] if limit else gs[offset:]
            return map(int, gs)
        return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a group, suffixing its name with '(copy)'.

        BUGFIX: `default` defaults to None but was updated unconditionally,
        so copying without an explicit `default` dict raised AttributeError;
        normalize it to a dict first (same idiom as res_users.copy below).
        """
        group_name = self.read(cr, uid, [id], ['name'])[0]['name']
        default = dict(default or {})
        default.update({'name': _('%s (copy)')%group_name})
        return super(res_groups, self).copy(cr, uid, id, default, context)

    def write(self, cr, uid, ids, vals, context=None):
        """Write; group names may not start with '-', and the ACL and
        has_group caches are invalidated after any change."""
        if 'name' in vals:
            if vals['name'].startswith('-'):
                raise osv.except_osv(_('Error'),
                        _('The name of the group can not start with "-"'))
        res = super(res_groups, self).write(cr, uid, ids, vals, context=context)
        self.pool['ir.model.access'].call_cache_clearing_methods(cr)
        self.pool['res.users'].has_group.clear_cache(self.pool['res.users'])
        return res
class res_users(osv.osv):
    """ User class. A res.users record models an OpenERP user and is different
    from an employee.
    res.users class now inherits from res.partner. The partner model is
    used to store the data related to the partner: lang, name, address,
    avatar, ... The user model is now dedicated to technical data.
    """
    __admin_ids = {}
    # Per-database cache of authenticated credentials: {dbname: {uid: password}}.
    # Filled by check(), flushed by write()/unlink() below.
    _uid_cache = {}
    _inherits = {
        'res.partner': 'partner_id',
    }
    _name = "res.users"
    _description = 'Users'
    def _set_new_password(self, cr, uid, id, name, value, args, context=None):
        # Inverse of the 'new_password' function field: store the new password.
        if value is False:
            # Do not update the password if no value is provided, ignore silently.
            # For example web client submits False values for all empty fields.
            return
        if uid == id:
            # To change their own password users must use the client-specific change password wizard,
            # so that the new password is immediately used for further RPC requests, otherwise the user
            # will face unexpected 'Access Denied' exceptions.
            raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
        self.write(cr, uid, id, {'password': value})
    def _get_password(self, cr, uid, ids, arg, karg, context=None):
        # Getter of 'new_password': the stored password is never exposed.
        return dict.fromkeys(ids, '')
    _columns = {
        'id': fields.integer('ID'),
        'login_date': fields.date('Latest connection', select=1, copy=False),
        'partner_id': fields.many2one('res.partner', required=True,
            string='Related Partner', ondelete='restrict',
            help='Partner-related data of the user', auto_join=True),
        'login': fields.char('Login', size=64, required=True,
            help="Used to log into the system"),
        'password': fields.char('Password', size=64, invisible=True, copy=False,
            help="Keep empty if you don't want the user to be able to connect on the system."),
        'new_password': fields.function(_get_password, type='char', size=64,
            fnct_inv=_set_new_password, string='Set Password',
            help="Specify a value only when creating a user or if you're "\
                 "changing the user's password, otherwise leave empty. After "\
                 "a change of password, the user has to login again."),
        'signature': fields.html('Signature'),
        'active': fields.boolean('Active'),
        'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at log on for this user, in addition to the standard menu."),
        'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
        # Special behavior for this field: res.company.search() will only return the companies
        # available to the current user (should be the user's companies?), when the user_preference
        # context is set.
        'company_id': fields.many2one('res.company', 'Company', required=True,
            help='The company this user is currently working for.', context={'user_preference': True}),
        'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
    }
    # overridden inherited fields to bypass access rights, in case you have
    # access to the user but not its corresponding partner
    name = openerp.fields.Char(related='partner_id.name', inherited=True)
    email = openerp.fields.Char(related='partner_id.email', inherited=True)
    def on_change_login(self, cr, uid, ids, login, context=None):
        # When the login looks like an email address, propose it as the email.
        if login and tools.single_email_re.match(login):
            return {'value': {'email': login}}
        return {}
    def onchange_state(self, cr, uid, ids, state_id, context=None):
        # Forward the state onchange to the underlying partner records.
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
        return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
    def onchange_type(self, cr, uid, ids, is_company, context=None):
        """ Wrapper on the user.partner onchange_type, because some calls to the
        partner form view applied to the user may trigger the
        partner.onchange_type method, but applied to the user object.
        """
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
        return self.pool['res.partner'].onchange_type(cr, uid, partner_ids, is_company, context=context)
    def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
        """ Wrapper on the user.partner onchange_address, because some calls to the
        partner form view applied to the user may trigger the
        partner.onchange_type method, but applied to the user object.
        """
        partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
        return self.pool['res.partner'].onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context)
    def _check_company(self, cr, uid, ids, context=None):
        # Constraint: the active company must belong to the allowed companies
        # (an empty allowed list places no restriction).
        return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
    _constraints = [
        (_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
    ]
    _sql_constraints = [
        ('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
    ]
    def _get_company(self,cr, uid, context=None, uid2=False):
        # Default 'company_id': the company of user uid2 (the current user
        # when uid2 is not given).
        if not uid2:
            uid2 = uid
        # Use read() to compute default company, and pass load=_classic_write to
        # avoid useless name_get() calls. This will avoid prefetching fields
        # while computing default values for new db columns, as the
        # db backend may not be fully initialized yet.
        user_data = self.pool['res.users'].read(cr, uid, uid2, ['company_id'],
                                                context=context, load='_classic_write')
        comp_id = user_data['company_id']
        return comp_id or False
    def _get_companies(self, cr, uid, context=None):
        # Default 'company_ids': just the default company, when there is one.
        c = self._get_company(cr, uid, context)
        if c:
            return [c]
        return False
    def _get_group(self,cr, uid, context=None):
        # Default 'groups_id': the base 'group_user' and 'group_partner_manager'
        # groups, skipping any that no longer exist.
        dataobj = self.pool.get('ir.model.data')
        result = []
        try:
            dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
            result.append(group_id)
            dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
            result.append(group_id)
        except ValueError:
            # If these groups does not exists anymore
            pass
        return result
    def _get_default_image(self, cr, uid, context=None):
        # Delegate the default avatar to the partner model.
        return self.pool['res.partner']._get_default_image(cr, uid, False, colorize=True, context=context)
    _defaults = {
        'password': '',
        'active': True,
        'customer': False,
        'company_id': _get_company,
        'company_ids': _get_companies,
        'groups_id': _get_group,
        'image': _get_default_image,
    }
    # User can write on a few of his own fields (but not his groups for example)
    SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
    # User can read a few of his own fields
    SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update', 'action_id']
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Read user records, masking USER_PRIVATE_FIELDS (e.g. the password)
        for readers without write access on res.users."""
        def override_password(o):
            # Mask private fields on every record except the reader's own.
            if ('id' not in o or o['id'] != uid):
                for f in USER_PRIVATE_FIELDS:
                    if f in o:
                        o[f] = '********'
            return o
        if fields and (ids == [uid] or ids == uid):
            # A user reading only whitelisted fields of himself is elevated
            # to superuser (the for/else fires when no field fails the check).
            for key in fields:
                if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
                    break
            else:
                # safe fields only, so we read as super-user to bypass access rights
                uid = SUPERUSER_ID
        result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load)
        canwrite = self.pool['ir.model.access'].check(cr, uid, 'res.users', 'write', False)
        if not canwrite:
            if isinstance(ids, (int, long)):
                result = override_password(result)
            else:
                result = map(override_password, result)
        return result
    def create(self, cr, uid, vals, context=None):
        # Create the user, then align the partner's company with the user's
        # (only when the partner is already company-bound).
        user_id = super(res_users, self).create(cr, uid, vals, context=context)
        user = self.browse(cr, uid, user_id, context=context)
        if user.partner_id.company_id:
            user.partner_id.write({'company_id': user.company_id.id})
        return user_id
    def write(self, cr, uid, ids, values, context=None):
        """Write user values.  A user writing only whitelisted fields on his
        own record is elevated to superuser; all user-related caches are
        invalidated afterwards."""
        if not hasattr(ids, '__iter__'):
            ids = [ids]
        if ids == [uid]:
            # for/else: fires only when every key passed the whitelist check
            for key in values.keys():
                if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
                    break
            else:
                if 'company_id' in values:
                    user = self.browse(cr, SUPERUSER_ID, uid, context=context)
                    if not (values['company_id'] in user.company_ids.ids):
                        # silently drop a switch to a non-allowed company
                        del values['company_id']
                uid = 1 # safe fields only, so we write as super-user to bypass access rights
        res = super(res_users, self).write(cr, uid, ids, values, context=context)
        if 'company_id' in values:
            for user in self.browse(cr, uid, ids, context=context):
                # if partner is global we keep it that way
                if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
                    user.partner_id.write({'company_id': user.company_id.id})
            # clear default ir values when company changes
            self.pool['ir.values'].get_defaults_dict.clear_cache(self.pool['ir.values'])
        # clear caches linked to the users
        self.pool['ir.model.access'].call_cache_clearing_methods(cr)
        clear = partial(self.pool['ir.rule'].clear_cache, cr)
        map(clear, ids)
        db = cr.dbname
        if db in self._uid_cache:
            # drop cached credentials of the modified users
            for id in ids:
                if id in self._uid_cache[db]:
                    del self._uid_cache[db][id]
        self._context_get.clear_cache(self)
        self.has_group.clear_cache(self)
        return res
    def unlink(self, cr, uid, ids, context=None):
        # The superuser record (id 1) must never be deleted.
        if 1 in ids:
            raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by TuniErp (updates, module installation, ...)'))
        db = cr.dbname
        if db in self._uid_cache:
            # drop cached credentials of the deleted users
            for id in ids:
                if id in self._uid_cache[db]:
                    del self._uid_cache[db][id]
        return super(res_users, self).unlink(cr, uid, ids, context=context)
    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        # Try an exact match on the login first, then fall back to the name.
        if not args:
            args=[]
        if not context:
            context={}
        ids = []
        if name and operator in ['=', 'ilike']:
            ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context)
        if not ids:
            ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context)
        return self.name_get(cr, user, ids, context=context)
    def copy(self, cr, uid, id, default=None, context=None):
        # Duplicate a user; name and login get a '(copy)' suffix unless
        # explicitly overridden through `default`.
        user2copy = self.read(cr, uid, [id], ['login','name'])[0]
        default = dict(default or {})
        if ('name' not in default) and ('partner_id' not in default):
            default['name'] = _("%s (copy)") % user2copy['name']
        if 'login' not in default:
            default['login'] = _("%s (copy)") % user2copy['login']
        return super(res_users, self).copy(cr, uid, id, default, context)
    @tools.ormcache(skiparg=2)
    def _context_get(self, cr, uid):
        # Build the user's session context from 'lang', 'tz' and every
        # 'context_*' field; cached per uid (cleared in write()).
        user = self.browse(cr, SUPERUSER_ID, uid)
        result = {}
        for k in self._fields:
            if k.startswith('context_'):
                context_key = k[8:]
            elif k in ['lang', 'tz']:
                context_key = k
            else:
                context_key = False
            if context_key:
                res = getattr(user, k) or False
                if isinstance(res, models.BaseModel):
                    # many2one values are flattened to their id
                    res = res.id
                result[context_key] = res or False
        return result
    def context_get(self, cr, uid, context=None):
        # Public wrapper around the cached _context_get.
        return self._context_get(cr, uid)
    def action_get(self, cr, uid, context=None):
        # Return the action id of 'base.action_res_users_my' (user preferences).
        dataobj = self.pool['ir.model.data']
        data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my')
        return dataobj.browse(cr, uid, data_id, context=context).res_id
    def check_super(self, passwd):
        # Validate the master password from the server configuration.
        if passwd == tools.config['admin_passwd']:
            return True
        else:
            raise openerp.exceptions.AccessDenied()
    def check_credentials(self, cr, uid, password):
        """ Override this method to plug additional authentication methods"""
        res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)])
        if not res:
            raise openerp.exceptions.AccessDenied()
    def _login(self, db, login, password):
        # Authenticate login/password against database `db`; return the uid
        # on success, False otherwise, and record the login date.
        if not password:
            return False
        user_id = False
        cr = self.pool.cursor()
        try:
            # autocommit: our single update request will be performed atomically.
            # (In this way, there is no opportunity to have two transactions
            # interleaving their cr.execute()..cr.commit() calls and have one
            # of them rolled back due to a concurrent access.)
            cr.autocommit(True)
            # check if user exists
            res = self.search(cr, SUPERUSER_ID, [('login','=',login)])
            if res:
                user_id = res[0]
                # check credentials
                self.check_credentials(cr, user_id, password)
                # We effectively unconditionally write the res_users line.
                # Even w/ autocommit there's a chance the user row will be locked,
                # in which case we can't delay the login just for the purpose of
                # update the last login date - hence we use FOR UPDATE NOWAIT to
                # try to get the lock - fail-fast
                # Failing to acquire the lock on the res_users row probably means
                # another request is holding it. No big deal, we don't want to
                # prevent/delay login in that case. It will also have been logged
                # as a SQL error, if anyone cares.
                try:
                    # NO KEY introduced in PostgreSQL 9.3 http://www.postgresql.org/docs/9.3/static/release-9-3.html#AEN115299
                    update_clause = 'NO KEY UPDATE' if cr._cnx.server_version >= 90300 else 'UPDATE'
                    cr.execute("SELECT id FROM res_users WHERE id=%%s FOR %s NOWAIT" % update_clause, (user_id,), log_exceptions=False)
                    cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,))
                    self.invalidate_cache(cr, user_id, ['login_date'], [user_id])
                except Exception:
                    _logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True)
        except openerp.exceptions.AccessDenied:
            _logger.info("Login failed for db:%s login:%s", db, login)
            user_id = False
        finally:
            cr.close()
        return user_id
    def authenticate(self, db, login, password, user_agent_env):
        """Verifies and returns the user ID corresponding to the given
        ``login`` and ``password`` combination, or False if there was
        no matching user.
        :param str db: the database on which user is trying to authenticate
        :param str login: username
        :param str password: user password
        :param dict user_agent_env: environment dictionary describing any
            relevant environment attributes
        """
        uid = self._login(db, login, password)
        if uid == openerp.SUPERUSER_ID:
            # Successfully logged in as admin!
            # Attempt to guess the web base url...
            if user_agent_env and user_agent_env.get('base_location'):
                cr = self.pool.cursor()
                try:
                    base = user_agent_env['base_location']
                    ICP = self.pool['ir.config_parameter']
                    if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
                        ICP.set_param(cr, uid, 'web.base.url', base)
                    cr.commit()
                except Exception:
                    _logger.exception("Failed to update web.base.url configuration parameter")
                finally:
                    cr.close()
        return uid
    def check(self, db, uid, passwd):
        """Verifies that the given (uid, password) is authorized for the database ``db`` and
        raise an exception if it is not."""
        if not passwd:
            # empty passwords disallowed for obvious security reasons
            raise openerp.exceptions.AccessDenied()
        if self._uid_cache.get(db, {}).get(uid) == passwd:
            # credentials already verified for this database
            return
        cr = self.pool.cursor()
        try:
            self.check_credentials(cr, uid, passwd)
            # NOTE(review): credentials are cached in cleartext, per database
            if self._uid_cache.has_key(db):
                self._uid_cache[db][uid] = passwd
            else:
                self._uid_cache[db] = {uid:passwd}
        finally:
            cr.close()
    def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
        """Change current user password. Old password must be provided explicitly
        to prevent hijacking an existing user session, or for cases where the cleartext
        password is not used to authenticate requests.
        :return: True
        :raise: openerp.exceptions.AccessDenied when old password is wrong
        :raise: except_osv when new password is not set or empty
        """
        self.check(cr.dbname, uid, old_passwd)
        if new_passwd:
            return self.write(cr, uid, uid, {'password': new_passwd})
        raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!"))
    def preference_save(self, cr, uid, ids, context=None):
        # Client action: reload the web client context after saving preferences.
        return {
            'type': 'ir.actions.client',
            'tag': 'reload_context',
        }
    def preference_change_password(self, cr, uid, ids, context=None):
        # Client action: open the change-password dialog.
        return {
            'type': 'ir.actions.client',
            'tag': 'change_password',
            'target': 'new',
        }
    @tools.ormcache(skiparg=2)
    def has_group(self, cr, uid, group_ext_id):
        """Checks whether user belongs to given group.
        :param str group_ext_id: external ID (XML ID) of the group.
           Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use..
        :return: True if the current user is a member of the group with the
           given external ID (XML ID), else False.
        """
        assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified"
        module, ext_id = group_ext_id.split('.')
        cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
                        (SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
                   (uid, module, ext_id))
        return bool(cr.fetchone())
#----------------------------------------------------------
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied"
# or "inherited" groups. Once a user belongs to a group, it
# automatically belongs to the implied groups (transitively).
#----------------------------------------------------------
class cset(object):
    """ A cset (constrained set) is a set of elements that may be constrained to
        be a subset of other csets. Elements added to a cset are automatically
        added to its supersets. Cycles in the subset constraints are supported.
    """
    def __init__(self, xs):
        # csets that must contain every element of this one
        self.supersets = set()
        self.elements = set(xs)

    def subsetof(self, other):
        """Constrain this cset to be a subset of `other`, pushing the
        current elements upward immediately."""
        if other is self:
            return
        self.supersets.add(other)
        other.update(self.elements)

    def update(self, xs):
        """Add elements and propagate them to all supersets; terminates on
        cycles because only genuinely new elements are forwarded."""
        fresh = set(xs) - self.elements
        if not fresh:
            return
        self.elements.update(fresh)
        for sup in self.supersets:
            sup.update(fresh)

    def __iter__(self):
        return iter(self.elements)
# Flatten one level of nesting lazily: concat([[1, 2], [3]]) -> 1, 2, 3
concat = itertools.chain.from_iterable
class groups_implied(osv.osv):
    # Extends res.groups with "implied" groups: belonging to a group
    # automatically grants its (transitively) implied groups.
    _inherit = 'res.groups'
    def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
        "computes the transitive closure of relation implied_ids"
        memo = {} # use a memo for performance and cycle avoidance
        def computed_set(g):
            # cset of all groups transitively implied by g (cycle-safe,
            # thanks to cset's propagation semantics)
            if g not in memo:
                memo[g] = cset(g.implied_ids)
                for h in g.implied_ids:
                    computed_set(h).subsetof(memo[g])
            return memo[g]
        res = {}
        for g in self.browse(cr, SUPERUSER_ID, ids, context):
            res[g.id] = map(int, computed_set(g))
        return res
    _columns = {
        'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
            string='Inherits', help='Users of this group automatically inherit those groups'),
        'trans_implied_ids': fields.function(_get_trans_implied,
            type='many2many', relation='res.groups', string='Transitively inherits'),
    }
    def create(self, cr, uid, values, context=None):
        # Create the group first, then add users through write() so that
        # implied groups get propagated to them.
        users = values.pop('users', None)
        gid = super(groups_implied, self).create(cr, uid, values, context)
        if users:
            # delegate addition of users to add implied groups
            self.write(cr, uid, [gid], {'users': users}, context)
        return gid
    def write(self, cr, uid, ids, values, context=None):
        res = super(groups_implied, self).write(cr, uid, ids, values, context)
        if values.get('users') or values.get('implied_ids'):
            # add all implied groups (to all users of each group)
            for g in self.browse(cr, uid, ids, context=context):
                gids = map(int, g.trans_implied_ids)
                vals = {'users': [(4, u.id) for u in g.users]}
                super(groups_implied, self).write(cr, uid, gids, vals, context)
        return res
class users_implied(osv.osv):
    # Extends res.users so that assigning a group also assigns all of its
    # transitively implied groups.
    _inherit = 'res.users'
    def create(self, cr, uid, values, context=None):
        # Create the user first, then add groups through write() so that
        # implied groups get propagated as well.
        groups = values.pop('groups_id', None)
        user_id = super(users_implied, self).create(cr, uid, values, context)
        if groups:
            # delegate addition of groups to add implied groups
            self.write(cr, uid, [user_id], {'groups_id': groups}, context)
            self.pool['ir.ui.view'].clear_cache()
        return user_id
    def write(self, cr, uid, ids, values, context=None):
        if not isinstance(ids,list):
            ids = [ids]
        res = super(users_implied, self).write(cr, uid, ids, values, context)
        if values.get('groups_id'):
            # add implied groups for all users
            for user in self.browse(cr, uid, ids):
                gs = set(concat(g.trans_implied_ids for g in user.groups_id))
                vals = {'groups_id': [(4, g.id) for g in gs]}
                super(users_implied, self).write(cr, uid, [user.id], vals, context)
            self.pool['ir.ui.view'].clear_cache()
        return res
#----------------------------------------------------------
# Virtual checkbox and selection for res.user form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically defines a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#----------------------------------------------------------
def name_boolean_group(id):
    """Return the reified boolean field name for group *id*."""
    return 'in_group_%s' % id
def name_selection_groups(ids):
    """Return the reified selection field name for the given group ids."""
    return 'sel_groups_' + '_'.join(str(gid) for gid in ids)
def is_boolean_group(name):
    """True iff *name* is a reified boolean group field name."""
    return name[:9] == 'in_group_'
def is_selection_groups(name):
    """True iff *name* is a reified selection group field name."""
    return name[:11] == 'sel_groups_'
def is_reified_group(name):
    """True iff *name* is any reified group field (boolean or selection)."""
    return is_selection_groups(name) or is_boolean_group(name)
def get_boolean_group(name):
    """Extract the group id from an 'in_group_ID' field name."""
    return int(name[len('in_group_'):])
def get_selection_groups(name):
    """Extract the list of group ids from a 'sel_groups_ID1_..._IDk' name."""
    return [int(part) for part in name[len('sel_groups_'):].split('_')]
def partition(f, xs):
    "return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
    matching, rest = [], []
    for item in xs:
        bucket = matching if f(item) else rest
        bucket.append(item)
    return matching, rest
def parse_m2m(commands):
    """Return the list of ids corresponding to a many2many value.

    `commands` mixes plain ids with ORM m2m command tuples:
      (1, id, values) -> keep `id` linked (update)
      (4, id)         -> link `id`
      (5,)            -> clear everything collected so far
      (6, _, ids)     -> replace the whole list with `ids`
    Tuple commands with other opcodes fall through unchanged (ignored).
    """
    ids = []
    for command in commands:
        if isinstance(command, (tuple, list)):
            if command[0] in (1, 4):
                # BUGFIX: the linked id is the second element of the command;
                # command[2] raised IndexError on two-element (4, id) links
                # and appended the values dict for (1, id, values) updates.
                ids.append(command[1])
            elif command[0] == 5:
                ids = []
            elif command[0] == 6:
                ids = list(command[2])
        else:
            ids.append(command)
    return ids
class groups_view(osv.osv):
_inherit = 'res.groups'
def create(self, cr, uid, values, context=None):
res = super(groups_view, self).create(cr, uid, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(groups_view, self).write(cr, uid, ids, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(groups_view, self).unlink(cr, uid, ids, context)
self.update_user_groups_view(cr, uid, context)
return res
def update_user_groups_view(self, cr, uid, context=None):
# the view with id 'base.user_groups_view' inherits the user form view,
# and introduces the reified group fields
# we have to try-catch this, because at first init the view does not exist
# but we are already creating some basic groups
if not context or context.get('install_mode'):
# use installation/admin language for translatable names in the view
context = dict(context or {})
context.update(self.pool['res.users'].context_get(cr, uid))
view = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.user_groups_view', context=context)
if view and view.exists() and view._name == 'ir.ui.view':
xml1, xml2 = [], []
xml1.append(E.separator(string=_('Application'), colspan="4"))
for app, kind, gs in self.get_groups_by_application(cr, uid, context):
# hide groups in category 'Hidden' (except to group_no_one)
attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 'base.module_category_hidden' else {}
if kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(map(int, gs))
xml1.append(E.field(name=field_name, **attrs))
xml1.append(E.newline())
else:
# application separator with boolean fields
app_name = app and app.name or _('Other')
xml2.append(E.separator(string=app_name, colspan="4", **attrs))
for g in gs:
field_name = name_boolean_group(g.id)
xml2.append(E.field(name=field_name, **attrs))
xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
view.write({'arch': xml_content})
return True
def get_application_groups(self, cr, uid, domain=None, context=None):
    """ Return the ids of the groups to expose in the user form view,
        optionally restricted by `domain`. """
    # NOTE(review): `context` is accepted but deliberately not forwarded
    # to search() here — preserved as-is
    search_domain = domain or []
    return self.search(cr, uid, search_domain)
def get_groups_by_application(self, cr, uid, context=None):
    """ return all groups classified by application (module category), as a list of pairs:
            [(app, kind, [group, ...]), ...],
        where app and group are browse records, and kind is either 'boolean' or 'selection'.
        Applications are given in sequence order.  If kind is 'selection', the groups are
        given in reverse implication order.
    """
    def linearized(gs):
        # Return `gs` sorted by implication (implied groups first) when the
        # implication relation totally orders them, else None.
        gs = set(gs)
        # determine sequence order: a group should appear after its implied groups
        order = dict.fromkeys(gs, 0)
        for g in gs:
            # each group implied by g gets a lower (earlier) rank
            for h in gs.intersection(g.trans_implied_ids):
                order[h] -= 1
        # check whether order is total, i.e., sequence orders are distinct
        if len(set(order.itervalues())) == len(gs):
            return sorted(gs, key=lambda g: order[g])
        return None

    # classify all groups by application
    gids = self.get_application_groups(cr, uid, context=context)
    by_app, others = {}, []
    for g in self.browse(cr, uid, gids, context):
        if g.category_id:
            # browse records are used as dict keys here (hash by identity/id)
            by_app.setdefault(g.category_id, []).append(g)
        else:
            others.append(g)

    # build the result: 'selection' when the app's groups are totally
    # ordered by implication, 'boolean' otherwise
    res = []
    apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
    for app in apps:
        gs = linearized(by_app[app])
        if gs:
            res.append((app, 'selection', gs))
        else:
            res.append((app, 'boolean', by_app[app]))
    if others:
        # uncategorized groups are grouped under a False application
        res.append((False, 'boolean', others))
    return res
class users_view(osv.osv):
    """ Extend res.users so that the reified group fields (one boolean
        field per group and one selection field per application, as
        generated by groups_view.update_user_groups_view) behave like
        regular fields in create/write/read/default_get/fields_get.
    """
    _inherit = 'res.users'

    def create(self, cr, uid, values, context=None):
        # fold reified group fields back into a real 'groups_id' command list
        values = self._remove_reified_groups(values)
        return super(users_view, self).create(cr, uid, values, context)

    def write(self, cr, uid, ids, values, context=None):
        values = self._remove_reified_groups(values)
        return super(users_view, self).write(cr, uid, ids, values, context)

    def _remove_reified_groups(self, values):
        """ return `values` without reified group fields """
        add, rem = [], []
        values1 = {}
        for key, val in values.iteritems():
            if is_boolean_group(key):
                # checked boolean means "link the group", unchecked "unlink"
                (add if val else rem).append(get_boolean_group(key))
            elif is_selection_groups(key):
                # unlink every group of the selection, then re-link the
                # selected one (if any)
                rem += get_selection_groups(key)
                if val:
                    add.append(val)
            else:
                values1[key] = val
        if 'groups_id' not in values and (add or rem):
            # remove group ids in `rem` and add group ids in `add`
            # ((3, id) = unlink command, (4, id) = link command)
            values1['groups_id'] = zip(repeat(3), rem) + zip(repeat(4), add)
        return values1

    def default_get(self, cr, uid, fields, context=None):
        """ Add default values for the reified group fields, and honor the
            optional context key "default_groups_ref" (list of group xml ids)
            as a default for 'groups_id'.

            :raise osv.except_osv: if an xml id in "default_groups_ref" is
                not of the form 'module.name'
        """
        group_fields, fields = partition(is_reified_group, fields)
        fields1 = (fields + ['groups_id']) if group_fields else fields
        values = super(users_view, self).default_get(cr, uid, fields1, context)
        self._add_reified_groups(group_fields, values)

        # add "default_groups_ref" inside the context to set default value for group_id with xml values
        # guard on `context`: it defaults to None and must not be dereferenced
        if context and 'groups_id' in fields and isinstance(context.get("default_groups_ref"), list):
            groups = []
            ir_model_data = self.pool.get('ir.model.data')
            for group_xml_id in context["default_groups_ref"]:
                group_split = group_xml_id.split('.')
                if len(group_split) != 2:
                    raise osv.except_osv(_('Invalid context value'), _('Invalid context default_groups_ref value (model.name_id) : "%s"') % group_xml_id)
                try:
                    temp, group_id = ir_model_data.get_object_reference(cr, uid, group_split[0], group_split[1])
                except ValueError:
                    # unknown xml id: keep a False placeholder in the list
                    group_id = False
                groups += [group_id]
            values['groups_id'] = groups
        return values

    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """ Override read() to compute the reified group fields from
            'groups_id', which is fetched transparently when needed. """
        # determine whether reified groups fields are required, and which ones
        fields1 = fields or self.fields_get(cr, uid, context=context).keys()
        group_fields, other_fields = partition(is_reified_group, fields1)

        # read regular fields (other_fields); add 'groups_id' if necessary
        drop_groups_id = False
        if group_fields and fields:
            if 'groups_id' not in other_fields:
                # fetch groups_id only to derive the reified fields,
                # and drop it from the result afterwards
                other_fields.append('groups_id')
                drop_groups_id = True
        else:
            other_fields = fields

        res = super(users_view, self).read(cr, uid, ids, other_fields, context=context, load=load)

        # post-process result to add reified group fields
        if group_fields:
            # res is a dict when `ids` is a single id
            for values in (res if isinstance(res, list) else [res]):
                self._add_reified_groups(group_fields, values)
                if drop_groups_id:
                    values.pop('groups_id', None)
        return res

    def _add_reified_groups(self, fields, values):
        """ add the given reified group fields into `values` """
        gids = set(parse_m2m(values.get('groups_id') or []))
        for f in fields:
            if is_boolean_group(f):
                values[f] = get_boolean_group(f) in gids
            elif is_selection_groups(f):
                # the selection's value is the last (strongest) group present
                selected = [gid for gid in get_selection_groups(f) if gid in gids]
                values[f] = selected and selected[-1] or False

    def fields_get(self, cr, uid, allfields=None, context=None, write_access=True, attributes=None):
        """ Add field descriptions for the reified group fields; they are
            only exposed to the superuser and to ERP managers. """
        res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access, attributes)
        # add reified groups fields
        if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            return res
        for app, kind, gs in self.pool['res.groups'].get_groups_by_application(cr, uid, context):
            if kind == 'selection':
                # selection group field
                tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
                res[name_selection_groups(map(int, gs))] = {
                    'type': 'selection',
                    'string': app and app.name or _('Other'),
                    'selection': [(False, '')] + [(g.id, g.name) for g in gs],
                    'help': '\n'.join(tips),
                    'exportable': False,
                    'selectable': False,
                }
            else:
                # boolean group fields
                for g in gs:
                    res[name_boolean_group(g.id)] = {
                        'type': 'boolean',
                        'string': g.name,
                        'help': g.comment,
                        'exportable': False,
                        'selectable': False,
                    }
        return res
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class change_password_wizard(osv.TransientModel):
    """
    A wizard to manage the change of users' passwords
    """
    _name = "change.password.wizard"
    _description = "Change Password Wizard"
    _columns = {
        'user_ids': fields.one2many('change.password.user', 'wizard_id', string='Users'),
    }

    def _default_user_ids(self, cr, uid, context=None):
        # pre-populate one wizard line per selected user (from active_ids)
        context = context or {}
        selected_ids = []
        if context.get('active_model') == 'res.users':
            selected_ids = context.get('active_ids') or []
        users = self.pool['res.users'].browse(cr, uid, selected_ids, context=context)
        return [(0, 0, {'user_id': u.id, 'user_login': u.login}) for u in users]

    _defaults = {
        'user_ids': _default_user_ids,
    }

    def change_password_button(self, cr, uid, ids, context=None):
        # delegate the actual password updates to the wizard lines;
        # if the current user changed his own password, reload the client
        wizard = self.browse(cr, uid, ids, context=context)[0]
        reload_needed = False
        line_ids = []
        for line in wizard.user_ids:
            line_ids.append(line.id)
            if line.user_id.id == uid:
                reload_needed = True
        self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context)
        if reload_needed:
            return {
                'type': 'ir.actions.client',
                'tag': 'reload'
            }
        return {'type': 'ir.actions.act_window_close'}
class change_password_user(osv.TransientModel):
    """
    A model to configure users in the change password wizard
    """
    _name = 'change.password.user'
    _description = 'Change Password Wizard User'
    _columns = {
        'wizard_id': fields.many2one('change.password.wizard', string='Wizard', required=True),
        'user_id': fields.many2one('res.users', string='User', required=True),
        'user_login': fields.char('User Login', readonly=True),
        'new_passwd': fields.char('New Password'),
    }
    _defaults = {
        'new_passwd': '',
    }

    def change_password_button(self, cr, uid, ids, context=None):
        # apply each line's new password to its user
        for record in self.browse(cr, uid, ids, context=context):
            record.user_id.write({'password': record.new_passwd})
        # don't keep temporary passwords in the database longer than necessary
        self.write(cr, uid, ids, {'new_passwd': False}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.