| repo_name (stringlengths 5–100) | path (stringlengths 4–294) | copies (stringclasses, 990 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses, 15 values) |
|---|---|---|---|---|---|
stucox/djangae | djangae/contrib/gauth/common/backends.py | 1 | 3166 | import logging
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import BaseUserManager
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.backends import ModelBackend
from django.utils import timezone
# DJANGAE
from djangae.contrib.gauth.common.models import GaeAbstractBaseUser
# This is here so that we only log once on import, not on each authentication
if hasattr(settings, "ALLOW_USER_PRE_CREATION"):
logging.warning(
"settings.ALLOW_USER_PRE_CREATION is deprecated, "
"please use DJANGAE_ALLOW_USER_PRECREATION instead"
)
class BaseAppEngineUserAPIBackend(ModelBackend):
"""
A custom Django authentication backend, which lets us authenticate against the Google
users API
"""
supports_anonymous_user = True
def authenticate(self, **credentials):
"""
Handles authentication of a user from the given credentials.
Credentials must be a combination of 'request' and 'google_user'.
If any other combination of credentials is given then we raise a TypeError, see
authenticate() in django.contrib.auth.__init__.py.
"""
User = get_user_model()
if not issubclass(User, GaeAbstractBaseUser):
raise ImproperlyConfigured(
"djangae.contrib.auth.backends.AppEngineUserAPI requires AUTH_USER_MODEL to be a "
" subclass of djangae.contrib.auth.base.GaeAbstractBaseUser."
)
if len(credentials) != 1:
# Django expects a TypeError if this backend cannot handle the given credentials
raise TypeError()
google_user = credentials.get('google_user', None)
if google_user:
user_id = google_user.user_id()
email = google_user.email().lower()
try:
user = User.objects.get(username=user_id)
except User.DoesNotExist:
if (
getattr(settings, 'DJANGAE_ALLOW_USER_PRE_CREATION', False) or
# Backwards compatibility, remove before 1.0
getattr(settings, 'ALLOW_USER_PRE_CREATION', False)
):
# Check to see if a User object for this email address has been pre-created.
try:
# Convert the pre-created User object so that the user can now login via
# Google Accounts, and ONLY via Google Accounts.
user = User.objects.get(email=BaseUserManager.normalize_email(email), username=None)
user.username = user_id
user.last_login = timezone.now()
user.save()
return user
except User.DoesNotExist:
pass
user = User.objects.create_user(user_id, email)
return user
else:
raise TypeError() # Django expects to be able to pass in whatever credentials it has, and for you to raise a TypeError if they mean nothing to you
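# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of the Django settings this backend expects; the app
# label and model name below are hypothetical.
#
#   AUTHENTICATION_BACKENDS = (
#       'djangae.contrib.gauth.common.backends.BaseAppEngineUserAPIBackend',
#   )
#   AUTH_USER_MODEL = 'myapp.GaeUser'  # must subclass GaeAbstractBaseUser
#   DJANGAE_ALLOW_USER_PRE_CREATION = True  # optional: allow pre-created users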
| bsd-3-clause |
renxinhe/TestFlaskWebsite | lib/werkzeug/exceptions.py | 80 | 18609 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example, those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the Python exception class.
As a matter of fact they are not Werkzeug response objects. However, you
can get a response object by calling ``get_response()`` on an HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page for, say, a 404 status
code, you can add a second except for a specific subclass of the error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound as e:
return not_found(request)
except HTTPException as e:
return e
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
"""
Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None, response=None):
Exception.__init__(self)
if description is not None:
self.description = description
self.response = response
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, *args, **kwargs):
cls.__init__(self, *args, **kwargs)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')
def get_description(self, environ=None):
"""Get the description."""
return u'<p>%s</p>' % escape(self.description)
def get_body(self, environ=None):
"""Get the HTML body."""
return text_type((
u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
u'<title>%(code)s %(name)s</title>\n'
u'<h1>%(name)s</h1>\n'
u'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
})
def get_headers(self, environ=None):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ=None):
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
on how the request looked.
:return: a :class:`Response` object or a subclass thereof.
"""
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ)
return Response(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return '%d: %s' % (self.code, self.name)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'The browser (or proxy) sent a request that this server could '
'not understand.'
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
by WSGI to a large extent, this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class SecurityError(BadRequest):
"""Raised if something triggers a security error. This is otherwise
exactly like a bad request error.
.. versionadded:: 0.9
"""
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and '
'try again.'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking, the response would be invalid if you don't provide valid
methods in the header, which you can do with that list.
"""
code = 405
description = 'The method is not allowed for the requested URL.'
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signal a timeout.
"""
code = 408
description = (
'The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.'
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
'A conflict happened while processing the request. The resource '
'might have been modified while the request was being processed.'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
'The requested URL is no longer available on this server and there '
'is no forwarding address. If you followed a link from a foreign '
'page, please contact the author of this page.'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'A request with this method requires a valid <code>Content-'
'Length</code> header.'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'The precondition on the request for the URL failed positive '
'evaluation.'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'The data value transmitted exceeds the capacity limit.'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
'The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'The server does not support the media type transmitted in '
'the request.'
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for a part of the file that lies beyond the end
of the file.
.. versionadded:: 0.7
"""
code = 416
description = (
'The server cannot provide the requested range.'
)
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
description = (
'The server could not meet the requirements of the Expect header'
)
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
description = (
'This server is a teapot, not a coffee machine'
)
class UnprocessableEntity(HTTPException):
"""*422* `Unprocessable Entity`
Used if the request is well formed, but the instructions are otherwise
incorrect.
"""
code = 422
description = (
'The request was well-formed but was unable to be followed '
'due to semantic errors.'
)
class PreconditionRequired(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
each client to include a conditional header ("If-Match" or "If-Unmodified-
Since") with the proper value retained from a recent GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
'This request is required to be conditional; try using "If-Match" '
'or "If-Unmodified-Since".'
)
class TooManyRequests(HTTPException):
"""*429* `Too Many Requests`
The server is limiting the rate at which this user receives responses, and
this request exceeds that rate. (The server may use any convenient method
to identify users and their request rates). The server may include a
"Retry-After" header to indicate how long the user should wait before
retrying.
"""
code = 429
description = (
'This user has exceeded an allotted request count. Try again later.'
)
class RequestHeaderFieldsTooLarge(HTTPException):
"""*431* `Request Header Fields Too Large`
The server refuses to process the request because the header fields are too
large. One or more individual fields may be too large, or the set of all
headers is too large.
"""
code = 431
description = (
'One or more header fields exceed the maximum size.'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'The server does not support the action requested by the '
'browser.'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'The proxy server received an invalid response from an upstream '
'server.'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.'
)
class GatewayTimeout(HTTPException):
"""*504* `Gateway Timeout`
Status code you should return if a connection to an upstream server
times out.
"""
code = 504
description = (
'The connection to an upstream server timed out.'
)
class HTTPVersionNotSupported(HTTPException):
"""*505* `HTTP Version Not Supported`
The server does not support the HTTP protocol version used in the request.
"""
code = 505
description = (
'The server does not support the HTTP protocol version used in the '
'request.'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in iteritems(globals()):
try:
is_http_exception = issubclass(obj, HTTPException)
except TypeError:
is_http_exception = False
if not is_http_exception or obj.code is None:
continue
__all__.append(obj.__name__)
old_obj = default_exceptions.get(obj.code, None)
if old_obj is not None and issubclass(obj, old_obj):
continue
default_exceptions[obj.code] = obj
_find_exceptions()
del _find_exceptions
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as
a callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping; if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, integer_types):
raise HTTPException(response=code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
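# --- Illustrative usage (not part of the original file) ---
# A small sketch of the module-level helpers defined above: ``abort()``
# raises the mapped exception, and ``BadRequestKeyError`` is both a
# ``KeyError`` and a ``BadRequest``.
if __name__ == '__main__':
    assert issubclass(BadRequestKeyError, KeyError)
    assert issubclass(BadRequestKeyError, BadRequest)
    try:
        abort(404)
    except HTTPException as e:
        print(e)                 # 404: Not Found
        resp = e.get_response()  # a Response object for the default error page
        print(resp.status)       # 404 NOT FOUND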
| apache-2.0 |
beni55/django | django/contrib/gis/measure.py | 118 | 12286 | # Copyright (c) 2007, Robert Coup <robert.coup@onetrackmind.co.nz>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (http://exogen.case.edu/projects/geopy/)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
__all__ = ['A', 'Area', 'D', 'Distance']
from decimal import Decimal
from django.utils import six
from django.utils.functional import total_ordering
NUMERIC_TYPES = six.integer_types + (float, Decimal)
AREA_PREFIX = "sq_"
def pretty_name(obj):
return obj.__name__ if obj.__class__ == type else obj.__class__.__name__
@total_ordering
class MeasureBase(object):
STANDARD_UNIT = None
ALIAS = {}
UNITS = {}
LALIAS = {}
def __init__(self, default_unit=None, **kwargs):
value, self._default_unit = self.default_units(kwargs)
setattr(self, self.STANDARD_UNIT, value)
if default_unit and isinstance(default_unit, six.string_types):
self._default_unit = default_unit
def _get_standard(self):
return getattr(self, self.STANDARD_UNIT)
def _set_standard(self, value):
setattr(self, self.STANDARD_UNIT, value)
standard = property(_get_standard, _set_standard)
def __getattr__(self, name):
if name in self.UNITS:
return self.standard / self.UNITS[name]
else:
raise AttributeError('Unknown unit type: %s' % name)
def __repr__(self):
return '%s(%s=%s)' % (pretty_name(self), self._default_unit,
getattr(self, self._default_unit))
def __str__(self):
return '%s %s' % (getattr(self, self._default_unit), self._default_unit)
# **** Comparison methods ****
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.standard == other.standard
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.standard < other.standard
else:
return NotImplemented
# **** Operators methods ****
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard + other.standard)})
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.standard += other.standard
return self
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __sub__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard - other.standard)})
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __isub__(self, other):
if isinstance(other, self.__class__):
self.standard -= other.standard
return self
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __mul__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __imul__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard *= float(other)
return self
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, self.__class__):
return self.standard / other.standard
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __itruediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard /= float(other)
return self
else:
raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)})
def __idiv__(self, other): # Python 2 compatibility
return type(self).__itruediv__(self, other)
def __bool__(self):
return bool(self.standard)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def default_units(self, kwargs):
"""
Return the unit value and the default units specified
from the given keyword arguments dictionary.
"""
val = 0.0
default_unit = self.STANDARD_UNIT
for unit, value in six.iteritems(kwargs):
if not isinstance(value, float):
value = float(value)
if unit in self.UNITS:
val += self.UNITS[unit] * value
default_unit = unit
elif unit in self.ALIAS:
u = self.ALIAS[unit]
val += self.UNITS[u] * value
default_unit = u
else:
lower = unit.lower()
if lower in self.UNITS:
val += self.UNITS[lower] * value
default_unit = lower
elif lower in self.LALIAS:
u = self.LALIAS[lower]
val += self.UNITS[u] * value
default_unit = u
else:
raise AttributeError('Unknown unit type: %s' % unit)
return val, default_unit
@classmethod
def unit_attname(cls, unit_str):
"""
Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found.
"""
lower = unit_str.lower()
if unit_str in cls.UNITS:
return unit_str
elif lower in cls.UNITS:
return lower
elif lower in cls.LALIAS:
return cls.LALIAS[lower]
else:
raise Exception('Could not find a unit keyword associated with "%s"' % unit_str)
class Distance(MeasureBase):
STANDARD_UNIT = "m"
UNITS = {
'chain': 20.1168,
'chain_benoit': 20.116782,
'chain_sears': 20.1167645,
'british_chain_benoit': 20.1167824944,
'british_chain_sears': 20.1167651216,
'british_chain_sears_truncated': 20.116756,
'cm': 0.01,
'british_ft': 0.304799471539,
'british_yd': 0.914398414616,
'clarke_ft': 0.3047972654,
'clarke_link': 0.201166195164,
'fathom': 1.8288,
'ft': 0.3048,
'german_m': 1.0000135965,
'gold_coast_ft': 0.304799710181508,
'indian_yd': 0.914398530744,
'inch': 0.0254,
'km': 1000.0,
'link': 0.201168,
'link_benoit': 0.20116782,
'link_sears': 0.20116765,
'm': 1.0,
'mi': 1609.344,
'mm': 0.001,
'nm': 1852.0,
'nm_uk': 1853.184,
'rod': 5.0292,
'sears_yd': 0.91439841,
'survey_ft': 0.304800609601,
'um': 0.000001,
'yd': 0.9144,
}
# Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
ALIAS = {
'centimeter': 'cm',
'foot': 'ft',
'inches': 'inch',
'kilometer': 'km',
'kilometre': 'km',
'meter': 'm',
'metre': 'm',
'micrometer': 'um',
'micrometre': 'um',
'millimeter': 'mm',
'millimetre': 'mm',
'mile': 'mi',
'yard': 'yd',
'British chain (Benoit 1895 B)': 'british_chain_benoit',
'British chain (Sears 1922)': 'british_chain_sears',
'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated',
'British foot (Sears 1922)': 'british_ft',
'British foot': 'british_ft',
'British yard (Sears 1922)': 'british_yd',
'British yard': 'british_yd',
"Clarke's Foot": 'clarke_ft',
"Clarke's link": 'clarke_link',
'Chain (Benoit)': 'chain_benoit',
'Chain (Sears)': 'chain_sears',
'Foot (International)': 'ft',
'German legal metre': 'german_m',
'Gold Coast foot': 'gold_coast_ft',
'Indian yard': 'indian_yd',
'Link (Benoit)': 'link_benoit',
'Link (Sears)': 'link_sears',
'Nautical Mile': 'nm',
'Nautical Mile (UK)': 'nm_uk',
'US survey foot': 'survey_ft',
'U.S. Foot': 'survey_ft',
'Yard (Indian)': 'indian_yd',
'Yard (Sears)': 'sears_yd'
}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __mul__(self, other):
if isinstance(other, self.__class__):
return Area(default_unit=AREA_PREFIX + self._default_unit,
**{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)})
elif isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % {
"distance": pretty_name(self.__class__),
})
class Area(MeasureBase):
STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
# Getting the square units values and the alias dictionary.
UNITS = {'%s%s' % (AREA_PREFIX, k): v ** 2 for k, v in Distance.UNITS.items()}
ALIAS = {k: '%s%s' % (AREA_PREFIX, v) for k, v in Distance.ALIAS.items()}
LALIAS = {k.lower(): v for k, v in ALIAS.items()}
def __truediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
# Shortcuts
D = Distance
A = Area
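# --- Illustrative usage (not part of the original file) ---
# A quick sketch of the measure API defined above: construct with any unit
# keyword, read any other unit back as an attribute, and multiply two
# Distances to get an Area.
if __name__ == '__main__':
    d = D(km=5)
    print(d.mi)           # ~3.10686 -- the standard value is kept in metres
    print(d + D(m=500))   # "5.5 km" -- result keeps the left operand's unit
    a = D(m=3) * D(m=4)   # Distance * Distance -> Area
    print(a.sq_m)         # 12.0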
| bsd-3-clause |
dexterx17/nodoSocket | clients/Python-2.7.6/Lib/plat-atheos/TYPES.py | 74 | 2682 | # Generated by h2py from /include/sys/types.h
_SYS_TYPES_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC9X_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506L
_XOPEN_SOURCE = 500
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC9X = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506L
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 1
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __P(args): return ()
def __PMT(args): return ()
def __STRING(x): return #x
def __STRING(x): return "x"
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/types.h
_BITS_TYPES_H = 1
__FD_SETSIZE = 1024
def __FDELT(d): return ((d) / __NFDBITS)
# Included from bits/pthreadtypes.h
# Included from time.h
_TIME_H = 1
# Included from bits/time.h
# Included from posix/time.h
# Included from posix/types.h
MAXHOSTNAMELEN = 64
FD_SETSIZE = 1024
CLOCKS_PER_SEC = 1000000
_BITS_TIME_H = 1
CLOCKS_PER_SEC = 1000000
CLK_TCK = 100
_STRUCT_TIMEVAL = 1
CLK_TCK = CLOCKS_PER_SEC
__clock_t_defined = 1
__time_t_defined = 1
__timespec_defined = 1
def __isleap(year): return \
__BIT_TYPES_DEFINED__ = 1
# Included from endian.h
_ENDIAN_H = 1
__LITTLE_ENDIAN = 1234
__BIG_ENDIAN = 4321
__PDP_ENDIAN = 3412
# Included from bits/endian.h
__BYTE_ORDER = __LITTLE_ENDIAN
__FLOAT_WORD_ORDER = __BYTE_ORDER
LITTLE_ENDIAN = __LITTLE_ENDIAN
BIG_ENDIAN = __BIG_ENDIAN
PDP_ENDIAN = __PDP_ENDIAN
BYTE_ORDER = __BYTE_ORDER
# Included from sys/select.h
_SYS_SELECT_H = 1
# Included from bits/select.h
def __FD_ZERO(fdsp): return \
def __FD_ZERO(set): return \
# Included from bits/sigset.h
_SIGSET_H_types = 1
_SIGSET_H_fns = 1
def __sigmask(sig): return \
def __sigemptyset(set): return \
def __sigfillset(set): return \
def __sigisemptyset(set): return \
FD_SETSIZE = __FD_SETSIZE
def FD_ZERO(fdsetp): return __FD_ZERO (fdsetp)
# Included from sys/sysmacros.h
_SYS_SYSMACROS_H = 1
def major(dev): return ( (( (dev) >> 8) & 0xff))
def minor(dev): return ( ((dev) & 0xff))
| mit |
fortunado/zerorpc-python | zerorpc/events.py | 24 | 8839 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import msgpack
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.coros
import gevent_zmq as zmq
from .context import Context
class Sender(object):
def __init__(self, socket):
self._socket = socket
self._send_queue = gevent.queue.Queue(maxsize=0)
self._send_task = gevent.spawn(self._sender)
def __del__(self):
self.close()
def close(self):
if self._send_task:
self._send_task.kill()
def _sender(self):
running = True
for parts in self._send_queue:
for i in xrange(len(parts) - 1):
try:
self._socket.send(parts[i], flags=zmq.SNDMORE)
except gevent.GreenletExit:
if i == 0:
return
running = False
self._socket.send(parts[i], flags=zmq.SNDMORE)
self._socket.send(parts[-1])
if not running:
return
def __call__(self, parts):
self._send_queue.put(parts)
class Receiver(object):
def __init__(self, socket):
self._socket = socket
self._recv_queue = gevent.queue.Queue(maxsize=0)
self._recv_task = gevent.spawn(self._recver)
def __del__(self):
self.close()
def close(self):
if self._recv_task:
self._recv_task.kill()
def _recver(self):
running = True
while True:
parts = []
while True:
try:
part = self._socket.recv()
except gevent.GreenletExit:
running = False
if len(parts) == 0:
return
part = self._socket.recv()
parts.append(part)
if not self._socket.getsockopt(zmq.RCVMORE):
break
if not running:
break
self._recv_queue.put(parts)
def __call__(self):
return self._recv_queue.get()
class Event(object):
__slots__ = [ '_name', '_args', '_header' ]
def __init__(self, name, args, context, header=None):
self._name = name
self._args = args
if header is None:
context = context or Context.get_instance()
self._header = {
'message_id': context.new_msgid(),
'v': 3
}
else:
self._header = header
@property
def header(self):
return self._header
@property
def name(self):
return self._name
@name.setter
def name(self, v):
self._name = v
@property
def args(self):
return self._args
def pack(self):
return msgpack.Packer().pack((self._header, self._name, self._args))
@staticmethod
def unpack(blob):
unpacker = msgpack.Unpacker()
unpacker.feed(blob)
(header, name, args) = unpacker.unpack()
# Backward compatibility
if not isinstance(header, dict):
header = {}
return Event(name, args, None, header)
def __str__(self, ignore_args=False):
if ignore_args:
args = '[...]'
else:
args = self._args
try:
args = '<<{0}>>'.format(str(self.unpack(self._args)))
except:
pass
return '{0} {1} {2}'.format(self._name, self._header,
args)
class Events(object):
def __init__(self, zmq_socket_type, context=None):
self._zmq_socket_type = zmq_socket_type
self._context = context or Context.get_instance()
self._socket = zmq.Socket(self._context, zmq_socket_type)
self._send = self._socket.send_multipart
self._recv = self._socket.recv_multipart
if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.XREQ, zmq.XREP):
self._send = Sender(self._socket)
if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.XREQ, zmq.XREP):
self._recv = Receiver(self._socket)
@property
def recv_is_available(self):
return self._zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.XREQ, zmq.XREP)
def __del__(self):
if not self._socket.closed:
self.close()
def close(self):
try:
self._send.close()
except AttributeError:
pass
try:
self._recv.close()
except AttributeError:
pass
self._socket.close()
def _resolve_endpoint(self, endpoint, resolve=True):
if resolve:
endpoint = self._context.middleware_resolve_endpoint(endpoint)
if isinstance(endpoint, (tuple, list)):
r = []
for sub_endpoint in endpoint:
r.extend(self._resolve_endpoint(sub_endpoint, resolve))
return r
return [endpoint]
def connect(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.connect(endpoint_))
return r
def bind(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.bind(endpoint_))
return r
def create_event(self, name, args, xheader={}):
event = Event(name, args, context=self._context)
for k, v in xheader.items():
if k == 'zmqid':
continue
event.header[k] = v
return event
def emit_event(self, event, identity=None):
if identity is not None:
parts = list(identity)
parts.extend(['', event.pack()])
elif self._zmq_socket_type in (zmq.XREQ, zmq.XREP):
parts = ('', event.pack())
else:
parts = (event.pack(),)
self._send(parts)
def emit(self, name, args, xheader={}):
event = self.create_event(name, args, xheader)
identity = xheader.get('zmqid', None)
return self.emit_event(event, identity)
def recv(self):
parts = self._recv()
if len(parts) == 1:
identity = None
blob = parts[0]
else:
identity = parts[0:-2]
blob = parts[-1]
event = Event.unpack(blob)
if identity is not None:
event.header['zmqid'] = identity
return event
def setsockopt(self, *args):
return self._socket.setsockopt(*args)
@property
def context(self):
return self._context
class WrappedEvents(object):
def __init__(self, channel):
self._channel = channel
def close(self):
pass
@property
def recv_is_available(self):
return self._channel.recv_is_available
def create_event(self, name, args, xheader={}):
event = Event(name, args, self._channel.context)
event.header.update(xheader)
return event
def emit_event(self, event, identity=None):
event_payload = (event.header, event.name, event.args)
wrapper_event = self._channel.create_event('w', event_payload)
self._channel.emit_event(wrapper_event)
def emit(self, name, args, xheader={}):
wrapper_event = self.create_event(name, args, xheader)
self.emit_event(wrapper_event)
def recv(self, timeout=None):
wrapper_event = self._channel.recv()
(header, name, args) = wrapper_event.args
return Event(name, args, None, header)
@property
def context(self):
return self._channel.context
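# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of wiring two Events instances together over a PUSH/PULL
# pair; the endpoint below is made up for the example.
if __name__ == '__main__':
    endpoint = 'ipc:///tmp/zerorpc-events-demo'  # hypothetical endpoint
    puller = Events(zmq.PULL)
    puller.bind(endpoint)
    pusher = Events(zmq.PUSH)
    pusher.connect(endpoint)
    pusher.emit('ping', ('hello',))  # serialized with msgpack via Event.pack()
    event = puller.recv()            # served by the Receiver greenlet
    print(event.name)                # 'ping'
    print(event.args)                # roughly ('hello',), depending on msgpack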
| mit |
klahnakoski/TestLog-ETL | vendor/jx_base/expressions/false_op.py | 4 | 1756 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
"""
# NOTE:
THE self.lang[operator] PATTERN IS CASTING NEW OPERATORS TO OWN LANGUAGE;
KEEPING Python AS Python, ES FILTERS AS ES FILTERS, AND Painless AS
Painless. WE COULD COPY partial_eval(), AND OTHERS, TO THEIR RESPECTIVE
LANGUAGE, BUT WE KEEP CODE HERE SO THERE IS LESS OF IT
"""
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import literal, expression
from jx_base.expressions.literal import Literal
from mo_json import BOOLEAN
TRUE = None  # replaced by the TrueOp singleton elsewhere (avoids a circular import)
class FalseOp(Literal):
data_type = BOOLEAN
def __new__(cls, *args, **kwargs):
return object.__new__(cls, *args, **kwargs)
def __init__(self, op=None, term=None):
Literal.__init__(self, False)
@classmethod
def define(cls, expr):
return FALSE
def __nonzero__(self):
return False
def __eq__(self, other):
return (other is FALSE) or (other is False)
def __data__(self):
return False
def vars(self):
return set()
def map(self, map_):
return self
def missing(self):
return FALSE
def is_true(self):
return FALSE
def is_false(self):
return TRUE
def __call__(self, row=None, rownum=None, rows=None):
return False
def __unicode__(self):
return "false"
def __str__(self):
return b"false"
def __bool__(self):
return False
FALSE = FalseOp()
expression.FALSE = FALSE
literal.FALSE = FALSE
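# --- Illustrative check (not part of the original file) ---
# A small sketch of the singleton semantics defined above.
if __name__ == "__main__":
    assert not FALSE                  # __bool__ / __nonzero__ -> False
    assert FALSE == False             # __eq__ also accepts the bare literal
    assert FALSE(row={}) is False     # calling a literal evaluates it
    assert FALSE.missing() is FALSE   # a false literal is never missing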
| mpl-2.0 |
reider-roque/crypto-challenges | cryptopals/set-1-basics/chal-4/single_byte_xor_cipher.py | 2 | 5192 | from binascii import unhexlify
from pprint import pprint
from xor import xor
def get_possible_keys(ciphertext):
key_len = len(ciphertext) // 2
# Generate list of potential keys
possible_keys = []
for i in range(256):
hex_num = hex(i).split('x')[1].zfill(2)
possible_keys.append(hex_num * key_len)
return possible_keys
def get_possible_plaintexts(ciphertext, keys):
plaintexts = []
for key in keys:
plaintext = unhexlify(xor(ciphertext, key))
plaintexts.append(plaintext)
return plaintexts
def printable_characters_rank(plaintexts):
"""Return a list of (plaintext, rank) tuples where the higher the rank,
the less non-printable characters are in the plaintext
"""
ranked_plaintexts = []
for plaintext in plaintexts:
printable_chars_count = 0
for byte in plaintext:
# Chars between 0x20 and 0x7E are printable ASCII chars
if byte >= 32 and byte <= 126:
printable_chars_count +=1
rank = printable_chars_count / len(plaintext)
ranked_plaintexts.append((plaintext, rank))
ranked_plaintexts.sort(key=lambda tup: tup[1])
return ranked_plaintexts
def is_letter(char_code):
"""Return True if char_code is a letter character code from the ASCII
table. Otherwise return False.
"""
if isinstance(char_code, str) or isinstance(char_code, bytes):
char_code = ord(char_code)
if char_code >= 65 and char_code <= 90: # uppercase letters
return True
if char_code >= 97 and char_code <= 122: # lowercase letters
return True
return False
def mostly_letters_rank(plaintexts, additional_allowed_chars=[" "]):
"""Return a list of (plaintext, rank) tuples where the higher the rank,
the less non-letter characters are in the plaintext
"""
ranked_plaintexts = []
for plaintext in plaintexts:
letter_count = 0
for byte in plaintext:
if is_letter(byte):
letter_count +=1
elif chr(byte) in additional_allowed_chars:
letter_count += 1
rank = letter_count / len(plaintext)
ranked_plaintexts.append((plaintext, rank))
ranked_plaintexts.sort(key=lambda tup: tup[1])
return ranked_plaintexts
def letter_frequency_rank(plaintexts, no_letters_rank=1000000):
# English letter frequency table comes from here - https://www.math.cornell.edu/~mec/2003-2004/cryptography/subs/frequencies.html
default_letter_frequencies = {
'e': 0.1202, 't': 0.0910, 'a': 0.0812, 'o': 0.0768, 'i': 0.0731,
'n': 0.0695, 's': 0.0628, 'r': 0.0602, 'h': 0.0592, 'd': 0.0432,
'l': 0.0398, 'u': 0.0288, 'c': 0.0271, 'm': 0.0261, 'f': 0.0230,
'y': 0.0211, 'w': 0.0209, 'g': 0.0203, 'p': 0.0182, 'b': 0.0149,
'v': 0.0111, 'k': 0.0069, 'x': 0.0017, 'q': 0.0011, 'j': 0.0010,
'z': 0.0007
}
ranked_plaintexts = []
for plaintext in plaintexts:
plaintext_letter_frequencies = {}
for byte in plaintext:
if not is_letter(byte):
continue # skip further processing for this byte
char = chr(byte).lower()
if char in plaintext_letter_frequencies:
plaintext_letter_frequencies[char] += 1
else:
plaintext_letter_frequencies[char] = 1
# If there were no letters in the plaintext, then skip further processing
if not plaintext_letter_frequencies:
continue
# Replace absolute occurence with relative distribution
for key in plaintext_letter_frequencies:
plaintext_letter_frequencies[key] = plaintext_letter_frequencies[key] / len(plaintext_letter_frequencies)
rank = 0
for letter in default_letter_frequencies:
default_letter_frequency = default_letter_frequencies[letter]
if letter in plaintext_letter_frequencies:
plaintext_letter_frequency = plaintext_letter_frequencies[letter]
else:
plaintext_letter_frequency = 0
rank += abs(default_letter_frequency - plaintext_letter_frequency)
# print("DEBUG: letter = {}".format(letter))
# print("DEBUG: default_letter_frequency = {}".format(default_letter_frequency))
# print("DEBUG: plaintext_letter_frequency = {}".format(plaintext_letter_frequency))
# print("DEBUG: rank = {}".format(rank))
ranked_plaintexts.append((plaintext, rank))
# The closer the rank to number 1, the better
ranked_plaintexts.sort(key=lambda tup: abs(1 - tup[1]), reverse=True)
return ranked_plaintexts
if __name__ == "__main__":
ciphertext = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
keys = get_possible_keys(ciphertext)
plaintexts = get_possible_plaintexts(ciphertext, keys)
# printable_characters_ranked_plaintexts = mostly_letters_rank(plaintexts)
# pprint(printable_characters_ranked_plaintexts)
letter_frequency_ranked_plaintexts = letter_frequency_rank(plaintexts)
pprint(letter_frequency_ranked_plaintexts)
| mit |
evernym/plenum | plenum/test/node_catchup/test_remove_request_keys_post_catchup.py | 2 | 2743 | import pytest
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.messages.node_messages import CatchupRep
from plenum.test.delayers import delay_3pc_messages, pDelay, cDelay, ppDelay, \
cr_delay
from plenum.test.helper import send_reqs_batches_and_get_suff_replies, \
check_last_ordered_3pc, sdk_json_couples_to_request_list, assertExp
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.test_node import getNonPrimaryReplicas, ensureElectionsDone
from plenum.test.view_change.helper import ensure_view_change
from stp_core.loop.eventually import eventually
@pytest.fixture(scope='module', params=['some', 'all'])
def setup(request, looper, txnPoolNodeSet):
slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[1].node
fast_nodes = [n for n in txnPoolNodeSet if n != slow_node]
# Delay catchup reply so that the test gets time to make the check,
# this delay is reset after the check
slow_node.nodeIbStasher.delay(cr_delay(100))
slow_node.nodeIbStasher.delay(pDelay(100, 0))
slow_node.nodeIbStasher.delay(cDelay(100, 0))
if request.param == 'all':
slow_node.nodeIbStasher.delay(ppDelay(100, 0))
return slow_node, fast_nodes
def test_nodes_removes_request_keys_for_ordered(setup, looper, txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
"""
A node does not order requests since it is missing some 3PC messages,
gets them from catchup. It then clears them from its request queues
"""
slow_node, fast_nodes = setup
reqs = sdk_json_couples_to_request_list(
send_reqs_batches_and_get_suff_replies(
looper, txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
10,
5))
ensure_all_nodes_have_same_data(looper, fast_nodes)
assert slow_node.master_replica.last_ordered_3pc != \
fast_nodes[0].master_replica.last_ordered_3pc
def chk(key, nodes, present):
for node in nodes:
assert (key in node.master_replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID]) == present
for req in reqs:
chk(req.digest, fast_nodes, False)
chk(req.digest, [slow_node], True)
# Reset catchup reply delay so that catchup can complete
slow_node.nodeIbStasher.reset_delays_and_process_delayeds(CatchupRep.typename)
slow_node.start_catchup()
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
for req in reqs:
chk(req.digest, txnPoolNodeSet, False)
# Needed for the next run due to the parametrised fixture
slow_node.reset_delays_and_process_delayeds()
| apache-2.0 |
Peddle/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/authn.py | 31 | 8142 | import logging
from urllib import urlencode
from urlparse import parse_qs
from urlparse import urlsplit
import time
import ldap
from saml2 import SAMLError
from saml2.aes import AESCipher
from saml2.httputil import Response
from saml2.httputil import make_cookie
from saml2.httputil import Redirect
from saml2.httputil import Unauthorized
from saml2.httputil import parse_cookie
__author__ = 'rolandh'
logger = logging.getLogger(__name__)
class AuthnFailure(SAMLError):
pass
class EncodeError(SAMLError):
pass
class UserAuthnMethod(object):
def __init__(self, srv):
self.srv = srv
def __call__(self, *args, **kwargs):
raise NotImplementedError()
def authenticated_as(self, **kwargs):
raise NotImplementedError()
def verify(self, **kwargs):
raise NotImplementedError()
def is_equal(a, b):
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def url_encode_params(params=None):
if not isinstance(params, dict):
raise EncodeError("You must pass in a dictionary!")
params_list = []
for k, v in params.items():
if isinstance(v, list):
params_list.extend([(k, x) for x in v])
else:
params_list.append((k, v))
return urlencode(params_list)
def create_return_url(base, query, **kwargs):
"""
Add a query string plus extra parameters to a base URL which may contain
a query part already.
:param base: redirect_uri may contain a query part, no fragment allowed.
:param query: Old query part as a string
:param kwargs: extra query parameters
:return:
"""
part = urlsplit(base)
if part.fragment:
raise ValueError("Base URL contained parts it shouldn't")
for key, values in parse_qs(query).items():
if key in kwargs:
if isinstance(kwargs[key], basestring):
kwargs[key] = [kwargs[key]]
kwargs[key].extend(values)
else:
kwargs[key] = values
if part.query:
for key, values in parse_qs(part.query).items():
if key in kwargs:
if isinstance(kwargs[key], basestring):
kwargs[key] = [kwargs[key]]
kwargs[key].extend(values)
else:
kwargs[key] = values
_pre = base.split("?")[0]
else:
_pre = base
logger.debug("kwargs: %s" % kwargs)
return "%s?%s" % (_pre, url_encode_params(kwargs))
class UsernamePasswordMako(UserAuthnMethod):
"""Do user authentication using the normal username password form
using Mako as template system"""
cookie_name = "userpassmako"
def __init__(self, srv, mako_template, template_lookup, pwd, return_to):
"""
:param srv: The server instance
:param mako_template: Which Mako template to use
:param pwd: Username/password dictionary like database
:param return_to: Where to send the user after authentication
:return:
"""
UserAuthnMethod.__init__(self, srv)
self.mako_template = mako_template
self.template_lookup = template_lookup
self.passwd = pwd
self.return_to = return_to
self.active = {}
self.query_param = "upm_answer"
self.aes = AESCipher(self.srv.symkey, srv.iv)
def __call__(self, cookie=None, policy_url=None, logo_url=None,
query="", **kwargs):
"""
Put up the login form
"""
if cookie:
headers = [cookie]
else:
headers = []
resp = Response(headers=headers)
argv = {"login": "",
"password": "",
"action": "verify",
"policy_url": policy_url,
"logo_url": logo_url,
"query": query}
logger.info("do_authentication argv: %s" % argv)
mte = self.template_lookup.get_template(self.mako_template)
resp.message = mte.render(**argv)
return resp
def _verify(self, pwd, user):
assert is_equal(pwd, self.passwd[user])
def verify(self, request, **kwargs):
"""
Verifies that the given username and password was correct
:param request: Either the query part of a URL a urlencoded
body of a HTTP message or a parse such.
:param kwargs: Catch whatever else is sent.
:return: redirect back to where ever the base applications
wants the user after authentication.
"""
#logger.debug("verify(%s)" % request)
if isinstance(request, basestring):
_dict = parse_qs(request)
elif isinstance(request, dict):
_dict = request
else:
raise ValueError("Wrong type of input")
# verify username and password
try:
self._verify(_dict["password"][0], _dict["login"][0])
timestamp = str(int(time.mktime(time.gmtime())))
info = self.aes.encrypt("::".join([_dict["login"][0], timestamp]))
self.active[info] = timestamp
cookie = make_cookie(self.cookie_name, info, self.srv.seed)
return_to = create_return_url(self.return_to, _dict["query"][0],
**{self.query_param: "true"})
resp = Redirect(return_to, headers=[cookie])
except (AssertionError, KeyError):
resp = Unauthorized("Unknown user or wrong password")
return resp
def authenticated_as(self, cookie=None, **kwargs):
if cookie is None:
return None
else:
logger.debug("kwargs: %s" % kwargs)
try:
info, timestamp = parse_cookie(self.cookie_name,
self.srv.seed, cookie)
if self.active[info] == timestamp:
uid, _ts = self.aes.decrypt(info).split("::")
if timestamp == _ts:
return {"uid": uid}
except Exception:
pass
return None
def done(self, areq):
try:
_ = areq[self.query_param]
return False
except KeyError:
return True
class SocialService(UserAuthnMethod):
def __init__(self, social):
UserAuthnMethod.__init__(self, None)
self.social = social
def __call__(self, server_env, cookie=None, sid="", query="", **kwargs):
return self.social.begin(server_env, cookie, sid, query)
def callback(self, server_env, cookie=None, sid="", query="", **kwargs):
return self.social.callback(server_env, cookie, sid, query, **kwargs)
class AuthnMethodChooser(object):
def __init__(self, methods=None):
self.methods = methods
def __call__(self, **kwargs):
if not self.methods:
raise SAMLError("No authentication methods defined")
elif len(self.methods) == 1:
return self.methods[0]
else:
pass # TODO
class LDAPAuthn(UsernamePasswordMako):
def __init__(self, srv, ldapsrv, return_to,
dn_pattern, mako_template, template_lookup):
"""
:param srv: The server instance
:param ldapsrv: Which LDAP server to us
:param return_to: Where to send the user after authentication
:return:
"""
UsernamePasswordMako.__init__(self, srv, mako_template, template_lookup,
None, return_to)
self.ldap = ldap.initialize(ldapsrv)
self.ldap.protocol_version = 3
self.ldap.set_option(ldap.OPT_REFERRALS, 0)
self.dn_pattern = dn_pattern
def _verify(self, pwd, user):
"""
Verifies the username and password agains a LDAP server
:param pwd: The password
:param user: The username
:return: AssertionError if the LDAP verification failed.
"""
_dn = self.dn_pattern % user
try:
self.ldap.simple_bind_s(_dn, pwd)
except Exception:
raise AssertionError()
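# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of create_return_url(): it merges an existing query
# string and extra keyword parameters onto a base URL. The URL below is
# made up for the example.
if __name__ == '__main__':
    url = create_return_url('https://sp.example.org/acs?client=web',
                            'sid=abc123', upm_answer='true')
    print(url)  # e.g. https://sp.example.org/acs?client=web&sid=abc123&upm_answer=true
    # (parameter order may vary: the values pass through a dict)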
| apache-2.0 |
johnruddell/Simple-Survey | node_modules/duo-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
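# Illustrative sketch (not in the original module): resolving the environment
# setup command for a 64-bit build. The install path below is an assumed
# example, not a guaranteed location.
#
#   vs2013 = VisualStudioVersion(
#       '2013', 'Visual Studio 2013',
#       solution_version='13.00', project_version='12.0',
#       flat_sln=False, uses_vcxproj=True,
#       path=r'C:\Program Files (x86)\Microsoft Visual Studio 12.0',
#       sdk_based=False, default_toolset='v120')
#   vs2013.SetupScript('x64')
#   # -> [r'...\VC\vcvarsall.bat', 'amd64'] on a 64-bit host, 'x86_amd64' otherwise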
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to remain
Python-neutral; for instance, cygwin Python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail when using 64-bit Python, because it is a
virtual directory; in that case System32 works correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
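# Example (hypothetical key and output, for illustration only; the function
# returns None when the key is missing or when not running on Windows):
#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0', 'InstallDir')
#   # -> u'C:\\...\\Common7\\IDE\\' on a machine with VS 2013 installed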
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based on the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
passed in that doesn't match a key in the versions dict, Python will raise a
KeyError.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
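# Usage sketch (environment dependent; the results shown are illustrative):
#   version = SelectVisualStudioVersion('auto')
#   version.ShortName() # e.g. '2013', or '2005' as the fallback default
#   version.ProjectExtension() # '.vcxproj' for 2010 and later, else '.vcproj'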
| mit |
reingart/vb2py | vb2py/test/testimports.py | 3 | 1038 | import vb2py.vbparser
import unittest
class TestImports(unittest.TestCase):
# << Imports tests >>
def testImportClassToModule(self):
"""Import from class to module"""
self.proj = vb2py.vbparser.VBProject()
self.utils = vb2py.vbparser.VBCodeModule(modulename="utils")
self.cls = vb2py.vbparser.VBClassModule(modulename="Cls", classname="Cls")
#
self.utils.assignParent(self.proj)
self.cls.assignParent(self.proj)
#
utils = vb2py.vbparser.parseVB("""
Public Function Fact(x)
Dim c As New Cls
End Function
""", container=self.utils)
#
cls = vb2py.vbparser.parseVB("""
Public A
""", container=self.cls)
#
utils_code = self.utils.renderAsCode()
self.assertNotEqual(utils_code.find("import Cls"), -1)
# -- end -- << Imports tests >>
import vb2py.vbparser
vb2py.vbparser.log.setLevel(0) # Don't print all logging stuff
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
shakamunyi/tensorflow | tensorflow/contrib/ffmpeg/ffmpeg_ops.py | 22 | 3729 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Encoding and decoding audio using FFmpeg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.ffmpeg.ops import gen_decode_audio_op_py
from tensorflow.contrib.ffmpeg.ops import gen_encode_audio_op_py
from tensorflow.contrib.util import loader
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
_ffmpeg_so = loader.load_op_library(
resource_loader.get_path_to_datafile('ffmpeg.so'))
def decode_audio(contents, file_format=None, samples_per_second=None,
channel_count=None):
"""Create an op that decodes the contents of an audio file.
Note that ffmpeg is free to select the "best" audio track from an mp4.
https://trac.ffmpeg.org/wiki/Map
Args:
contents: The binary contents of the audio file to decode. This is a
scalar.
file_format: A string or scalar string tensor specifying which
format the contents will conform to. This can be mp3, mp4, ogg,
or wav.
samples_per_second: The number of samples per second that is
assumed, as an `int` or scalar `int32` tensor. In some cases,
resampling will occur to generate the correct sample rate.
channel_count: The number of channels that should be created from the
audio contents, as an `int` or scalar `int32` tensor. If the
`contents` have more than this number, then some channels will
be merged or dropped. If `contents` has fewer than this, then
additional channels will be created from the existing ones.
Returns:
A rank-2 tensor that has time along dimension 0 and channels along
dimension 1. Dimension 0 will be `samples_per_second *
length_in_seconds` wide, and dimension 1 will be `channel_count`
wide. If ffmpeg fails to decode the audio then an empty tensor will
be returned.
"""
return gen_decode_audio_op_py.decode_audio_v2(
contents, file_format=file_format, samples_per_second=samples_per_second,
channel_count=channel_count)
ops.NotDifferentiable('DecodeAudio')
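# Minimal usage sketch (the file path is an assumed example; in TF 1.x the
# resulting tensor still needs to be evaluated in a session):
#
#   import tensorflow as tf
#   from tensorflow.contrib import ffmpeg
#   binary = tf.read_file('/tmp/example.wav')
#   waveform = ffmpeg.decode_audio(binary, file_format='wav',
#                                  samples_per_second=44100, channel_count=2)
#   # waveform has shape [44100 * length_in_seconds, 2] once evaluated.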
def encode_audio(audio, file_format=None, samples_per_second=None):
"""Creates an op that encodes an audio file using sampled audio from a tensor.
Args:
audio: A rank-2 `Tensor` that has time along dimension 0 and
channels along dimension 1. Dimension 0 is `samples_per_second *
length_in_seconds` long.
file_format: The type of file to encode, as a string or rank-0
string tensor. "wav" is the only supported format.
samples_per_second: The number of samples in the audio tensor per
second of audio, as an `int` or rank-0 `int32` tensor.
Returns:
A scalar tensor that contains the encoded audio in the specified file
format.
"""
return gen_encode_audio_op_py.encode_audio_v2(
audio,
file_format=file_format,
samples_per_second=samples_per_second,
bits_per_second=192000) # not used by WAV
ops.NotDifferentiable('EncodeAudio')
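# Round-trip sketch continuing the decode_audio example above ('wav' is the
# only supported output format; the output path is an assumed example):
#
#   encoded = ffmpeg.encode_audio(waveform, file_format='wav',
#                                 samples_per_second=44100)
#   write_op = tf.write_file('/tmp/copy.wav', encoded)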
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/newrelic-2.46.0.37/newrelic/hooks/framework_twisted.py | 4 | 20180 | import logging
import sys
import weakref
import UserList
import newrelic.api.application
import newrelic.api.object_wrapper
import newrelic.api.transaction
import newrelic.api.web_transaction
import newrelic.api.function_trace
import newrelic.api.error_trace
_logger = logging.getLogger(__name__)
class RequestProcessWrapper(object):
def __init__(self, wrapped):
if isinstance(wrapped, tuple):
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self):
assert self._nr_instance != None
transaction = newrelic.api.transaction.current_transaction()
# Check to see if we are being called within the context of any
# sort of transaction. If we are, then we don't bother doing
# anything and just call the wrapped function. This should not
# really ever occur with the Twisted.Web wrapper, but check anyway.
if transaction:
return self._nr_next_object()
# Always use the default application specified in the agent
# configuration.
application = newrelic.api.application.application_instance()
# We need to fake up a WSGI like environ dictionary with the key
# bits of information we need.
environ = {}
environ['REQUEST_URI'] = self._nr_instance.path
# Now start recording the actual web transaction.
transaction = newrelic.api.web_transaction.WebTransaction(
application, environ)
if not transaction.enabled:
return self._nr_next_object()
transaction.__enter__()
self._nr_instance._nr_transaction = transaction
self._nr_instance._nr_is_deferred_callback = False
self._nr_instance._nr_is_request_finished = False
self._nr_instance._nr_wait_function_trace = None
# We need to add a reference to the Twisted.Web request object
# in the transaction, as we are only able to stash the transaction in a
# deferred. We need to use a weakref to avoid an object cycle which
# may prevent cleanup of the transaction.
transaction._nr_current_request = weakref.ref(self._nr_instance)
try:
# Call the original method in a trace object to give better
# context in transaction traces. Three things can happen
# within this call. The render() function which is in turn
# called can return a result immediately which means user
# code should have called finish() on the request, it can
# raise an exception which is caught in process() function
# where error handling calls finish(), or it can return that
# it is not done yet and register deferred callbacks to
# complete the request.
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Process', group='Python/Twisted'):
result = self._nr_next_object()
# In the case of a result having been returned or an
# exception occurring, finish() will have been called.
# We can't just exit the transaction in the finish call
# however as need to still pop back up through the above
# function trace. So if flagged that have finished, then we
# exit the transaction here. Otherwise we setup a function
# trace to track wait time for deferred and manually pop the
# transaction as being the current one for this thread.
if self._nr_instance._nr_is_request_finished:
transaction.__exit__(None, None, None)
self._nr_instance._nr_transaction = None
self._nr_instance = None
else:
self._nr_instance._nr_wait_function_trace = \
newrelic.api.function_trace.FunctionTrace(
transaction, name='Deferred/Wait',
group='Python/Twisted')
self._nr_instance._nr_wait_function_trace.__enter__()
transaction.drop_transaction()
except: # Catch all
# If an error occurs, assume that the transaction should be
# exited. Technically we don't believe this should ever occur
# unless our code here has an error or Twisted.Web is
# broken.
_logger.exception('Unexpected exception raised by Twisted.Web '
'Request.process().')
transaction.__exit__(*sys.exc_info())
self._nr_instance._nr_transaction = None
self._nr_instance = None
raise
return result
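# Descriptor rebinding sketch (the generic pattern, mirroring the
# instrument_twisted_web_server() hook at the bottom of this module):
# assigning the wrapper over the unbound method lets __get__ above capture
# the instance, so __call__ can reach the request via self._nr_instance.
#
#   import twisted.web.server
#   twisted.web.server.Request.process = RequestProcessWrapper(
#       twisted.web.server.Request.process)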
class RequestFinishWrapper(object):
def __init__(self, wrapped):
if isinstance(wrapped, tuple):
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self):
assert self._nr_instance != None
# Call finish() method straight away if request is not even
# associated with a transaction.
if not hasattr(self._nr_instance, '_nr_transaction'):
return self._nr_next_object()
# Technically we should only be able to be called here without
# an active transaction if we are in the wait state. If we
# are called in context of original request process() function
# or a deferred the transaction should already be registered.
transaction = self._nr_instance._nr_transaction
if self._nr_instance._nr_wait_function_trace:
if newrelic.api.transaction.current_transaction():
_logger.debug('The Twisted.Web request finish() method is '
'being called while in wait state but there is '
'already a current transaction.')
else:
transaction.save_transaction()
elif not newrelic.api.transaction.current_transaction():
_logger.debug('The Twisted.Web request finish() method is '
'being called from request process() method or a '
'deferred but there is not a current transaction.')
# Except for the case of being called when in the wait state, we
# can't actually exit the transaction at this point, as we may be
# called in the context of an outer function trace node. We thus flag
# that we are finished and pop back out, allowing the outer scope to
# actually exit the transaction.
self._nr_instance._nr_is_request_finished = True
# Now call the original finish() function.
if self._nr_instance._nr_is_deferred_callback:
# If we are in a deferred callback, log any error against the
# transaction here so we know we will capture it. We
# possibly don't need to do it here as the outer scope may catch
# it anyway. A duplicate will be ignored, so it is not too important.
# Most likely the finish() call would never fail anyway.
try:
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Finish', group='Python/Twisted'):
result = self._nr_next_object()
except: # Catch all
transaction.record_exception(*sys.exc_info())
raise
elif self._nr_instance._nr_wait_function_trace:
# Now handle the special case where finish() was called
# while in the wait state. We might get here through
# Twisted.Web itself somehow calling finish() when still
# waiting for a deferred. If this were to occur, though, then
# the transaction will not be popped if we simply mark the
# request as finished, as there is no outer scope to see that and
# clean up. We will thus need to end the function trace and
# exit the transaction. We end function trace here and then
# the transaction down below.
try:
self._nr_instance._nr_wait_function_trace.__exit__(
None, None, None)
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Finish', group='Python/Twisted'):
result = self._nr_next_object()
transaction.__exit__(None, None, None)
except: # Catch all
transaction.__exit__(*sys.exc_info())
raise
finally:
self._nr_instance._nr_wait_function_trace = None
self._nr_instance._nr_transaction = None
self._nr_instance = None
else:
# This should be the case where finish() is being called in
# the original render() function.
with newrelic.api.function_trace.FunctionTrace(transaction,
name='Request/Finish', group='Python/Twisted'):
result = self._nr_next_object()
return result
class ResourceRenderWrapper(object):
def __init__(self, wrapped):
if isinstance(wrapped, tuple):
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args):
# Temporary workaround due to a customer calling the class method
# directly with 'self' as the first argument. Need to work out the
# best practice for dealing with this.
if len(args) == 2:
# Assume called as unbound method with (self, request).
instance, request = args
else:
# Assume called as bound method with (request).
instance = self._nr_instance
request = args[-1]
assert instance != None
transaction = newrelic.api.transaction.current_transaction()
if transaction is None:
return self._nr_next_object(*args)
# This is wrapping the render() function of the resource. We
# name the function node and the web transaction after the name
# of the handler function augmented with the method type for the
# request.
name = "%s.render_%s" % (
newrelic.api.object_wrapper.callable_name(
instance), request.method)
transaction.set_transaction_name(name, priority=1)
with newrelic.api.function_trace.FunctionTrace(transaction, name):
return self._nr_next_object(*args)
class DeferredUserList(UserList.UserList):
def pop(self, i=-1):
import twisted.internet.defer
item = super(DeferredUserList, self).pop(i)
item0 = item[0]
item1 = item[1]
if item0[0] != twisted.internet.defer._CONTINUE:
item0 = (newrelic.api.function_trace.FunctionTraceWrapper(
item0[0], group='Python/Twisted/Callback'),
item0[1], item0[2])
if item1[0] != twisted.internet.defer._CONTINUE:
item1 = (newrelic.api.function_trace.FunctionTraceWrapper(
item1[0], group='Python/Twisted/Errback'),
item1[1], item1[2])
return (item0, item1)
class DeferredWrapper(object):
def __init__(self, wrapped):
if isinstance(wrapped, tuple):
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args, **kwargs):
# This is wrapping the __init__() function so call that first.
self._nr_next_object(*args, **kwargs)
# We now wrap the list of deferred callbacks so can track when
# each callback is actually called.
if self._nr_instance:
transaction = newrelic.api.transaction.current_transaction()
if transaction:
self._nr_instance._nr_transaction = transaction
self._nr_instance.callbacks = DeferredUserList(
self._nr_instance.callbacks)
class DeferredCallbacksWrapper(object):
def __init__(self, wrapped):
if isinstance(wrapped, tuple):
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self):
assert self._nr_instance != None
transaction = newrelic.api.transaction.current_transaction()
# If there is an active transaction then the deferred is being
# called within the context of another deferred, so simply call the
# callback and return.
if transaction:
return self._nr_next_object()
# If there is no transaction recorded against the deferred then
# don't need to do anything and can simply call the callback and
# return.
if not hasattr(self._nr_instance, '_nr_transaction'):
return self._nr_next_object()
transaction = self._nr_instance._nr_transaction
# If we can't find a Twisted.Web request object associated with
# the transaction or it is no longer valid then simply call the
# callback and return.
if not hasattr(transaction, '_nr_current_request'):
return self._nr_next_object()
request = transaction._nr_current_request()
if not request:
return self._nr_next_object()
try:
# Save the transaction recorded against the deferred as the
# active transaction.
transaction.save_transaction()
# Record that we are calling a deferred. This changes what we
# do if the request finish() method is being called.
request._nr_is_deferred_callback = True
# We should always be calling into a deferred when we are
# in the wait state for the request. We need to exit that
# wait state.
if request._nr_wait_function_trace:
request._nr_wait_function_trace.__exit__(None, None, None)
request._nr_wait_function_trace = None
else:
_logger.debug('Called a Twisted.Web deferred when we were '
'not in a wait state.')
# Call the deferred and capture any errors that may come
# back from it.
with newrelic.api.error_trace.ErrorTrace(transaction):
with newrelic.api.function_trace.FunctionTrace(
transaction, name='Deferred/Call',
group='Python/Twisted'):
return self._nr_next_object()
finally:
# If the request finish() method was called from the
# deferred then we need to exit the transaction. Otherwise
# we need to create a new function trace node for a new wait
# state and pop the transaction.
if request._nr_is_request_finished:
transaction.__exit__(None, None, None)
self._nr_instance._nr_transaction = None
else:
# XXX Should we be removing the transaction from the
# deferred object as well? Can the same deferred be
# called multiple times for the same request? It probably
# can be re-registered.
request._nr_wait_function_trace = \
newrelic.api.function_trace.FunctionTrace(
transaction, name='Deferred/Wait',
group='Python/Twisted')
request._nr_wait_function_trace.__enter__()
transaction.drop_transaction()
request._nr_is_deferred_callback = False
class InlineGeneratorWrapper(object):
def __init__(self, wrapped, generator):
self._nr_wrapped = wrapped
self._nr_generator = generator
def __iter__(self):
name = newrelic.api.object_wrapper.callable_name(self._nr_wrapped)
iterable = iter(self._nr_generator)
while 1:
transaction = newrelic.api.transaction.current_transaction()
with newrelic.api.function_trace.FunctionTrace(
transaction, name, group='Python/Twisted/Generator'):
yield next(iterable)
class InlineCallbacksWrapper(object):
def __init__(self, wrapped):
if isinstance(wrapped, tuple):
(instance, wrapped) = wrapped
else:
instance = None
newrelic.api.object_wrapper.update_wrapper(self, wrapped)
self._nr_instance = instance
self._nr_next_object = wrapped
if not hasattr(self, '_nr_last_object'):
self._nr_last_object = wrapped
def __get__(self, instance, klass):
if instance is None:
return self
descriptor = self._nr_next_object.__get__(instance, klass)
return self.__class__((instance, descriptor))
def __call__(self, *args, **kwargs):
transaction = newrelic.api.transaction.current_transaction()
if not transaction:
return self._nr_next_object(*args, **kwargs)
result = self._nr_next_object(*args, **kwargs)
if not result:
return result
return iter(InlineGeneratorWrapper(self._nr_next_object, result))
def instrument_twisted_web_server(module):
module.Request.process = RequestProcessWrapper(module.Request.process)
def instrument_twisted_web_http(module):
module.Request.finish = RequestFinishWrapper(module.Request.finish)
def instrument_twisted_web_resource(module):
module.Resource.render = ResourceRenderWrapper(module.Resource.render)
def instrument_twisted_internet_defer(module):
module.Deferred.__init__ = DeferredWrapper(module.Deferred.__init__)
module.Deferred._runCallbacks = DeferredCallbacksWrapper(
module.Deferred._runCallbacks)
#_inlineCallbacks = module.inlineCallbacks
#def inlineCallbacks(f):
# return _inlineCallbacks(InlineCallbacksWrapper(f))
#module.inlineCallbacks = inlineCallbacks
| agpl-3.0 |
YanTangZhai/tf | tensorflow/python/kernel_tests/summary_ops_test.py | 8 | 3802 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import tensorflow as tf
class SummaryOpsTest(tf.test.TestCase):
def _AsSummary(self, s):
summ = tf.Summary()
summ.ParseFromString(s)
return summ
def testScalarSummary(self):
with self.test_session() as sess:
const = tf.constant([10.0, 20.0])
summ = tf.scalar_summary(["c1", "c2"], const, name="mysumm")
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testScalarSummaryDefaultName(self):
with self.test_session() as sess:
const = tf.constant([10.0, 20.0])
summ = tf.scalar_summary(["c1", "c2"], const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testMergeSummary(self):
with self.test_session() as sess:
const = tf.constant(10.0)
summ1 = tf.histogram_summary("h", const, name="histo")
summ2 = tf.scalar_summary("c", const, name="summ")
merge = tf.merge_summary([summ1, summ2])
value = sess.run(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
value {
tag: "h"
histo {
min: 10.0
max: 10.0
num: 1.0
sum: 10.0
sum_squares: 100.0
bucket_limit: 9.93809490288
bucket_limit: 10.9319043932
bucket_limit: 1.7976931348623157e+308
bucket: 0.0
bucket: 1.0
bucket: 0.0
}
}
value { tag: "c" simple_value: 10.0 }
""", self._AsSummary(value))
def testMergeAllSummaries(self):
with tf.Graph().as_default():
const = tf.constant(10.0)
summ1 = tf.histogram_summary("h", const, name="histo")
summ2 = tf.scalar_summary("o", const, name="oops",
collections=["foo_key"])
summ3 = tf.scalar_summary("c", const, name="summ")
merge = tf.merge_all_summaries()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
self.assertEqual(summ3, merge.op.inputs[1])
merge = tf.merge_all_summaries("foo_key")
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(1, len(merge.op.inputs))
self.assertEqual(summ2, merge.op.inputs[0])
self.assertTrue(tf.merge_all_summaries("bar_key") is None)
def testHistogramSummaryTypes(self):
with tf.Graph().as_default():
for dtype in (tf.int8, tf.uint8, tf.int16, tf.int32,
tf.float32, tf.float64):
const = tf.constant(10, dtype=dtype)
tf.histogram_summary("h", const, name="histo")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
roncampbell/django-calaccess-raw-data | calaccess_raw/admin/common.py | 29 | 1378 | from __future__ import unicode_literals
from django.contrib import admin
from calaccess_raw import models
from .base import BaseAdmin
@admin.register(models.FilernameCd)
class FilernameCdAdmin(BaseAdmin):
list_display = (
"id",
"xref_filer_id",
"filer_id",
"naml",
"namf",
"filer_type",
"status",
"effect_dt",
)
list_filter = ("filer_type", "status")
search_fields = (
"xref_filer_id",
"filer_id",
"naml",
"namf",
)
date_hierarchy = "effect_dt"
@admin.register(models.FilerFilingsCd)
class FilerFilingsCdAdmin(BaseAdmin):
search_fields = ("filing_id", "filer_id")
list_display = (
"id", "filer_id", "form_id",
"filing_id", "filing_sequence"
)
@admin.register(models.FilingsCd)
class FilingsCdAdmin(BaseAdmin):
pass
@admin.register(models.SmryCd)
class SmryCdAdmin(BaseAdmin):
list_display = (
'filing_id',
'amend_id',
'form_type',
'line_item',
'pretty_amount_a',
'pretty_amount_b',
'pretty_amount_c',
)
list_filter = ('form_type',)
search_fields = ('filing_id', 'form_type', 'line_item')
@admin.register(models.TextMemoCd)
class TextMemoCdAdmin(BaseAdmin):
pass
@admin.register(models.CvrE530Cd)
class CvrE530CdAdmin(BaseAdmin):
pass
| mit |
JulienBalestra/enjoliver | app/tests/inte/test_cockroach.py | 2 | 4019 | """
Manual test suite for CockroachDB integration
./runtime/runtime.rkt run --net=host --insecure-options=all --interactive enjoliver.local/cockroach:latest \
--exec /usr/bin/cockroach -- start --port 26257 --http-port 8081 --insecure
./runtime/runtime.rkt run --net=host --insecure-options=all --interactive enjoliver.local/cockroach:latest \
--exec /usr/bin/cockroach -- start --port 26258 --http-port 8082 --insecure --join=127.0.0.1:26257,127.0.0.1:26259
./runtime/runtime.rkt run --net=host --insecure-options=all --interactive enjoliver.local/cockroach:latest \
--exec /usr/bin/cockroach -- start --port 26259 --http-port 8083 --insecure --join=127.0.0.1:26257,127.0.0.1:26258
./runtime/runtime.rkt run --net=host --insecure-options=all --interactive enjoliver.local/cockroach:latest \
--exec /usr/bin/cockroach -- sql
"""
import os
import sys
import time
import unittest
from multiprocessing import Process
import requests
from app import configs
from app import smartdb
EC = configs.EnjoliverConfig(importer=__file__)
EC.api_uri = "http://127.0.0.1:5000"
EC.db_uri = "cockroachdb://root@localhost:26257,cockroachdb://root@localhost:26258,cockroachdb://root@localhost:26259"
@unittest.skip("Manual Trigger -> TODO")
class TestEnjoliverCockroach(unittest.TestCase):
p_matchbox = Process
p_api = Process
inte_path = "%s" % os.path.dirname(__file__)
dbs_path = "%s/dbs" % inte_path
tests_path = "%s" % os.path.dirname(inte_path)
app_path = os.path.dirname(tests_path)
project_path = os.path.dirname(app_path)
matchbox_path = "%s/matchbox" % project_path
assets_path = "%s/matchbox/assets" % project_path
test_matchbox_path = "%s/test_matchbox" % tests_path
@classmethod
def setUpClass(cls):
cls.smart = smartdb.SmartDatabaseClient(EC.db_uri)
cls.p_matchbox = Process(target=TestEnjoliverCockroach.process_target_matchbox)
cls.p_api = Process(target=TestEnjoliverCockroach.process_target_api)
print("PPID -> %s\n" % os.getpid())
cls.p_matchbox.start()
assert cls.p_matchbox.is_alive() is True
cls.p_api.start()
assert cls.p_api.is_alive() is True
cls.api_running(EC.api_uri, cls.p_api)
cls.smart.create_base()
@classmethod
def tearDownClass(cls):
cls.p_matchbox.terminate()
cls.p_matchbox.join(timeout=5)
cls.p_api.terminate()
cls.p_api.join(timeout=5)
time.sleep(0.2)
@staticmethod
def api_running(api_endpoint, p_api):
response_code = 404
for i in range(10):
assert p_api.is_alive() is True
try:
request = requests.get(api_endpoint)
response_code = request.status_code
request.close()
break
except requests.exceptions.ConnectionError:
pass
time.sleep(0.2)
assert 200 == response_code
@staticmethod
def process_target_matchbox():
os.environ["ENJOLIVER_MATCHBOX_PATH"] = TestEnjoliverCockroach.test_matchbox_path
os.environ["ENJOLIVER_MATCHBOX_ASSETS"] = TestEnjoliverCockroach.assets_path
cmd = [
"%s" % sys.executable,
"%s/manage.py" % TestEnjoliverCockroach.project_path,
"matchbox"
]
print("PID -> %s\n"
"exec -> %s\n" % (
os.getpid(), " ".join(cmd)))
os.execve(cmd[0], cmd, os.environ)
@staticmethod
def process_target_api():
os.environ["ENJOLIVER_DB_URI"] = EC.db_uri
os.environ["ENJOLIVER_API_URI"] = EC.api_uri
os.environ["ENJOLIVER_GUNICORN_WORKERS"] = "3"
os.environ["ENJOLIVER_LOGGING_LEVEL"] = "INFO"
cmd = [
"%s/manage.py" % TestEnjoliverCockroach.project_path,
"gunicorn"
]
os.execve(cmd[0], cmd, os.environ)
def test_00(self):
for i in range(10000):
requests.get("%s/healthz" % EC.api_uri)
| mit |
40223136/w17test1 | static/Brython3.1.1-20150328-091302/Lib/unittest/suite.py | 748 | 9715 | """TestSuite"""
import sys
from . import case
from . import util
__unittest = True
def _call_if_exists(parent, attr):
func = getattr(parent, attr, lambda: None)
func()
class BaseTestSuite(object):
"""A simple test suite that doesn't provide class or module shared fixtures.
"""
def __init__(self, tests=()):
self._tests = []
self.addTests(tests)
def __repr__(self):
return "<%s tests=%s>" % (util.strclass(self.__class__), list(self))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return list(self) == list(other)
def __ne__(self, other):
return not self == other
def __iter__(self):
return iter(self._tests)
def countTestCases(self):
cases = 0
for test in self:
cases += test.countTestCases()
return cases
def addTest(self, test):
# sanity checks
if not callable(test):
raise TypeError("{} is not callable".format(repr(test)))
if isinstance(test, type) and issubclass(test,
(case.TestCase, TestSuite)):
raise TypeError("TestCases and TestSuites must be instantiated "
"before passing them to addTest()")
self._tests.append(test)
def addTests(self, tests):
if isinstance(tests, str):
raise TypeError("tests must be an iterable of tests, not a string")
for test in tests:
self.addTest(test)
def run(self, result):
for test in self:
if result.shouldStop:
break
test(result)
return result
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
for test in self:
test.debug()
class TestSuite(BaseTestSuite):
"""A test suite is a composite test consisting of a number of TestCases.
For use, create an instance of TestSuite, then add test case instances.
When all tests have been added, the suite can be passed to a test
runner, such as TextTestRunner. It will run the individual test cases
in the order in which they were added, aggregating the results. When
subclassing, do not forget to call the base class constructor.
"""
def run(self, result, debug=False):
topLevel = False
if getattr(result, '_testRunEntered', False) is False:
result._testRunEntered = topLevel = True
for test in self:
if result.shouldStop:
break
if _isnotsuite(test):
self._tearDownPreviousClass(test, result)
self._handleModuleFixture(test, result)
self._handleClassSetUp(test, result)
result._previousTestClass = test.__class__
if (getattr(test.__class__, '_classSetupFailed', False) or
getattr(result, '_moduleSetUpFailed', False)):
continue
if not debug:
test(result)
else:
test.debug()
if topLevel:
self._tearDownPreviousClass(None, result)
self._handleModuleTearDown(result)
result._testRunEntered = False
return result
def debug(self):
"""Run the tests without collecting errors in a TestResult"""
debug = _DebugResult()
self.run(debug, True)
################################
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if result._moduleSetUpFailed:
return
if getattr(currentClass, "__unittest_skip__", False):
return
try:
currentClass._classSetupFailed = False
except TypeError:
# test may actually be a function
# so its class will be a builtin-type
pass
setUpClass = getattr(currentClass, 'setUpClass', None)
if setUpClass is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
currentClass._classSetupFailed = True
className = util.strclass(currentClass)
errorName = 'setUpClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _get_previous_module(self, result):
previousModule = None
previousClass = getattr(result, '_previousTestClass', None)
if previousClass is not None:
previousModule = previousClass.__module__
return previousModule
def _handleModuleFixture(self, test, result):
previousModule = self._get_previous_module(result)
currentModule = test.__class__.__module__
if currentModule == previousModule:
return
self._handleModuleTearDown(result)
result._moduleSetUpFailed = False
try:
module = sys.modules[currentModule]
except KeyError:
return
setUpModule = getattr(module, 'setUpModule', None)
if setUpModule is not None:
_call_if_exists(result, '_setupStdout')
try:
setUpModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
result._moduleSetUpFailed = True
errorName = 'setUpModule (%s)' % currentModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _addClassOrModuleLevelException(self, result, exception, errorName):
error = _ErrorHolder(errorName)
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None and isinstance(exception, case.SkipTest):
addSkip(error, str(exception))
else:
result.addError(error, sys.exc_info())
def _handleModuleTearDown(self, result):
previousModule = self._get_previous_module(result)
if previousModule is None:
return
if result._moduleSetUpFailed:
return
try:
module = sys.modules[previousModule]
except KeyError:
return
tearDownModule = getattr(module, 'tearDownModule', None)
if tearDownModule is not None:
_call_if_exists(result, '_setupStdout')
try:
tearDownModule()
except Exception as e:
if isinstance(result, _DebugResult):
raise
errorName = 'tearDownModule (%s)' % previousModule
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
def _tearDownPreviousClass(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if getattr(previousClass, '_classSetupFailed', False):
return
if getattr(result, '_moduleSetUpFailed', False):
return
if getattr(previousClass, "__unittest_skip__", False):
return
tearDownClass = getattr(previousClass, 'tearDownClass', None)
if tearDownClass is not None:
_call_if_exists(result, '_setupStdout')
try:
tearDownClass()
except Exception as e:
if isinstance(result, _DebugResult):
raise
className = util.strclass(previousClass)
errorName = 'tearDownClass (%s)' % className
self._addClassOrModuleLevelException(result, e, errorName)
finally:
_call_if_exists(result, '_restoreStdout')
class _ErrorHolder(object):
"""
Placeholder for a TestCase inside a result. As far as a TestResult
is concerned, this looks exactly like a unit test. Used to insert
arbitrary errors into a test suite run.
"""
# Inspired by the ErrorHolder from Twisted:
# http://twistedmatrix.com/trac/browser/trunk/twisted/trial/runner.py
# attribute used by TestResult._exc_info_to_string
failureException = None
def __init__(self, description):
self.description = description
def id(self):
return self.description
def shortDescription(self):
return None
def __repr__(self):
return "<ErrorHolder description=%r>" % (self.description,)
def __str__(self):
return self.id()
def run(self, result):
# could call result.addError(...) - but this test-like object
# shouldn't be run anyway
pass
def __call__(self, result):
return self.run(result)
def countTestCases(self):
return 0
def _isnotsuite(test):
"A crude way to tell apart testcases and suites with duck-typing"
try:
iter(test)
except TypeError:
return True
return False
class _DebugResult(object):
"Used by the TestSuite to hold previous class when running in debug."
_previousTestClass = None
_moduleSetUpFailed = False
shouldStop = False
| gpl-3.0 |
jomyhuang/sdwle | jsonschema/validators.py | 9 | 14530 | from __future__ import division
import contextlib
import json
import numbers
try:
import requests
except ImportError:
requests = None
from jsonschema import _utils, _validators
from jsonschema.compat import (
Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
str_types, int_types, iteritems,
)
from jsonschema.exceptions import ErrorTree # Backwards compatibility # noqa
from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
_unset = _utils.Unset()
validators = {}
meta_schemas = _utils.URIDict()
def validates(version):
"""
Register the decorated validator for a ``version`` of the specification.
Registered validators and their meta schemas will be considered when
parsing ``$schema`` properties' URIs.
:argument str version: an identifier to use as the version's name
:returns: a class decorator to decorate the validator with the version
"""
def _validates(cls):
validators[version] = cls
if "id" in cls.META_SCHEMA:
meta_schemas[cls.META_SCHEMA["id"]] = cls
return cls
return _validates
def create(meta_schema, validators=(), version=None, default_types=None): # noqa
if default_types is None:
default_types = {
"array" : list, "boolean" : bool, "integer" : int_types,
"null" : type(None), "number" : numbers.Number, "object" : dict,
"string" : str_types,
}
class Validator(object):
VALIDATORS = dict(validators)
META_SCHEMA = dict(meta_schema)
DEFAULT_TYPES = dict(default_types)
def __init__(
self, schema, types=(), resolver=None, format_checker=None,
):
self._types = dict(self.DEFAULT_TYPES)
self._types.update(types)
if resolver is None:
resolver = RefResolver.from_schema(schema)
self.resolver = resolver
self.format_checker = format_checker
self.schema = schema
@classmethod
def check_schema(cls, schema):
for error in cls(cls.META_SCHEMA).iter_errors(schema):
raise SchemaError.create_from(error)
def iter_errors(self, instance, _schema=None):
if _schema is None:
_schema = self.schema
with self.resolver.in_scope(_schema.get("id", "")):
ref = _schema.get("$ref")
if ref is not None:
validators = [("$ref", ref)]
else:
validators = iteritems(_schema)
for k, v in validators:
validator = self.VALIDATORS.get(k)
if validator is None:
continue
errors = validator(self, v, instance, _schema) or ()
for error in errors:
# set details if not already set by the called fn
error._set(
validator=k,
validator_value=v,
instance=instance,
schema=_schema,
)
if k != "$ref":
error.schema_path.appendleft(k)
yield error
def descend(self, instance, schema, path=None, schema_path=None):
for error in self.iter_errors(instance, schema):
if path is not None:
error.path.appendleft(path)
if schema_path is not None:
error.schema_path.appendleft(schema_path)
yield error
def validate(self, *args, **kwargs):
for error in self.iter_errors(*args, **kwargs):
raise error
def is_type(self, instance, type):
if type not in self._types:
raise UnknownType(type, instance, self.schema)
pytypes = self._types[type]
# bool inherits from int, so ensure bools aren't reported as ints
if isinstance(instance, bool):
pytypes = _utils.flatten(pytypes)
is_number = any(
issubclass(pytype, numbers.Number) for pytype in pytypes
)
if is_number and bool not in pytypes:
return False
return isinstance(instance, pytypes)
def is_valid(self, instance, _schema=None):
error = next(self.iter_errors(instance, _schema), None)
return error is None
if version is not None:
Validator = validates(version)(Validator)
Validator.__name__ = version.title().replace(" ", "") + "Validator"
return Validator
def extend(validator, validators, version=None):
all_validators = dict(validator.VALIDATORS)
all_validators.update(validators)
return create(
meta_schema=validator.META_SCHEMA,
validators=all_validators,
version=version,
default_types=validator.DEFAULT_TYPES,
)
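# Illustrative sketch: extending Draft4Validator (defined below) with a custom
# keyword. The keyword name and the check itself are assumptions made for the
# example, not part of any draft.
#
#   from jsonschema.exceptions import ValidationError
#
#   def positive(validator, value, instance, schema):
#       if value and validator.is_type(instance, "number") and instance <= 0:
#           yield ValidationError("%r is not positive" % (instance,))
#
#   PositiveValidator = extend(Draft4Validator, {"positive": positive})
#   PositiveValidator({"positive": True}).validate(-1)  # raises ValidationError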
Draft3Validator = create(
meta_schema=_utils.load_schema("draft3"),
validators={
"$ref" : _validators.ref,
"additionalItems" : _validators.additionalItems,
"additionalProperties" : _validators.additionalProperties,
"dependencies" : _validators.dependencies,
"disallow" : _validators.disallow_draft3,
"divisibleBy" : _validators.multipleOf,
"enum" : _validators.enum,
"extends" : _validators.extends_draft3,
"format" : _validators.format,
"items" : _validators.items,
"maxItems" : _validators.maxItems,
"maxLength" : _validators.maxLength,
"maximum" : _validators.maximum,
"minItems" : _validators.minItems,
"minLength" : _validators.minLength,
"minimum" : _validators.minimum,
"multipleOf" : _validators.multipleOf,
"pattern" : _validators.pattern,
"patternProperties" : _validators.patternProperties,
"properties" : _validators.properties_draft3,
"type" : _validators.type_draft3,
"uniqueItems" : _validators.uniqueItems,
},
version="draft3",
)
Draft4Validator = create(
meta_schema=_utils.load_schema("draft4"),
validators={
"$ref" : _validators.ref,
"additionalItems" : _validators.additionalItems,
"additionalProperties" : _validators.additionalProperties,
"allOf" : _validators.allOf_draft4,
"anyOf" : _validators.anyOf_draft4,
"dependencies" : _validators.dependencies,
"enum" : _validators.enum,
"format" : _validators.format,
"items" : _validators.items,
"maxItems" : _validators.maxItems,
"maxLength" : _validators.maxLength,
"maxProperties" : _validators.maxProperties_draft4,
"maximum" : _validators.maximum,
"minItems" : _validators.minItems,
"minLength" : _validators.minLength,
"minProperties" : _validators.minProperties_draft4,
"minimum" : _validators.minimum,
"multipleOf" : _validators.multipleOf,
"not" : _validators.not_draft4,
"oneOf" : _validators.oneOf_draft4,
"pattern" : _validators.pattern,
"patternProperties" : _validators.patternProperties,
"properties" : _validators.properties_draft4,
"required" : _validators.required_draft4,
"type" : _validators.type_draft4,
"uniqueItems" : _validators.uniqueItems,
},
version="draft4",
)
class RefResolver(object):
"""
Resolve JSON References.
:argument str base_uri: URI of the referring document
:argument referrer: the actual referring document
:argument dict store: a mapping from URIs to documents to cache
:argument bool cache_remote: whether remote refs should be cached after
first resolution
:argument dict handlers: a mapping from URI schemes to functions that
should be used to retrieve them
"""
def __init__(
self, base_uri, referrer, store=(), cache_remote=True, handlers=(),
):
self.base_uri = base_uri
self.resolution_scope = base_uri
# This attribute is not used, it is for backwards compatibility
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self.store = _utils.URIDict(
(id, validator.META_SCHEMA)
for id, validator in iteritems(meta_schemas)
)
self.store.update(store)
self.store[base_uri] = referrer
@classmethod
def from_schema(cls, schema, *args, **kwargs):
"""
Construct a resolver from a JSON schema object.
:argument schema schema: the referring schema
:rtype: :class:`RefResolver`
"""
return cls(schema.get("id", ""), schema, *args, **kwargs)
@contextlib.contextmanager
def in_scope(self, scope):
old_scope = self.resolution_scope
self.resolution_scope = urljoin(old_scope, scope)
try:
yield
finally:
self.resolution_scope = old_scope
@contextlib.contextmanager
def resolving(self, ref):
"""
Context manager which resolves a JSON ``ref`` and enters the
resolution scope of this ref.
:argument str ref: reference to resolve
"""
full_uri = urljoin(self.resolution_scope, ref)
uri, fragment = urldefrag(full_uri)
if not uri:
uri = self.base_uri
if uri in self.store:
document = self.store[uri]
else:
try:
document = self.resolve_remote(uri)
except Exception as exc:
raise RefResolutionError(exc)
old_base_uri, self.base_uri = self.base_uri, uri
try:
with self.in_scope(uri):
yield self.resolve_fragment(document, fragment)
finally:
self.base_uri = old_base_uri
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
:argument document: the referent document
:argument str fragment: a URI fragment to resolve within it
"""
fragment = fragment.lstrip("/")
parts = unquote(fragment).split("/") if fragment else []
for part in parts:
part = part.replace("~1", "/").replace("~0", "~")
if isinstance(document, Sequence):
# Array indexes should be turned into integers
try:
part = int(part)
except ValueError:
pass
try:
document = document[part]
except (TypeError, LookupError):
raise RefResolutionError(
"Unresolvable JSON pointer: %r" % fragment
)
return document
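# Worked example (inline document, no network): per JSON Pointer, the escapes
# ~1 -> "/" and ~0 -> "~" are applied to each part, and parts indexing into
# sequences are converted to integers.
#
#   resolver = RefResolver.from_schema({})
#   resolver.resolve_fragment({"a": {"/b": [10, 20]}}, "/a/~1b/1")  # -> 20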
def resolve_remote(self, uri):
"""
Resolve a remote ``uri``.
If called directly, does not check the store first, but after
retrieving the document at the specified URI it will be saved in
the store if :attr:`cache_remote` is True.
.. note::
If the requests_ library is present, ``jsonschema`` will use it to
request the remote ``uri``, so that the correct encoding is
detected and used.
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
:argument str uri: the URI to resolve
:returns: the retrieved document
.. _requests: http://pypi.python.org/pypi/requests/
"""
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
elif (
scheme in ["http", "https"] and
requests and
getattr(requests.Response, "json", None) is not None
):
# Requests has support for detecting the correct encoding of
# json over http
if callable(requests.Response.json):
result = requests.get(uri).json()
else:
result = requests.get(uri).json
else:
# Otherwise, pass off to urllib and assume utf-8
result = json.loads(urlopen(uri).read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
def validator_for(schema, default=_unset):
if default is _unset:
default = Draft4Validator
return meta_schemas.get(schema.get("$schema", ""), default)
def validate(instance, schema, cls=None, *args, **kwargs):
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates`
"""
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
cls(schema, *args, **kwargs).validate(instance)
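# Illustrative call sequence (not in the original source): validate() picks a
# validator class from $schema via validator_for(), checks the schema itself,
# then validates the instance:
#     schema = {"$schema": "http://json-schema.org/draft-04/schema#",
#               "maxItems": 2}
#     validate([2, 3], schema)     # returns None (valid)
#     validate([2, 3, 4], schema)  # raises ValidationError, as in the doctest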
| mit |
jakevdp/scipy | scipy/special/_precompute/gammainc_data.py | 49 | 4175 | """Compute gammainc and gammaincc for large arguments and parameters
and save the values to data files for use in tests. We can't just
compare to mpmath's gammainc in test_mpmath.TestSystematic because it
would take too long.
Note that mpmath's gammainc is computed using hypercomb, but since it
doesn't allow the user to increase the maximum number of terms used in
the series it doesn't converge for many arguments. To get around this
we copy the mpmath implementation but use more terms.
This takes about 17 minutes to run on a 2.3 GHz MacBook Pro with 4 GB of
RAM.
Sources:
[1] Fredrik Johansson and others. mpmath: a Python library for
arbitrary-precision floating-point arithmetic (version 0.19),
December 2013. http://mpmath.org/.
"""
from __future__ import division, print_function, absolute_import
import os
from time import time
import numpy as np
from numpy import pi
from scipy.special._mptestutils import mpf2float
try:
import mpmath as mp
except ImportError:
pass
def gammainc(a, x, dps=50, maxterms=10**8):
"""Compute gammainc exactly like mpmath does but allow for more
summands in hypercomb. See
mpmath/functions/expintegrals.py#L134
in the mpmath github repository.
"""
with mp.workdps(dps):
z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
G = [z]
negb = mp.fneg(b, exact=True)
def h(z):
T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
return (T1,)
res = mp.hypercomb(h, [z], maxterms=maxterms)
return mpf2float(res)
def gammaincc(a, x, dps=50, maxterms=10**8):
"""Compute gammaincc exactly like mpmath does but allow for more
terms in hypercomb. See
mpmath/functions/expintegrals.py#L187
in the mpmath github repository.
"""
with mp.workdps(dps):
z, a = a, x
if mp.isint(z):
try:
# mpmath has a fast integer path
return mpf2float(mp.gammainc(z, a=a, regularized=True))
except mp.libmp.NoConvergence:
pass
nega = mp.fneg(a, exact=True)
G = [z]
# Use 2F0 series when possible; fall back to lower gamma representation
try:
def h(z):
r = z-1
return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
return mpf2float(mp.hypercomb(h, [z], force_series=True))
except mp.libmp.NoConvergence:
def h(z):
T1 = [], [1, z-1], [z], G, [], [], 0
T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
return T1, T2
return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
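# Sanity-check sketch (illustrative, not part of the original script): the
# regularized incomplete gamma functions are complementary, so for any (a, x)
# in range the two halves computed above should sum to 1.
def _check_complementarity(a, x, tol=1e-13):
    """Return True if gammainc(a, x) + gammaincc(a, x) == 1 within tol."""
    return abs(gammainc(a, x) + gammaincc(a, x) - 1.0) < tol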
def main():
t0 = time()
# It would be nice to have data for larger values, but either this
# requires prohibitively large precision (dps > 800) or mpmath has
# a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
# value around 0.03, while the true value should be close to 0.5
# (DLMF 8.12.15).
print(__doc__)
pwd = os.path.dirname(__file__)
r = np.logspace(4, 14, 30)
ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)
regimes = [(gammainc, ltheta), (gammaincc, utheta)]
for func, theta in regimes:
rg, thetag = np.meshgrid(r, theta)
a, x = rg*np.cos(thetag), rg*np.sin(thetag)
a, x = a.flatten(), x.flatten()
dataset = []
for i, (a0, x0) in enumerate(zip(a, x)):
if func == gammaincc:
# Exploit the fast integer path in gammaincc whenever
# possible so that the computation doesn't take too
# long
a0, x0 = np.floor(a0), np.floor(x0)
dataset.append((a0, x0, func(a0, x0)))
dataset = np.array(dataset)
filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
'{}.txt'.format(func.__name__))
np.savetxt(filename, dataset)
print("{} minutes elapsed".format((time() - t0)/60))
if __name__ == "__main__":
main()
| bsd-3-clause |
yitian134/chromium | net/tools/testserver/xmppserver.py | 14 | 18473 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A bare-bones and non-compliant XMPP server.
Just enough of the protocol is implemented to get it to work with
Chrome's sync notification system.
"""
import asynchat
import asyncore
import base64
import re
import socket
from xml.dom import minidom
# pychecker complains about the use of fileno(), which is implemented
# by asyncore by forwarding to an internal object via __getattr__.
__pychecker__ = 'no-classattr'
class Error(Exception):
"""Error class for this module."""
pass
class UnexpectedXml(Error):
"""Raised when an unexpected XML element has been encountered."""
def __init__(self, xml_element):
xml_text = xml_element.toxml()
Error.__init__(self, 'Unexpected XML element', xml_text)
def ParseXml(xml_string):
"""Parses the given string as XML and returns a minidom element
object.
"""
dom = minidom.parseString(xml_string)
# minidom handles xmlns specially, but there's a bug where it sets
# the attribute value to None, which causes toxml() or toprettyxml()
# to break.
def FixMinidomXmlnsBug(xml_element):
if xml_element.getAttribute('xmlns') is None:
xml_element.setAttribute('xmlns', '')
def ApplyToAllDescendantElements(xml_element, fn):
fn(xml_element)
for node in xml_element.childNodes:
if node.nodeType == node.ELEMENT_NODE:
ApplyToAllDescendantElements(node, fn)
root = dom.documentElement
ApplyToAllDescendantElements(root, FixMinidomXmlnsBug)
return root
def CloneXml(xml):
"""Returns a deep copy of the given XML element.
Args:
xml: The XML element, which should be something returned from
ParseXml() (i.e., a root element).
"""
return xml.ownerDocument.cloneNode(True).documentElement
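# Illustrative round-trip (not in the original file): ParseXml applies the
# xmlns fixup above, so the parsed stanza and its clone can be serialized
# without tripping minidom's None-valued-xmlns bug.
def _parse_xml_demo():
  stanza = ParseXml('<message to="user@example.com"><body>hi</body></message>')
  clone = CloneXml(stanza)
  text = clone.toxml()
  # Stanzas may contain cycles, so unlink them when done (as FeedStanza does).
  stanza.unlink()
  clone.unlink()
  return text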
class StanzaParser(object):
"""A hacky incremental XML parser.
StanzaParser consumes data incrementally via FeedString() and feeds
its delegate complete parsed stanzas (i.e., XML documents) via
FeedStanza(). Any stanzas passed to FeedStanza() are unlinked after
the callback is done.
Use like so:
class MyClass(object):
...
def __init__(self, ...):
...
self._parser = StanzaParser(self)
...
def SomeFunction(self, ...):
...
self._parser.FeedString(some_data)
...
def FeedStanza(self, stanza):
...
print stanza.toprettyxml()
...
"""
# NOTE(akalin): The following regexps are naive, but necessary since
# none of the existing Python 2.4/2.5 XML libraries support
# incremental parsing. This works well enough for our purposes.
#
# The regexps below assume that any present XML element starts at
# the beginning of the string, but there may be trailing whitespace.
# Matches an opening stream tag (e.g., '<stream:stream foo="bar">')
# (assumes that the stream XML namespace is defined in the tag).
_stream_re = re.compile(r'^(<stream:stream [^>]*>)\s*')
# Matches an empty element tag (e.g., '<foo bar="baz"/>').
_empty_element_re = re.compile(r'^(<[^>]*/>)\s*')
# Matches a non-empty element (e.g., '<foo bar="baz">quux</foo>').
# Does *not* handle nested elements.
_non_empty_element_re = re.compile(r'^(<([^ >]*)[^>]*>.*?</\2>)\s*')
# The closing tag for a stream tag. We have to insert this
# ourselves since all XML stanzas are children of the stream tag,
# which is never closed until the connection is closed.
_stream_suffix = '</stream:stream>'
def __init__(self, delegate):
self._buffer = ''
self._delegate = delegate
def FeedString(self, data):
"""Consumes the given string data, possibly feeding one or more
stanzas to the delegate.
"""
self._buffer += data
while (self._ProcessBuffer(self._stream_re, self._stream_suffix) or
self._ProcessBuffer(self._empty_element_re) or
self._ProcessBuffer(self._non_empty_element_re)):
pass
def _ProcessBuffer(self, regexp, xml_suffix=''):
"""If the buffer matches the given regexp, removes the match from
the buffer, appends the given suffix, parses it, and feeds it to
the delegate.
Returns:
Whether or not the buffer matched the given regexp.
"""
results = regexp.match(self._buffer)
if not results:
return False
xml_text = self._buffer[:results.end()] + xml_suffix
self._buffer = self._buffer[results.end():]
stanza = ParseXml(xml_text)
self._delegate.FeedStanza(stanza)
# Needed because stanza may have cycles.
stanza.unlink()
return True
class Jid(object):
"""Simple struct for an XMPP jid (essentially an e-mail address with
an optional resource string).
"""
def __init__(self, username, domain, resource=''):
self.username = username
self.domain = domain
self.resource = resource
def __str__(self):
jid_str = "%s@%s" % (self.username, self.domain)
if self.resource:
jid_str += '/' + self.resource
return jid_str
def GetBareJid(self):
return Jid(self.username, self.domain)
class IdGenerator(object):
"""Simple class to generate unique IDs for XMPP messages."""
def __init__(self, prefix):
self._prefix = prefix
self._id = 0
def GetNextId(self):
next_id = "%s.%s" % (self._prefix, self._id)
self._id += 1
return next_id
class HandshakeTask(object):
"""Class to handle the initial handshake with a connected XMPP
client.
"""
# The handshake states in order.
(_INITIAL_STREAM_NEEDED,
_AUTH_NEEDED,
_AUTH_STREAM_NEEDED,
_BIND_NEEDED,
_SESSION_NEEDED,
_FINISHED) = range(6)
# Used when in the _INITIAL_STREAM_NEEDED and _AUTH_STREAM_NEEDED
# states. Not an XML object as it's only the opening tag.
#
# The from and id attributes are filled in later.
_STREAM_DATA = (
'<stream:stream from="%s" id="%s" '
'version="1.0" xmlns:stream="http://etherx.jabber.org/streams" '
'xmlns="jabber:client">')
# Used when in the _INITIAL_STREAM_NEEDED state.
_AUTH_STANZA = ParseXml(
'<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
' <mechanisms xmlns="urn:ietf:params:xml:ns:xmpp-sasl">'
' <mechanism>PLAIN</mechanism>'
' <mechanism>X-GOOGLE-TOKEN</mechanism>'
' </mechanisms>'
'</stream:features>')
# Used when in the _AUTH_NEEDED state.
_AUTH_SUCCESS_STANZA = ParseXml(
'<success xmlns="urn:ietf:params:xml:ns:xmpp-sasl"/>')
# Used when in the _AUTH_STREAM_NEEDED state.
_BIND_STANZA = ParseXml(
'<stream:features xmlns:stream="http://etherx.jabber.org/streams">'
' <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind"/>'
' <session xmlns="urn:ietf:params:xml:ns:xmpp-session"/>'
'</stream:features>')
# Used when in the _BIND_NEEDED state.
#
# The id and jid attributes are filled in later.
_BIND_RESULT_STANZA = ParseXml(
'<iq id="" type="result">'
' <bind xmlns="urn:ietf:params:xml:ns:xmpp-bind">'
' <jid/>'
' </bind>'
'</iq>')
# Used when in the _SESSION_NEEDED state.
#
# The id attribute is filled in later.
_IQ_RESPONSE_STANZA = ParseXml('<iq id="" type="result"/>')
def __init__(self, connection, resource_prefix):
self._connection = connection
self._id_generator = IdGenerator(resource_prefix)
self._username = ''
self._domain = ''
self._jid = None
self._resource_prefix = resource_prefix
self._state = self._INITIAL_STREAM_NEEDED
def FeedStanza(self, stanza):
"""Inspects the given stanza and changes the handshake state if needed.
Called when a stanza is received from the client. Inspects the
stanza to make sure it has the expected attributes given the
current state, advances the state if needed, and sends a reply to
the client if needed.
"""
def ExpectStanza(stanza, name):
if stanza.tagName != name:
raise UnexpectedXml(stanza)
def ExpectIq(stanza, type, name):
ExpectStanza(stanza, 'iq')
if (stanza.getAttribute('type') != type or
stanza.firstChild.tagName != name):
raise UnexpectedXml(stanza)
def GetStanzaId(stanza):
return stanza.getAttribute('id')
def HandleStream(stanza):
ExpectStanza(stanza, 'stream:stream')
domain = stanza.getAttribute('to')
if domain:
self._domain = domain
SendStreamData()
def SendStreamData():
next_id = self._id_generator.GetNextId()
stream_data = self._STREAM_DATA % (self._domain, next_id)
self._connection.SendData(stream_data)
def GetUserDomain(stanza):
encoded_username_password = stanza.firstChild.data
username_password = base64.b64decode(encoded_username_password)
(_, username_domain, _) = username_password.split('\0')
# The domain may be omitted.
#
# If we were using python 2.5, we'd be able to do:
#
# username, _, domain = username_domain.partition('@')
# if not domain:
# domain = self._domain
at_pos = username_domain.find('@')
if at_pos != -1:
username = username_domain[:at_pos]
domain = username_domain[at_pos+1:]
else:
username = username_domain
domain = self._domain
return (username, domain)
if self._state == self._INITIAL_STREAM_NEEDED:
HandleStream(stanza)
self._connection.SendStanza(self._AUTH_STANZA, False)
self._state = self._AUTH_NEEDED
elif self._state == self._AUTH_NEEDED:
ExpectStanza(stanza, 'auth')
(self._username, self._domain) = GetUserDomain(stanza)
self._connection.SendStanza(self._AUTH_SUCCESS_STANZA, False)
self._state = self._AUTH_STREAM_NEEDED
elif self._state == self._AUTH_STREAM_NEEDED:
HandleStream(stanza)
self._connection.SendStanza(self._BIND_STANZA, False)
self._state = self._BIND_NEEDED
elif self._state == self._BIND_NEEDED:
ExpectIq(stanza, 'set', 'bind')
stanza_id = GetStanzaId(stanza)
resource_element = stanza.getElementsByTagName('resource')[0]
resource = resource_element.firstChild.data
full_resource = '%s.%s' % (self._resource_prefix, resource)
response = CloneXml(self._BIND_RESULT_STANZA)
response.setAttribute('id', stanza_id)
self._jid = Jid(self._username, self._domain, full_resource)
jid_text = response.parentNode.createTextNode(str(self._jid))
response.getElementsByTagName('jid')[0].appendChild(jid_text)
self._connection.SendStanza(response)
self._state = self._SESSION_NEEDED
elif self._state == self._SESSION_NEEDED:
ExpectIq(stanza, 'set', 'session')
stanza_id = GetStanzaId(stanza)
xml = CloneXml(self._IQ_RESPONSE_STANZA)
xml.setAttribute('id', stanza_id)
self._connection.SendStanza(xml)
self._state = self._FINISHED
self._connection.HandshakeDone(self._jid)
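# Illustrative summary (not part of the original): each client stanza
# advances the handshake one state:
#   <stream:stream>          -> server offers SASL mechanisms  (_AUTH_NEEDED)
#   <auth>                   -> server sends <success>         (_AUTH_STREAM_NEEDED)
#   <stream:stream>          -> server offers bind + session   (_BIND_NEEDED)
#   <iq type="set"> bind     -> server assigns the full JID    (_SESSION_NEEDED)
#   <iq type="set"> session  -> server acks; HandshakeDone(jid) (_FINISHED)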
def AddrString(addr):
return '%s:%d' % addr
class XmppConnection(asynchat.async_chat):
"""A single XMPP client connection.
This class handles the connection to a single XMPP client (via a
socket). It does the XMPP handshake and also implements the (old)
Google notification protocol.
"""
# Used for acknowledgements to the client.
#
# The from and id attributes are filled in later.
_IQ_RESPONSE_STANZA = ParseXml('<iq from="" id="" type="result"/>')
def __init__(self, sock, socket_map, delegate, addr):
"""Starts up the xmpp connection.
Args:
sock: The socket to the client.
socket_map: A map from sockets to their owning objects.
delegate: The delegate, which is notified when the XMPP
handshake is successful, when the connection is closed, and
when a notification has to be broadcast.
addr: The host/port of the client.
"""
# We do this because in versions of python < 2.6,
# async_chat.__init__ doesn't take a map argument nor pass it to
# dispatcher.__init__. We rely on the fact that
# async_chat.__init__ calls dispatcher.__init__ as the last thing
# it does, and that calling dispatcher.__init__ with socket=None
# and map=None is essentially a no-op.
asynchat.async_chat.__init__(self)
asyncore.dispatcher.__init__(self, sock, socket_map)
self.set_terminator(None)
self._delegate = delegate
self._parser = StanzaParser(self)
self._jid = None
self._addr = addr
addr_str = AddrString(self._addr)
self._handshake_task = HandshakeTask(self, addr_str)
print 'Starting connection to %s' % self
def __str__(self):
if self._jid:
return str(self._jid)
else:
return AddrString(self._addr)
# async_chat implementation.
def collect_incoming_data(self, data):
self._parser.FeedString(data)
# This is only here to make pychecker happy.
def found_terminator(self):
asynchat.async_chat.found_terminator(self)
def close(self):
print "Closing connection to %s" % self
self._delegate.OnXmppConnectionClosed(self)
asynchat.async_chat.close(self)
# Called by self._parser.FeedString().
def FeedStanza(self, stanza):
if self._handshake_task:
self._handshake_task.FeedStanza(stanza)
elif stanza.tagName == 'iq' and stanza.getAttribute('type') == 'result':
# Ignore all client acks.
pass
elif (stanza.firstChild and
stanza.firstChild.namespaceURI == 'google:push'):
self._HandlePushCommand(stanza)
else:
raise UnexpectedXml(stanza)
# Called by self._handshake_task.
def HandshakeDone(self, jid):
self._jid = jid
self._handshake_task = None
self._delegate.OnXmppHandshakeDone(self)
print "Handshake done for %s" % self
def _HandlePushCommand(self, stanza):
if stanza.tagName == 'iq' and stanza.firstChild.tagName == 'subscribe':
# Subscription request.
self._SendIqResponseStanza(stanza)
elif stanza.tagName == 'message' and stanza.firstChild.tagName == 'push':
# Send notification request.
self._delegate.ForwardNotification(self, stanza)
else:
      raise UnexpectedXml(stanza)
def _SendIqResponseStanza(self, iq):
stanza = CloneXml(self._IQ_RESPONSE_STANZA)
stanza.setAttribute('from', str(self._jid.GetBareJid()))
stanza.setAttribute('id', iq.getAttribute('id'))
self.SendStanza(stanza)
def SendStanza(self, stanza, unlink=True):
"""Sends a stanza to the client.
Args:
stanza: The stanza to send.
unlink: Whether to unlink stanza after sending it. (Pass in
False if stanza is a constant.)
"""
self.SendData(stanza.toxml())
if unlink:
stanza.unlink()
def SendData(self, data):
"""Sends raw data to the client.
"""
# We explicitly encode to ascii as that is what the client expects
# (some minidom library functions return unicode strings).
self.push(data.encode('ascii'))
def ForwardNotification(self, notification_stanza):
"""Forwards a notification to the client."""
notification_stanza.setAttribute('from', str(self._jid.GetBareJid()))
notification_stanza.setAttribute('to', str(self._jid))
self.SendStanza(notification_stanza, False)
class XmppServer(asyncore.dispatcher):
"""The main XMPP server class.
The XMPP server starts accepting connections on the given address
and spawns off XmppConnection objects for each one.
Use like so:
socket_map = {}
xmpp_server = xmppserver.XmppServer(socket_map, ('127.0.0.1', 5222))
asyncore.loop(30.0, False, socket_map)
"""
# Used when sending a notification.
_NOTIFICATION_STANZA = ParseXml(
'<message>'
' <push xmlns="google:push">'
' <data/>'
' </push>'
'</message>')
def __init__(self, socket_map, addr):
asyncore.dispatcher.__init__(self, None, socket_map)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind(addr)
self.listen(5)
self._socket_map = socket_map
self._connections = set()
self._handshake_done_connections = set()
self._notifications_enabled = True
def handle_accept(self):
(sock, addr) = self.accept()
xmpp_connection = XmppConnection(sock, self._socket_map, self, addr)
self._connections.add(xmpp_connection)
# Return the new XmppConnection for testing.
return xmpp_connection
def close(self):
# A copy is necessary since calling close on each connection
# removes it from self._connections.
for connection in self._connections.copy():
connection.close()
asyncore.dispatcher.close(self)
def EnableNotifications(self):
self._notifications_enabled = True
def DisableNotifications(self):
self._notifications_enabled = False
def MakeNotification(self, channel, data):
"""Makes a notification from the given channel and encoded data.
Args:
channel: The channel on which to send the notification.
data: The notification payload.
"""
notification_stanza = CloneXml(self._NOTIFICATION_STANZA)
push_element = notification_stanza.getElementsByTagName('push')[0]
push_element.setAttribute('channel', channel)
data_element = push_element.getElementsByTagName('data')[0]
encoded_data = base64.b64encode(data)
data_text = notification_stanza.parentNode.createTextNode(encoded_data)
data_element.appendChild(data_text)
return notification_stanza
def SendNotification(self, channel, data):
"""Sends a notification to all connections.
Args:
channel: The channel on which to send the notification.
data: The notification payload.
"""
notification_stanza = self.MakeNotification(channel, data)
self.ForwardNotification(None, notification_stanza)
notification_stanza.unlink()
# XmppConnection delegate methods.
def OnXmppHandshakeDone(self, xmpp_connection):
self._handshake_done_connections.add(xmpp_connection)
def OnXmppConnectionClosed(self, xmpp_connection):
self._connections.discard(xmpp_connection)
self._handshake_done_connections.discard(xmpp_connection)
def ForwardNotification(self, unused_xmpp_connection, notification_stanza):
if self._notifications_enabled:
for connection in self._handshake_done_connections:
print 'Sending notification to %s' % connection
connection.ForwardNotification(notification_stanza)
else:
print 'Notifications disabled; dropping notification'
| bsd-3-clause |
forkable/p2pScrapper | BitTorrent-5.2.2/BitTorrent/MultiTorrent.py | 3 | 34318 | # The contents of this file are subject to the BitTorrent Open Source License
# Version 1.0 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Author: Steve Hazel, Bram Cohen, and Uoti Urpala.
import os
import sys
import shutil
import socket
import cPickle
import logging
import traceback
from copy import copy
from BTL.translation import _
from BitTorrent.Choker import Choker
from BTL.platform import bttime, encode_for_filesystem, get_filesystem_encoding
from BitTorrent.platform import old_broken_config_subencoding
from BitTorrent.Torrent import Feedback, Torrent
from BTL.bencode import bdecode
from BTL.ConvertedMetainfo import ConvertedMetainfo
from BTL.exceptions import str_exc
from BitTorrent.prefs import Preferences
from BitTorrent.NatTraversal import NatTraverser
from BitTorrent.BandwidthManager import BandwidthManager
from BitTorrent.InternetWatcher import get_internet_watcher
from BitTorrent.NewRateLimiter import MultiRateLimiter as RateLimiter
from BitTorrent.DownloadRateLimiter import DownloadRateLimiter
from BitTorrent.ConnectionManager import SingleportListener
from BitTorrent.CurrentRateMeasure import Measure
from BitTorrent.Storage import FilePool
from BTL.yielddefer import launch_coroutine
from BTL.defer import Deferred, DeferredEvent, wrap_task
from BitTorrent import BTFailure, InfoHashType
from BitTorrent import configfile
from khashmir.utkhashmir import UTKhashmir
class TorrentException(BTFailure):
pass
class TorrentAlreadyInQueue(TorrentException):
pass
class TorrentAlreadyRunning(TorrentException):
pass
class TorrentNotInitialized(TorrentException):
pass
class TorrentNotRunning(TorrentException):
pass
class UnknownInfohash(TorrentException):
pass
class TorrentShutdownFailed(TorrentException):
pass
class TooManyTorrents(TorrentException):
pass
#class DummyTorrent(object):
# def __init__(self, infohash):
# self.metainfo = object()
# self.metainfo.infohash = infohash
BUTLE_INTERVAL = 1
class MultiTorrent(Feedback):
"""A MultiTorrent object represents a set of BitTorrent file transfers.
It acts as a factory for Torrent objects, and it acts as
the interface through which communication is performed to and from
torrent file transfers.
If you wish to instantiate MultiTorrent to download only a single
torrent then pass is_single_torrent=True.
If you want to avoid resuming from prior torrent config state then
pass resume_from_torrent_config = False.
It will still use fast resume if available.
"""
def __init__(self, config, rawserver,
data_dir, listen_fail_ok=False, init_torrents=True,
is_single_torrent=False, resume_from_torrent_config=True):
"""
@param config: program-wide configuration object.
@param rawserver: object that manages main event loop and event
scheduling.
@param data_dir: where variable data such as fastresume information
and GUI state is saved.
@param listen_fail_ok: if false, a BTFailure is raised if
a server socket cannot be opened to accept incoming peer
connections.
@param init_torrents: restore fast resume state from prior
instantiations of MultiTorrent.
@param is_single_torrent: if true then allow only one torrent
at a time in this MultiTorrent.
@param resume_from_torrent_config: resume from ui_state files.
"""
# is_single_torrent will go away when we move MultiTorrent into
# a separate process, in which case, single torrent applications like
# curses and console will act as a client to the MultiTorrent daemon.
# --Dave
# init_torrents refers to fast resume rather than torrent config.
# If init_torrents is set to False, the UI state file is still
# read and the paths to existing downloads still used. This is
# not what we want for launchmany.
#
# resume_from_torrent_config is separate from
# is_single_torrent because launchmany must be able to have
# multiple torrents while not resuming from torrent config
# state. If launchmany resumes from torrent config then it
# saves or seeds from the path in the torrent config even if
# the file has moved in the directory tree. Because
# launchmany has no mechanism for removing torrents other than
# to change the directory tree, the only way for the user to
# eliminate the old state is to wipe out the files in the
# .bittorrent/launchmany-*/ui_state directory. This is highly
# counterintuitive. Best to simply ignore the ui_state
# directory altogether. --Dave
assert isinstance(config, Preferences)
#assert isinstance(data_dir, unicode) # temporarily commented -Dave
assert isinstance(listen_fail_ok, bool)
assert not (is_single_torrent and resume_from_torrent_config)
self.config = config
self.data_dir = data_dir
self.last_save_time = 0
self.policies = []
self.torrents = {}
self.running = {}
self.log_root = "core.MultiTorrent"
self.logger = logging.getLogger(self.log_root)
self.is_single_torrent = is_single_torrent
self.resume_from_torrent_config = resume_from_torrent_config
self.auto_update_policy_index = None
self.dht = None
self.rawserver = rawserver
nattraverser = NatTraverser(self.rawserver)
self.internet_watcher = get_internet_watcher(self.rawserver)
self.singleport_listener = SingleportListener(self.rawserver,
nattraverser,
self.log_root,
config['use_local_discovery'])
self.choker = Choker(self.config, self.rawserver.add_task)
self.up_ratelimiter = RateLimiter(self.rawserver.add_task)
self.up_ratelimiter.set_parameters(config['max_upload_rate'],
config['upload_unit_size'])
self.down_ratelimiter = DownloadRateLimiter(
config['download_rate_limiter_interval'],
self.config['max_download_rate'])
self.total_downmeasure = Measure(config['max_rate_period'])
self._find_port(listen_fail_ok)
self.filepool_doneflag = DeferredEvent()
self.filepool = FilePool(self.filepool_doneflag,
self.rawserver.add_task,
self.rawserver.external_add_task,
config['max_files_open'],
config['num_disk_threads'])
if self.resume_from_torrent_config:
try:
self._restore_state(init_torrents)
except BTFailure:
                # A corrupt state file should not be fatal; log and continue.
self.logger.exception("_restore_state failed")
def no_dump_set_option(option, value):
self.set_option(option, value, dump=False)
self.bandwidth_manager = BandwidthManager(
self.rawserver.external_add_task, config,
no_dump_set_option, self.rawserver.get_remote_endpoints,
get_rates=self.get_total_rates )
self.rawserver.add_task(0, self.butle)
def butle(self):
policy = None
try:
for policy in self.policies:
policy.butle()
except:
            # Log the error but keep butling; one failing policy
            # should not stop the rest.
self.logger.error("Butler error", exc_info=sys.exc_info())
# Should we remove policies?
#if policy:
# self.policies.remove(policy)
self.rawserver.add_task(BUTLE_INTERVAL, self.butle)
def _find_port(self, listen_fail_ok=True):
"""Run BitTorrent on the first available port found starting
from minport in the range [minport, maxport]."""
exc_info = None
self.config['minport'] = max(1024, self.config['minport'])
self.config['maxport'] = max(self.config['minport'],
self.config['maxport'])
e = (_("maxport less than minport - no ports to check") +
(": %s %s" % (self.config['minport'], self.config['maxport'])))
for port in xrange(self.config['minport'], self.config['maxport'] + 1):
try:
self.singleport_listener.open_port(port, self.config)
if self.config['start_trackerless_client']:
self.dht = UTKhashmir(self.config['bind'],
self.singleport_listener.get_port(),
self.data_dir, self.rawserver,
int(self.config['max_upload_rate'] * 0.01),
rlcount=self.up_ratelimiter.increase_offset,
config=self.config)
break
except socket.error, e:
exc_info = sys.exc_info()
else:
if not listen_fail_ok:
raise BTFailure, (_("Could not open a listening port: %s.") %
str_exc(e) )
self.global_error(logging.CRITICAL,
(_("Could not open a listening port: %s. ") % e) +
(_("Check your port range settings (%s:%s-%s).") %
(self.config['bind'], self.config['minport'],
self.config['maxport'])),
exc_info=exc_info)
def shutdown(self):
df = launch_coroutine(wrap_task(self.rawserver.add_task), self._shutdown)
df.addErrback(lambda f : self.logger.error('shutdown failed!',
exc_info=f.exc_info()))
return df
def _shutdown(self):
self.choker.shutdown()
self.singleport_listener.close_sockets()
for t in self.torrents.itervalues():
try:
df = t.shutdown()
yield df
df.getResult()
totals = t.get_total_transfer()
t.uptotal = t.uptotal_old + totals[0]
t.downtotal = t.downtotal_old + totals[1]
except:
t.logger.debug("Torrent shutdown failed in state: %s", t.state)
print "Torrent shutdown failed in state:", t.state
traceback.print_exc()
# the filepool must be shut down after the torrents,
# or pending ops could never complete
self.filepool_doneflag.set()
if self.resume_from_torrent_config:
self._dump_torrents()
def set_option(self, option, value, infohash=None, dump=True):
if infohash is not None:
t = self.get_torrent(infohash)
t.config[option] = value
if dump:
t._dump_torrent_config()
else:
self.config[option] = value
if dump:
self._dump_global_config()
if option in ['max_upload_rate', 'upload_unit_size']:
self.up_ratelimiter.set_parameters(self.config['max_upload_rate'],
self.config['upload_unit_size'])
elif option == 'max_download_rate':
self.down_ratelimiter.set_parameters(
self.config['max_download_rate'])
#pass # polled from the config automatically by MultiDownload
elif option == 'max_files_open':
self.filepool.set_max_files_open(value)
elif option == 'maxport':
if not self.config['minport'] <= self.singleport_listener.port <= \
self.config['maxport']:
self._find_port()
def add_policy(self, policy):
self.policies.append(policy)
def add_auto_update_policy(self, policy):
self.add_policy(policy)
self.auto_update_policy_index = self.policies.index(policy)
def global_error(self, severity, message, exc_info=None):
self.logger.log(severity, message, exc_info=exc_info)
def create_torrent_non_suck(self, torrent_filename, path_to_data,
hidden=False, feedback=None):
data = open(torrent_filename, 'rb').read()
metainfo = ConvertedMetainfo(bdecode(data))
return self.create_torrent(metainfo, path_to_data, path_to_data,
hidden=hidden, feedback=feedback)
def create_torrent(self, metainfo, save_incomplete_as, save_as,
hidden=False, is_auto_update=False, feedback=None):
if self.is_single_torrent and len(self.torrents) > 0:
raise TooManyTorrents(_("MultiTorrent is set to download only "
"a single torrent, but tried to create more than one."))
infohash = metainfo.infohash
if self.torrent_known(infohash):
if self.torrent_running(infohash):
msg = _("This torrent (or one with the same contents) is "
"already running.")
raise TorrentAlreadyRunning(msg)
else:
raise TorrentAlreadyInQueue(_("This torrent (or one with "
"the same contents) is "
"already waiting to run."))
self._dump_metainfo(metainfo)
#BUG. Use _read_torrent_config for 5.0? --Dave
config = configfile.read_torrent_config(self.config,
self.data_dir,
infohash,
lambda s : self.global_error(logging.ERROR, s))
t = Torrent(metainfo, save_incomplete_as, save_as, self.config,
self.data_dir, self.rawserver, self.choker,
self.singleport_listener, self.up_ratelimiter,
self.down_ratelimiter, self.total_downmeasure,
self.filepool, self.dht, self,
self.log_root, hidden=hidden,
is_auto_update=is_auto_update)
if feedback:
t.add_feedback(feedback)
retdf = Deferred()
def torrent_started(*args):
if config:
t.update_config(config)
t._dump_torrent_config()
if self.resume_from_torrent_config:
self._dump_torrents()
t.metainfo.show_encoding_errors(self.logger.log)
retdf.callback(t)
df = self._init_torrent(t, use_policy=False)
df.addCallback(torrent_started)
return retdf
def remove_torrent(self, ihash, del_files=False):
# this feels redundant. the torrent will stop the download itself,
# can't we accomplish the rest through a callback or something?
if self.torrent_running(ihash):
self.stop_torrent(ihash)
t = self.torrents[ihash]
# super carefully determine whether these are really incomplete files
fs_save_incomplete_in, junk = encode_for_filesystem(
self.config['save_incomplete_in']
)
inco = ((not t.completed) and
(t.working_path != t.destination_path) and
t.working_path.startswith(fs_save_incomplete_in))
del_files = del_files and inco
df = t.shutdown()
df.addCallback(lambda *args: t.remove_state_files(del_files=del_files))
if ihash in self.running:
del self.running[ihash]
# give the torrent a blank feedback, so post-mortem errors don't
# confuse multitorrent
t.feedback = Feedback()
del self.torrents[ihash]
if self.resume_from_torrent_config:
self._dump_torrents()
return df
def reinitialize_torrent(self, infohash):
t = self.get_torrent(infohash)
if self.torrent_running(infohash):
assert t.is_running(), "torrent not running, but in running set"
raise TorrentAlreadyRunning(infohash.encode("hex"))
assert t.state == "failed", "state not failed"
df = self._init_torrent(t, use_policy=False)
return df
def start_torrent(self, infohash):
if self.is_single_torrent and len(self.torrents) > 1:
raise TooManyTorrents(_("MultiTorrent is set to download only "
"a single torrent, but tried to create more than one."))
t = self.get_torrent(infohash)
if self.torrent_running(infohash):
assert t.is_running()
raise TorrentAlreadyRunning(infohash.encode("hex"))
if not t.is_initialized():
raise TorrentNotInitialized(infohash.encode("hex"))
t.logger.debug("starting torrent")
self.running[infohash] = t
t.start_download()
t._dump_torrent_config()
return t.state
def stop_torrent(self, infohash, pause=False):
if not self.torrent_running(infohash):
raise TorrentNotRunning()
t = self.get_torrent(infohash)
assert t.is_running()
t.logger.debug("stopping torrent")
t.stop_download(pause=pause)
del self.running[infohash]
t._dump_torrent_config()
return t.state
def torrent_status(self, infohash, spew=False, fileinfo=False):
torrent = self.get_torrent(infohash)
status = torrent.get_status(spew, fileinfo)
return torrent, status
def get_torrent(self, infohash):
try:
t = self.torrents[infohash]
except KeyError:
raise UnknownInfohash(infohash.encode("hex"))
return t
def get_torrents(self):
return self.torrents.values()
def get_running(self):
return self.running.keys()
def get_visible_torrents(self):
return [t for t in self.torrents.values() if not t.hidden]
def get_visible_running(self):
return [i for i in self.running.keys() if not self.torrents[i].hidden]
def torrent_running(self, ihash):
return ihash in self.running
def torrent_known(self, ihash):
return ihash in self.torrents
def pause(self):
for i in self.running.keys():
self.stop_torrent(i, pause=True)
def unpause(self):
for i in [t.metainfo.infohash for t in self.torrents.values() if t.is_initialized()]:
self.start_torrent(i)
def set_file_priority(self, infohash, filename, priority):
torrent = self.get_torrent(infohash)
if torrent is None or not self.torrent_running(infohash):
return
torrent.set_file_priority(filename, priority)
def set_torrent_priority(self, infohash, priority):
torrent = self.get_torrent(infohash)
if torrent is None:
return
torrent.priority = priority
torrent._dump_torrent_config()
def set_torrent_policy(self, infohash, policy):
torrent = self.get_torrent(infohash)
if torrent is None:
return
torrent.policy = policy
torrent._dump_torrent_config()
def get_all_rates(self):
rates = {}
for infohash, torrent in self.torrents.iteritems():
rates[infohash] = (torrent.get_uprate() or 0,
torrent.get_downrate() or 0)
return rates
def get_variance(self):
return self.bandwidth_manager.current_std, self.bandwidth_manager.max_std
def get_total_rates(self):
u = 0.0
d = 0.0
for torrent in self.torrents.itervalues():
u += torrent.get_uprate() or 0
d += torrent.get_downrate() or 0
return u, d
def get_total_totals(self):
u = 0.0
d = 0.0
for torrent in self.torrents.itervalues():
u += torrent.get_uptotal() or 0
d += torrent.get_downtotal() or 0
return u, d
def auto_update_status(self):
if self.auto_update_policy_index is not None:
aub = self.policies[self.auto_update_policy_index]
return aub.get_auto_update_status()
return None, None, None
def remove_auto_updates_except(self, infohash):
for t in self.torrents.values():
if t.is_auto_update and t.metainfo.infohash != infohash:
self.logger.warning(_("Cleaning up old autoupdate %s") % t.metainfo.name)
self.remove_torrent(t.metainfo.infohash, del_files=True)
## singletorrent callbacks
def started(self, torrent):
torrent.logger.debug("started torrent")
assert torrent.infohash in self.torrents
torrent._dump_torrent_config()
for policy in self.policies:
policy.started(torrent)
def failed(self, torrent):
torrent.logger.debug("torrent failed")
if torrent.infohash not in self.running:
return
del self.running[torrent.infohash]
t = self.get_torrent(torrent.infohash)
for policy in self.policies:
policy.failed(t)
def finishing(self, torrent):
torrent.logger.debug("torrent finishing")
t = self.get_torrent(torrent.infohash)
def finished(self, torrent):
torrent.logger.debug("torrent finished")
t = self.get_torrent(torrent.infohash)
t._dump_torrent_config()
for policy in self.policies:
policy.finished(t)
def exception(self, torrent, text):
torrent.logger.debug("torrent threw exception: " + text)
if torrent.infohash not in self.torrents:
return
for policy in self.policies:
policy.exception(torrent, text)
def error(self, torrent, level, text):
torrent.logger.log(level, text)
if torrent.infohash not in self.torrents:
return
for policy in self.policies:
policy.error(torrent, level, text)
### persistence
## These should be the .torrent file!
#################
def _dump_metainfo(self, metainfo):
infohash = metainfo.infohash
path = os.path.join(self.data_dir, 'metainfo',
infohash.encode('hex'))
f = file(path+'.new', 'wb')
f.write(metainfo.to_data())
f.close()
shutil.move(path+'.new', path)
def _read_metainfo(self, infohash):
path = os.path.join(self.data_dir, 'metainfo',
infohash.encode('hex'))
f = file(path, 'rb')
data = f.read()
f.close()
return ConvertedMetainfo(bdecode(data))
#################
def _read_torrent_config(self, infohash):
path = os.path.join(self.data_dir, 'torrents', infohash.encode('hex'))
if not os.path.exists(path):
raise BTFailure,_("Coult not open the torrent config: " + infohash.encode('hex'))
f = file(path, 'rb')
data = f.read()
f.close()
try:
torrent_config = cPickle.loads(data)
except:
# backward compatibility with <= 4.9.3
torrent_config = bdecode(data)
for k, v in torrent_config.iteritems():
try:
torrent_config[k] = v.decode('utf8')
if k in ('destination_path', 'working_path'):
torrent_config[k] = encode_for_filesystem(torrent_config[k])[0]
except:
pass
if not torrent_config.get('destination_path'):
raise BTFailure( _("Invalid torrent config file"))
if not torrent_config.get('working_path'):
raise BTFailure( _("Invalid torrent config file"))
if get_filesystem_encoding() == None:
# These paths should both be unicode. If they aren't, they are the
# broken product of some old version, and probably are in the
# encoding we used to use in config files. Attempt to recover.
dp = torrent_config['destination_path']
if isinstance(dp, str):
try:
dp = dp.decode(old_broken_config_subencoding)
torrent_config['destination_path'] = dp
except:
raise BTFailure( _("Invalid torrent config file"))
wp = torrent_config['working_path']
if isinstance(wp, str):
try:
wp = wp.decode(old_broken_config_subencoding)
torrent_config['working_path'] = wp
except:
raise BTFailure( _("Invalid torrent config file"))
return torrent_config
def _dump_global_config(self):
# BUG: we can save to different sections later
section = 'bittorrent'
configfile.save_global_config(self.config, section,
lambda *e : self.logger.error(*e))
def _dump_torrents(self):
assert self.resume_from_torrent_config
self.last_save_time = bttime()
r = []
def write_entry(infohash, t):
r.append(' '.join((infohash.encode('hex'),
str(t.uptotal), str(t.downtotal))))
r.append('BitTorrent UI state file, version 5')
r.append('Queued torrents')
for t in self.torrents.values():
            write_entry(t.metainfo.infohash, t)
r.append('End')
f = None
try:
path = os.path.join(self.data_dir, 'ui_state')
f = file(path+'.new', 'wb')
f.write('\n'.join(r) + '\n')
f.close()
shutil.move(path+'.new', path)
except Exception, e:
self.logger.error(_("Could not save UI state: ") + str_exc(e))
if f is not None:
f.close()
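    # Format sketch of the version-5 ui_state file written above (derived
    # from write_entry; illustrative only):
    #
    #     BitTorrent UI state file, version 5
    #     Queued torrents
    #     <40-hex-digit infohash> <uptotal> <downtotal>
    #     ...
    #     End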
def _init_torrent(self, t, initialize=True, use_policy=True):
self.torrents[t.infohash] = t
if not initialize:
t.logger.debug("created torrent")
return
t.logger.debug("created torrent, initializing")
df = t.initialize()
if use_policy and t.policy == "start":
df.addCallback(lambda r, t: self.start_torrent(t.infohash), t)
return df
def initialize_torrents(self):
df = launch_coroutine(wrap_task(self.rawserver.add_task), self._initialize_torrents)
df.addErrback(lambda f : self.logger.error('initialize_torrents failed!',
exc_info=f.exc_info()))
return df
def _initialize_torrents(self):
self.logger.debug("initializing torrents")
for t in copy(self.torrents).itervalues():
if t in self.torrents.values() and t.state == "created":
df = self._init_torrent(t)
# HACK
#yield df
#df.getResult()
# this function is so nasty!
def _restore_state(self, init_torrents):
def decode_line(line):
hashtext = line[:40]
try:
infohash = InfoHashType(hashtext.decode('hex'))
except:
raise BTFailure(_("Invalid state file contents"))
if len(infohash) != 20:
raise BTFailure(_("Invalid state file contents"))
if infohash in self.torrents:
raise BTFailure(_("Invalid state file (duplicate entry)"))
try:
metainfo = self._read_metainfo(infohash)
except OSError, e:
try:
f.close()
except:
pass
self.logger.error((_("Error reading metainfo file \"%s\".") %
hashtext) + " (" + str_exc(e)+ "), " +
_("cannot restore state completely"))
return None
except Exception, e:
self.logger.error((_("Corrupt data in metainfo \"%s\", cannot restore torrent.") % hashtext) +
'('+str_exc(e)+')')
return None
b = encode_for_filesystem(u'')[0]
t = Torrent(metainfo, b, b, self.config, self.data_dir,
self.rawserver, self.choker,
self.singleport_listener, self.up_ratelimiter,
self.down_ratelimiter,
self.total_downmeasure, self.filepool, self.dht, self,
self.log_root)
t.metainfo.reported_errors = True # suppress redisplay on restart
if infohash != t.metainfo.infohash:
self.logger.error((_("Corrupt data in \"%s\", cannot restore torrent.") % hashtext) +
_("(infohash mismatch)"))
return None
if len(line) == 41:
t.working_path = None
t.destination_path = None
return infohash, t
try:
if version < 2:
t.working_path = line[41:-1].decode('string_escape')
t.working_path = t.working_path.decode('utf-8')
t.working_path = encode_for_filesystem(t.working_path)[0]
t.destination_path = t.working_path
elif version == 3:
up, down, working_path = line[41:-1].split(' ', 2)
t.uptotal = t.uptotal_old = int(up)
t.downtotal = t.downtotal_old = int(down)
t.working_path = working_path.decode('string_escape')
t.working_path = t.working_path.decode('utf-8')
t.working_path = encode_for_filesystem(t.working_path)[0]
t.destination_path = t.working_path
elif version >= 4:
up, down = line[41:-1].split(' ', 1)
t.uptotal = t.uptotal_old = int(up)
t.downtotal = t.downtotal_old = int(down)
except ValueError: # unpack, int(), decode()
raise BTFailure(_("Invalid state file (bad entry)"))
torrent_config = self.config
try:
if version < 5:
torrent_config = configfile.read_torrent_config(
self.config,
self.data_dir,
infohash,
lambda s : self.global_error(logging.ERROR, s))
else:
torrent_config = self._read_torrent_config(infohash)
t.update_config(torrent_config)
except BTFailure, e:
self.logger.error("Read torrent config failed",
exc_info=sys.exc_info())
# if read_torrent_config fails then ignore the torrent...
return None
return infohash, t
# BEGIN _restore_state
assert self.resume_from_torrent_config
filename = os.path.join(self.data_dir, 'ui_state')
if not os.path.exists(filename):
return
f = None
try:
f = file(filename, 'rb')
lines = f.readlines()
f.close()
except Exception, e:
if f is not None:
f.close()
raise BTFailure(str_exc(e))
i = iter(lines)
try:
txt = 'BitTorrent UI state file, version '
version = i.next()
if not version.startswith(txt):
raise BTFailure(_("Bad UI state file"))
try:
version = int(version[len(txt):-1])
except:
raise BTFailure(_("Bad UI state file version"))
if version > 5:
raise BTFailure(_("Unsupported UI state file version (from "
"newer client version?)"))
if version < 3:
if i.next() != 'Running/queued torrents\n':
raise BTFailure(_("Invalid state file contents"))
else:
if i.next() != 'Running torrents\n' and version != 5:
raise BTFailure(_("Invalid state file contents"))
while version < 5:
line = i.next()
if line == 'Queued torrents\n':
break
t = decode_line(line)
if t is None:
continue
infohash, t = t
df = self._init_torrent(t, initialize=init_torrents)
while True:
line = i.next()
if (version < 5 and line == 'Known torrents\n') or (version == 5 and line == 'End\n'):
break
t = decode_line(line)
if t is None:
continue
infohash, t = t
if t.destination_path is None:
raise BTFailure(_("Invalid state file contents"))
df = self._init_torrent(t, initialize=init_torrents)
while version < 5:
line = i.next()
if line == 'End\n':
break
t = decode_line(line)
if t is None:
continue
infohash, t = t
df = self._init_torrent(t, initialize=init_torrents)
except StopIteration:
raise BTFailure(_("Invalid state file contents"))
| mit |
MoguCloud/shadowsocks | shadowsocks/tcprelay.py | 922 | 28870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
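# Hypothetical helper (not in the original source): a readable name for each
# stage, handy when logging the transitions described above.
_STAGE_NAMES = {
    STAGE_INIT: 'init',
    STAGE_ADDR: 'addr',
    STAGE_UDP_ASSOC: 'udp_assoc',
    STAGE_DNS: 'dns',
    STAGE_CONNECTING: 'connecting',
    STAGE_STREAM: 'stream',
    STAGE_DESTROYED: 'destroyed',
}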
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 32 * 1024
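# Illustrative check (not in the original): the wait status is a bitmask,
# so the combined state satisfies both single-direction tests.
assert WAIT_STATUS_READWRITING & WAIT_STATUS_READING
assert WAIT_STATUS_READWRITING & WAIT_STATUS_WRITING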
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
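    # Illustrative restatement (not in the original) of the partial-write
    # rule above: a non-blocking send() may accept only a prefix, so the
    # unsent tail is queued and the stream flips to WAIT_STATUS_WRITING
    # until the next POLL_OUT event flushes it:
    #     sent = sock.send(data)            # sent may be < len(data)
    #     if sent < len(data):
    #         pending.append(data[sent:])   # retry on the next writable event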
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
                raise Exception('cannot parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
# notice here may go into _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) != \
errno.EINPROGRESS:
# unexpected error: re-raise so the outer handler logs and destroys
raise
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
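# A minimal standalone sketch (assuming plain sockets and Linux semantics)
# of the non-blocking connect pattern used above:
#
#     import errno, socket
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     sock.setblocking(False)
#     try:
#         sock.connect(('127.0.0.1', 8388))
#     except socket.error as e:
#         if e.errno != errno.EINPROGRESS:
#             raise                      # a real failure
#     # success or failure is reported later as a POLL_OUT / POLL_ERR event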
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
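# reply VER=5, METHOD=0x00 ("no authentication required") per RFC 1928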
self._write_to_sock(b'\x05\x00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging once debugging is complete
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important: errors first, then reads, then writes; the stage is
# re-checked between dispatches because any step may destroy the handler
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy() won't make another destroy() call from inside itself
# 2. destroy() releases resources and marks the handler, so repeat calls
# become no-ops
# 3. destroy() won't raise any exceptions
# if any of these promises is broken, it indicates a bug has been
# introduced! most likely memory leaks, etc.
if self._stage == STAGE_DESTROYED:
# this shouldn't happen if the promises above hold
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list of all the handlers, sorted by last activity
# the timeouts list is trimmed once in a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
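# 23 is TCP_FASTOPEN on Linux; the value (5) is the maximum length of
# the queue of pending TFO requests (the qlen argument)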
server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need;
# we just need a queue sorted by last_activity, which is faster than heapq.
# in fact we can do O(1) insertion/removal, so we invent our own
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
# compact the queue once the swept (None'd) prefix exceeds
# TIMEOUTS_CLEAN_SIZE and covers more than half of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
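# Illustrative sketch, not part of the original module, of the tombstone
# queue used above (simplified: the real code keys on hash(handler)).
# Insertion appends, so the list stays sorted by last_activity; removal
# just writes None; the periodic sweep then compacts the dead prefix:
#
#     timeouts, index_of = [], {}        # index_of: handler -> position
#     def touch(handler):
#         pos = index_of.get(handler, -1)
#         if pos >= 0:
#             timeouts[pos] = None       # O(1) "remove"
#         index_of[handler] = len(timeouts)
#         timeouts.append(handler)       # O(1) insert, keeps the ordering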
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
| apache-2.0 |
dsfsdgsbngfggb/odoo | addons/event_sale/__openerp__.py | 306 | 2163 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Events Sales',
'version': '0.1',
'category': 'Tools',
'website' : 'https://www.odoo.com/page/events',
'description': """
Create registrations from sale orders.
=======================================
This module automates registration creation and connects it with your main
sales flow, thereby enabling the invoicing of registrations.
It defines a new kind of service product that can be associated with an
event category. When you encode a sale order for such a product, you can
choose an existing event of that category; confirming the sale order then
automatically creates a registration for that event.
""",
'author': 'OpenERP SA',
'depends': ['event', 'sale_crm'],
'data': [
'event_sale_view.xml',
'event_sale_data.xml',
'event_sale_report.xml',
'views/report_registrationbadge.xml',
'security/ir.model.access.csv',
],
'demo': ['event_demo.xml'],
'test': ['test/confirm.yml'],
'installable': True,
'auto_install': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GroestlCoin/electrum-grs | electrum_grs/gui/kivy/uix/screens.py | 1 | 17538 | from weakref import ref
from decimal import Decimal
import re
import datetime
import traceback, sys
from kivy.app import App
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.compat import string_types
from kivy.properties import (ObjectProperty, DictProperty, NumericProperty,
ListProperty, StringProperty)
from kivy.uix.recycleview import RecycleView
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.factory import Factory
from kivy.utils import platform
from electrum_grs.util import profiler, parse_URI, format_time, InvalidPassword, NotEnoughFunds, Fiat
from electrum_grs import bitcoin
from electrum_grs.transaction import TxOutput, Transaction, tx_from_str
from electrum_grs.util import send_exception_to_crash_reporter, parse_URI, InvalidBitcoinURI
from electrum_grs.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum_grs.plugin import run_hook
from electrum_grs.wallet import InternalAddressCorruption
from electrum_grs import simple_config
from .context_menu import ContextMenu
from electrum_grs.gui.kivy.i18n import _
class HistoryRecycleView(RecycleView):
pass
class CScreen(Factory.Screen):
__events__ = ('on_activate', 'on_deactivate', 'on_enter', 'on_leave')
action_view = ObjectProperty(None)
loaded = False
kvname = None
context_menu = None
menu_actions = []
app = App.get_running_app()
def _change_action_view(self):
app = App.get_running_app()
action_bar = app.root.manager.current_screen.ids.action_bar
_action_view = self.action_view
if (not _action_view) or _action_view.parent:
return
action_bar.clear_widgets()
action_bar.add_widget(_action_view)
def on_enter(self):
# FIXME: use a proper event instead of the screen's animation time
Clock.schedule_once(lambda dt: self.dispatch('on_activate'), .25)
pass
def update(self):
pass
@profiler
def load_screen(self):
self.screen = Builder.load_file('electrum_grs/gui/kivy/uix/ui_screens/' + self.kvname + '.kv')
self.add_widget(self.screen)
self.loaded = True
self.update()
setattr(self.app, self.kvname + '_screen', self)
def on_activate(self):
if self.kvname and not self.loaded:
self.load_screen()
#Clock.schedule_once(lambda dt: self._change_action_view())
def on_leave(self):
self.dispatch('on_deactivate')
def on_deactivate(self):
self.hide_menu()
def hide_menu(self):
if self.context_menu is not None:
self.remove_widget(self.context_menu)
self.context_menu = None
def show_menu(self, obj):
self.hide_menu()
self.context_menu = ContextMenu(obj, self.menu_actions)
self.add_widget(self.context_menu)
# note: this list needs to be kept in sync with another in qt
TX_ICONS = [
"unconfirmed",
"close",
"unconfirmed",
"close",
"clock1",
"clock2",
"clock3",
"clock4",
"clock5",
"confirmed",
]
class HistoryScreen(CScreen):
tab = ObjectProperty(None)
kvname = 'history'
cards = {}
def __init__(self, **kwargs):
self.ra_dialog = None
super(HistoryScreen, self).__init__(**kwargs)
self.menu_actions = [ ('Label', self.label_dialog), ('Details', self.show_tx)]
def show_tx(self, obj):
tx_hash = obj.tx_hash
tx = self.app.wallet.db.get_transaction(tx_hash)
if not tx:
return
self.app.tx_dialog(tx)
def label_dialog(self, obj):
from .dialogs.label_dialog import LabelDialog
key = obj.tx_hash
text = self.app.wallet.get_label(key)
def callback(text):
self.app.wallet.set_label(key, text)
self.update()
d = LabelDialog(_('Enter Transaction Label'), text, callback)
d.open()
def get_card(self, tx_hash, tx_mined_status, value, balance):
status, status_str = self.app.wallet.get_tx_status(tx_hash, tx_mined_status)
icon = "atlas://electrum_grs/gui/kivy/theming/light/" + TX_ICONS[status]
label = self.app.wallet.get_label(tx_hash) if tx_hash else _('Pruned transaction outputs')
ri = {}
ri['screen'] = self
ri['tx_hash'] = tx_hash
ri['icon'] = icon
ri['date'] = status_str
ri['message'] = label
ri['confirmations'] = tx_mined_status.conf
if value is not None:
ri['is_mine'] = value < 0
if value < 0: value = - value
ri['amount'] = self.app.format_amount_and_units(value)
if self.app.fiat_unit:
fx = self.app.fx
fiat_value = value / Decimal(bitcoin.COIN) * self.app.wallet.price_at_timestamp(tx_hash, fx.timestamp_rate)
fiat_value = Fiat(fiat_value, fx.ccy)
ri['quote_text'] = fiat_value.to_ui_string()
return ri
def update(self, see_all=False):
if self.app.wallet is None:
return
history = reversed(self.app.wallet.get_history())
history_card = self.screen.ids.history_container
history_card.data = [self.get_card(*item) for item in history]
class SendScreen(CScreen):
kvname = 'send'
payment_request = None
payment_request_queued = None
def set_URI(self, text):
if not self.app.wallet:
self.payment_request_queued = text
return
try:
uri = parse_URI(text, self.app.on_pr, loop=self.app.asyncio_loop)
except InvalidBitcoinURI as e:
self.app.show_info(_("Error parsing URI") + f":\n{e}")
return
amount = uri.get('amount')
self.screen.address = uri.get('address', '')
self.screen.message = uri.get('message', '')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.payment_request = None
self.screen.is_pr = False
def update(self):
if self.app.wallet and self.payment_request_queued:
self.set_URI(self.payment_request_queued)
self.payment_request_queued = None
def do_clear(self):
self.screen.amount = ''
self.screen.message = ''
self.screen.address = ''
self.payment_request = None
self.screen.is_pr = False
def set_request(self, pr):
self.screen.address = pr.get_requestor()
amount = pr.get_amount()
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
self.screen.message = pr.get_memo()
if pr.is_pr():
self.screen.is_pr = True
self.payment_request = pr
else:
self.screen.is_pr = False
self.payment_request = None
def do_save(self):
if not self.screen.address:
return
if self.screen.is_pr:
# it should already be saved
return
# save address as invoice
from electrum_grs.paymentrequest import make_unsigned_request, PaymentRequest
req = {'address':self.screen.address, 'memo':self.screen.message}
amount = self.app.get_amount(self.screen.amount) if self.screen.amount else 0
req['amount'] = amount
pr = make_unsigned_request(req).SerializeToString()
pr = PaymentRequest(pr)
self.app.wallet.invoices.add(pr)
self.app.show_info(_("Invoice saved"))
if pr.is_pr():
self.screen.is_pr = True
self.payment_request = pr
else:
self.screen.is_pr = False
self.payment_request = None
def do_paste(self):
data = self.app._clipboard.paste()
if not data:
self.app.show_info(_("Clipboard is empty"))
return
# try to decode as transaction
try:
raw_tx = tx_from_str(data)
tx = Transaction(raw_tx)
tx.deserialize()
except:
tx = None
if tx:
self.app.tx_dialog(tx)
return
# try to decode as URI/address
self.set_URI(data)
def do_send(self):
if self.screen.is_pr:
if self.payment_request.has_expired():
self.app.show_error(_('Payment request has expired'))
return
outputs = self.payment_request.get_outputs()
else:
address = str(self.screen.address)
if not address:
self.app.show_error(_('Recipient not specified.') + ' ' + _('Please scan a Groestlcoin address or a payment request'))
return
if not bitcoin.is_address(address):
self.app.show_error(_('Invalid Groestlcoin Address') + ':\n' + address)
return
try:
amount = self.app.get_amount(self.screen.amount)
except:
self.app.show_error(_('Invalid amount') + ':\n' + self.screen.amount)
return
outputs = [TxOutput(bitcoin.TYPE_ADDRESS, address, amount)]
message = self.screen.message
amount = sum(map(lambda x:x[2], outputs))
if self.app.electrum_config.get('use_rbf'):
from .dialogs.question import Question
d = Question(_('Should this transaction be replaceable?'), lambda b: self._do_send(amount, message, outputs, b))
d.open()
else:
self._do_send(amount, message, outputs, False)
def _do_send(self, amount, message, outputs, rbf):
# make unsigned transaction
config = self.app.electrum_config
coins = self.app.wallet.get_spendable_coins(None, config)
try:
tx = self.app.wallet.make_unsigned_transaction(coins, outputs, config, None)
except NotEnoughFunds:
self.app.show_error(_("Not enough funds"))
return
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.app.show_error(str(e))
return
if rbf:
tx.set_rbf(True)
fee = tx.get_fee()
msg = [
_("Amount to be sent") + ": " + self.app.format_amount_and_units(amount),
_("Mining fee") + ": " + self.app.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.app.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append(_("Additional fees") + ": " + self.app.format_amount_and_units(x_fee_amount))
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
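# FEERATE_WARNING_HIGH_FEE is a rate in sat/kbyte and estimated_size() is
# in bytes, so this compares the absolute fee against the warning rate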
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
msg.append(_("Enter your PIN code to proceed"))
self.app.protected('\n'.join(msg), self.send_tx, (tx, message))
def send_tx(self, tx, message, password):
if self.app.wallet.has_password() and password is None:
return
def on_success(tx):
if tx.is_complete():
self.app.broadcast(tx, self.payment_request)
self.app.wallet.set_label(tx.txid(), message)
else:
self.app.tx_dialog(tx)
def on_failure(error):
self.app.show_error(error)
if self.app.wallet.can_sign(tx):
self.app.show_info("Signing...")
self.app.sign_tx(tx, password, on_success, on_failure)
else:
self.app.tx_dialog(tx)
class ReceiveScreen(CScreen):
kvname = 'receive'
def update(self):
if not self.screen.address:
self.get_new_address()
else:
status = self.app.wallet.get_request_status(self.screen.address)
self.screen.status = _('Payment received') if status == PR_PAID else ''
def clear(self):
self.screen.address = ''
self.screen.amount = ''
self.screen.message = ''
def get_new_address(self) -> bool:
"""Sets the address field, and returns whether the set address
is unused."""
if not self.app.wallet:
return False
self.clear()
unused = True
try:
addr = self.app.wallet.get_unused_address()
if addr is None:
addr = self.app.wallet.get_receiving_address() or ''
unused = False
except InternalAddressCorruption as e:
addr = ''
self.app.show_error(str(e))
send_exception_to_crash_reporter(e)
self.screen.address = addr
return unused
def on_address(self, addr):
req = self.app.wallet.get_payment_request(addr, self.app.electrum_config)
self.screen.status = ''
if req:
self.screen.message = req.get('memo', '')
amount = req.get('amount')
self.screen.amount = self.app.format_amount_and_units(amount) if amount else ''
status = req.get('status', PR_UNKNOWN)
self.screen.status = _('Payment received') if status == PR_PAID else ''
Clock.schedule_once(lambda dt: self.update_qr())
def get_URI(self):
from electrum_grs.util import create_bip21_uri
amount = self.screen.amount
if amount:
a, u = self.screen.amount.split()
assert u == self.app.base_unit
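# convert display units to satoshis, e.g. (assuming mBTC => decimal_point 5)
# '1.5 mBTC' -> Decimal('1.5') * 10**5 = 150000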
amount = Decimal(a) * pow(10, self.app.decimal_point())
return create_bip21_uri(self.screen.address, amount, self.screen.message)
@profiler
def update_qr(self):
uri = self.get_URI()
qr = self.screen.ids.qr
qr.set_data(uri)
def do_share(self):
uri = self.get_URI()
self.app.do_share(uri, _("Share Groestlcoin Request"))
def do_copy(self):
uri = self.get_URI()
self.app._clipboard.copy(uri)
self.app.show_info(_('Request copied to clipboard'))
def save_request(self):
addr = self.screen.address
if not addr:
return False
amount = self.screen.amount
message = self.screen.message
amount = self.app.get_amount(amount) if amount else 0
req = self.app.wallet.make_payment_request(addr, amount, message, None)
try:
self.app.wallet.add_payment_request(req, self.app.electrum_config)
added_request = True
except Exception as e:
self.app.show_error(_('Error adding payment request') + ':\n' + str(e))
added_request = False
finally:
self.app.update_tab('requests')
return added_request
def on_amount_or_message(self):
Clock.schedule_once(lambda dt: self.update_qr())
def do_new(self):
is_unused = self.get_new_address()
if not is_unused:
self.app.show_info(_('Please use the existing requests first.'))
def do_save(self):
if self.save_request():
self.app.show_info(_('Request was saved.'))
class TabbedCarousel(Factory.TabbedPanel):
'''Custom TabbedPanel backed by a carousel, used in the Main Screen.
'''
carousel = ObjectProperty(None)
def animate_tab_to_center(self, value):
scrlv = self._tab_strip.parent
if not scrlv:
return
idx = self.tab_list.index(value)
n = len(self.tab_list)
if idx in [0, 1]:
scroll_x = 1
elif idx in [n-1, n-2]:
scroll_x = 0
else:
scroll_x = 1. * (n - idx - 1) / (n - 1)
mation = Factory.Animation(scroll_x=scroll_x, d=.25)
mation.cancel_all(scrlv)
mation.start(scrlv)
def on_current_tab(self, instance, value):
self.animate_tab_to_center(value)
def on_index(self, instance, value):
current_slide = instance.current_slide
if not hasattr(current_slide, 'tab'):
return
tab = current_slide.tab
ct = self.current_tab
try:
if ct.text != tab.text:
carousel = self.carousel
carousel.slides[ct.slide].dispatch('on_leave')
self.switch_to(tab)
carousel.slides[tab.slide].dispatch('on_enter')
except AttributeError:
current_slide.dispatch('on_enter')
def switch_to(self, header):
# we have to replace the functionality of the original switch_to
if not header:
return
if not hasattr(header, 'slide'):
header.content = self.carousel
super(TabbedCarousel, self).switch_to(header)
try:
tab = self.tab_list[-1]
except IndexError:
return
self._current_tab = tab
tab.state = 'down'
return
carousel = self.carousel
self.current_tab.state = "normal"
header.state = 'down'
self._current_tab = header
# set the carousel to load the appropriate slide
# saved in the slide attribute of the tab header
slide = carousel.slides[header.slide]
if carousel.current_slide != slide:
carousel.current_slide.dispatch('on_leave')
carousel.load_slide(slide)
slide.dispatch('on_enter')
def add_widget(self, widget, index=0):
if isinstance(widget, Factory.CScreen):
self.carousel.add_widget(widget)
return
super(TabbedCarousel, self).add_widget(widget, index=index)
| gpl-3.0 |
appliedx/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/split.py | 12 | 147531 | """
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET_DICT
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
***** 'source_version': the guid for the structure from which this block was copied/published
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
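Illustrative, abridged example of a course_index document (values invented;
the branch names follow ModuleStoreEnum.BranchName):
{
'_id': 'a-unique-id',
'org': 'edX', 'course': 'DemoX', 'run': '2015',
'edited_by': 42, 'edited_on': datetime(...),
'versions': {'draft-branch': ObjectId(...), 'published-branch': ObjectId(...)},
'search_targets': {'wiki_slug': 'edX.DemoX.2015'},
}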
"""
import copy
import datetime
import hashlib
import logging
from contracts import contract, new_contract
from importlib import import_module
from mongodb_proxy import autoretry_read
from path import Path as path
from pytz import UTC
from bson.objectid import ObjectId
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.errortracker import null_error_tracker
from opaque_keys.edx.locator import (
BlockUsageLocator, DefinitionLocator, CourseLocator, LibraryLocator, VersionTree, LocalId,
)
from xmodule.modulestore.exceptions import InsufficientSpecificationError, VersionConflictError, DuplicateItemError, \
DuplicateCourseError
from xmodule.modulestore import (
inheritance, ModuleStoreWriteBase, ModuleStoreEnum,
BulkOpsRecord, BulkOperationsMixin, SortedAssetList, BlockData
)
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, DuplicateKeyError
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.error_module import ErrorDescriptor
from collections import defaultdict
from types import NoneType
from xmodule.assetstore import AssetMetadata
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case:
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# A local fix won't work permanently b/c the xblock may cache a.foo...
#
# ==============================================================================
# When blacklists are this, all children should be excluded
EXCLUDE_ALL = '*'
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
def __init__(self):
super(SplitBulkWriteRecord, self).__init__()
self.initial_index = None
self.index = None
self.structures = {}
self.structures_in_db = set()
# dict(version_guid, dict(BlockKey, module))
self.modules = defaultdict(dict)
self.definitions = {}
self.definitions_in_db = set()
self.course_key = None
# TODO: This needs to track which branches have actually been modified/versioned,
# so that copying one branch to another doesn't update the original branch.
@property
def dirty_branches(self):
"""
Return a list of which branch version ids differ from what was stored
in the database at the beginning of this bulk operation.
"""
# If no course index has been set, then no branches have changed
if self.index is None:
return []
# If there was no index in the database to start with, then all branches
# are dirty by definition
if self.initial_index is None:
return self.index.get('versions', {}).keys()
# Return branches whose ids differ between self.index and self.initial_index
return [
branch
for branch, _id
in self.index.get('versions', {}).items()
if self.initial_index.get('versions', {}).get(branch) != _id
]
def structure_for_branch(self, branch):
return self.structures.get(self.index.get('versions', {}).get(branch))
def set_structure_for_branch(self, branch, structure):
if self.index is not None:
self.index.setdefault('versions', {})[branch] = structure['_id']
self.structures[structure['_id']] = structure
def __repr__(self):
return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
self._active_count,
self.initial_index,
self.index,
self.structures,
self.structures_in_db,
)
class SplitBulkWriteMixin(BulkOperationsMixin):
"""
This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.
In particular, it implements :meth:`_begin_bulk_operation` and
:meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.
Internally, this mixin records the set of all active bulk operations (keyed on the active course),
and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
If a bulk write operation isn't active, then the changes are immediately written to the underlying
mongo_connection.
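Illustrative usage (an example sketch, using names defined in this module):
with store.bulk_operations(course_key):
# reads and writes inside the block go through the SplitBulkWriteRecord
store.update_structure(course_key, new_structure)
# exiting the outermost block flushes dirty structures, definitions and
# the course index to the database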
"""
_bulk_ops_record_type = SplitBulkWriteRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.SplitBulkWriteRecord` for this course.
"""
# handle split specific things and defer to super otherwise
if course_key is None:
return self._bulk_ops_record_type()
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError(u'{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
# handle version_guid based retrieval locally
if course_key.org is None or course_key.course is None or course_key.run is None:
return self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
# handle ignore case and general use
return super(SplitBulkWriteMixin, self)._get_bulk_ops_record(
course_key.replace(branch=None, version_guid=None), ignore_case
)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError('{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
if course_key.org and course_key.course and course_key.run:
del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
else:
del self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
def _start_outermost_bulk_operation(self, bulk_write_record, course_key):
"""
Begin a bulk write operation on course_key.
"""
bulk_write_record.initial_index = self.db_connection.get_course_index(course_key)
# Ensure that any edits to the index don't pollute the initial_index
bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
bulk_write_record.course_key = course_key
def _end_outermost_bulk_operation(self, bulk_write_record, structure_key):
"""
End the active bulk write operation on structure_key (course or library key).
"""
dirty = False
# If the content is dirty, then update the database
for _id in bulk_write_record.structures.viewkeys() - bulk_write_record.structures_in_db:
dirty = True
try:
self.db_connection.insert_structure(bulk_write_record.structures[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this structure inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate structure %s", _id)
for _id in bulk_write_record.definitions.viewkeys() - bulk_write_record.definitions_in_db:
dirty = True
try:
self.db_connection.insert_definition(bulk_write_record.definitions[_id], bulk_write_record.course_key)
except DuplicateKeyError:
# We may not have looked up this definition inside this bulk operation, and thus
# didn't realize that it was already in the database. That's OK, the store is
# append only, so if it's already been written, we can just keep going.
log.debug("Attempted to insert duplicate definition %s", _id)
if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
dirty = True
if bulk_write_record.initial_index is None:
self.db_connection.insert_course_index(bulk_write_record.index, bulk_write_record.course_key)
else:
self.db_connection.update_course_index(
bulk_write_record.index,
from_index=bulk_write_record.initial_index,
course_context=bulk_write_record.course_key
)
return dirty
def get_course_index(self, course_key, ignore_case=False):
"""
Return the index for course_key.
"""
if self._is_in_bulk_operation(course_key, ignore_case):
return self._get_bulk_ops_record(course_key, ignore_case).index
else:
return self.db_connection.get_course_index(course_key, ignore_case)
def delete_course_index(self, course_key):
"""
Delete the course index from cache and the db
"""
if self._is_in_bulk_operation(course_key, False):
self._clear_bulk_ops_record(course_key)
self.db_connection.delete_course_index(course_key)
def insert_course_index(self, course_key, index_entry):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = index_entry
else:
self.db_connection.insert_course_index(index_entry, course_key)
def update_course_index(self, course_key, updated_index_entry):
"""
Change the given course's index entry.
Note, this operation can be dangerous and break running courses.
Does not return anything useful.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = updated_index_entry
else:
self.db_connection.update_course_index(updated_index_entry, course_key)
def get_structure(self, course_key, version_guid):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
structure = bulk_write_record.structures.get(version_guid)
# The structure hasn't been loaded from the db yet, so load it
if structure is None:
structure = self.db_connection.get_structure(version_guid, course_key)
bulk_write_record.structures[version_guid] = structure
if structure is not None:
bulk_write_record.structures_in_db.add(version_guid)
return structure
else:
# cast string to ObjectId if necessary
version_guid = course_key.as_object_id(version_guid)
return self.db_connection.get_structure(version_guid, course_key)
def update_structure(self, course_key, structure):
"""
Update a course structure, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
self._clear_cache(structure['_id'])
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.structures[structure['_id']] = structure
else:
self.db_connection.insert_structure(structure, course_key)
def get_cached_block(self, course_key, version_guid, block_id):
"""
If there's an active bulk_operation, see if it's cached this module and just return it
Don't do any extra work to get the ones which are not cached. Make the caller do the work & cache them.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
return bulk_write_record.modules[version_guid].get(block_id, None)
else:
return None
def cache_block(self, course_key, version_guid, block_key, block):
"""
The counterpart to :method `get_cached_block` which caches a block.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.modules[version_guid][block_key] = block
def decache_block(self, course_key, version_guid, block_key):
"""
Write operations which don't write from blocks must remove the target blocks from the cache.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
try:
del bulk_write_record.modules[version_guid][block_key]
except KeyError:
pass
def get_definition(self, course_key, definition_guid):
"""
Retrieve a single definition by id, respecting the active bulk operation
on course_key.
Args:
course_key (:class:`.CourseKey`): The course being operated on
definition_guid (str or ObjectID): The id of the definition to load
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
definition = bulk_write_record.definitions.get(definition_guid)
# The definition hasn't been loaded from the db yet, so load it
if definition is None:
definition = self.db_connection.get_definition(definition_guid, course_key)
bulk_write_record.definitions[definition_guid] = definition
if definition is not None:
bulk_write_record.definitions_in_db.add(definition_guid)
return definition
else:
# cast string to ObjectId if necessary
definition_guid = course_key.as_object_id(definition_guid)
return self.db_connection.get_definition(definition_guid, course_key)
def get_definitions(self, course_key, ids):
"""
Return all definitions that specified in ``ids``.
If a definition with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
course_key (:class:`.CourseKey`): The course that these definitions are being loaded
for (to respect bulk operations).
ids (list): A list of definition ids
"""
definitions = []
ids = set(ids)
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
# Only query for the definitions that aren't already cached.
for definition in bulk_write_record.definitions.values():
definition_id = definition.get('_id')
if definition_id in ids:
ids.remove(definition_id)
definitions.append(definition)
if len(ids):
# Query the db for the definitions.
defs_from_db = self.db_connection.get_definitions(list(ids), course_key)
# Add the retrieved definitions to the cache.
bulk_write_record.definitions.update({d.get('_id'): d for d in defs_from_db})
definitions.extend(defs_from_db)
return definitions
def update_definition(self, course_key, definition):
"""
Update a definition, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.definitions[definition['_id']] = definition
else:
self.db_connection.insert_definition(definition, course_key)
def version_structure(self, course_key, structure, user_id):
"""
Copy the structure and update the history info (edited_by, edited_on, previous_version)
"""
if course_key.branch is None:
raise InsufficientSpecificationError(course_key)
bulk_write_record = self._get_bulk_ops_record(course_key)
# If we have an active bulk write, and it's already been edited, then just use that structure
if bulk_write_record.active and course_key.branch in bulk_write_record.dirty_branches:
return bulk_write_record.structure_for_branch(course_key.branch)
# Otherwise, make a new structure
new_structure = copy.deepcopy(structure)
new_structure['_id'] = ObjectId()
new_structure['previous_version'] = structure['_id']
new_structure['edited_by'] = user_id
new_structure['edited_on'] = datetime.datetime.now(UTC)
new_structure['schema_version'] = self.SCHEMA_VERSION
# If we're in a bulk write, update the structure used there, and mark it as dirty
if bulk_write_record.active:
bulk_write_record.set_structure_for_branch(course_key.branch, new_structure)
return new_structure
def version_block(self, block_data, user_id, update_version):
"""
Update the block_data object based on it having been edited.
"""
if block_data.edit_info.update_version == update_version:
return
original_usage = block_data.edit_info.original_usage
original_usage_version = block_data.edit_info.original_usage_version
block_data.edit_info.edited_on = datetime.datetime.now(UTC)
block_data.edit_info.edited_by = user_id
block_data.edit_info.previous_version = block_data.edit_info.update_version
block_data.edit_info.update_version = update_version
if original_usage:
block_data.edit_info.original_usage = original_usage
block_data.edit_info.original_usage_version = original_usage_version
def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None):
"""
Find the course_indexes which have the specified branch and search_targets. An optional org_target
can be specified to apply an ORG filter to return only the courses that are part of
that ORG.
Returns:
a Cursor if there are no changes in flight or a list if some have changed in current bulk op
"""
indexes = self.db_connection.find_matching_course_indexes(branch, search_targets, org_target)
def _replace_or_append_index(altered_index):
"""
If the index is already in indexes, replace it. Otherwise, append it.
"""
for index, existing in enumerate(indexes):
if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):
indexes[index] = altered_index
return
indexes.append(altered_index)
# add any being built but not yet persisted or in the process of being updated
for _, record in self._active_records:
if branch and branch not in record.index.get('versions', {}):
continue
if search_targets:
if any(
'search_targets' not in record.index or
field not in record.index['search_targets'] or
record.index['search_targets'][field] != value
for field, value in search_targets.iteritems()
):
continue
# if we've specified a filter by org,
# make sure we've honored that filter when
# integrating in-transit records
if org_target:
if record.index['org'] != org_target:
continue
if not hasattr(indexes, 'append'): # Just in time conversion to list from cursor
indexes = list(indexes)
_replace_or_append_index(record.index)
return indexes
def find_structures_by_id(self, ids):
"""
Return all structures that specified in ``ids``.
If a structure with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
ids (list): A list of structure ids
"""
structures = []
ids = set(ids)
for _, record in self._active_records:
for structure in record.structures.values():
structure_id = structure.get('_id')
if structure_id in ids:
ids.remove(structure_id)
structures.append(structure)
structures.extend(self.db_connection.find_structures_by_id(list(ids)))
return structures
def find_structures_derived_from(self, ids):
"""
Return all structures that were immediately derived from a structure listed in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if structure.get('previous_version') in ids:
structures.append(structure)
if '_id' in structure:
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_structures_derived_from(ids)
if structure['_id'] not in found_structure_ids
)
return structures
def find_ancestor_structures(self, original_version, block_key):
"""
Find all structures that originated from ``original_version`` that contain ``block_key``.
Any structure found in the cache will be preferred to a structure with the same id from the database.
Arguments:
original_version (str or ObjectID): The id of a structure
block_key (BlockKey): The id of the block in question
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if 'original_version' not in structure:
continue
if structure['original_version'] != original_version:
continue
if block_key not in structure.get('blocks', {}):
continue
if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
continue
structures.append(structure)
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
if structure['_id'] not in found_structure_ids
)
return structures
class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
"""
A Mongodb backed ModuleStore supporting versions, inheritance,
and sharing.
"""
SCHEMA_VERSION = 1
# a list of field names to store in course index search_targets. Note, this will
# only record one value per key. If branches disagree, the last one set wins.
# It won't recompute the value on operations such as update_course_index (e.g., to revert to a prev
# version) but those functions will have an optional arg for setting these.
SEARCH_TARGET_DICT = ['wiki_slug']
def __init__(self, contentstore, doc_store_config, fs_root, render_template,
default_class=None,
error_tracker=null_error_tracker,
i18n_service=None, fs_service=None, user_service=None,
services=None, signal_handler=None, **kwargs):
"""
:param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
"""
super(SplitMongoModuleStore, self).__init__(contentstore, **kwargs)
self.db_connection = MongoConnection(**doc_store_config)
self.db = self.db_connection.database
if default_class is not None:
module_path, __, class_name = default_class.rpartition('.')
class_ = getattr(import_module(module_path), class_name)
self.default_class = class_
else:
self.default_class = None
self.fs_root = path(fs_root)
self.error_tracker = error_tracker
self.render_template = render_template
self.services = services or {}
if i18n_service is not None:
self.services["i18n"] = i18n_service
if fs_service is not None:
self.services["fs"] = fs_service
if user_service is not None:
self.services["user"] = user_service
if self.request_cache is not None:
self.services["request_cache"] = self.request_cache
self.signal_handler = signal_handler
def close_connections(self):
"""
Closes any open connections to the underlying databases
"""
self.db.connection.close()
def mongo_wire_version(self):
"""
Returns the wire version for mongo. Only used to unit tests which instrument the connection.
"""
return self.db.connection.max_wire_version
def _drop_database(self):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
"""
# drop the assets
super(SplitMongoModuleStore, self)._drop_database()
connection = self.db.connection
connection.drop_database(self.db.name)
connection.close()
def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):
"""
Handles caching of items once inheritance and any other one time
per course per fetch operations are done.
Arguments:
system: a CachingDescriptorSystem
base_block_ids: list of BlockIds to fetch
course_key: the destination course providing the context
depth: how deep below these to prefetch
lazy: whether to load definitions now or later
"""
with self.bulk_operations(course_key, emit_signals=False):
new_module_data = {}
for block_id in base_block_ids:
new_module_data = self.descendants(
system.course_entry.structure['blocks'],
block_id,
depth,
new_module_data
)
# This method supports lazy loading, where the descendant definitions
# aren't loaded until they're actually needed.
if not lazy:
# Non-lazy loading: Load all descendants by id.
descendent_definitions = self.get_definitions(
course_key,
[
block.definition
for block in new_module_data.itervalues()
]
)
# Turn definitions into a map.
definitions = {definition['_id']: definition
for definition in descendent_definitions}
for block in new_module_data.itervalues():
if block.definition in definitions:
definition = definitions[block.definition]
# convert_fields gets done later in the runtime's xblock_from_json
block.fields.update(definition.get('fields'))
block.definition_loaded = True
system.module_data.update(new_module_data)
return system.module_data
@contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
def _load_items(self, course_entry, block_keys, depth=0, **kwargs):
"""
Load & cache the given blocks from the course. May return the blocks in any order.
Load the definitions into each block if lazy is in kwargs and is False;
otherwise, do not load the definitions - they'll be loaded later when needed.
"""
runtime = self._get_cache(course_entry.structure['_id'])
if runtime is None:
lazy = kwargs.pop('lazy', True)
runtime = self.create_runtime(course_entry, lazy)
self._add_cache(course_entry.structure['_id'], runtime)
self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
"""
Find the descriptor cache for this course if it exists
:param course_version_guid:
"""
if self.request_cache is None:
return None
return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)
def _add_cache(self, course_version_guid, system):
"""
Save this cache for subsequent access
:param course_version_guid:
:param system:
"""
if self.request_cache is not None:
self.request_cache.data.setdefault('course_cache', {})[course_version_guid] = system
return system
def _clear_cache(self, course_version_guid=None):
"""
Should only be used by testing or something which implements transactional boundary semantics.
:param course_version_guid: if provided, clear only this entry
"""
if self.request_cache is None:
return
if course_version_guid:
try:
del self.request_cache.data.setdefault('course_cache', {})[course_version_guid]
except KeyError:
pass
else:
self.request_cache.data['course_cache'] = {}
def _lookup_course(self, course_key, head_validation=True):
"""
Decode the locator into the right series of db access. Does not
return the CourseDescriptor! It returns the actual db json from
structures.
Semantics: if course id and branch given, then it will get that branch. If
also give a version_guid, it will see if the current head of that branch == that guid. If not
it raises VersionConflictError (the version now differs from what it was when you got your
reference) unless you specify head_validation = False, in which case it will return the
revision (if specified) by the course_key.
:param course_key: any subclass of CourseLocator
"""
if not course_key.version_guid:
head_validation = True
if head_validation and course_key.org and course_key.course and course_key.run:
if course_key.branch is None:
raise InsufficientSpecificationError(course_key)
# use the course id
index = self.get_course_index(course_key)
if index is None:
raise ItemNotFoundError(course_key)
if course_key.branch not in index['versions']:
raise ItemNotFoundError(course_key)
version_guid = index['versions'][course_key.branch]
if course_key.version_guid is not None and version_guid != course_key.version_guid:
# This may be a bit too touchy but it's hard to infer intent
raise VersionConflictError(course_key, version_guid)
elif course_key.version_guid is None:
raise InsufficientSpecificationError(course_key)
else:
# TODO should this raise an exception if branch was provided?
version_guid = course_key.version_guid
entry = self.get_structure(course_key, version_guid)
if entry is None:
raise ItemNotFoundError('Structure: {}'.format(version_guid))
# b/c more than one course can use same structure, the 'org', 'course',
# 'run', and 'branch' are not intrinsic to structure
# and the one assoc'd w/ it by another fetch may not be the one relevant to this fetch; so,
# add it in the envelope for the structure.
return CourseEnvelope(course_key.replace(version_guid=version_guid), entry)
def _get_structures_for_branch(self, branch, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
"""
# if an 'org' parameter is passed in, only return
# the courses that match that org
matching_indexes = self.find_matching_course_indexes(
branch,
search_targets=None,
org_target=kwargs.get('org')
)
# collect ids and then query for those
version_guids = []
id_version_map = {}
for course_index in matching_indexes:
version_guid = course_index['versions'][branch]
version_guids.append(version_guid)
id_version_map[version_guid] = course_index
if not version_guids:
return
for entry in self.find_structures_by_id(version_guids):
yield entry, id_version_map[entry['_id']]
def _get_structures_for_branch_and_locator(self, branch, locator_factory, **kwargs):
"""
Internal method for fetching lists of courses, libraries, etc.
:param str branch: Branch to fetch structures from
:param type locator_factory: Factory to create locator from structure info and branch
"""
result = []
for entry, structure_info in self._get_structures_for_branch(branch, **kwargs):
locator = locator_factory(structure_info, branch)
envelope = CourseEnvelope(locator, entry)
root = entry['root']
structures_list = self._load_items(envelope, [root], depth=0, **kwargs)
if not isinstance(structures_list[0], ErrorDescriptor):
result.append(structures_list[0])
return result
def _create_course_locator(self, course_info, branch):
"""
Creates course locator using course_info dict and branch
"""
return CourseLocator(
org=course_info['org'],
course=course_info['course'],
run=course_info['run'],
branch=branch,
)
def _create_library_locator(self, library_info, branch):
"""
Creates library locator using library_info dict and branch
"""
return LibraryLocator(
org=library_info['org'],
library=library_info['course'],
branch=branch,
)
@autoretry_read()
def get_courses(self, branch, **kwargs):
"""
Returns a list of course descriptors matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
# get the blocks for each course index (s/b the root)
return self._get_structures_for_branch_and_locator(branch, self._create_course_locator, **kwargs)
def get_libraries(self, branch="library", **kwargs):
"""
Returns a list of "library" root blocks matching any given qualifiers.
TODO: better way of identifying library index entry vs. course index entry.
"""
return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)
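# Usage sketch (assumes a configured split modulestore `store`):
#
#     draft_courses = store.get_courses(ModuleStoreEnum.BranchName.draft)
#     org_courses = store.get_courses(ModuleStoreEnum.BranchName.draft, org='edX')
#     libraries = store.get_libraries()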
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run)
def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):
"""
Gets Course or Library by locator
"""
structure_entry = self._lookup_course(structure_id, head_validation=head_validation)
root = structure_entry.structure['root']
result = self._load_items(structure_entry, [root], depth, **kwargs)
return result[0]
def get_course(self, course_id, depth=0, **kwargs):
"""
Gets the course descriptor for the course identified by the locator
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_id)
return self._get_structure(course_id, depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
"""
Gets the 'library' root block for the library identified by the locator
"""
if not isinstance(library_id, LibraryLocator):
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(library_id)
return self._get_structure(library_id, depth, head_validation=head_validation, **kwargs)
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
Does this course exist in this modulestore. This method does not verify that the branch &/or
version in the course_id exists. Use get_course_index_info to check that.
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
course_index = self.get_course_index(course_id, ignore_case)
return CourseLocator(course_index['org'], course_index['course'], course_index['run'], course_id.branch) if course_index else None
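# Usage sketch: with ignore_case=True, the returned id carries the canonical
# casing from the index, which may differ from the id passed in (values hypothetical):
#
#     found = store.has_course(CourseLocator('edx', 'demox', '2014_T1'), ignore_case=True)
#     # found is e.g. CourseLocator('edX', 'DemoX', '2014_T1'), or None if absent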
def has_library(self, library_id, ignore_case=False, **kwargs):
"""
Does this library exist in this modulestore? This method does not verify that the branch &/or
version in the library_id exists.
Returns the library_id of the library if it was found, else None.
"""
if not isinstance(library_id, LibraryLocator):
return None
index = self.get_course_index(library_id, ignore_case)
if index:
return LibraryLocator(index['org'], index['course'], library_id.branch)
return None
def has_item(self, usage_key):
"""
Returns True if usage_key exists in its course. Returns False if
the course or the block w/in the course do not exist for the given version.
raises InsufficientSpecificationError if the usage_key does not identify a block
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
if usage_key.block_id is None:
raise InsufficientSpecificationError(usage_key)
try:
course_structure = self._lookup_course(usage_key.course_key).structure
except ItemNotFoundError:
# this error only occurs if the course does not exist
return False
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(usage_key)) is not None
@contract(returns='XBlock')
def get_item(self, usage_key, depth=0, **kwargs):
"""
depth (int): An argument that some module stores may use to prefetch
descendants of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all
descendants.
raises InsufficientSpecificationError or ItemNotFoundError
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_key)
with self.bulk_operations(usage_key.course_key):
course = self._lookup_course(usage_key.course_key)
items = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, **kwargs)
if len(items) == 0:
raise ItemNotFoundError(usage_key)
elif len(items) > 1:
log.debug("Found more than one item for '{}'".format(usage_key))
return items[0]
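# Usage sketch (hypothetical ids): depth controls how many levels of children
# get prefetched into the cache along with the requested block:
#
#     usage_key = course_key.make_usage_key('problem', 'intro_quiz')
#     block = store.get_item(usage_key, depth=1)  # also caches immediate children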
def get_items(self, course_locator, settings=None, content=None, qualifiers=None, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_locator
NOTE: don't use this to look for courses as the course_locator is required. Use get_courses.
Args:
course_locator (CourseLocator): the course identifier
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as qualifiers below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as qualifiers below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. if the target field is a list,
then it searches for the given value in the list not list equivalence.
For substring matching pass a regex object.
For split,
you can search by ``edited_by``, ``edited_on`` providing a function testing limits.
"""
if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
return []
course = self._lookup_course(course_locator)
items = []
qualifiers = qualifiers.copy() if qualifiers else {} # copy the qualifiers (destructively manipulated here)
def _block_matches_all(block_data):
"""
Check that the block matches all the criteria
"""
# do the checks which don't require loading any additional data
if ( # pylint: disable=bad-continuation
self._block_matches(block_data, qualifiers) and
self._block_matches(block_data.fields, settings)
):
if content:
definition_block = self.get_definition(course_locator, block_data.definition)
return self._block_matches(definition_block['fields'], content)
else:
return True
if settings is None:
settings = {}
if 'name' in qualifiers:
# odd case where we don't search, just confirm
block_name = qualifiers.pop('name')
block_ids = []
for block_id, block in course.structure['blocks'].iteritems():
if block_name == block_id.id and _block_matches_all(block):
block_ids.append(block_id)
return self._load_items(course, block_ids, **kwargs)
if 'category' in qualifiers:
qualifiers['block_type'] = qualifiers.pop('category')
# don't expect caller to know that children are in fields
if 'children' in qualifiers:
settings['children'] = qualifiers.pop('children')
for block_id, value in course.structure['blocks'].iteritems():
if _block_matches_all(value):
items.append(block_id)
if len(items) > 0:
return self._load_items(course, items, depth=0, **kwargs)
else:
return []
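# Usage sketches for the filters above (all field names hypothetical):
#
#     import re
#     problems = store.get_items(course_key, qualifiers={'category': 'problem'})
#     graded = store.get_items(course_key, settings={'graded': True})
#     hw = store.get_items(course_key, settings={'format': re.compile('Homework')})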
def get_parent_location(self, locator, **kwargs):
"""
Return the location (Locators w/ block_ids) for the parent of this location in this
course. Could use get_items(location, {'children': block_id}) but this is slightly faster.
NOTE: the locator must contain the block_id, and this code does not actually ensure block_id exists
:param locator: BlockUsageLocator restricting search scope
"""
if not isinstance(locator, BlockUsageLocator) or locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(locator)
course = self._lookup_course(locator.course_key)
parent_ids = self._get_parents_from_structure(BlockKey.from_usage_key(locator), course.structure)
if len(parent_ids) == 0:
return None
# find alphabetically least
parent_ids.sort(key=lambda parent: (parent.type, parent.id))
return BlockUsageLocator.make_relative(
locator,
block_type=parent_ids[0].type,
block_id=parent_ids[0].id,
)
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the orphans in the course.
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
course = self._lookup_course(course_key)
items = set(course.structure['blocks'].keys())
items.remove(course.structure['root'])
blocks = course.structure['blocks']
for block_id, block_data in blocks.iteritems():
items.difference_update(BlockKey(*child) for child in block_data.fields.get('children', []))
if block_data.block_type in detached_categories:
items.discard(block_id)
return [
course_key.make_usage_key(block_type=block_id.type, block_id=block_id.id)
for block_id in items
]
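# Usage sketch: returns usage keys for blocks unreachable from the course root,
# excluding detached categories such as static tabs:
#
#     orphan_keys = store.get_orphans(course_key)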
def get_course_index_info(self, course_key):
"""
The index records the initial creation of the indexed course and tracks the current version
heads. This function is primarily for test verification but may serve some
more general purpose.
:param course_key: must have a org, course, and run set
:return {'org': string,
versions: {'draft': the head draft version id,
'published': the head published version id if any,
},
'edited_by': who created the course originally (named edited for consistency),
'edited_on': when the course was originally created
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
if not (course_key.course and course_key.run and course_key.org):
return None
index = self.get_course_index(course_key)
return index
# TODO figure out a way to make this info accessible from the course descriptor
def get_course_history_info(self, course_key):
"""
Because xblocks don't give a means to separate the course structure's meta information from
the course xblock's, this method will get that info for the structure as a whole.
:param course_key:
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
course = self._lookup_course(course_key).structure
return {
'original_version': course['original_version'],
'previous_version': course['previous_version'],
'edited_by': course['edited_by'],
'edited_on': course['edited_on']
}
def get_definition_history_info(self, definition_locator, course_context=None):
"""
Because xblocks don't give a means to separate the definition's meta information from
the usage xblock's, this method will get that info for the definition
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(definition_locator, DefinitionLocator) or definition_locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(definition_locator)
definition = self.db_connection.get_definition(definition_locator.definition_id, course_context)
if definition is None:
return None
return definition['edit_info']
def get_course_successors(self, course_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this course. Return as a VersionTree
Mostly makes sense when course_locator uses a version_guid, but because it finds all relevant
next versions, these do include those created for other courses.
:param course_locator:
"""
if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_locator)
if version_history_depth < 1:
return None
if course_locator.version_guid is None:
course = self._lookup_course(course_locator)
version_guid = course.structure['_id']
course_locator = course_locator.for_version(version_guid)
else:
version_guid = course_locator.version_guid
# TODO if depth is significant, it may make sense to get all that have the same original_version
# and reconstruct the subtree from version_guid
next_entries = self.find_structures_derived_from([version_guid])
# a cursor can only be scanned once, so materialize the results
next_versions = [struct for struct in next_entries]
result = {version_guid: [CourseLocator(version_guid=struct['_id']) for struct in next_versions]}
depth = 1
while depth < version_history_depth and len(next_versions) > 0:
depth += 1
next_entries = self.find_structures_derived_from([struct['_id'] for struct in next_versions])
next_versions = [struct for struct in next_entries]
for course_structure in next_versions:
result.setdefault(course_structure['previous_version'], []).append(
CourseLocator(version_guid=course_structure['_id']))
return VersionTree(course_locator, result)
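# Sketch of consuming the result (VersionTree attribute names per its use
# elsewhere in the modulestore; treat the exact shape as an assumption):
#
#     tree = store.get_course_successors(course_locator, version_history_depth=2)
#     root_guid = tree.locator.version_guid  # the starting version
#     for child in tree.children:            # each child is itself a VersionTree
#         print child.locator.version_guid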
def get_block_generations(self, block_locator):
"""
Find the history of this block. Return as a VersionTree of each place the block changed (except
deletion).
The block's history tracks its explicit changes but not the changes in its children starting
from when the block was created.
"""
# course_agnostic means we don't care if the head and version don't align, trust the version
course_struct = self._lookup_course(block_locator.course_key.course_agnostic()).structure
block_key = BlockKey.from_usage_key(block_locator)
all_versions_with_block = self.find_ancestor_structures(
original_version=course_struct['original_version'],
block_key=block_key
)
# find (all) root versions and build map {previous: {successors}..}
possible_roots = []
result = {}
for version in all_versions_with_block:
block_payload = self._get_block_from_structure(version, block_key)
if version['_id'] == block_payload.edit_info.update_version:
if block_payload.edit_info.previous_version is None:
# this was when this block was created
possible_roots.append(block_payload.edit_info.update_version)
else: # map previous to {update..}
result.setdefault(block_payload.edit_info.previous_version, set()).add(
block_payload.edit_info.update_version)
# more than one possible_root means usage was added and deleted > 1x.
if len(possible_roots) > 1:
# find the history segment including block_locator's version
element_to_find = self._get_block_from_structure(course_struct, block_key).edit_info.update_version
if element_to_find in possible_roots:
possible_roots = [element_to_find]
for possibility in possible_roots:
if self._find_local_root(element_to_find, possibility, result):
possible_roots = [possibility]
break
elif len(possible_roots) == 0:
return None
# convert the results value sets to locators
for k, versions in result.iteritems():
result[k] = [
block_locator.for_version(version)
for version in versions
]
return VersionTree(
block_locator.for_version(possible_roots[0]),
result
)
def get_definition_successors(self, definition_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this definition. Return as a VersionTree
"""
# TODO implement
pass
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator and version from
which the copy was inherited.
Returns usage_key, version if the data is available, otherwise returns (None, None)
"""
blocks = self._lookup_course(usage_key.course_key).structure['blocks']
block = blocks.get(BlockKey.from_usage_key(usage_key))
if block and block.edit_info.original_usage is not None:
usage_key = BlockUsageLocator.from_string(block.edit_info.original_usage)
return usage_key, block.edit_info.original_usage_version
return None, None
def create_definition_from_data(self, course_key, new_def_data, category, user_id):
"""
Pull the definition fields out of the descriptor and save to the db as a new definition
w/o a predecessor and return the new id.
:param user_id: request.user object
"""
new_def_data = self._serialize_fields(category, new_def_data)
new_id = ObjectId()
document = {
'_id': new_id,
"block_type": category,
"fields": new_def_data,
"edit_info": {
"edited_by": user_id,
"edited_on": datetime.datetime.now(UTC),
"previous_version": None,
"original_version": new_id,
},
'schema_version': self.SCHEMA_VERSION,
}
self.update_definition(course_key, document)
definition_locator = DefinitionLocator(category, new_id)
return definition_locator
def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
"""
See if new_def_data differs from the persisted version. If so, update
the persisted version and return the new id.
:param user_id: request.user
"""
def needs_saved():
for key, value in new_def_data.iteritems():
if key not in old_definition['fields'] or value != old_definition['fields'][key]:
return True
for key, value in old_definition.get('fields', {}).iteritems():
if key not in new_def_data:
return True
return False
# if this looks in the cache rather than doing fresh fetches, it will probably not detect
# an actual change b/c the descriptor and cache probably point to the same objects
old_definition = self.get_definition(course_key, definition_locator.definition_id)
if old_definition is None:
raise ItemNotFoundError(definition_locator)
new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
if needs_saved():
definition_locator = self._update_definition_from_data(course_key, old_definition, new_def_data, user_id)
return definition_locator, True
else:
return definition_locator, False
def _update_definition_from_data(self, course_key, old_definition, new_def_data, user_id):
"""
Update the persisted version of the given definition and return the
locator of the new definition. Does not check if data differs from the
previous version.
"""
new_definition = copy.deepcopy(old_definition)
new_definition['_id'] = ObjectId()
new_definition['fields'] = new_def_data
new_definition['edit_info']['edited_by'] = user_id
new_definition['edit_info']['edited_on'] = datetime.datetime.now(UTC)
# previous version id
new_definition['edit_info']['previous_version'] = old_definition['_id']
new_definition['schema_version'] = self.SCHEMA_VERSION
self.update_definition(course_key, new_definition)
return DefinitionLocator(new_definition['block_type'], new_definition['_id'])
def _generate_block_key(self, course_blocks, category):
"""
Generate a somewhat readable block id unique w/in this course using the category
:param course_blocks: the current list of blocks.
:param category:
"""
# NOTE: a potential bug is that a block is deleted and another created which gets the old
# block's id. a possible fix is to cache the last serial in a dict in the structure
# {category: last_serial...}
# A potential confusion is if the name incorporates the parent's name, then if the child
# moves, its id won't change and will be confusing
serial = 1
while True:
potential_key = BlockKey(category, "{}{}".format(category, serial))
if potential_key not in course_blocks:
return potential_key
serial += 1
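# Illustrative result: for category 'problem' in a structure that already has
# BlockKey('problem', 'problem1'), this yields BlockKey('problem', 'problem2');
# an empty structure yields BlockKey('problem', 'problem1').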
@contract(returns='XBlock')
def create_item(
self, user_id, course_key, block_type, block_id=None,
definition_locator=None, fields=None,
force=False, **kwargs
):
"""
Add a descriptor to persistence as an element
of the course. Return the resulting post saved version with populated locators.
:param course_key: If it has a version_guid and a course org + course + run + branch, this
method ensures that the version is the head of the given course branch before making the change.
raises InsufficientSpecificationError if there is no course locator.
raises VersionConflictError if the version_guid of the course_or_parent_locator is not the head
of its course unless force is True.
:param force: fork the structure and don't update the course draftVersion if the above version conflict occurs
:param continue_revision: for multistep transactions, continue revising the given version rather than creating
a new version. Setting force to True conflicts with setting this to True and will cause a VersionConflictError
:param definition_locator: should either be None to indicate this is a brand new definition or
a pointer to the existing definition to which this block should point or from which this was derived
or a LocalId to indicate that it's new.
If fields does not contain any Scope.content, then definition_locator must have a value meaning that this
block points
to the existing definition. If fields contains Scope.content and definition_locator is not None, then
the Scope.content fields are assumed to be a new payload for definition_locator.
:param block_id: if provided, must not already exist in the structure. Provides the block id for the
new item in this structure. Otherwise, one is computed using the category appended w/ a few digits.
This method creates a new version of the course structure unless the course has a bulk_write operation
active.
It creates and inserts the new block, and makes the block point
to the definition, which may be new, a new version of an existing definition, or an existing one.
Rules for course locator:
* If the course locator specifies an org, course, and run and either it doesn't
specify version_guid or the one it specifies == the current head of the branch,
it progresses the course to point
to the new head and sets the active version to point to the new head
* If the locator has an org, course, and run but its version_guid != current head, it raises VersionConflictError.
NOTE: using a version_guid will end up creating a new version of the course. Your new item won't be in
the course id'd by version_guid but instead in one w/ a new version_guid. Ensure in this case that you get
the new version_guid from the locator in the returned object!
"""
with self.bulk_operations(course_key):
# split handles all the fields in one dict not separated by scope
fields = fields or {}
fields.update(kwargs.pop('metadata', {}) or {})
definition_data = kwargs.pop('definition_data', {})
if definition_data:
if not isinstance(definition_data, dict):
definition_data = {'data': definition_data} # backward compatibility to mongo's hack
fields.update(definition_data)
# find course_index entry if applicable and structures entry
index_entry = self._get_index_if_valid(course_key, force)
structure = self._lookup_course(course_key).structure
partitioned_fields = self.partition_fields_by_scope(block_type, fields)
new_def_data = partitioned_fields.get(Scope.content, {})
# persist the definition if persisted != passed
if definition_locator is None or isinstance(definition_locator.definition_id, LocalId):
definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
elif new_def_data:
definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)
# copy the structure and modify the new one
new_structure = self.version_structure(course_key, structure, user_id)
new_id = new_structure['_id']
# generate usage id
if block_id is not None:
block_key = BlockKey(block_type, block_id)
if block_key in new_structure['blocks']:
raise DuplicateItemError(block_id, self, 'structures')
else:
block_key = self._generate_block_key(new_structure['blocks'], block_type)
block_fields = partitioned_fields.get(Scope.settings, {})
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
self._update_block_in_structure(new_structure, block_key, self._new_block(
user_id,
block_type,
block_fields,
definition_locator.definition_id,
new_id,
))
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
# see if any search targets changed
if fields is not None:
self._update_search_targets(index_entry, fields)
self._update_head(course_key, index_entry, course_key.branch, new_id)
item_loc = BlockUsageLocator(
course_key.version_agnostic(),
block_type=block_type,
block_id=block_key.id,
)
else:
item_loc = BlockUsageLocator(
CourseLocator(version_guid=new_id),
block_type=block_type,
block_id=block_key.id,
)
if isinstance(course_key, LibraryLocator):
self._flag_library_updated_event(course_key)
# reconstruct the new_item from the cache
return self.get_item(item_loc)
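# Usage sketch (hypothetical ids; fields may mix scopes and are partitioned here):
#
#     new_block = store.create_item(
#         user_id, course_key, 'html', block_id='welcome_message',
#         fields={'display_name': 'Welcome', 'data': '<p>Hello</p>'},
#     )
#     # new_block.location carries the (possibly new) version_guid of the course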
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock as a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
with self.bulk_operations(parent_usage_key.course_key):
xblock = self.create_item(
user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields,
**kwargs)
# skip attach to parent if xblock has 'detached' tag
if 'detached' in xblock._class_tags: # pylint: disable=protected-access
return xblock
# don't version the structure as create_item handled that already.
new_structure = self._lookup_course(xblock.location.course_key).structure
# add new block as child and update parent's version
block_id = BlockKey.from_usage_key(parent_usage_key)
if block_id not in new_structure['blocks']:
raise ItemNotFoundError(parent_usage_key)
parent = new_structure['blocks'][block_id]
# Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS'))
if kwargs.get('position') is None:
parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
else:
parent.fields.setdefault('children', []).insert(
kwargs.get('position'),
BlockKey.from_usage_key(xblock.location)
)
if parent.edit_info.update_version != new_structure['_id']:
# if the parent hadn't been previously changed in this bulk transaction, indicate that it's
# part of the bulk transaction
self.version_block(parent, user_id, new_structure['_id'])
self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)
# db update
self.update_structure(parent_usage_key.course_key, new_structure)
# don't need to update the index b/c create_item did it for this version
return xblock
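# Usage sketch: append vs. positional insert under a parent (keys hypothetical):
#
#     store.create_child(user_id, chapter_key, 'sequential', fields={'display_name': 'Week 1'})
#     store.create_child(user_id, chapter_key, 'sequential', position=0)  # insert first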
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See :meth: `.ModuleStoreWrite.clone_course` for documentation.
In split, other than copying the assets, this is cheap as it merely creates a new version of the
existing course.
"""
source_index = self.get_course_index_info(source_course_id)
if source_index is None:
raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))
with self.bulk_operations(dest_course_id):
new_course = self.create_course(
dest_course_id.org, dest_course_id.course, dest_course_id.run,
user_id,
fields=fields,
versions_dict=source_index['versions'],
search_targets=source_index['search_targets'],
skip_auto_publish=True,
**kwargs
)
# don't copy assets until we create the course in case something's awry
super(SplitMongoModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
return new_course
DEFAULT_ROOT_COURSE_BLOCK_ID = 'course'
DEFAULT_ROOT_LIBRARY_BLOCK_ID = 'library'
def create_course(
self, org, course, run, user_id, master_branch=None, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Create a new entry in the active courses index which points to an existing or new structure. Returns
the course root of the resulting entry (the location has the course id)
Arguments:
org (str): the organization that owns the course
course (str): the course number of the course
run (str): the particular run of the course (e.g. 2013_T1)
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
course + run: If there are duplicates, this method will raise DuplicateCourseError
fields: if scope.settings fields provided, will set the fields of the root course object in the
new course. If both settings fields and a starting version are provided (via versions_dict),
it will generate a successor version to the given version, and update the settings fields
with any provided values (via update not setting).
fields (content): if scope.content fields provided, will update the fields of the new course
xblock definition to this. Like settings fields, if provided, this will cause a new version of
any given version as well as a new version of the definition (which will point to the existing
one if given a version). If not provided and given a versions_dict, it will reuse the same
definition as that version's course (obvious since it's reusing the course). If not provided
and no versions_dict is given, it will be empty and get the field defaults when loaded.
master_branch: the tag (key) for the version name in the dict which is the DRAFT version. Not the actual
version guid, but what to call it.
search_targets: a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
versions_dict: the starting version ids where the keys are the tags such as DRAFT and PUBLISHED
and the values are structure guids. If provided, the new course will reuse this version (unless you also
provide any fields overrides, see above). if not provided, will create a mostly empty course
structure with just a category course root xblock.
"""
# either need to assert this or have a default
assert master_branch is not None
# check course and run's uniqueness
locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)
return self._create_courselike(
locator, user_id, master_branch, fields, versions_dict,
search_targets, root_category, root_block_id, **kwargs
)
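# Usage sketch (values hypothetical; master_branch is required):
#
#     new_course = store.create_course(
#         'edX', 'DemoX', '2014_T1', user_id,
#         master_branch=ModuleStoreEnum.BranchName.draft,
#         fields={'display_name': 'Demo Course'},
#     )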
def _create_courselike(
self, locator, user_id, master_branch, fields=None,
versions_dict=None, search_targets=None, root_category='course',
root_block_id=None, **kwargs
):
"""
Internal code for creating a course or library
"""
index = self.get_course_index(locator)
if index is not None:
raise DuplicateCourseError(locator, index)
partitioned_fields = self.partition_fields_by_scope(root_category, fields)
block_fields = partitioned_fields[Scope.settings]
if Scope.children in partitioned_fields:
block_fields.update(partitioned_fields[Scope.children])
definition_fields = self._serialize_fields(root_category, partitioned_fields.get(Scope.content, {}))
# build from inside out: definition, structure, index entry
# if building a wholly new structure
if versions_dict is None or master_branch not in versions_dict:
# create new definition and structure
definition_id = self.create_definition_from_data(locator, definition_fields, root_category, user_id).definition_id
draft_structure = self._new_structure(
user_id,
BlockKey(
root_category,
root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_ID,
),
block_fields,
definition_id
)
new_id = draft_structure['_id']
if versions_dict is None:
versions_dict = {master_branch: new_id}
else:
versions_dict[master_branch] = new_id
elif block_fields or definition_fields: # pointing to existing course w/ some overrides
# just get the draft_version structure
draft_version = CourseLocator(version_guid=versions_dict[master_branch])
draft_structure = self._lookup_course(draft_version).structure
draft_structure = self.version_structure(locator, draft_structure, user_id)
new_id = draft_structure['_id']
root_block = draft_structure['blocks'][draft_structure['root']]
if block_fields is not None:
root_block.fields.update(self._serialize_fields(root_category, block_fields))
if definition_fields is not None:
old_def = self.get_definition(locator, root_block.definition)
new_fields = old_def['fields']
new_fields.update(definition_fields)
definition_id = self._update_definition_from_data(locator, old_def, new_fields, user_id).definition_id
root_block.definition = definition_id
root_block.edit_info.edited_on = datetime.datetime.now(UTC)
root_block.edit_info.edited_by = user_id
root_block.edit_info.previous_version = root_block.edit_info.update_version
root_block.edit_info.update_version = new_id
versions_dict[master_branch] = new_id
else: # Pointing to an existing course structure
new_id = versions_dict[master_branch]
draft_version = CourseLocator(version_guid=new_id)
draft_structure = self._lookup_course(draft_version).structure
locator = locator.replace(version_guid=new_id)
with self.bulk_operations(locator):
self.update_structure(locator, draft_structure)
index_entry = {
'_id': ObjectId(),
'org': locator.org,
'course': locator.course,
'run': locator.run,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'versions': versions_dict,
'schema_version': self.SCHEMA_VERSION,
'search_targets': search_targets or {},
}
if fields is not None:
self._update_search_targets(index_entry, fields)
self.insert_course_index(locator, index_entry)
# expensive hack to persist default field values set in __init__ method (e.g., wiki_slug)
if isinstance(locator, LibraryLocator):
course = self.get_library(locator, **kwargs)
else:
course = self.get_course(locator, **kwargs)
return self.update_item(course, user_id, **kwargs)
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Create a new library. Arguments are similar to create_course().
"""
kwargs["fields"] = fields
kwargs["master_branch"] = kwargs.get("master_branch", ModuleStoreEnum.BranchName.library)
kwargs["root_category"] = kwargs.get("root_category", "library")
kwargs["root_block_id"] = kwargs.get("root_block_id", "library")
locator = LibraryLocator(org=org, library=library, branch=kwargs["master_branch"])
return self._create_courselike(locator, user_id, **kwargs)
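# Usage sketch (values hypothetical):
#
#     lib = store.create_library(
#         org='edX', library='toy_lib', user_id=user_id,
#         fields={'display_name': 'Toy Library'},
#     )
#     # lib.location.course_key is a LibraryLocator on the 'library' branch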
def update_item(self, descriptor, user_id, allow_not_found=False, force=False, **kwargs):
"""
Save the descriptor's fields. It doesn't descend the course dag to save the children.
Return the new descriptor (updated location).
raises ItemNotFoundError if the location does not exist.
Creates a new course version. If the descriptor's location has an org, course, and run, it moves the course head
pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
the course but leaves the head pointer where it is (this change will not be in the course head).
The implementation tries to detect which changes, if any, actually need to be saved and thus won't version
the definition, structure, nor course if they didn't change.
"""
partitioned_fields = self.partition_xblock_fields_by_scope(descriptor)
return self._update_item_from_fields(
user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
partitioned_fields, descriptor.definition_locator, allow_not_found, force, **kwargs
) or descriptor
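# Usage sketch: mutate fields and save; a new course version is created only if
# something actually changed (keys hypothetical):
#
#     block = store.get_item(usage_key)
#     block.display_name = 'New name'
#     block = store.update_item(block, user_id)  # returned location has the new version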
def _update_item_from_fields(
self, user_id, course_key, block_key, partitioned_fields,
definition_locator, allow_not_found, force, **kwargs
):
"""
Broke out guts of update_item for short-circuited internal use only
"""
with self.bulk_operations(course_key):
if allow_not_found and isinstance(block_key.id, (LocalId, NoneType)):
fields = {}
for subfields in partitioned_fields.itervalues():
fields.update(subfields)
return self.create_item(
user_id, course_key, block_key.type, fields=fields, force=force
)
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key, force)
original_entry = self._get_block_from_structure(original_structure, block_key)
if original_entry is None:
if allow_not_found:
fields = {}
for subfields in partitioned_fields.itervalues():
fields.update(subfields)
return self.create_item(
user_id, course_key, block_key.type, block_id=block_key.id, fields=fields, force=force,
)
else:
raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))
is_updated = False
definition_fields = partitioned_fields[Scope.content]
if definition_locator is None:
definition_locator = DefinitionLocator(original_entry.block_type, original_entry.definition)
if definition_fields:
definition_locator, is_updated = self.update_definition_from_data(
course_key, definition_locator, definition_fields, user_id
)
# check metadata
settings = partitioned_fields[Scope.settings]
settings = self._serialize_fields(block_key.type, settings)
if not is_updated:
is_updated = self._compare_settings(settings, original_entry.fields)
# check children
if partitioned_fields.get(Scope.children, {}): # purposely not 'is not None'
serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
is_updated = is_updated or original_entry.fields.get('children', []) != serialized_children
if is_updated:
settings['children'] = serialized_children
# if updated, rev the structure
if is_updated:
new_structure = self.version_structure(course_key, original_structure, user_id)
block_data = self._get_block_from_structure(new_structure, block_key)
block_data.definition = definition_locator.definition_id
block_data.fields = settings
new_id = new_structure['_id']
# source_version records which revision a block was copied from. In this method, we're updating
# the block, so it's no longer a direct copy, and we can remove the source_version reference.
block_data.edit_info.source_version = None
self.version_block(block_data, user_id, new_id)
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
self._update_search_targets(index_entry, definition_fields)
self._update_search_targets(index_entry, settings)
if isinstance(course_key, LibraryLocator):
course_key = LibraryLocator(
org=index_entry['org'],
library=index_entry['course'],
branch=course_key.branch,
version_guid=new_id
)
else:
course_key = CourseLocator(
org=index_entry['org'],
course=index_entry['course'],
run=index_entry['run'],
branch=course_key.branch,
version_guid=new_id
)
self._update_head(course_key, index_entry, course_key.branch, new_id)
elif isinstance(course_key, LibraryLocator):
course_key = LibraryLocator(version_guid=new_id)
else:
course_key = CourseLocator(version_guid=new_id)
if isinstance(course_key, LibraryLocator):
self._flag_library_updated_event(course_key)
# fetch and return the new item--fetching is unnecessary but a good qc step
new_locator = course_key.make_usage_key(block_key.type, block_key.id)
return self.get_item(new_locator, **kwargs)
else:
return None
# pylint: disable=unused-argument
def create_xblock(
self, runtime, course_key, block_type, block_id=None, fields=None,
definition_id=None, parent_xblock=None, **kwargs
):
"""
This method instantiates the correct subclass of XModuleDescriptor based
on the contents of json_data. It does not persist it and can create one which
has no usage id.
parent_xblock is used to compute inherited metadata as well as to append the new xblock.
json_data:
- 'block_type': the xmodule block_type
- 'fields': a dict of locally set fields (not inherited) in json format not pythonic typed format!
- 'definition': the object id of the existing definition
"""
assert runtime is not None
xblock_class = runtime.load_block_type(block_type)
json_data = {
'block_type': block_type,
'fields': {},
}
if definition_id is not None:
json_data['definition'] = definition_id
if parent_xblock is None:
# If no parent, then nothing to inherit.
inherited_settings = {}
else:
inherited_settings = parent_xblock.xblock_kvs.inherited_settings.copy()
if fields is not None:
for field_name in inheritance.InheritanceMixin.fields:
if field_name in fields:
inherited_settings[field_name] = fields[field_name]
new_block = runtime.xblock_from_json(
xblock_class,
course_key,
BlockKey(block_type, block_id) if block_id else None,
BlockData(**json_data),
**kwargs
)
for field_name, value in (fields or {}).iteritems():
setattr(new_block, field_name, value)
if parent_xblock is not None:
parent_xblock.children.append(new_block.scope_ids.usage_id)
# decache pending children field settings
parent_xblock.save()
return new_block
def persist_xblock_dag(self, xblock, user_id, force=False):
"""
Create or update the xblock and all of its children. The xblock's location must specify a course.
If it doesn't specify a usage_id, then it's presumed to be new and to need creation. This function
descends the children performing the same operation for any that are xblocks. Any children which
are block_ids just update the children pointer.
All updates go into the same course version (bulk updater).
Updates the objects which came in w/ updated location and definition_location info.
returns the post-persisted version of the incoming xblock. Note that its children will be ids not
objects.
:param xblock: the head of the dag
:param user_id: who's doing the change
"""
# find course_index entry if applicable and structures entry
course_key = xblock.location.course_key
with self.bulk_operations(course_key):
index_entry = self._get_index_if_valid(course_key, force)
structure = self._lookup_course(course_key).structure
new_structure = self.version_structure(course_key, structure, user_id)
new_id = new_structure['_id']
is_updated = self._persist_subdag(course_key, xblock, user_id, new_structure['blocks'], new_id)
if is_updated:
self.update_structure(course_key, new_structure)
# update the index entry if appropriate
if index_entry is not None:
self._update_head(course_key, index_entry, xblock.location.branch, new_id)
# fetch and return the new item--fetching is unnecessary but a good qc step
return self.get_item(xblock.location.for_version(new_id))
else:
return xblock
def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
# persist the definition if persisted != passed
partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
is_updated = False
if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
xblock.definition_locator = self.create_definition_from_data(
course_key, new_def_data, xblock.category, user_id
)
is_updated = True
elif new_def_data:
xblock.definition_locator, is_updated = self.update_definition_from_data(
course_key, xblock.definition_locator, new_def_data, user_id
)
if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
# generate an id
is_new = True
is_updated = True
block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
if block_id is None:
block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
else:
block_key = BlockKey(xblock.scope_ids.block_type, block_id)
new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id) # pylint: disable=protected-access
else:
is_new = False
block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)
children = []
if xblock.has_children:
for child in xblock.children:
if isinstance(child.block_id, LocalId):
child_block = xblock.system.get_block(child)
is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
children.append(BlockKey.from_usage_key(child_block.location))
else:
children.append(BlockKey.from_usage_key(child))
is_updated = is_updated or structure_blocks[block_key].fields['children'] != children
block_fields = partitioned_fields[Scope.settings]
block_fields = self._serialize_fields(xblock.category, block_fields)
if not is_new and not is_updated:
is_updated = self._compare_settings(block_fields, structure_blocks[block_key].fields)
if children:
block_fields['children'] = children
if is_updated:
if is_new:
block_info = self._new_block(
user_id,
xblock.category,
block_fields,
xblock.definition_locator.definition_id,
new_id,
raw=True
)
else:
block_info = structure_blocks[block_key]
block_info.fields = block_fields
block_info.definition = xblock.definition_locator.definition_id
self.version_block(block_info, user_id, new_id)
structure_blocks[block_key] = block_info
return is_updated
def _compare_settings(self, settings, original_fields):
"""
Return True if the settings are not == to the original fields
:param settings:
:param original_fields:
"""
original_keys = original_fields.keys()
if 'children' in original_keys:
original_keys.remove('children')
if len(settings) != len(original_keys):
return True
else:
new_keys = settings.keys()
for key in original_keys:
if key not in new_keys or original_fields[key] != settings[key]:
return True
return False
def copy(self, user_id, source_course, destination_course, subtree_list=None, blacklist=None):
"""
Copies each xblock in subtree_list and those blocks descendants excluding blacklist
from source_course to destination_course.
To delete a block in the destination_course, copy its parent and blacklist the other
sibs to keep them from being copied. You can also just call delete_item on the destination.
Ensures that each subtree occurs in the same place in destination as it does in source. If any
of the source's subtree parents are missing from destination, it raises ItemNotFound([parent_ids]).
To determine the same relative order vis-a-vis published siblings,
publishing may involve changing the order of previously published siblings. For example,
if publishing `[c, d]` and source parent has children `[a, b, c, d, e]` and destination parent
currently has children `[e, b]`, there's no obviously correct resulting order; thus, publish will
reorder destination to `[b, c, d, e]` to make it conform with the source.
:param source_course: a CourseLocator (can be a version or course w/ branch)
:param destination_course: a CourseLocator which must be an existing course but branch doesn't have
to exist yet. (The course must exist b/c Locator doesn't have everything necessary to create it).
Note, if the branch doesn't exist, then the source_course structure's root must be in subtree_list;
otherwise, the publish will violate the parents must exist rule.
:param subtree_list: a list of usage keys whose subtrees to publish.
:param blacklist: a list of usage keys to not change in the destination: i.e., don't add
if not there, don't update if there.
Raises:
ItemNotFoundError: if it cannot find the course. if the request is to publish a
subtree but the ancestors up to and including the course root are not published.
"""
# get the destination's index, and source and destination structures.
with self.bulk_operations(source_course):
source_structure = self._lookup_course(source_course).structure
with self.bulk_operations(destination_course):
index_entry = self.get_course_index(destination_course)
if index_entry is None:
# brand new course
raise ItemNotFoundError(destination_course)
if destination_course.branch not in index_entry['versions']:
# must be copying the dag root if there's no current dag
root_block_key = source_structure['root']
if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
root_source = source_structure['blocks'][root_block_key]
# create branch
destination_structure = self._new_structure(
user_id, root_block_key,
# leave off the fields b/c the children must be filtered
definition_id=root_source.definition,
)
else:
destination_structure = self._lookup_course(destination_course).structure
destination_structure = self.version_structure(destination_course, destination_structure, user_id)
if blacklist != EXCLUDE_ALL:
blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
# iterate over subtree list filtering out blacklist.
orphans = set()
destination_blocks = destination_structure['blocks']
for subtree_root in subtree_list:
if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
# find the parents and put root in the right sequence
parents = self._get_parents_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
parent_found = False
for parent in parents:
# If a parent isn't found in the destination_blocks, it's possible it was renamed
# in the course export. Continue and only throw an exception if *no* parents are found.
if parent in destination_blocks:
parent_found = True
orphans.update(
self._sync_children(
source_structure['blocks'][parent],
destination_blocks[parent],
BlockKey.from_usage_key(subtree_root)
)
)
if len(parents) and not parent_found:
raise ItemNotFoundError(parents)
# update/create the subtree and its children in destination (skipping blacklist)
orphans.update(
self._copy_subdag(
user_id, destination_structure['_id'],
BlockKey.from_usage_key(subtree_root),
source_structure['blocks'],
destination_blocks,
blacklist
)
)
# remove any remaining orphans
for orphan in orphans:
# orphans will include moved as well as deleted xblocks. Only delete the deleted ones.
self._delete_if_true_orphan(orphan, destination_structure)
# update the db
self.update_structure(destination_course, destination_structure)
self._update_head(destination_course, index_entry, destination_course.branch, destination_structure['_id'])
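# Usage sketch: publish the subtrees rooted at two sequentials from the draft
# branch to the published branch (keys hypothetical):
#
#     store.copy(
#         user_id,
#         course_key.for_branch(ModuleStoreEnum.BranchName.draft),
#         course_key.for_branch(ModuleStoreEnum.BranchName.published),
#         subtree_list=[seq1_key, seq2_key],
#         blacklist=[unwanted_child_key],
#     )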
@contract(source_keys="list(BlockUsageLocator)", dest_usage=BlockUsageLocator)
def copy_from_template(self, source_keys, dest_usage, user_id, head_validation=True):
"""
Flexible mechanism for inheriting content from an external course/library/etc.
Will copy all of the XBlocks whose keys are passed as `source_course` so that they become
children of the XBlock whose key is `dest_usage`. Any previously existing children of
`dest_usage` that haven't been replaced/updated by this copy_from_template operation will
be deleted.
Unlike `copy()`, this does not care whether the resulting blocks are positioned similarly
in their new course/library. However, the resulting blocks will be in the same relative
order as `source_keys`.
If any of the blocks specified already exist as children of the destination block, they
will be updated rather than duplicated or replaced. If they have Scope.settings field values
overriding inherited default values, those overrides will be preserved.
IMPORTANT: This method does not preserve block_id - in other words, every block that is
copied will be assigned a new block_id. This is because we assume that the same source block
may be copied into one course in multiple places. However, it *is* guaranteed that every
time this method is called for the same source block and dest_usage, the same resulting
block id will be generated.
:param source_keys: a list of BlockUsageLocators. Order is preserved.
:param dest_usage: The BlockUsageLocator that will become the parent of an inherited copy
of all the xblocks passed in `source_keys`.
:param user_id: The user who will get credit for making this change.
"""
# Preload the block structures for all source courses/libraries/etc.
# so that we can access descendant information quickly
source_structures = {}
for key in source_keys:
course_key = key.course_key
if course_key.branch is None:
raise ItemNotFoundError("branch is required for all source keys when using copy_from_template")
if course_key not in source_structures:
with self.bulk_operations(course_key):
source_structures[course_key] = self._lookup_course(
course_key, head_validation=head_validation
).structure
destination_course = dest_usage.course_key
with self.bulk_operations(destination_course):
index_entry = self.get_course_index(destination_course)
if index_entry is None:
raise ItemNotFoundError(destination_course)
dest_structure = self._lookup_course(destination_course).structure
old_dest_structure_version = dest_structure['_id']
dest_structure = self.version_structure(destination_course, dest_structure, user_id)
# Set of all descendant block IDs of dest_usage that are to be replaced:
block_key = BlockKey(dest_usage.block_type, dest_usage.block_id)
orig_descendants = set(self.descendants(dest_structure['blocks'], block_key, depth=None, descendent_map={}))
# The descendants() method used above adds the block itself, which we don't consider a descendant.
orig_descendants.remove(block_key)
new_descendants = self._copy_from_template(
source_structures, source_keys, dest_structure, block_key, user_id, head_validation
)
# Update the edit_info:
dest_info = dest_structure['blocks'][block_key]
dest_info.edit_info.previous_version = dest_info.edit_info.update_version
dest_info.edit_info.update_version = old_dest_structure_version
dest_info.edit_info.edited_by = user_id
dest_info.edit_info.edited_on = datetime.datetime.now(UTC)
orphans = orig_descendants - new_descendants
for orphan in orphans:
del dest_structure['blocks'][orphan]
self.update_structure(destination_course, dest_structure)
self._update_head(destination_course, index_entry, destination_course.branch, dest_structure['_id'])
# Return usage locators for all the new children:
return [
destination_course.make_usage_key(*k)
for k in dest_structure['blocks'][block_key].fields['children']
]
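# Usage sketch: inherit two library blocks as children of a course vertical
# (keys hypothetical; the order of source_keys is preserved in the children):
#
#     children = store.copy_from_template(
#         [lib_problem_key, lib_html_key],  # BlockUsageLocators with a branch set
#         vertical_usage_key,
#         user_id,
#     )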
def _copy_from_template(
self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation
):
"""
Internal recursive implementation of copy_from_template()
Returns the new set of BlockKeys that are the new descendants of the block with key 'new_parent_block_key'
"""
new_blocks = set()
new_children = list() # ordered list of the new children of new_parent_block_key
for usage_key in source_keys:
src_course_key = usage_key.course_key
hashable_source_id = src_course_key.for_version(None)
block_key = BlockKey(usage_key.block_type, usage_key.block_id)
source_structure = source_structures[src_course_key]
if block_key not in source_structure['blocks']:
raise ItemNotFoundError(usage_key)
source_block_info = source_structure['blocks'][block_key]
# Compute a new block ID. This new block ID must be consistent when this
# method is called with the same (source_key, dest_structure) pair
unique_data = "{}:{}:{}".format(
unicode(hashable_source_id).encode("utf-8"),
block_key.id,
new_parent_block_key.id,
)
new_block_id = hashlib.sha1(unique_data).hexdigest()[:20]
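# Because the digest is derived only from (source id, block id, new parent id),
# re-running the copy yields the same 20-hex-char id and updates the existing
# inherited block instead of creating a duplicate.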
new_block_key = BlockKey(block_key.type, new_block_id)
# Now clone block_key to new_block_key:
new_block_info = copy.deepcopy(source_block_info)
# Note that new_block_info now points to the same definition ID entry as source_block_info did
existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())
# Inherit the Scope.settings values from 'fields' to 'defaults'
new_block_info.defaults = new_block_info.fields
# <workaround>
# CAPA modules store their 'markdown' value (an alternate representation of their content)
# in Scope.settings rather than Scope.content :-/
# markdown is a field that really should not be overridable - it fundamentally changes the content.
# capa modules also use a custom editor that always saves their markdown field to the metadata,
# even if it hasn't changed, which breaks our override system.
# So until capa modules are fixed, we special-case them and remove their markdown fields,
# forcing the inherited version to use XML only.
if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:
del new_block_info.defaults['markdown']
# </workaround>
new_block_info.fields = existing_block_info.fields # Preserve any existing overrides
if 'children' in new_block_info.defaults:
del new_block_info.defaults['children'] # Will be set later
new_block_info.edit_info = existing_block_info.edit_info
new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version
new_block_info.edit_info.update_version = dest_structure['_id']
# Note we do not set 'source_version' - it's only used for copying identical blocks
# from draft to published as part of publishing workflow.
# Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.
new_block_info.edit_info.edited_by = user_id
new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)
new_block_info.edit_info.original_usage = unicode(usage_key.replace(branch=None, version_guid=None))
new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version
dest_structure['blocks'][new_block_key] = new_block_info
children = source_block_info.fields.get('children')
if children:
children = [src_course_key.make_usage_key(child.type, child.id) for child in children]
new_blocks |= self._copy_from_template(
source_structures, children, dest_structure, new_block_key, user_id, head_validation
)
new_blocks.add(new_block_key)
# And add new_block_key to the list of new_parent_block_key's new children:
new_children.append(new_block_key)
# Update the children of new_parent_block_key
dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children
return new_blocks
def delete_item(self, usage_locator, user_id, force=False):
"""
Delete the block and the subtree rooted at it, plus any references w/in the course to the block,
from a new version of the course structure.
returns CourseLocator for new version
raises ItemNotFoundError if the location does not exist.
raises ValueError if usage_locator points to the structure root
Creates a new course version. If the descriptor's location has an org, a course, and a run, it moves the course head
pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
the course but leaves the head pointer where it is (this change will not be in the course head).
"""
if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_locator)
with self.bulk_operations(usage_locator.course_key):
original_structure = self._lookup_course(usage_locator.course_key).structure
block_key = BlockKey.from_usage_key(usage_locator)
if original_structure['root'] == block_key:
raise ValueError("Cannot delete the root of a course")
if block_key not in original_structure['blocks']:
raise ValueError("Cannot delete a block that does not exist")
index_entry = self._get_index_if_valid(usage_locator.course_key, force)
new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
new_blocks = new_structure['blocks']
new_id = new_structure['_id']
parent_block_keys = self._get_parents_from_structure(block_key, original_structure)
for parent_block_key in parent_block_keys:
parent_block = new_blocks[parent_block_key]
parent_block.fields['children'].remove(block_key)
parent_block.edit_info.edited_on = datetime.datetime.now(UTC)
parent_block.edit_info.edited_by = user_id
parent_block.edit_info.previous_version = parent_block.edit_info.update_version
parent_block.edit_info.update_version = new_id
# remove the source_version reference
parent_block.edit_info.source_version = None
self.decache_block(usage_locator.course_key, new_id, parent_block_key)
self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
# update index if appropriate and structures
self.update_structure(usage_locator.course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)
result = usage_locator.course_key.for_version(new_id)
else:
result = CourseLocator(version_guid=new_id)
if isinstance(usage_locator.course_key, LibraryLocator):
self._flag_library_updated_event(usage_locator.course_key)
return result
@contract(block_key=BlockKey, blocks='dict(BlockKey: BlockData)')
def _remove_subtree(self, block_key, blocks):
"""
Remove the subtree rooted at block_key
"""
for child in blocks[block_key].fields.get('children', []):
self._remove_subtree(BlockKey(*child), blocks)
del blocks[block_key]
def delete_course(self, course_key, user_id):
"""
Remove the given course from the course index.
Only removes the course from the index. The data remains. You can use create_course
with a versions hash to restore the course; however, the edited_on and
edited_by won't reflect the originals, of course.
"""
# this is the only real delete in the system. should it do something else?
log.info(u"deleting course from split-mongo: %s", course_key)
self.delete_course_index(course_key)
# We do NOT call the super class here since we need to keep the assets
# in case the course is later restored.
# super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
self._emit_course_deleted_signal(course_key)
@contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
def inherit_settings(
self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
):
"""
Updates block_data with any inheritable setting set by an ancestor and recurses to children.
"""
if block_key not in block_map:
return
block_data = block_map[block_key]
if inheriting_settings is None:
inheriting_settings = {}
if inherited_from is None:
inherited_from = []
# the currently passed down values take precedence over any previously cached ones
# NOTE: this should show the values which all fields would have if inherited: i.e.,
# not set to the locally defined value but to value set by nearest ancestor who sets it
inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
# update the inheriting w/ what should pass to children
inheriting_settings = inherited_settings_map[block_key].copy()
block_fields = block_data.fields
for field_name in inheritance.InheritanceMixin.fields:
if field_name in block_fields:
inheriting_settings[field_name] = block_fields[field_name]
for child in block_fields.get('children', []):
try:
if child in inherited_from:
raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
self.inherit_settings(
block_map,
BlockKey(*child),
inherited_settings_map,
inheriting_settings,
inherited_from + [child]
)
except KeyError:
# here's where we need logic for looking up in other structures when we allow cross pointers
# but it's also getting this during course creation if creating top down w/ children set or
# migration where the old mongo published had pointers to privates
pass
def descendants(self, block_map, block_id, depth, descendent_map):
"""
adds the block and its descendants (out to the given depth) to descendent_map
Depth specifies the number of levels of descendants to return
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
for child in descendent_map[block_id].fields.get('children', []):
descendent_map = self.descendants(block_map, child, depth, descendent_map)
return descendent_map
def get_modulestore_type(self, course_key=None):
"""
Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type.
Args:
course_key: just for signature compatibility
"""
return ModuleStoreEnum.Type.split
def _find_course_assets(self, course_key):
"""
Split specific lookup
"""
try:
course_assets = self._lookup_course(course_key).structure.get('assets', {})
except (InsufficientSpecificationError, VersionConflictError) as err:
log.warning(u'Error finding assets for org "%s" course "%s" on asset '
u'request. Either version of course_key is None or invalid.',
course_key.org, course_key.course)
return {}
return course_assets
def _update_course_assets(self, user_id, asset_key, update_function):
"""
A wrapper for functions wanting to manipulate assets. Gets and versions the structure,
passes the mutable array for either 'assets' or 'thumbnails' as well as the idx to the function for it to
update, then persists the changed data back into the course.
The update function can raise an exception if it doesn't want to actually do the commit. The
surrounding method probably should catch that exception.
"""
with self.bulk_operations(asset_key.course_key):
original_structure = self._lookup_course(asset_key.course_key).structure
index_entry = self._get_index_if_valid(asset_key.course_key)
new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
asset_type = asset_key.asset_type
all_assets = SortedAssetList(iterable=[])
# Assets should be pre-sorted, so add them efficiently without sorting.
# extend() will raise a ValueError if the passed-in list is not sorted.
all_assets.extend(course_assets.setdefault(asset_type, []))
asset_idx = all_assets.find(asset_key)
all_assets_updated = update_function(all_assets, asset_idx)
new_structure['assets'][asset_type] = all_assets_updated.as_list()
# update index if appropriate and structures
self.update_structure(asset_key.course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves a list of AssetMetadata to the modulestore. The list can be composed of multiple
asset types. This method is optimized for multiple inserts at once - it only re-saves the structure
at the end of all saves/updates.
"""
# Determine course key to use in bulk operation. Use the first asset assuming that
# all assets will be for the same course.
asset_key = asset_metadata_list[0].asset_id
course_key = asset_key.course_key
with self.bulk_operations(course_key):
original_structure = self._lookup_course(course_key).structure
index_entry = self._get_index_if_valid(course_key)
new_structure = self.version_structure(course_key, original_structure, user_id)
course_assets = new_structure.setdefault('assets', {})
assets_by_type = self._save_assets_by_type(
course_key, asset_metadata_list, course_assets, user_id, import_only
)
for asset_type, assets in assets_by_type.iteritems():
new_structure['assets'][asset_type] = assets.as_list()
# update index if appropriate and structures
self.update_structure(course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves or updates a single asset. Simply makes it a list and calls the list save above.
"""
return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)
@contract(asset_key='AssetKey', attr_dict=dict)
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute: value pairs to set
Raises:
ItemNotFoundError if no such item exists
AttributeError if attr is one of the built-in attrs.
"""
def _internal_method(all_assets, asset_idx):
"""
Update the found item
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
# Form an AssetMetadata.
mdata = AssetMetadata(asset_key, asset_key.path)
mdata.from_storable(all_assets[asset_idx])
mdata.update(attr_dict)
# Generate a Mongo doc from the metadata and update the course asset info.
all_assets[asset_idx] = mdata.to_storable()
return all_assets
self._update_course_assets(user_id, asset_key, _internal_method)
@contract(asset_key='AssetKey')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
def _internal_method(all_asset_info, asset_idx):
"""
Remove the item if it was found
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
all_asset_info.pop(asset_idx)
return all_asset_info
try:
self._update_course_assets(user_id, asset_key, _internal_method)
return 1
except ItemNotFoundError:
return 0
@contract(source_course_key='CourseKey', dest_course_key='CourseKey')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
"""
source_structure = self._lookup_course(source_course_key).structure
with self.bulk_operations(dest_course_key):
original_structure = self._lookup_course(dest_course_key).structure
index_entry = self._get_index_if_valid(dest_course_key)
new_structure = self.version_structure(dest_course_key, original_structure, user_id)
new_structure['assets'] = source_structure.get('assets', {})
new_structure['thumbnails'] = source_structure.get('thumbnails', [])
# update index if appropriate and structures
self.update_structure(dest_course_key, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])
def fix_not_found(self, course_locator, user_id):
"""
Only intended for rather low level methods to use. Goes through the children attrs of
each block removing any whose block_id is not a member of the course.
:param course_locator: the course to clean
"""
original_structure = self._lookup_course(course_locator).structure
index_entry = self._get_index_if_valid(course_locator)
new_structure = self.version_structure(course_locator, original_structure, user_id)
for block in new_structure['blocks'].itervalues():
if 'children' in block.fields:
block.fields['children'] = [
block_id for block_id in block.fields['children']
if block_id in new_structure['blocks']
]
self.update_structure(course_locator, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_locator, index_entry, course_locator.branch, new_structure['_id'])
def convert_references_to_keys(self, course_key, xblock_class, jsonfields, blocks):
"""
Convert the given serialized fields to the deserialized values by finding all references
and converting them.
:param jsonfields: the serialized copy of the xblock's fields
"""
@contract(block_key="BlockUsageLocator | seq[2]")
def robust_usage_key(block_key):
"""
create a course_key relative usage key for the block_key. If the block_key is in blocks,
use its correct category; otherwise, use 'unknown'.
The purpose for this is that some operations add pointers as they build up the
structure without worrying about order of creation. Because the category of the
usage_key is for the most part inert, it's better to hack a value than to work
out a dependency graph algorithm for those functions which may pre-reference blocks.
"""
# if this was taken from cache, then its fields are already converted
if isinstance(block_key, BlockUsageLocator):
return block_key.map_into_course(course_key)
elif not isinstance(block_key, BlockKey):
block_key = BlockKey(*block_key)
try:
return course_key.make_usage_key(
block_key.type, block_key.id
)
except KeyError:
return course_key.make_usage_key('unknown', block_key.id)
xblock_class = self.mixologist.mix(xblock_class)
# Make a shallow copy, so that we aren't manipulating a cached field dictionary
output_fields = dict(jsonfields)
for field_name, value in output_fields.iteritems():
if value:
field = xblock_class.fields.get(field_name)
if field is None:
continue
elif isinstance(field, Reference):
output_fields[field_name] = robust_usage_key(value)
elif isinstance(field, ReferenceList):
output_fields[field_name] = [robust_usage_key(ele) for ele in value]
elif isinstance(field, ReferenceValueDict):
for key, subvalue in value.iteritems():
value[key] = robust_usage_key(subvalue)
return output_fields
def _get_index_if_valid(self, course_key, force=False):
"""
If the course_key identifies a course and points to its draft (or plausibly its draft),
then return the index entry.
raises VersionConflictError if not the right version
:param course_key: a CourseLocator
:param force: if false, raises VersionConflictError if the current head of the course != the one identified
by course_key
"""
if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:
return None
else:
index_entry = self.get_course_index(course_key)
is_head = (
course_key.version_guid is None or
index_entry['versions'][course_key.branch] == course_key.version_guid
)
if is_head or force:
return index_entry
else:
raise VersionConflictError(
course_key,
index_entry['versions'][course_key.branch]
)
def _find_local_root(self, element_to_find, possibility, tree):
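"""
Return True if element_to_find appears anywhere in the subtree rooted at
'possibility' within 'tree' (a depth-first containment check).
"""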
if possibility not in tree:
return False
if element_to_find in tree[possibility]:
return True
for subtree in tree[possibility]:
if self._find_local_root(element_to_find, subtree, tree):
return True
return False
def _update_search_targets(self, index_entry, fields):
"""
Update the index entry if any of the given fields are in SEARCH_TARGET_DICT. (Doesn't persist
the changes; it only modifies them in the entry dict.)
:param index_entry:
:param fields: a dictionary of fields and values usually only those explicitly set and already
ready for persisting (e.g., references converted to block_ids)
"""
for field_name, field_value in fields.iteritems():
if field_name in self.SEARCH_TARGET_DICT:
index_entry.setdefault('search_targets', {})[field_name] = field_value
def _update_head(self, course_key, index_entry, branch, new_id):
"""
Update the active index for the given course's branch to point to new_id
:param course_key:
:param index_entry:
:param branch:
:param new_id:
"""
if not isinstance(new_id, ObjectId):
raise TypeError('new_id must be an ObjectId, but is {!r}'.format(new_id))
index_entry['versions'][branch] = new_id
self.update_course_index(course_key, index_entry)
def partition_xblock_fields_by_scope(self, xblock):
"""
Return a dictionary of scopes mapped to this xblock's explicitly set fields w/o any conversions
"""
# explicitly_set_fields_by_scope converts to json; so, avoiding it
# the existing partition_fields_by_scope works on a dict not an xblock
result = defaultdict(dict)
for field in xblock.fields.itervalues():
if field.is_set_on(xblock):
result[field.scope][field.name] = field.read_from(xblock)
return result
def _serialize_fields(self, category, fields):
"""
Convert any references to their serialized form. Handle references that arrive as unicode
strings because the client passed them that way and nothing above this layer deserialized them.
Remove any fields which split or its kvs computes or adds but does not want persisted.
:param fields: a dict of fields
"""
assert isinstance(fields, dict)
xblock_class = XBlock.load_class(category, self.default_class)
xblock_class = self.mixologist.mix(xblock_class)
def reference_block_id(reference):
"""
Handle client possibly setting field to strings rather than keys to get the block_id
"""
# perhaps replace by fixing the views or Field Reference*.from_json to return a Key
if isinstance(reference, basestring):
reference = BlockUsageLocator.from_string(reference)
elif isinstance(reference, BlockKey):
return reference
return BlockKey.from_usage_key(reference)
for field_name, value in fields.iteritems():
if value is not None:
if isinstance(xblock_class.fields[field_name], Reference):
fields[field_name] = reference_block_id(value)
elif isinstance(xblock_class.fields[field_name], ReferenceList):
fields[field_name] = [
reference_block_id(ele) for ele in value
]
elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
for key, subvalue in value.iteritems():
value[key] = reference_block_id(subvalue)
# should this recurse down dicts and lists just in case they contain datetime?
elif not isinstance(value, datetime.datetime): # don't convert datetimes!
fields[field_name] = xblock_class.fields[field_name].to_json(value)
return fields
def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
"""
Internal function: create a structure element with no previous version. Must provide the root id
but not necessarily the info needed to create it (for the use case of publishing). If providing
root_block_key, you must also provide block_fields and definition_id
"""
new_id = ObjectId()
if root_block_key is not None:
if block_fields is None:
block_fields = {}
blocks = {
root_block_key: self._new_block(
user_id, root_block_key.type, block_fields, definition_id, new_id
)
}
else:
blocks = {}
return {
'_id': new_id,
'root': root_block_key,
'previous_version': None,
'original_version': new_id,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'blocks': blocks,
'schema_version': self.SCHEMA_VERSION,
}
@contract(block_key=BlockKey)
def _get_parents_from_structure(self, block_key, structure):
"""
Given a structure, find all of block_key's parents in that structure. Note: returns
the parents in encoded (BlockKey) format.
"""
return [
parent_block_key
for parent_block_key, value in structure['blocks'].iteritems()
if block_key in value.fields.get('children', [])
]
def _sync_children(self, source_parent, destination_parent, new_child):
"""
Reorder destination's children to the same as source's and remove any no longer in source.
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = set(destination_parent.fields['children'])
source_children = source_parent.fields['children']
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent.fields['children'] = destination_reordered
return orphans
@contract(
block_key=BlockKey,
source_blocks="dict(BlockKey: *)",
destination_blocks="dict(BlockKey: *)",
blacklist="list(BlockKey) | str",
)
def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
"""
Update destination_blocks for the sub-dag rooted at block_key to be like the one in
source_blocks excluding blacklist.
Return any newly discovered orphans (as a set)
"""
orphans = set()
destination_block = destination_blocks.get(block_key)
new_block = source_blocks[block_key]
if destination_block:
# reorder children to correspond to whatever order holds for source.
# remove any which source no longer claims (put into orphans)
# add any which are being copied
source_children = new_block.fields.get('children', [])
existing_children = destination_block.fields.get('children', [])
destination_reordered = SparseList()
for child in existing_children:
try:
index = source_children.index(child)
destination_reordered[index] = child
except ValueError:
orphans.add(BlockKey(*child))
if blacklist != EXCLUDE_ALL:
for index, child in enumerate(source_children):
if child not in blacklist:
destination_reordered[index] = child
# the history of the published leaps between publications and only points to
# previously published versions.
previous_version = destination_block.edit_info.update_version
destination_block = copy.deepcopy(new_block)
destination_block.fields['children'] = destination_reordered.compact_list()
destination_block.edit_info.previous_version = previous_version
destination_block.edit_info.update_version = destination_version
destination_block.edit_info.edited_by = user_id
destination_block.edit_info.edited_on = datetime.datetime.now(UTC)
else:
destination_block = self._new_block(
user_id, new_block.block_type,
self._filter_blacklist(copy.copy(new_block.fields), blacklist),
new_block.definition,
destination_version,
raw=True,
block_defaults=new_block.defaults
)
# Extend the block's new edit_info with any extra edit_info fields from the source (e.g. original_usage):
for key, val in new_block.edit_info.to_storable().iteritems():
if getattr(destination_block.edit_info, key) is None:
setattr(destination_block.edit_info, key, val)
# If the block we are copying from was itself a copy, then just
# reference the original source, rather than the copy.
destination_block.edit_info.source_version = (
new_block.edit_info.source_version or new_block.edit_info.update_version
)
if blacklist != EXCLUDE_ALL:
for child in destination_block.fields.get('children', []):
if child not in blacklist:
orphans.update(
self._copy_subdag(
user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
)
)
destination_blocks[block_key] = destination_block
return orphans
@contract(blacklist='list(BlockKey) | str')
def _filter_blacklist(self, fields, blacklist):
"""
Filter out blacklist from the children field in fields. Will construct a new list for children;
so, no need to worry about copying the children field, but it will modify fields.
"""
if blacklist == EXCLUDE_ALL:
fields['children'] = []
else:
fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
return fields
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
"""
Delete the orphan and any of its descendants which no longer have parents.
"""
if len(self._get_parents_from_structure(orphan, structure)) == 0:
for child in structure['blocks'][orphan].fields.get('children', []):
self._delete_if_true_orphan(BlockKey(*child), structure)
del structure['blocks'][orphan]
@contract(returns=BlockData)
def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False, block_defaults=None):
"""
Create the core document structure for a block.
:param block_fields: the settings and children scoped fields as a dict or son
:param definition_id: the pointer to the content scoped fields
:param new_id: the structure's version id
:param raw: true if this block already has all references serialized
"""
if not raw:
block_fields = self._serialize_fields(category, block_fields)
document = {
'block_type': category,
'definition': definition_id,
'fields': block_fields,
'edit_info': {
'edited_on': datetime.datetime.now(UTC),
'edited_by': user_id,
'previous_version': None,
'update_version': new_id
}
}
if block_defaults:
document['defaults'] = block_defaults
return BlockData(**document)
@contract(block_key=BlockKey, returns='BlockData | None')
def _get_block_from_structure(self, structure, block_key):
"""
Encodes the block key before retrieving it from the structure to ensure it can
be a json dict key.
"""
return structure['blocks'].get(block_key)
@contract(block_key=BlockKey, content=BlockData)
def _update_block_in_structure(self, structure, block_key, content):
"""
Encodes the block key before accessing it in the structure to ensure it can
be a json dict key.
"""
structure['blocks'][block_key] = content
@autoretry_read()
def find_courses_by_search_target(self, field_name, field_value):
"""
Find all the courses which cached that they have the given field with the given value.
Returns: list of branch-agnostic course_keys
"""
entries = self.find_matching_course_indexes(
search_targets={field_name: field_value}
)
return [
CourseLocator(entry['org'], entry['course'], entry['run']) # Branch agnostic
for entry in entries
]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
return self.find_courses_by_search_target('wiki_slug', wiki_slug)
def heartbeat(self):
"""
Check that the db is reachable.
"""
return {ModuleStoreEnum.Type.split: self.db_connection.heartbeat()}
def create_runtime(self, course_entry, lazy):
"""
Create the proper runtime for this course
"""
return CachingDescriptorSystem(
modulestore=self,
course_entry=course_entry,
module_data={},
lazy=lazy,
default_class=self.default_class,
error_tracker=self.error_tracker,
render_template=self.render_template,
mixins=self.xblock_mixins,
select=self.xblock_select,
disabled_xblock_types=self.disabled_xblock_types,
services=self.services,
)
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
self.db_connection.ensure_indexes()
class SparseList(list):
"""
Enable inserting items into a list in arbitrary order and then retrieving them.
"""
# taken from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
def __setitem__(self, index, value):
"""
Add value to the list ensuring the list is long enough to accommodate it at the given index
"""
missing = index - len(self) + 1
if missing > 0:
self.extend([None] * missing)
list.__setitem__(self, index, value)
def compact_list(self):
"""
Return as a regular list with all Nones removed
"""
return [ele for ele in self if ele is not None]
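# Example of out-of-order assignment followed by compaction (illustrative):
#   sl = SparseList()
#   sl[3] = 'd'
#   sl[0] = 'a'
#   sl.compact_list()  # -> ['a', 'd']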
| agpl-3.0 |
ltilve/ChromiumGStreamerBackend | tools/telemetry/third_party/gsutilz/gslib/tests/test_perfdiag.py | 14 | 5038 | # -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for perfdiag command."""
from __future__ import absolute_import
import os
import socket
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import unittest
from gslib.util import IS_WINDOWS
class TestPerfDiag(testcase.GsUtilIntegrationTestCase):
"""Integration tests for perfdiag command."""
# We want to test that perfdiag works both when connecting to the standard gs
# endpoint, and when connecting to a specific IP or host while setting the
# host header. For the 2nd case we resolve storage.googleapis.com to a
# specific IP and connect to that explicitly.
_gs_ip = socket.gethostbyname('storage.googleapis.com')
_custom_endpoint_flags = [
'-o', 'Credentials:gs_host=' + _gs_ip,
'-o', 'Credentials:gs_host_header=storage.googleapis.com',
# TODO: gsutil-beta: Add host header support for JSON
'-o', 'Boto:https_validate_certificates=False']
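# These flags are prepended to the perfdiag invocations below, e.g.
# (illustrative): self.RunGsUtil(self._custom_endpoint_flags + cmd) where cmd
# is something like ['perfdiag', '-n', '1', '-t', 'lat', 'gs://<bucket>'].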
def _should_run_with_custom_endpoints(self):
# Host headers are only supported for XML, and not when
# using environment variables for proxies.
return self.test_api == 'XML' and not (os.environ.get('http_proxy') or
os.environ.get('https_proxy') or
os.environ.get('HTTPS_PROXY'))
def test_latency(self):
bucket_uri = self.CreateBucket()
cmd = ['perfdiag', '-n', '1', '-t', 'lat', suri(bucket_uri)]
self.RunGsUtil(cmd)
if self._should_run_with_custom_endpoints():
self.RunGsUtil(self._custom_endpoint_flags + cmd)
self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
def _run_basic_wthru_or_rthru(self, test_name, num_processes, num_threads):
bucket_uri = self.CreateBucket()
cmd = ['perfdiag', '-n', str(num_processes * num_threads),
'-s', '1024', '-c', str(num_processes),
'-k', str(num_threads), '-t', test_name, suri(bucket_uri)]
self.RunGsUtil(cmd)
if self._should_run_with_custom_endpoints():
self.RunGsUtil(self._custom_endpoint_flags + cmd)
self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
def test_write_throughput_single_process_single_thread(self):
self._run_basic_wthru_or_rthru('wthru', 1, 1)
def test_write_throughput_single_process_multi_thread(self):
self._run_basic_wthru_or_rthru('wthru', 1, 2)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_write_throughput_multi_process_single_thread(self):
self._run_basic_wthru_or_rthru('wthru', 2, 1)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_write_throughput_multi_process_multi_thread(self):
self._run_basic_wthru_or_rthru('wthru', 2, 2)
def test_read_throughput_single_process_single_thread(self):
self._run_basic_wthru_or_rthru('rthru', 1, 1)
def test_read_throughput_single_process_multi_thread(self):
self._run_basic_wthru_or_rthru('rthru', 1, 2)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_read_throughput_multi_process_single_thread(self):
self._run_basic_wthru_or_rthru('rthru', 2, 1)
@unittest.skipIf(IS_WINDOWS, 'Multiprocessing is not supported on Windows')
def test_read_throughput_multi_process_multi_thread(self):
self._run_basic_wthru_or_rthru('rthru', 2, 2)
def test_input_output(self):
outpath = self.CreateTempFile()
bucket_uri = self.CreateBucket()
self.RunGsUtil(['perfdiag', '-o', outpath, '-n', '1', '-t', 'lat',
suri(bucket_uri)])
self.RunGsUtil(['perfdiag', '-i', outpath])
def test_invalid_size(self):
stderr = self.RunGsUtil(
['perfdiag', '-n', '1', '-s', 'foo', '-t', 'wthru', 'gs://foobar'],
expected_status=1, return_stderr=True)
self.assertIn('Invalid -s', stderr)
def test_toobig_size(self):
stderr = self.RunGsUtil(
['perfdiag', '-n', '1', '-s', '3pb', '-t', 'wthru', 'gs://foobar'],
expected_status=1, return_stderr=True)
self.assertIn('Maximum throughput file size', stderr)
def test_listing(self):
bucket_uri = self.CreateBucket()
stdout = self.RunGsUtil(
['perfdiag', '-n', '1', '-t', 'list', suri(bucket_uri)],
return_stdout=True)
self.assertIn('Number of listing calls made:', stdout)
self.AssertNObjectsInBucket(bucket_uri, 0, versioned=True)
| bsd-3-clause |
tony810430/flink | flink-python/setup.py | 2 | 14665 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import io
import os
import platform
import sys
from distutils.command.build_ext import build_ext
from shutil import copytree, copy, rmtree
from setuptools import setup, Extension
if sys.version_info < (3, 6):
print("Python versions prior to 3.6 are not supported for PyFlink.",
file=sys.stderr)
sys.exit(-1)
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def copy_files(src_paths, output_directory):
for src_path, file_mode in src_paths:
if os.path.isdir(src_path):
child_files = os.listdir(src_path)
for child_file in child_files:
dst_path = copy(os.path.join(src_path, child_file), output_directory)
os.chmod(dst_path, file_mode)
else:
dst_path = copy(src_path, os.path.join(output_directory, os.path.basename(src_path)))
os.chmod(dst_path, file_mode)
def has_unsupported_tag(file_element):
unsupported_tags = ['includes', 'excludes']
for unsupported_tag in unsupported_tags:
if file_element.getElementsByTagName(unsupported_tag):
print('Unsupported <{0}></{1}> tag'.format(unsupported_tag, unsupported_tag))
return True
return False
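# extracted_output_files below walks the flink-bin assembly descriptor; the
# relevant XML fragment looks roughly like this (abridged, for illustration):
#   <files><file>
#     <source>src/main/flink-bin/bin/flink</source>
#     <fileMode>0755</fileMode>
#     <outputDirectory>bin</outputDirectory>
#   </file></files>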
def extracted_output_files(base_dir, file_path, output_directory):
extracted_file_paths = []
from xml.dom.minidom import parse
dom = parse(file_path)
root_data = dom.documentElement
file_elements = (root_data.getElementsByTagName("files")[0]).getElementsByTagName("file")
# extracted <files><file></file></files>
for file_element in file_elements:
source = ((file_element.getElementsByTagName('source')[0]).childNodes[0]).data
file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
try:
dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
if dst == output_directory:
if has_unsupported_tag(file_element):
sys.exit(-1)
extracted_file_paths.append((os.path.join(base_dir, source), file_mode))
except IndexError:
pass
# extracted <fileSets><fileSet></fileSet></fileSets>
file_elements = (root_data.getElementsByTagName("fileSets")[0]).getElementsByTagName("fileSet")
for file_element in file_elements:
source = ((file_element.getElementsByTagName('directory')[0]).childNodes[0]).data
file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
try:
dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
if dst == output_directory:
if has_unsupported_tag(file_element):
sys.exit(-1)
extracted_file_paths.append((os.path.join(base_dir, source), file_mode))
except IndexError:
pass
return extracted_file_paths
# Currently Cython optimizing doesn't support Windows.
if platform.system() == 'Windows':
extensions = ([])
else:
try:
from Cython.Build import cythonize
extensions = cythonize([
Extension(
name="pyflink.fn_execution.coder_impl_fast",
sources=["pyflink/fn_execution/coder_impl_fast.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.table.aggregate_fast",
sources=["pyflink/fn_execution/table/aggregate_fast.pyx"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.table.window_aggregate_fast",
sources=["pyflink/fn_execution/table/window_aggregate_fast.pyx"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.stream_fast",
sources=["pyflink/fn_execution/stream_fast.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.beam.beam_stream",
sources=["pyflink/fn_execution/beam/beam_stream.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_coder_impl_fast",
sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_operations_fast",
sources=["pyflink/fn_execution/beam/beam_operations_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
])
except ImportError:
if os.path.exists("pyflink/fn_execution/coder_impl_fast.c"):
extensions = ([
Extension(
name="pyflink.fn_execution.coder_impl_fast",
sources=["pyflink/fn_execution/coder_impl_fast.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.table.aggregate_fast",
sources=["pyflink/fn_execution/table/aggregate_fast.c"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.table.window_aggregate_fast",
sources=["pyflink/fn_execution/table/window_aggregate_fast.c"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.stream_fast",
sources=["pyflink/fn_execution/stream_fast.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.beam.beam_stream",
sources=["pyflink/fn_execution/beam/beam_stream.c"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_coder_impl_fast",
sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_operations_fast",
sources=["pyflink/fn_execution/beam/beam_operations_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
])
else:
extensions = ([])
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh"
PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat"
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
FLINK_ROOT = os.path.abspath("..")
FLINK_DIST = os.path.join(FLINK_ROOT, "flink-dist")
FLINK_BIN = os.path.join(FLINK_DIST, "src/main/flink-bin")
EXAMPLES_PATH = os.path.join(this_directory, "pyflink/table/examples")
LICENSE_FILE_PATH = os.path.join(FLINK_ROOT, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_BIN, "README.txt")
FLINK_BIN_XML_FILE = os.path.join(FLINK_BIN, '../assemblies/bin.xml')
# copy conf files
os.mkdir(CONF_TEMP_PATH)
conf_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'conf')
copy_files(conf_paths, CONF_TEMP_PATH)
# copy bin files
os.mkdir(SCRIPTS_TEMP_PATH)
script_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'bin')
copy_files(script_paths, SCRIPTS_TEMP_PATH)
copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_SH),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH))
copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_BAT),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT))
try:
os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
except BaseException: # pylint: disable=broad-except
copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
os.mkdir(LOG_TEMP_PATH)
with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
f.write("This file is used to force setuptools to include the log directory. "
"You can delete it at any time after installation.")
else:
if not os.path.isdir(SCRIPTS_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
if VERSION.find('dev0') != -1:
apache_flink_libraries_dependency = 'apache-flink-libraries==%s' % VERSION
else:
split_versions = VERSION.split('.')
split_versions[-1] = str(int(split_versions[-1]) + 1)
NEXT_VERSION = '.'.join(split_versions)
apache_flink_libraries_dependency = 'apache-flink-libraries>=%s,<%s' % \
(VERSION, NEXT_VERSION)
script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
scripts.append("pyflink/find_flink_home.py")
PACKAGES = ['pyflink',
'pyflink.table',
'pyflink.util',
'pyflink.datastream',
'pyflink.common',
'pyflink.fn_execution',
'pyflink.fn_execution.beam',
'pyflink.fn_execution.datastream',
'pyflink.fn_execution.table',
'pyflink.fn_execution.utils',
'pyflink.metrics',
'pyflink.conf',
'pyflink.log',
'pyflink.examples',
'pyflink.bin']
PACKAGE_DIR = {
'pyflink.conf': TEMP_PATH + '/conf',
'pyflink.log': TEMP_PATH + '/log',
'pyflink.examples': TEMP_PATH + '/examples',
'pyflink.bin': TEMP_PATH + '/bin'}
PACKAGE_DATA = {
'pyflink': ['README.txt'],
'pyflink.conf': ['*'],
'pyflink.log': ['*'],
'pyflink.examples': ['*.py', '*/*.py'],
'pyflink.bin': ['*']}
setup(
name='apache-flink',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
scripts=scripts,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='dev@flink.apache.org',
python_requires='>=3.6',
install_requires=['py4j==0.10.8.1', 'python-dateutil==2.8.0', 'apache-beam==2.27.0',
'cloudpickle==1.2.2', 'avro-python3>=1.8.1,!=1.9.2,<1.10.0',
'pandas>=1.0,<1.2.0', 'pyarrow>=0.15.1,<3.0.0',
'pytz>=2018.3', 'numpy>=1.14.3,<1.20', 'fastavro>=0.21.4,<0.24',
apache_flink_libraries_dependency],
cmdclass={'build_ext': build_ext},
tests_require=['pytest==4.4.1'],
description='Apache Flink Python API',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
ext_modules=extensions
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
| apache-2.0 |
katyhuff/moose | gui/gui/InputFileWidget.py | 22 | 10291 | #!/usr/bin/python
import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
from GenSyntax import *
from ActionSyntax import *
from YamlData import *
from GetPotData import *
from InputFileTreeWidget import *
from InputFileTextbox import *
from ParseGetPot import readInputFile, GPNode
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class InputFileWidget(QtGui.QWidget):
directory_changed = QtCore.Signal()
input_file_opened = QtCore.Signal()
def __init__(self, app_path, options, peacock_ui, qt_app, application, win_parent=None):
QtGui.QWidget.__init__(self, win_parent)
self.app_path = app_path
self.options = options
self.peacock_ui = peacock_ui
self.qt_app = qt_app
self.application = application
self.yaml_data = None
self.recache()
self.action_syntax = ActionSyntax(app_path, options.use_cached_syntax)
# Start with an input file template if this application has one
input_file_template_name = os.path.dirname(app_path) + '/input_template'
self.input_file_template_root_node = None
if os.path.isfile(input_file_template_name):
self.input_file_template_root_node = readInputFile(input_file_template_name)
self.input_file_template_getpot_data = GetPotData(self.input_file_template_root_node, self)
else: # If they haven't specified their own template... let's use a default one:
input_file_template_name = os.path.dirname(os.path.realpath(sys.argv[0])) + '/input_template'
self.input_file_template_root_node = readInputFile(input_file_template_name)
self.input_file_template_getpot_data = GetPotData(self.input_file_template_root_node, self)
self.input_file_root_node = None
self.constructed_data = {}
self.initUI()
if options.input_file:
abs_input_file = os.path.abspath(options.input_file)
if os.path.isfile(abs_input_file):
self.openInputFile(abs_input_file)
else:
msgBox = QtGui.QMessageBox()
msgBox.setText("Warning: Input file, " + options.input_file + ", not found!")
msgBox.setStandardButtons(QtGui.QMessageBox.Ok)
msgBox.setDefaultButton(QtGui.QMessageBox.Ok)
msgBox.exec_()
self.modifyUI()
''' This will be called after the interface is completely setup to allow an application to modify this tab '''
def modifyUI(self):
pass
def initUI(self):
# Just a holder so the edit param_widget can go in where we want
self.edit_param_layout_spot = QtGui.QVBoxLayout()
self.tree_widget_layout_widget = QtGui.QWidget()
self.tree_widget_layout = QtGui.QVBoxLayout()
self.tree_widget_layout_widget.setLayout(self.tree_widget_layout)
self.layoutH = QtGui.QHBoxLayout()
self.layout_with_textbox = QtGui.QSplitter()
# self.layout_with_textbox.setChildrenCollapsible(False)
self.input_file_textbox = InputFileTextbox(self)
# self.input_file_textbox.hide()
self.tree_widget = InputFileTreeWidget(self)
self.tree_widget_layout.addWidget(self.tree_widget)
self.init_buttons(self.layoutH)
self.tree_widget_layout.addLayout(self.layoutH)
self.layout_with_textbox.addWidget(self.tree_widget_layout_widget)
# self.layout_with_textbox.addLayout(self.edit_param_layout_spot)
self.mesh_render_widget = self.application.meshRenderWidget(self)
if not self.application.showMeshRenderWidgetByDefault():
self.mesh_render_widget.hide()
self.layout_with_textbox.addWidget(self.mesh_render_widget)
self.input_file_textbox_layout_widget = QtGui.QWidget()
self.input_file_textbox_layout_widget.setLayout(self.input_file_textbox.getLayout())
self.layout_with_textbox.addWidget(self.input_file_textbox_layout_widget)
self.layout_with_textbox.setStretchFactor(0,0.1)
self.layout_with_textbox.setStretchFactor(1,0.9)
self.layout_with_textbox.setStretchFactor(1,0.2)
self.layout_with_textbox.setSizes([30,600,0])
self.main_layout = QtGui.QHBoxLayout()
self.main_layout.addWidget(self.layout_with_textbox)
self.setLayout(self.main_layout)
self.menubar = self.peacock_ui.menuBar()
# build menu
self.file_menu = self.menubar.addMenu('&File')
open_file_action = QtGui.QAction("Open...", self)
open_file_action.setShortcut('Ctrl+O')
open_file_action.triggered.connect(self.click_open)
self.file_menu.addAction(open_file_action)
save_file_action = QtGui.QAction("Save...", self)
save_file_action.setShortcut('Ctrl+S')
save_file_action.triggered.connect(self.click_save)
self.file_menu.addAction(save_file_action)
self.edit_menu = self.menubar.addMenu('&Edit')
main_comment_action = QtGui.QAction("Main Comment", self)
main_comment_action.triggered.connect(self._edit_main_comment)
self.edit_menu.addAction(main_comment_action)
self.view_menu = self.menubar.addMenu('&View')
input_file_action = QtGui.QAction("Input File", self)
input_file_action.triggered.connect(self._view_input_file)
self.view_menu.addAction(input_file_action)
mesh_view_action = QtGui.QAction("Mesh View", self)
mesh_view_action.triggered.connect(self._view_mesh_view)
self.view_menu.addAction(mesh_view_action)
''' Return the name to use for this tab '''
def name(self):
return 'Input File'
def init_buttons(self, layout):
self.buttonOpen = QtGui.QPushButton("Open")
self.buttonOpen.setToolTip("Open existing input file")
self.buttonSave = QtGui.QPushButton("Save")
self.buttonSave.setToolTip("Save current tree items to an input file")
self.buttonClear = QtGui.QPushButton("Clear")
self.buttonClear.setToolTip("Clear the current tree items")
QtCore.QObject.connect(self.buttonOpen, QtCore.SIGNAL("clicked()"), self.click_open)
QtCore.QObject.connect(self.buttonSave, QtCore.SIGNAL("clicked()"), self.click_save)
QtCore.QObject.connect(self.buttonClear, QtCore.SIGNAL("clicked()"), self.click_clear)
layout.addWidget(self.buttonOpen)
layout.addWidget(self.buttonSave)
self.application.addRelapSave(layout)
layout.addWidget(self.buttonClear)
def getOutputFileAndBlockNames(self):
return self.tree_widget.getOutputFileAndBlockNames()
def openInputFile(self, file_name):
if file_name and file_name != '':
progress = QtGui.QProgressDialog("Reading Input File...", "Abort", 0, 100, self)
progress.setWindowModality(QtCore.Qt.WindowModal)
counter = 0
counter+=1
progress.setValue(counter)
# Clear the tree
self.tree_widget.clear()
counter+=1
progress.setValue(counter)
self.tree_widget.addHardPathsToTree()
counter+=1
progress.setValue(counter)
os.chdir(os.path.dirname(str(file_name)))
self.directory_changed.emit()
counter+=1
progress.setValue(counter)
try:
self.input_file_root_node = readInputFile(file_name)
except Exception as e:
print '\nError parsing input file: \n', e.msg, '\n'
raise e
self.input_file_getpot_data = GetPotData(self.input_file_root_node, self)
counter+=1
progress.setValue(counter)
main_comment = '\n'.join(self.input_file_root_node.comments)
self.tree_widget.comment = main_comment
main_sections = self.input_file_root_node.children
self.tree_widget.loadData(counter, progress, main_sections)
self.input_file_opened.emit()
def click_open(self):
file_name = QtGui.QFileDialog.getOpenFileName(self, "Open Input File", "~/", "Input Files (*.i)")
if isinstance(file_name, QtCore.QString):
file_name = str(file_name)
if not isinstance(file_name, basestring): # This happens when using pyside
file_name = file_name[0]
if file_name:
self.tree_widget.clear()
self.tree_widget.addHardPathsToTree()
self.openInputFile(file_name)
def click_clear(self):
msgBox = QtGui.QMessageBox()
msgBox.setText("Clear Tree?")
msgBox.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
msgBox.setDefaultButton(QtGui.QMessageBox.No)
ret = msgBox.exec_()
if ret == QtGui.QMessageBox.Yes:
self.tree_widget.clear()
self.tree_widget.addHardPathsToTree()
def click_save(self):
file_name = QtGui.QFileDialog.getSaveFileName(self, "Save Input File", "~/", "Input Files (*.i)")
if isinstance(file_name, QtCore.QString):
file_name = str(file_name)
if not isinstance(file_name, basestring): # This happens when using pyside
file_name = file_name[0]
if file_name != '':
file = open(file_name,'w')
output_string = self.input_file_textbox.buildInputString()
file.write(output_string)
os.chdir(os.path.dirname(str(file_name)))
self.directory_changed.emit()
def _edit_main_comment(self):
ce = CommentEditor(self.tree_widget)
if ce.exec_():
self.tree_widget._itemChanged(self.tree_widget, 0)
def _view_input_file(self):
if self.input_file_textbox.isVisible():
self.input_file_textbox.hide()
sizes = self.layout_with_textbox.sizes()
sizes[2] = 0
self.layout_with_textbox.setSizes(sizes)
else:
self.input_file_textbox.show()
sizes = self.layout_with_textbox.sizes()
sizes[2] = 50
self.layout_with_textbox.setSizes(sizes)
def _view_mesh_view(self):
if self.mesh_render_widget.isVisible():
self.mesh_render_widget.hide()
sizes = self.layout_with_textbox.sizes()
sizes[1] = 0
self.layout_with_textbox.setSizes(sizes)
else:
self.mesh_render_widget.show()
sizes = self.layout_with_textbox.sizes()
sizes[1] = 600
self.layout_with_textbox.setSizes(sizes)
def _selected_recache(self):
self.recache(True)
def recache(self, force_recache = False):
if not self.yaml_data:
self.yaml_data = YamlData(self.qt_app, self.app_path, force_recache or self.options.recache, self.options.use_cached_syntax)
else:
self.yaml_data.recache(False)
| lgpl-2.1 |
saquiba2/numpy2 | numpy/f2py/tests/test_callback.py | 145 | 3040 | from __future__ import division, absolute_import, print_function
import math
import textwrap
from numpy import array
from numpy.testing import run_module_suite, assert_, assert_equal, dec
import util
class TestF77Callback(util.F2PyTest):
code = """
subroutine t(fun,a)
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine func(a)
cf2py intent(in,out) a
integer a
a = a + 11
end
subroutine func0(a)
cf2py intent(out) a
integer a
a = 11
end
subroutine t2(a)
cf2py intent(callback) fun
integer a
cf2py intent(out) a
external fun
call fun(a)
end
subroutine string_callback(callback, a)
external callback
double precision callback
double precision a
character*1 r
cf2py intent(out) a
r = 'r'
a = callback(r)
end
"""
@dec.slow
def test_all(self):
for name in "t,t2".split(","):
self.check_function(name)
@dec.slow
def test_docstring(self):
expected = """
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
"""
assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip())
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda: 4)
assert_(r == 4, repr(r))
r = t(lambda a: 5, fun_extra_args=(6,))
assert_(r == 5, repr(r))
r = t(lambda a: a, fun_extra_args=(6,))
assert_(r == 6, repr(r))
r = t(lambda a: 5 + a, fun_extra_args=(7,))
assert_(r == 12, repr(r))
r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi,))
assert_(r == 180, repr(r))
r = t(math.degrees, fun_extra_args=(math.pi,))
assert_(r == 180, repr(r))
r = t(self.module.func, fun_extra_args=(6,))
assert_(r == 17, repr(r))
r = t(self.module.func0)
assert_(r == 11, repr(r))
r = t(self.module.func0._cpointer)
assert_(r == 11, repr(r))
class A(object):
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert_(r == 7, repr(r))
r = t(a.mth)
assert_(r == 9, repr(r))
def test_string_callback(self):
def callback(code):
if code == 'r':
return 0
else:
return 1
f = getattr(self.module, 'string_callback')
r = f(callback)
assert_(r == 0, repr(r))
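# Illustrative sketch (not part of the original tests): a hypothetical pure-
# Python stand-in for the f2py-generated wrapper ``t`` above, showing the
# calling convention the docstring describes -- the callback is invoked with
# ``fun_extra_args`` unpacked as its arguments.
def fake_t(fun, fun_extra_args=()):
    # The Fortran wrapper calls back into Python and returns the result.
    return fun(*fun_extra_args)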
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ClimbsRocks/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 28 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
    delete_indices = np.array([], dtype=int)
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points: remove them from the unlabeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
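# Illustrative sketch (not part of the original example): the selection step
# above reduces to ranking unlabeled points by the entropy of their predicted
# label distributions. ``pick_most_uncertain`` is a hypothetical helper that
# isolates that computation.
def pick_most_uncertain(label_distributions, n_points=5):
    # Higher entropy == flatter distribution == less certain prediction.
    entropies = stats.distributions.entropy(label_distributions.T)
    return np.argsort(entropies)[-n_points:]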
| bsd-3-clause |
hkchenhongyi/django | tests/app_loading/tests.py | 288 | 3113 | from __future__ import unicode_literals
import os
from django.apps import apps
from django.test import SimpleTestCase
from django.test.utils import extend_sys_path
from django.utils import six
from django.utils._os import upath
class EggLoadingTest(SimpleTestCase):
def setUp(self):
self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
def tearDown(self):
apps.clear_cache()
def test_egg1(self):
"""Models module can be loaded from an app in an egg"""
egg_name = '%s/modelapp.egg' % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['app_with_models']):
models_module = apps.get_app_config('app_with_models').models_module
self.assertIsNotNone(models_module)
del apps.all_models['app_with_models']
def test_egg2(self):
"""Loading an app from an egg that has no models returns no models (and no error)"""
egg_name = '%s/nomodelapp.egg' % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['app_no_models']):
models_module = apps.get_app_config('app_no_models').models_module
self.assertIsNone(models_module)
del apps.all_models['app_no_models']
def test_egg3(self):
"""Models module can be loaded from an app located under an egg's top-level package"""
egg_name = '%s/omelet.egg' % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['omelet.app_with_models']):
models_module = apps.get_app_config('app_with_models').models_module
self.assertIsNotNone(models_module)
del apps.all_models['app_with_models']
def test_egg4(self):
"""Loading an app with no models from under the top-level egg package generates no error"""
egg_name = '%s/omelet.egg' % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['omelet.app_no_models']):
models_module = apps.get_app_config('app_no_models').models_module
self.assertIsNone(models_module)
del apps.all_models['app_no_models']
def test_egg5(self):
"""Loading an app from an egg that has an import error in its models module raises that error"""
egg_name = '%s/brokenapp.egg' % self.egg_dir
with extend_sys_path(egg_name):
with six.assertRaisesRegex(self, ImportError, 'modelz'):
with self.settings(INSTALLED_APPS=['broken_app']):
pass
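# Condensed sketch of the egg-loading pattern exercised above (hypothetical
# helper code, not one of the original tests); ``override_settings`` comes
# from django.test:
#
#   with extend_sys_path(egg_name):
#       with override_settings(INSTALLED_APPS=['app_with_models']):
#           models_module = apps.get_app_config('app_with_models').models_module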
class GetModelsTest(SimpleTestCase):
def setUp(self):
from .not_installed import models
self.not_installed_module = models
def test_get_model_only_returns_installed_models(self):
with self.assertRaises(LookupError):
apps.get_model("not_installed", "NotInstalledModel")
def test_get_models_only_returns_installed_models(self):
self.assertNotIn(
"NotInstalledModel",
[m.__name__ for m in apps.get_models()])
| bsd-3-clause |
babluboy/bookworm | data/scripts/mobi_lib/mobi_header.py | 2 | 38933 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
DEBUG_USE_ORDERED_DICTIONARY = False  # OrderedDict is supported on Python >= 2.7.
""" Set to True to use OrderedDict for MobiHeader.metadata."""
if DEBUG_USE_ORDERED_DICTIONARY:
from collections import OrderedDict as dict_
else:
dict_ = dict
from .compatibility_utils import PY2, unicode_str, hexlify, bord
if PY2:
range = xrange
import struct
import uuid
# import the mobiunpack support libraries
from .mobi_utils import getLanguage
from .mobi_uncompress import HuffcdicReader, PalmdocReader, UncompressedReader
class unpackException(Exception):
pass
def sortedHeaderKeys(mheader):
hdrkeys = sorted(list(mheader.keys()), key=lambda akey: mheader[akey][0])
return hdrkeys
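# Example: sortedHeaderKeys({'a': (4, b'>L', 4), 'b': (0, b'>H', 2)}) returns
# ['b', 'a'] -- keys ordered by their byte offset into the header.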
# HD Containers have their own headers and their own EXTH
# this is just guesswork so far, making big assumption that
# metavalue key numbers remain the same in the CONT EXTH
# Note: The layout of the CONT Header is still unknown
# so just deal with their EXTH sections for now
def dump_contexth(cpage, extheader):
# determine text encoding
codec = 'windows-1252'
codec_map = {
1252 : 'windows-1252',
65001: 'utf-8',
}
if cpage in codec_map:
codec = codec_map[cpage]
if extheader == b'':
return
id_map_strings = {
1 : 'Drm Server Id',
2 : 'Drm Commerce Id',
3 : 'Drm Ebookbase Book Id',
4 : 'Drm Ebookbase Dep Id',
100 : 'Creator',
101 : 'Publisher',
102 : 'Imprint',
103 : 'Description',
104 : 'ISBN',
105 : 'Subject',
106 : 'Published',
107 : 'Review',
108 : 'Contributor',
109 : 'Rights',
110 : 'SubjectCode',
111 : 'Type',
112 : 'Source',
113 : 'ASIN',
114 : 'versionNumber',
117 : 'Adult',
118 : 'Retail-Price',
119 : 'Retail-Currency',
120 : 'TSC',
122 : 'fixed-layout',
123 : 'book-type',
124 : 'orientation-lock',
126 : 'original-resolution',
127 : 'zero-gutter',
128 : 'zero-margin',
129 : 'MetadataResourceURI',
132 : 'RegionMagnification',
150 : 'LendingEnabled',
200 : 'DictShortName',
501 : 'cdeType',
502 : 'last_update_time',
503 : 'Updated_Title',
504 : 'CDEContentKey',
505 : 'AmazonContentReference',
506 : 'Title-Language',
507 : 'Title-Display-Direction',
508 : 'Title-Pronunciation',
509 : 'Title-Collation',
510 : 'Secondary-Title',
511 : 'Secondary-Title-Language',
512 : 'Secondary-Title-Direction',
513 : 'Secondary-Title-Pronunciation',
514 : 'Secondary-Title-Collation',
515 : 'Author-Language',
516 : 'Author-Display-Direction',
517 : 'Author-Pronunciation',
518 : 'Author-Collation',
519 : 'Author-Type',
520 : 'Publisher-Language',
521 : 'Publisher-Display-Direction',
522 : 'Publisher-Pronunciation',
523 : 'Publisher-Collation',
524 : 'Content-Language-Tag',
525 : 'primary-writing-mode',
526 : 'NCX-Ingested-By-Software',
527 : 'page-progression-direction',
528 : 'override-kindle-fonts',
529 : 'Compression-Upgraded',
530 : 'Soft-Hyphens-In-Content',
        531 : 'Dictionary_In_Language',
532 : 'Dictionary_Out_Language',
533 : 'Font_Converted',
534 : 'Amazon_Creator_Info',
535 : 'Creator-Build-Tag',
536 : 'HD-Media-Containers-Info', # CONT_Header is 0, Ends with CONTAINER_BOUNDARY (or Asset_Type?)
538 : 'Resource-Container-Fidelity',
539 : 'HD-Container-Mimetype',
540 : 'Sample-For_Special-Purpose',
541 : 'Kindletool-Operation-Information',
542 : 'Container_Id',
543 : 'Asset-Type', # FONT_CONTAINER, BW_CONTAINER, HD_CONTAINER
544 : 'Unknown_544',
}
id_map_values = {
115 : 'sample',
116 : 'StartOffset',
121 : 'Mobi8-Boundary-Section',
125 : 'Embedded-Record-Count',
130 : 'Offline-Sample',
131 : 'Metadata-Record-Offset',
201 : 'CoverOffset',
202 : 'ThumbOffset',
203 : 'HasFakeCover',
204 : 'Creator-Software',
205 : 'Creator-Major-Version',
206 : 'Creator-Minor-Version',
207 : 'Creator-Build-Number',
401 : 'Clipping-Limit',
402 : 'Publisher-Limit',
404 : 'Text-to-Speech-Disabled',
406 : 'Rental-Expiration-Time',
}
id_map_hexstrings = {
208 : 'Watermark_(hex)',
209 : 'Tamper-Proof-Keys_(hex)',
300 : 'Font-Signature_(hex)',
403 : 'Unknown_(403)_(hex)',
405 : 'Ownership-Type_(hex)',
407 : 'Unknown_(407)_(hex)',
420 : 'Multimedia-Content-Reference_(hex)',
450 : 'Locations_Match_(hex)',
451 : 'Full-Story-Length_(hex)',
452 : 'Sample-Start_Location_(hex)',
453 : 'Sample-End-Location_(hex)',
}
_length, num_items = struct.unpack(b'>LL', extheader[4:12])
extheader = extheader[12:]
pos = 0
for _ in range(num_items):
id, size = struct.unpack(b'>LL', extheader[pos:pos+8])
content = extheader[pos + 8: pos + size]
if id in id_map_strings:
name = id_map_strings[id]
print('\n Key: "%s"\n Value: "%s"' % (name, content.decode(codec, errors='replace')))
elif id in id_map_values:
name = id_map_values[id]
if size == 9:
value, = struct.unpack(b'B',content)
print('\n Key: "%s"\n Value: 0x%01x' % (name, value))
elif size == 10:
value, = struct.unpack(b'>H',content)
print('\n Key: "%s"\n Value: 0x%02x' % (name, value))
elif size == 12:
value, = struct.unpack(b'>L',content)
print('\n Key: "%s"\n Value: 0x%04x' % (name, value))
else:
print("\nError: Value for %s has unexpected size of %s" % (name, size))
elif id in id_map_hexstrings:
name = id_map_hexstrings[id]
print('\n Key: "%s"\n Value: 0x%s' % (name, hexlify(content)))
else:
print("\nWarning: Unknown metadata with id %s found" % id)
name = str(id) + ' (hex)'
print(' Key: "%s"\n Value: 0x%s' % (name, hexlify(content)))
pos += size
return
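# Illustrative sketch (not called by the unpacker): an EXTH block is a 12-byte
# header (b'EXTH' magic, total length, record count) followed by records laid
# out as (id: u32, size: u32, payload: size - 8 bytes), exactly the walk the
# loop above performs. ``iter_exth_records`` is a hypothetical helper showing
# that walk in isolation.
def iter_exth_records(extheader):
    num_items, = struct.unpack(b'>L', extheader[8:12])
    pos = 12
    for _ in range(num_items):
        rec_id, size = struct.unpack(b'>LL', extheader[pos:pos + 8])
        yield rec_id, extheader[pos + 8:pos + size]
        pos += size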
class MobiHeader:
# all values are packed in big endian format
palmdoc_header = {
'compression_type' : (0x00, b'>H', 2),
'fill0' : (0x02, b'>H', 2),
'text_length' : (0x04, b'>L', 4),
'text_records' : (0x08, b'>H', 2),
'max_section_size' : (0x0a, b'>H', 2),
'read_pos ' : (0x0c, b'>L', 4),
}
mobi6_header = {
'compression_type' : (0x00, b'>H', 2),
'fill0' : (0x02, b'>H', 2),
'text_length' : (0x04, b'>L', 4),
'text_records' : (0x08, b'>H', 2),
'max_section_size' : (0x0a, b'>H', 2),
'crypto_type' : (0x0c, b'>H', 2),
'fill1' : (0x0e, b'>H', 2),
'magic' : (0x10, b'4s', 4),
'header_length (from MOBI)' : (0x14, b'>L', 4),
'type' : (0x18, b'>L', 4),
'codepage' : (0x1c, b'>L', 4),
'unique_id' : (0x20, b'>L', 4),
'version' : (0x24, b'>L', 4),
'metaorthindex' : (0x28, b'>L', 4),
'metainflindex' : (0x2c, b'>L', 4),
'index_names' : (0x30, b'>L', 4),
'index_keys' : (0x34, b'>L', 4),
'extra_index0' : (0x38, b'>L', 4),
'extra_index1' : (0x3c, b'>L', 4),
'extra_index2' : (0x40, b'>L', 4),
'extra_index3' : (0x44, b'>L', 4),
'extra_index4' : (0x48, b'>L', 4),
'extra_index5' : (0x4c, b'>L', 4),
'first_nontext' : (0x50, b'>L', 4),
'title_offset' : (0x54, b'>L', 4),
'title_length' : (0x58, b'>L', 4),
'language_code' : (0x5c, b'>L', 4),
'dict_in_lang' : (0x60, b'>L', 4),
'dict_out_lang' : (0x64, b'>L', 4),
'min_version' : (0x68, b'>L', 4),
'first_resc_offset' : (0x6c, b'>L', 4),
'huff_offset' : (0x70, b'>L', 4),
'huff_num' : (0x74, b'>L', 4),
'huff_tbl_offset' : (0x78, b'>L', 4),
'huff_tbl_len' : (0x7c, b'>L', 4),
'exth_flags' : (0x80, b'>L', 4),
'fill3_a' : (0x84, b'>L', 4),
'fill3_b' : (0x88, b'>L', 4),
'fill3_c' : (0x8c, b'>L', 4),
'fill3_d' : (0x90, b'>L', 4),
'fill3_e' : (0x94, b'>L', 4),
'fill3_f' : (0x98, b'>L', 4),
'fill3_g' : (0x9c, b'>L', 4),
'fill3_h' : (0xa0, b'>L', 4),
'unknown0' : (0xa4, b'>L', 4),
'drm_offset' : (0xa8, b'>L', 4),
'drm_count' : (0xac, b'>L', 4),
'drm_size' : (0xb0, b'>L', 4),
'drm_flags' : (0xb4, b'>L', 4),
'fill4_a' : (0xb8, b'>L', 4),
'fill4_b' : (0xbc, b'>L', 4),
'first_content' : (0xc0, b'>H', 2),
'last_content' : (0xc2, b'>H', 2),
'unknown0' : (0xc4, b'>L', 4),
'fcis_offset' : (0xc8, b'>L', 4),
'fcis_count' : (0xcc, b'>L', 4),
'flis_offset' : (0xd0, b'>L', 4),
'flis_count' : (0xd4, b'>L', 4),
'unknown1' : (0xd8, b'>L', 4),
'unknown2' : (0xdc, b'>L', 4),
'srcs_offset' : (0xe0, b'>L', 4),
'srcs_count' : (0xe4, b'>L', 4),
'unknown3' : (0xe8, b'>L', 4),
'unknown4' : (0xec, b'>L', 4),
'fill5' : (0xf0, b'>H', 2),
'traildata_flags' : (0xf2, b'>H', 2),
'ncx_index' : (0xf4, b'>L', 4),
'unknown5' : (0xf8, b'>L', 4),
'unknown6' : (0xfc, b'>L', 4),
'datp_offset' : (0x100, b'>L', 4),
'unknown7' : (0x104, b'>L', 4),
'Unknown ' : (0x108, b'>L', 4),
'Unknown ' : (0x10C, b'>L', 4),
'Unknown ' : (0x110, b'>L', 4),
'Unknown ' : (0x114, b'>L', 4),
'Unknown ' : (0x118, b'>L', 4),
'Unknown ' : (0x11C, b'>L', 4),
'Unknown ' : (0x120, b'>L', 4),
'Unknown ' : (0x124, b'>L', 4),
'Unknown ' : (0x128, b'>L', 4),
'Unknown ' : (0x12C, b'>L', 4),
'Unknown ' : (0x130, b'>L', 4),
'Unknown ' : (0x134, b'>L', 4),
'Unknown ' : (0x138, b'>L', 4),
        'Unknown ' : (0x13C, b'>L', 4),
}
mobi8_header = {
'compression_type' : (0x00, b'>H', 2),
'fill0' : (0x02, b'>H', 2),
'text_length' : (0x04, b'>L', 4),
'text_records' : (0x08, b'>H', 2),
'max_section_size' : (0x0a, b'>H', 2),
'crypto_type' : (0x0c, b'>H', 2),
'fill1' : (0x0e, b'>H', 2),
'magic' : (0x10, b'4s', 4),
'header_length (from MOBI)' : (0x14, b'>L', 4),
'type' : (0x18, b'>L', 4),
'codepage' : (0x1c, b'>L', 4),
'unique_id' : (0x20, b'>L', 4),
'version' : (0x24, b'>L', 4),
'metaorthindex' : (0x28, b'>L', 4),
'metainflindex' : (0x2c, b'>L', 4),
'index_names' : (0x30, b'>L', 4),
'index_keys' : (0x34, b'>L', 4),
'extra_index0' : (0x38, b'>L', 4),
'extra_index1' : (0x3c, b'>L', 4),
'extra_index2' : (0x40, b'>L', 4),
'extra_index3' : (0x44, b'>L', 4),
'extra_index4' : (0x48, b'>L', 4),
'extra_index5' : (0x4c, b'>L', 4),
'first_nontext' : (0x50, b'>L', 4),
'title_offset' : (0x54, b'>L', 4),
'title_length' : (0x58, b'>L', 4),
'language_code' : (0x5c, b'>L', 4),
'dict_in_lang' : (0x60, b'>L', 4),
'dict_out_lang' : (0x64, b'>L', 4),
'min_version' : (0x68, b'>L', 4),
'first_resc_offset' : (0x6c, b'>L', 4),
'huff_offset' : (0x70, b'>L', 4),
'huff_num' : (0x74, b'>L', 4),
'huff_tbl_offset' : (0x78, b'>L', 4),
'huff_tbl_len' : (0x7c, b'>L', 4),
'exth_flags' : (0x80, b'>L', 4),
'fill3_a' : (0x84, b'>L', 4),
'fill3_b' : (0x88, b'>L', 4),
'fill3_c' : (0x8c, b'>L', 4),
'fill3_d' : (0x90, b'>L', 4),
'fill3_e' : (0x94, b'>L', 4),
'fill3_f' : (0x98, b'>L', 4),
'fill3_g' : (0x9c, b'>L', 4),
'fill3_h' : (0xa0, b'>L', 4),
'unknown0' : (0xa4, b'>L', 4),
'drm_offset' : (0xa8, b'>L', 4),
'drm_count' : (0xac, b'>L', 4),
'drm_size' : (0xb0, b'>L', 4),
'drm_flags' : (0xb4, b'>L', 4),
'fill4_a' : (0xb8, b'>L', 4),
'fill4_b' : (0xbc, b'>L', 4),
'fdst_offset' : (0xc0, b'>L', 4),
'fdst_flow_count' : (0xc4, b'>L', 4),
'fcis_offset' : (0xc8, b'>L', 4),
'fcis_count' : (0xcc, b'>L', 4),
'flis_offset' : (0xd0, b'>L', 4),
'flis_count' : (0xd4, b'>L', 4),
'unknown1' : (0xd8, b'>L', 4),
'unknown2' : (0xdc, b'>L', 4),
'srcs_offset' : (0xe0, b'>L', 4),
'srcs_count' : (0xe4, b'>L', 4),
'unknown3' : (0xe8, b'>L', 4),
'unknown4' : (0xec, b'>L', 4),
'fill5' : (0xf0, b'>H', 2),
'traildata_flags' : (0xf2, b'>H', 2),
'ncx_index' : (0xf4, b'>L', 4),
'fragment_index' : (0xf8, b'>L', 4),
'skeleton_index' : (0xfc, b'>L', 4),
'datp_offset' : (0x100, b'>L', 4),
'guide_index' : (0x104, b'>L', 4),
'Unknown ' : (0x108, b'>L', 4),
'Unknown ' : (0x10C, b'>L', 4),
'Unknown ' : (0x110, b'>L', 4),
'Unknown ' : (0x114, b'>L', 4),
'Unknown ' : (0x118, b'>L', 4),
'Unknown ' : (0x11C, b'>L', 4),
'Unknown ' : (0x120, b'>L', 4),
'Unknown ' : (0x124, b'>L', 4),
'Unknown ' : (0x128, b'>L', 4),
'Unknown ' : (0x12C, b'>L', 4),
'Unknown ' : (0x130, b'>L', 4),
'Unknown ' : (0x134, b'>L', 4),
'Unknown ' : (0x138, b'>L', 4),
        'Unknown ' : (0x13C, b'>L', 4),
}
palmdoc_header_sorted_keys = sortedHeaderKeys(palmdoc_header)
mobi6_header_sorted_keys = sortedHeaderKeys(mobi6_header)
mobi8_header_sorted_keys = sortedHeaderKeys(mobi8_header)
id_map_strings = {
1 : 'Drm Server Id',
2 : 'Drm Commerce Id',
3 : 'Drm Ebookbase Book Id',
4 : 'Drm Ebookbase Dep Id',
100 : 'Creator',
101 : 'Publisher',
102 : 'Imprint',
103 : 'Description',
104 : 'ISBN',
105 : 'Subject',
106 : 'Published',
107 : 'Review',
108 : 'Contributor',
109 : 'Rights',
110 : 'SubjectCode',
111 : 'Type',
112 : 'Source',
113 : 'ASIN',
114 : 'versionNumber',
117 : 'Adult',
118 : 'Retail-Price',
119 : 'Retail-Currency',
120 : 'TSC',
122 : 'fixed-layout',
123 : 'book-type',
124 : 'orientation-lock',
126 : 'original-resolution',
127 : 'zero-gutter',
128 : 'zero-margin',
129 : 'MetadataResourceURI',
132 : 'RegionMagnification',
150 : 'LendingEnabled',
200 : 'DictShortName',
501 : 'cdeType',
502 : 'last_update_time',
503 : 'Updated_Title',
504 : 'CDEContentKey',
505 : 'AmazonContentReference',
506 : 'Title-Language',
507 : 'Title-Display-Direction',
508 : 'Title-Pronunciation',
509 : 'Title-Collation',
510 : 'Secondary-Title',
511 : 'Secondary-Title-Language',
512 : 'Secondary-Title-Direction',
513 : 'Secondary-Title-Pronunciation',
514 : 'Secondary-Title-Collation',
515 : 'Author-Language',
516 : 'Author-Display-Direction',
517 : 'Author-Pronunciation',
518 : 'Author-Collation',
519 : 'Author-Type',
520 : 'Publisher-Language',
521 : 'Publisher-Display-Direction',
522 : 'Publisher-Pronunciation',
523 : 'Publisher-Collation',
524 : 'Content-Language-Tag',
525 : 'primary-writing-mode',
526 : 'NCX-Ingested-By-Software',
527 : 'page-progression-direction',
528 : 'override-kindle-fonts',
529 : 'Compression-Upgraded',
530 : 'Soft-Hyphens-In-Content',
        531 : 'Dictionary_In_Language',
532 : 'Dictionary_Out_Language',
533 : 'Font_Converted',
534 : 'Amazon_Creator_Info',
535 : 'Creator-Build-Tag',
536 : 'HD-Media-Containers-Info', # CONT_Header is 0, Ends with CONTAINER_BOUNDARY (or Asset_Type?)
538 : 'Resource-Container-Fidelity',
539 : 'HD-Container-Mimetype',
540 : 'Sample-For_Special-Purpose',
541 : 'Kindletool-Operation-Information',
542 : 'Container_Id',
543 : 'Asset-Type', # FONT_CONTAINER, BW_CONTAINER, HD_CONTAINER
544 : 'Unknown_544',
}
id_map_values = {
115 : 'sample',
116 : 'StartOffset',
121 : 'Mobi8-Boundary-Section',
125 : 'Embedded-Record-Count',
130 : 'Offline-Sample',
131 : 'Metadata-Record-Offset',
201 : 'CoverOffset',
202 : 'ThumbOffset',
203 : 'HasFakeCover',
204 : 'Creator-Software',
205 : 'Creator-Major-Version',
206 : 'Creator-Minor-Version',
207 : 'Creator-Build-Number',
401 : 'Clipping-Limit',
402 : 'Publisher-Limit',
404 : 'Text-to-Speech-Disabled',
406 : 'Rental-Expiration-Time',
}
id_map_hexstrings = {
208 : 'Watermark_(hex)',
209 : 'Tamper-Proof-Keys_(hex)',
300 : 'Font-Signature_(hex)',
403 : 'Unknown_(403)_(hex)',
405 : 'Ownership-Type_(hex)',
407 : 'Unknown_(407)_(hex)',
420 : 'Multimedia-Content-Reference_(hex)',
450 : 'Locations_Match_(hex)',
451 : 'Full-Story-Length_(hex)',
452 : 'Sample-Start_Location_(hex)',
453 : 'Sample-End-Location_(hex)',
}
def __init__(self, sect, sectNumber):
self.sect = sect
self.start = sectNumber
self.header = self.sect.loadSection(self.start)
if len(self.header)>20 and self.header[16:20] == b'MOBI':
self.sect.setsectiondescription(0,"Mobipocket Header")
self.palm = False
elif self.sect.ident == b'TEXtREAd':
self.sect.setsectiondescription(0, "PalmDOC Header")
self.palm = True
else:
raise unpackException('Unknown File Format')
self.records, = struct.unpack_from(b'>H', self.header, 0x8)
# set defaults in case this is a PalmDOC
self.title = self.sect.palmname.decode('latin-1', errors='replace')
self.length = len(self.header)-16
self.type = 3
self.codepage = 1252
self.codec = 'windows-1252'
self.unique_id = 0
self.version = 0
self.hasExth = False
self.exth = b''
self.exth_offset = self.length + 16
self.exth_length = 0
self.crypto_type = 0
self.firstnontext = self.start+self.records + 1
self.firstresource = self.start+self.records + 1
self.ncxidx = 0xffffffff
self.metaOrthIndex = 0xffffffff
self.metaInflIndex = 0xffffffff
self.skelidx = 0xffffffff
self.fragidx = 0xffffffff
self.guideidx = 0xffffffff
self.fdst = 0xffffffff
self.mlstart = self.sect.loadSection(self.start+1)[:4]
self.rawSize = 0
self.metadata = dict_()
# set up for decompression/unpacking
self.compression, = struct.unpack_from(b'>H', self.header, 0x0)
if self.compression == 0x4448:
reader = HuffcdicReader()
huffoff, huffnum = struct.unpack_from(b'>LL', self.header, 0x70)
huffoff = huffoff + self.start
self.sect.setsectiondescription(huffoff,"Huffman Compression Seed")
reader.loadHuff(self.sect.loadSection(huffoff))
for i in range(1, huffnum):
self.sect.setsectiondescription(huffoff+i,"Huffman CDIC Compression Seed %d" % i)
reader.loadCdic(self.sect.loadSection(huffoff+i))
self.unpack = reader.unpack
elif self.compression == 2:
self.unpack = PalmdocReader().unpack
elif self.compression == 1:
self.unpack = UncompressedReader().unpack
else:
raise unpackException('invalid compression type: 0x%4x' % self.compression)
if self.palm:
return
self.length, self.type, self.codepage, self.unique_id, self.version = struct.unpack(b'>LLLLL', self.header[20:40])
codec_map = {
1252 : 'windows-1252',
65001: 'utf-8',
}
if self.codepage in codec_map:
self.codec = codec_map[self.codepage]
# title
toff, tlen = struct.unpack(b'>II', self.header[0x54:0x5c])
tend = toff + tlen
self.title=self.header[toff:tend].decode(self.codec, errors='replace')
exth_flag, = struct.unpack(b'>L', self.header[0x80:0x84])
self.hasExth = exth_flag & 0x40
self.exth_offset = self.length + 16
self.exth_length = 0
if self.hasExth:
self.exth_length, = struct.unpack_from(b'>L', self.header, self.exth_offset+4)
self.exth_length = ((self.exth_length + 3)>>2)<<2 # round to next 4 byte boundary
self.exth = self.header[self.exth_offset:self.exth_offset+self.exth_length]
# parse the exth / metadata
self.parseMetaData()
# self.mlstart = self.sect.loadSection(self.start+1)
# self.mlstart = self.mlstart[0:4]
self.crypto_type, = struct.unpack_from(b'>H', self.header, 0xC)
# Start sector for additional files such as images, fonts, resources, etc
# Can be missing so fall back to default set previously
ofst, = struct.unpack_from(b'>L', self.header, 0x6C)
if ofst != 0xffffffff:
self.firstresource = ofst + self.start
ofst, = struct.unpack_from(b'>L', self.header, 0x50)
if ofst != 0xffffffff:
self.firstnontext = ofst + self.start
if self.isPrintReplica():
return
if self.version < 8:
# Dictionary metaOrthIndex
self.metaOrthIndex, = struct.unpack_from(b'>L', self.header, 0x28)
if self.metaOrthIndex != 0xffffffff:
self.metaOrthIndex += self.start
# Dictionary metaInflIndex
self.metaInflIndex, = struct.unpack_from(b'>L', self.header, 0x2C)
if self.metaInflIndex != 0xffffffff:
self.metaInflIndex += self.start
# handle older headers without any ncxindex info and later
# specifically 0xe4 headers
if self.length + 16 < 0xf8:
return
# NCX Index
self.ncxidx, = struct.unpack(b'>L', self.header[0xf4:0xf8])
if self.ncxidx != 0xffffffff:
self.ncxidx += self.start
# K8 specific Indexes
if self.start != 0 or self.version == 8:
# Index into <xml> file skeletons in RawML
self.skelidx, = struct.unpack_from(b'>L', self.header, 0xfc)
if self.skelidx != 0xffffffff:
self.skelidx += self.start
# Index into <div> sections in RawML
self.fragidx, = struct.unpack_from(b'>L', self.header, 0xf8)
if self.fragidx != 0xffffffff:
self.fragidx += self.start
# Index into Other files
self.guideidx, = struct.unpack_from(b'>L', self.header, 0x104)
if self.guideidx != 0xffffffff:
self.guideidx += self.start
# dictionaries do not seem to use the same approach in K8's
# so disable them
self.metaOrthIndex = 0xffffffff
self.metaInflIndex = 0xffffffff
# need to use the FDST record to find out how to properly unpack
# the rawML into pieces
# it is simply a table of start and end locations for each flow piece
self.fdst, = struct.unpack_from(b'>L', self.header, 0xc0)
self.fdstcnt, = struct.unpack_from(b'>L', self.header, 0xc4)
        # if cnt is 1 or less, fdst section number can be garbage
if self.fdstcnt <= 1:
self.fdst = 0xffffffff
if self.fdst != 0xffffffff:
self.fdst += self.start
# setting of fdst section description properly handled in mobi_kf8proc
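    # For reference (hypothetical summary, mirroring __init__ above): the Palm
    # compression codes handled by this class are 1 = uncompressed,
    # 2 = PalmDOC LZ77-style, and 0x4448 ('DH') = HUFF/CDIC, each mapped to
    # the matching reader's unpack() method.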
def dump_exth(self):
# determine text encoding
codec=self.codec
if (not self.hasExth) or (self.exth_length) == 0 or (self.exth == b''):
return
num_items, = struct.unpack(b'>L', self.exth[8:12])
pos = 12
print("Key Size Decription Value")
for _ in range(num_items):
id, size = struct.unpack(b'>LL', self.exth[pos:pos+8])
contentsize = size-8
content = self.exth[pos + 8: pos + size]
if id in MobiHeader.id_map_strings:
exth_name = MobiHeader.id_map_strings[id]
print('{0: >3d} {1: >4d} {2: <30s} {3:s}'.format(id, contentsize, exth_name, content.decode(codec, errors='replace')))
elif id in MobiHeader.id_map_values:
exth_name = MobiHeader.id_map_values[id]
if size == 9:
value, = struct.unpack(b'B',content)
print('{0:3d} byte {1:<30s} {2:d}'.format(id, exth_name, value))
elif size == 10:
value, = struct.unpack(b'>H',content)
print('{0:3d} word {1:<30s} 0x{2:0>4X} ({2:d})'.format(id, exth_name, value))
elif size == 12:
value, = struct.unpack(b'>L',content)
print('{0:3d} long {1:<30s} 0x{2:0>8X} ({2:d})'.format(id, exth_name, value))
else:
print('{0: >3d} {1: >4d} {2: <30s} (0x{3:s})'.format(id, contentsize, "Bad size for "+exth_name, hexlify(content)))
elif id in MobiHeader.id_map_hexstrings:
exth_name = MobiHeader.id_map_hexstrings[id]
print('{0:3d} {1:4d} {2:<30s} 0x{3:s}'.format(id, contentsize, exth_name, hexlify(content)))
else:
exth_name = "Unknown EXTH ID {0:d}".format(id)
print("{0: >3d} {1: >4d} {2: <30s} 0x{3:s}".format(id, contentsize, exth_name, hexlify(content)))
pos += size
return
def dumpheader(self):
# first 16 bytes are not part of the official mobiheader
# but we will treat it as such
# so section 0 is 16 (decimal) + self.length in total == at least 0x108 bytes for Mobi 8 headers
print("Dumping section %d, Mobipocket Header version: %d, total length %d" % (self.start,self.version, self.length+16))
self.hdr = {}
# set it up for the proper header version
if self.version == 0:
self.mobi_header = MobiHeader.palmdoc_header
self.mobi_header_sorted_keys = MobiHeader.palmdoc_header_sorted_keys
elif self.version < 8:
self.mobi_header = MobiHeader.mobi6_header
self.mobi_header_sorted_keys = MobiHeader.mobi6_header_sorted_keys
else:
self.mobi_header = MobiHeader.mobi8_header
self.mobi_header_sorted_keys = MobiHeader.mobi8_header_sorted_keys
# parse the header information
for key in self.mobi_header_sorted_keys:
(pos, format, tot_len) = self.mobi_header[key]
if pos < (self.length + 16):
val, = struct.unpack_from(format, self.header, pos)
self.hdr[key] = val
if 'title_offset' in self.hdr:
title_offset = self.hdr['title_offset']
title_length = self.hdr['title_length']
else:
title_offset = 0
title_length = 0
if title_offset == 0:
title_offset = len(self.header)
title_length = 0
self.title = self.sect.palmname.decode('latin-1', errors='replace')
else:
self.title = self.header[title_offset:title_offset+title_length].decode(self.codec, errors='replace')
# title record always padded with two nul bytes and then padded with nuls to next 4 byte boundary
title_length = ((title_length+2+3)>>2)<<2
self.extra1 = self.header[self.exth_offset+self.exth_length:title_offset]
self.extra2 = self.header[title_offset+title_length:]
print("Mobipocket header from section %d" % self.start)
print(" Offset Value Hex Dec Description")
for key in self.mobi_header_sorted_keys:
(pos, format, tot_len) = self.mobi_header[key]
if pos < (self.length + 16):
if key != 'magic':
fmt_string = "0x{0:0>3X} ({0:3d}){1: >" + str(9-2*tot_len) +"s}0x{2:0>" + str(2*tot_len) + "X} {2:10d} {3:s}"
else:
self.hdr[key] = unicode_str(self.hdr[key])
fmt_string = "0x{0:0>3X} ({0:3d}){2:>11s} {3:s}"
print(fmt_string.format(pos, " ",self.hdr[key], key))
print("")
if self.exth_length > 0:
print("EXTH metadata, offset %d, padded length %d" % (self.exth_offset,self.exth_length))
self.dump_exth()
print("")
if len(self.extra1) > 0:
print("Extra data between EXTH and Title, length %d" % len(self.extra1))
print(hexlify(self.extra1))
print("")
if title_length > 0:
print("Title in header at offset %d, padded length %d: '%s'" %(title_offset,title_length,self.title))
print("")
if len(self.extra2) > 0:
print("Extra data between Title and end of header, length %d" % len(self.extra2))
print(hexlify(self.extra2))
print("")
def isPrintReplica(self):
return self.mlstart[0:4] == b"%MOP"
def isK8(self):
return self.start != 0 or self.version == 8
def isEncrypted(self):
return self.crypto_type != 0
def hasNCX(self):
return self.ncxidx != 0xffffffff
def isDictionary(self):
return self.metaOrthIndex != 0xffffffff
def getncxIndex(self):
return self.ncxidx
def decompress(self, data):
return self.unpack(data)
def Language(self):
langcode = struct.unpack(b'!L', self.header[0x5c:0x60])[0]
langid = langcode & 0xFF
sublangid = (langcode >> 8) & 0xFF
return getLanguage(langid, sublangid)
def DictInLanguage(self):
if self.isDictionary():
langcode = struct.unpack(b'!L', self.header[0x60:0x64])[0]
langid = langcode & 0xFF
sublangid = (langcode >> 10) & 0xFF
if langid != 0:
return getLanguage(langid, sublangid)
return False
def DictOutLanguage(self):
if self.isDictionary():
langcode = struct.unpack(b'!L', self.header[0x64:0x68])[0]
langid = langcode & 0xFF
sublangid = (langcode >> 10) & 0xFF
if langid != 0:
return getLanguage(langid, sublangid)
return False
def getRawML(self):
def getSizeOfTrailingDataEntry(data):
num = 0
for v in data[-4:]:
if bord(v) & 0x80:
num = 0
num = (num << 7) | (bord(v) & 0x7f)
return num
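        # The entry size is encoded in up to the last 4 bytes, 7 bits per
        # byte; a byte with the high bit set marks the start of the value,
        # which is why ``num`` is reset whenever one is seen.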
def trimTrailingDataEntries(data):
for _ in range(trailers):
num = getSizeOfTrailingDataEntry(data)
data = data[:-num]
if multibyte:
num = (ord(data[-1:]) & 3) + 1
data = data[:-num]
return data
multibyte = 0
trailers = 0
if self.sect.ident == b'BOOKMOBI':
mobi_length, = struct.unpack_from(b'>L', self.header, 0x14)
mobi_version, = struct.unpack_from(b'>L', self.header, 0x68)
if (mobi_length >= 0xE4) and (mobi_version >= 5):
flags, = struct.unpack_from(b'>H', self.header, 0xF2)
multibyte = flags & 1
while flags > 1:
if flags & 2:
trailers += 1
flags = flags >> 1
        # get raw mobi markup language
print("Unpacking raw markup language")
dataList = []
# offset = 0
for i in range(1, self.records+1):
data = trimTrailingDataEntries(self.sect.loadSection(self.start + i))
dataList.append(self.unpack(data))
if self.isK8():
self.sect.setsectiondescription(self.start + i,"KF8 Text Section {0:d}".format(i))
elif self.version == 0:
self.sect.setsectiondescription(self.start + i,"PalmDOC Text Section {0:d}".format(i))
else:
self.sect.setsectiondescription(self.start + i,"Mobipocket Text Section {0:d}".format(i))
rawML = b''.join(dataList)
self.rawSize = len(rawML)
return rawML
# all metadata is stored in a dictionary with key and returns a *list* of values
# a list is used to allow for multiple creators, multiple contributors, etc
def parseMetaData(self):
def addValue(name, value):
if name not in self.metadata:
self.metadata[name] = [value]
else:
self.metadata[name].append(value)
codec=self.codec
if self.hasExth:
extheader=self.exth
_length, num_items = struct.unpack(b'>LL', extheader[4:12])
extheader = extheader[12:]
pos = 0
for _ in range(num_items):
id, size = struct.unpack(b'>LL', extheader[pos:pos+8])
content = extheader[pos + 8: pos + size]
if id in MobiHeader.id_map_strings:
name = MobiHeader.id_map_strings[id]
addValue(name, content.decode(codec, errors='replace'))
elif id in MobiHeader.id_map_values:
name = MobiHeader.id_map_values[id]
if size == 9:
value, = struct.unpack(b'B',content)
addValue(name, unicode_str(str(value)))
elif size == 10:
value, = struct.unpack(b'>H',content)
addValue(name, unicode_str(str(value)))
elif size == 12:
value, = struct.unpack(b'>L',content)
# handle special case of missing CoverOffset or missing ThumbOffset
if id == 201 or id == 202:
if value != 0xffffffff:
addValue(name, unicode_str(str(value)))
else:
addValue(name, unicode_str(str(value)))
else:
print("Warning: Bad key, size, value combination detected in EXTH ", id, size, hexlify(content))
addValue(name, hexlify(content))
elif id in MobiHeader.id_map_hexstrings:
name = MobiHeader.id_map_hexstrings[id]
addValue(name, hexlify(content))
else:
name = unicode_str(str(id)) + ' (hex)'
addValue(name, hexlify(content))
pos += size
# add the basics to the metadata each as a list element
self.metadata['Language'] = [self.Language()]
self.metadata['Title'] = [unicode_str(self.title,self.codec)]
self.metadata['Codec'] = [self.codec]
self.metadata['UniqueID'] = [unicode_str(str(self.unique_id))]
# if no asin create one using a uuid
if 'ASIN' not in self.metadata:
self.metadata['ASIN'] = [unicode_str(str(uuid.uuid4()))]
# if no cdeType set it to "EBOK"
if 'cdeType' not in self.metadata:
self.metadata['cdeType'] = ['EBOK']
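    # A typical parseMetaData() result (illustrative values; every value is a
    # list, per the note above):
    #   {'Creator': ['Jane Doe'], 'Title': ['Example'], 'Codec': ['utf-8'],
    #    'Language': ['en'], 'UniqueID': ['0'], 'cdeType': ['EBOK'], ...}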
def getMetaData(self):
return self.metadata
def describeHeader(self, DUMP):
print("Mobi Version:", self.version)
print("Codec:", self.codec)
print("Title:", self.title)
if 'Updated_Title' in self.metadata:
print("EXTH Title:", self.metadata['Updated_Title'][0])
if self.compression == 0x4448:
print("Huffdic compression")
elif self.compression == 2:
print("Palmdoc compression")
elif self.compression == 1:
print("No compression")
if DUMP:
self.dumpheader()
| gpl-3.0 |
kcpawan/django | tests/custom_managers/models.py | 238 | 6791 | """
Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super(PublishedBookManager, self).get_queryset().filter(is_published=True)
class CustomQuerySet(models.QuerySet):
def filter(self, *args, **kwargs):
queryset = super(CustomQuerySet, self).filter(fun=True)
queryset._filter_CustomQuerySet = True
return queryset
def public_method(self, *args, **kwargs):
return self.all()
def _private_method(self, *args, **kwargs):
return self.all()
def optout_public_method(self, *args, **kwargs):
return self.all()
optout_public_method.queryset_only = True
def _optin_private_method(self, *args, **kwargs):
return self.all()
_optin_private_method.queryset_only = False
class BaseCustomManager(models.Manager):
def __init__(self, arg):
super(BaseCustomManager, self).__init__()
self.init_arg = arg
def filter(self, *args, **kwargs):
queryset = super(BaseCustomManager, self).filter(fun=True)
queryset._filter_CustomManager = True
return queryset
def manager_only(self):
return self.all()
CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
class CustomInitQuerySet(models.QuerySet):
# QuerySet with an __init__() method that takes an additional argument.
def __init__(self, custom_optional_arg=None, model=None, query=None, using=None, hints=None):
super(CustomInitQuerySet, self).__init__(model=model, query=query, using=using, hints=hints)
class DeconstructibleCustomManager(BaseCustomManager.from_queryset(CustomQuerySet)):
def __init__(self, a, b, c=1, d=2):
super(DeconstructibleCustomManager, self).__init__(a)
class FunPeopleManager(models.Manager):
def get_queryset(self):
return super(FunPeopleManager, self).get_queryset().filter(fun=True)
class BoringPeopleManager(models.Manager):
def get_queryset(self):
return super(BoringPeopleManager, self).get_queryset().filter(fun=False)
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=False)
favorite_book = models.ForeignKey('Book', models.SET_NULL, null=True, related_name='favorite_books')
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = PersonManager()
fun_people = FunPeopleManager()
boring_people = BoringPeopleManager()
custom_queryset_default_manager = CustomQuerySet.as_manager()
custom_queryset_custom_manager = CustomManager('hello')
custom_init_queryset_manager = CustomInitQuerySet.as_manager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class FunPerson(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=True)
favorite_book = models.ForeignKey(
'Book',
models.SET_NULL,
null=True,
related_name='fun_people_favorite_books',
)
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = FunPeopleManager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField(default=False)
published_objects = PublishedBookManager()
authors = models.ManyToManyField(Person, related_name='books')
fun_authors = models.ManyToManyField(FunPerson, related_name='books')
favorite_things = GenericRelation(Person,
content_type_field='favorite_thing_type', object_id_field='favorite_thing_id')
fun_people_favorite_things = GenericRelation(FunPerson,
content_type_field='favorite_thing_type', object_id_field='favorite_thing_id')
def __str__(self):
return self.title
class FastCarManager(models.Manager):
def get_queryset(self):
return super(FastCarManager, self).get_queryset().filter(top_speed__gt=150)
@python_2_unicode_compatible
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
def __str__(self):
return self.name
class RestrictedManager(models.Manager):
def get_queryset(self):
return super(RestrictedManager, self).get_queryset().filter(is_public=True)
@python_2_unicode_compatible
class RelatedModel(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class RestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.ForeignKey(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
@python_2_unicode_compatible
class OneToOneRestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
def __str__(self):
return self.name
class AbstractPerson(models.Model):
abstract_persons = models.Manager()
objects = models.CharField(max_length=30)
class Meta:
abstract = True
class PersonFromAbstract(AbstractPerson):
pass
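# Illustrative sketch (not part of the test models): how the managers above
# differ at the call site. ``demo_managers`` is a hypothetical helper; the
# querysets are lazy and hit the database only when iterated.
def demo_managers():
    fun = Person.fun_people.all()        # pre-filtered to fun=True
    boring = Person.boring_people.all()  # pre-filtered to fun=False
    fast = Car.fast_cars.all()           # pre-filtered to top_speed > 150
    return fun, boring, fast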
| bsd-3-clause |
AnotherIvan/calibre | src/calibre/gui2/store/stores/ebooksgratuits_plugin.py | 22 | 1107 |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
store_version = 1 # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2012, Florent FAYOLLE <florent.fayolle69@gmail.com>'
__docformat__ = 'restructuredtext en'
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.opensearch_store import OpenSearchOPDSStore
from calibre.gui2.store.search_result import SearchResult
from calibre.utils.filenames import ascii_text
class EbooksGratuitsStore(BasicStoreConfig, OpenSearchOPDSStore):
open_search_url = 'http://www.ebooksgratuits.com/opds/opensearch.xml'
web_url = 'http://www.ebooksgratuits.com/'
def strip_accents(self, s):
return ascii_text(s)
def search(self, query, max_results=10, timeout=60):
query = self.strip_accents(unicode(query))
for s in OpenSearchOPDSStore.search(self, query, max_results, timeout):
if s.downloads:
s.drm = SearchResult.DRM_UNLOCKED
s.price = '$0.00'
yield s
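    # Usage sketch (hypothetical): iterating search() on a configured store
    # instance yields SearchResult objects already marked DRM-free and priced
    # at $0.00:
    #   for result in store.search('hugo', max_results=5):
    #       print(result.price)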
| gpl-3.0 |
stephen144/odoo | addons/mail/models/mail_channel.py | 3 | 31573 | # -*- coding: utf-8 -*-
from email.utils import formataddr
import datetime
import uuid
from openerp import _, api, fields, models, modules, tools
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import UserError
from openerp.osv import expression
from openerp.addons.bus.models.bus_presence import AWAY_TIMER
class ChannelPartner(models.Model):
_name = 'mail.channel.partner'
_description = 'Last Seen Many2many'
_table = 'mail_channel_partner'
_rec_name = 'partner_id'
partner_id = fields.Many2one('res.partner', string='Recipient', ondelete='cascade')
channel_id = fields.Many2one('mail.channel', string='Channel', ondelete='cascade')
seen_message_id = fields.Many2one('mail.message', string='Last Seen')
fold_state = fields.Selection([('open', 'Open'), ('folded', 'Folded'), ('closed', 'Closed')], string='Conversation Fold State', default='open')
    is_minimized = fields.Boolean("Conversation is minimized")
is_pinned = fields.Boolean("Is pinned on the interface", default=True)
class Channel(models.Model):
""" A mail.channel is a discussion group that may behave like a listener
on documents. """
_description = 'Discussion channel'
_name = 'mail.channel'
_mail_flat_thread = False
_mail_post_access = 'read'
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_default_image(self):
image_path = modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
name = fields.Char('Name', required=True, translate=True)
channel_type = fields.Selection([
('chat', 'Chat Discussion'),
('channel', 'Channel')],
'Channel Type', default='channel')
description = fields.Text('Description')
uuid = fields.Char('UUID', size=50, select=True, default=lambda self: '%s' % uuid.uuid4())
email_send = fields.Boolean('Send messages by email', default=False)
# multi users channel
channel_last_seen_partner_ids = fields.One2many('mail.channel.partner', 'channel_id', string='Last Seen')
channel_partner_ids = fields.Many2many('res.partner', 'mail_channel_partner', 'channel_id', 'partner_id', string='Listeners')
channel_message_ids = fields.Many2many('mail.message', 'mail_message_mail_channel_rel')
is_member = fields.Boolean('Is a member', compute='_compute_is_member')
# access
public = fields.Selection([
('public', 'Everyone'),
('private', 'Invited people only'),
('groups', 'Selected group of users')],
'Privacy', required=True, default='groups',
help='This group is visible by non members. Invisible groups can add members through the invite button.')
group_public_id = fields.Many2one('res.groups', string='Authorized Group',
default=lambda self: self.env.ref('base.group_user'))
group_ids = fields.Many2many(
'res.groups', rel='mail_channel_res_group_rel',
id1='mail_channel_id', id2='groups_id', string='Auto Subscription',
help="Members of those groups will automatically added as followers. "
"Note that they will be able to manage their subscription manually "
"if necessary.")
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Photo", default=_get_default_image, attachment=True,
help="This field holds the image used as photo for the group, limited to 1024x1024px.")
image_medium = fields.Binary('Medium-sized photo',
compute='_get_image', inverse='_set_image_medium', store=True, attachment=True,
help="Medium-sized photo of the group. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary('Small-sized photo',
compute='_get_image', inverse='_set_image_small', store=True, attachment=True,
help="Small-sized photo of the group. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
alias_id = fields.Many2one(
'mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically create new topics.")
@api.multi
def _compute_is_member(self):
memberships = self.env['mail.channel.partner'].sudo().search([
('channel_id', 'in', self.ids),
('partner_id', '=', self.env.user.partner_id.id),
])
membership_ids = memberships.mapped('channel_id')
for record in self:
record.is_member = record in membership_ids
@api.one
@api.depends('image')
def _get_image(self):
self.image_medium = tools.image_resize_image_medium(self.image)
self.image_small = tools.image_resize_image_small(self.image)
def _set_image_medium(self):
self.image = tools.image_resize_image_big(self.image_medium)
def _set_image_small(self):
self.image = tools.image_resize_image_big(self.image_small)
@api.model
def create(self, vals):
# Create channel and alias
channel = super(Channel, self.with_context(
alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True, mail_create_nosubscribe=True)
).create(vals)
channel.alias_id.write({"alias_force_thread_id": channel.id, 'alias_parent_thread_id': channel.id})
if vals.get('group_ids'):
channel._subscribe_users()
# make channel listen itself: posting on a channel notifies the channel
if not self._context.get('mail_channel_noautofollow'):
channel.message_subscribe(channel_ids=[channel.id])
return channel
@api.multi
def unlink(self):
aliases = self.mapped('alias_id')
# Delete mail.channel
try:
all_emp_group = self.env.ref('mail.channel_all_employees')
except ValueError:
all_emp_group = None
if all_emp_group and all_emp_group in self:
raise UserError(_('You cannot delete those groups, as the Whole Company group is required by other modules.'))
res = super(Channel, self).unlink()
# Cascade-delete mail aliases as well, as they should not exist without the mail.channel.
aliases.sudo().unlink()
return res
@api.multi
def write(self, vals):
result = super(Channel, self).write(vals)
if vals.get('group_ids'):
self._subscribe_users()
return result
def _subscribe_users(self):
for mail_channel in self:
mail_channel.write({'channel_partner_ids': [(4, pid) for pid in mail_channel.mapped('group_ids').mapped('users').mapped('partner_id').ids]})
@api.multi
def action_follow(self):
self.ensure_one()
channel_partner = self.mapped('channel_last_seen_partner_ids').filtered(lambda cp: cp.partner_id == self.env.user.partner_id)
if not channel_partner:
return self.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': self.env.user.partner_id.id})]})
@api.multi
def action_unfollow(self):
partner_id = self.env.user.partner_id.id
channel_info = self.channel_info('unsubscribe')[0] # must be computed before leaving the channel (access rights)
result = self.write({'channel_partner_ids': [(3, partner_id)]})
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner_id), channel_info)
if not self.email_send:
notification = _('<div class="o_mail_notification">left <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
# post 'channel left' message as root since the partner just unsubscribed from the channel
self.sudo().message_post(body=notification, message_type="notification", subtype="mail.mt_comment", author_id=partner_id)
return result
@api.multi
def _notification_group_recipients(self, message, recipients, done_ids, group_data):
""" All recipients of a message on a channel are considered as partners.
This means they will receive a minimal email, without a link to access
in the backend. Mailing lists should indeed send minimal emails to avoid
the noise. """
for recipient in recipients:
group_data['partner'] |= recipient
done_ids.add(recipient.id)
return super(Channel, self)._notification_group_recipients(message, recipients, done_ids, group_data)
@api.multi
def message_get_email_values(self, notif_mail=None):
self.ensure_one()
res = super(Channel, self).message_get_email_values(notif_mail=notif_mail)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if self.alias_domain and self.alias_name:
headers['List-Id'] = '%s.%s' % (self.alias_name, self.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (self.alias_name, self.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (self.name, self.alias_name, self.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
@api.multi
def message_get_recipient_values(self, notif_message=None, recipient_ids=None):
# real mailing list: multiple recipients (hidden by X-Forge-To)
if self.alias_domain and self.alias_name:
return {
'email_to': ','.join(formataddr((partner.name, partner.email)) for partner in self.env['res.partner'].sudo().browse(recipient_ids)),
'recipient_ids': [],
}
return super(Channel, self).message_get_recipient_values(notif_message=notif_message, recipient_ids=recipient_ids)
@api.multi
@api.returns('self', lambda value: value.id)
def message_post(self, body='', subject=None, message_type='notification', subtype=None, parent_id=False, attachments=None, content_subtype='html', **kwargs):
# auto pin 'direct_message' channel partner
self.filtered(lambda channel: channel.channel_type == 'chat').mapped('channel_last_seen_partner_ids').write({'is_pinned': True})
        # apply shortcode (text only) substitution
body = self.env['mail.shortcode'].apply_shortcode(body, shortcode_type='text')
message = super(Channel, self.with_context(mail_create_nosubscribe=True)).message_post(body=body, subject=subject, message_type=message_type, subtype=subtype, parent_id=parent_id, attachments=attachments, content_subtype=content_subtype, **kwargs)
return message
#------------------------------------------------------
# Instant Messaging API
#------------------------------------------------------
# A channel header should be broadcasted:
# - when adding user to channel (only to the new added partners)
# - when folding/minimizing a channel (only to the user making the action)
# A message should be broadcasted:
# - when a message is posted on a channel (to the channel, using _notify() method)
# Anonymous method
@api.multi
def _broadcast(self, partner_ids):
""" Broadcast the current channel header to the given partner ids
            :param partner_ids : the partners to notify
"""
notifications = self._channel_channel_notifications(partner_ids)
self.env['bus.bus'].sendmany(notifications)
@api.multi
def _channel_channel_notifications(self, partner_ids):
""" Generate the bus notifications of current channel for the given partner ids
            :param partner_ids : the partners to send the current channel header to
            :returns list of bus notifications (tuple (bus_channel, message_content))
"""
notifications = []
for partner in self.env['res.partner'].browse(partner_ids):
user_id = partner.user_ids and partner.user_ids[0] or False
if user_id:
for channel_info in self.sudo(user_id).channel_info():
notifications.append([(self._cr.dbname, 'res.partner', partner.id), channel_info])
return notifications
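    # Each bus notification built above is a pair, e.g. (illustrative values):
    #   [('<dbname>', 'res.partner', 7), {'id': 42, 'name': 'general', ...}]
    # i.e. (the partner's private bus channel, the channel_info payload).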
@api.multi
def _notify(self, message):
""" Broadcast the given message on the current channels.
            Send the message on the bus channel (the uuid for a public mail.channel, or the partner's private bus channel otherwise).
            A partner will receive only one message on its bus channel, even if the message belongs to multiple mail channels; the 'channel_ids' field
            of the received message indicates on which mail channels the message should be displayed.
            :param message : the mail.message to broadcast
"""
message.ensure_one()
notifications = self._channel_message_notifications(message)
self.env['bus.bus'].sendmany(notifications)
@api.multi
def _channel_message_notifications(self, message):
""" Generate the bus notifications for the given message
:param message : the mail.message to sent
            :returns list of bus notifications (tuple (bus_channel, message_content))
"""
message_values = message.message_format()[0]
notifications = []
for channel in self:
notifications.append([(self._cr.dbname, 'mail.channel', channel.id), dict(message_values)])
# add uuid to allow anonymous to listen
if channel.public == 'public':
notifications.append([channel.uuid, dict(message_values)])
return notifications
@api.multi
def channel_info(self, extra_info = False):
""" Get the informations header for the current channels
:returns a list of channels values
:rtype : list(dict)
"""
channel_infos = []
partner_channels = self.env['mail.channel.partner']
# find the channel partner state, if logged user
if self.env.user and self.env.user.partner_id:
partner_channels = self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', 'in', self.ids)])
# for each channel, build the information header and include the logged partner information
for channel in self:
info = {
'id': channel.id,
'name': channel.name,
'uuid': channel.uuid,
'state': 'open',
'is_minimized': False,
'channel_type': channel.channel_type,
'public': channel.public,
'mass_mailing': channel.email_send,
}
if extra_info:
info['info'] = extra_info
            # add the partner for 'direct message' channel
if channel.channel_type == 'chat':
info['direct_partner'] = (channel.sudo()
.with_context(active_test=False)
.channel_partner_ids
.filtered(lambda p: p.id != self.env.user.partner_id.id)
.read(['id', 'name', 'im_status']))
            # add user session state, if available and if the user is logged in
if partner_channels.ids:
partner_channel = partner_channels.filtered(lambda c: channel.id == c.channel_id.id)
if len(partner_channel) >= 1:
partner_channel = partner_channel[0]
info['state'] = partner_channel.fold_state or 'open'
info['is_minimized'] = partner_channel.is_minimized
info['seen_message_id'] = partner_channel.seen_message_id.id
# add needaction and unread counter, since the user is logged
info['message_needaction_counter'] = channel.message_needaction_counter
info['message_unread_counter'] = channel.message_unread_counter
channel_infos.append(info)
return channel_infos
@api.multi
def channel_fetch_message(self, last_id=False, limit=20):
""" Return message values of the current channel.
            :param last_id : last message id to start the search
:param limit : maximum number of messages to fetch
:returns list of messages values
:rtype : list(dict)
"""
self.ensure_one()
domain = [("channel_ids", "in", self.ids)]
if last_id:
domain.append(("id", "<", last_id))
return self.env['mail.message'].message_fetch(domain=domain, limit=limit)
# User methods
@api.model
def channel_get(self, partners_to, pin=True):
""" Get the canonical private channel between some partners, create it if needed.
            To reuse an old channel (conversation), it must be private and contain
            only the given partners.
            :param partners_to : list of res.partner ids to add to the conversation
            :param pin : True if getting the channel should pin it for the current user
            :returns a channel header, or False if partners_to was empty
:rtype : dict
"""
if partners_to:
partners_to.append(self.env.user.partner_id.id)
            # determine type according to the number of partners in the channel
self.env.cr.execute("""
SELECT P.channel_id as channel_id
FROM mail_channel C, mail_channel_partner P
WHERE P.channel_id = C.id
AND C.public LIKE 'private'
AND P.partner_id IN %s
AND channel_type LIKE 'chat'
GROUP BY P.channel_id
HAVING COUNT(P.partner_id) = %s
""", (tuple(partners_to), len(partners_to),))
result = self.env.cr.dictfetchall()
if result:
# get the existing channel between the given partners
channel = self.browse(result[0].get('channel_id'))
# pin up the channel for the current partner
if pin:
self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', '=', channel.id)]).write({'is_pinned': True})
else:
# create a new one
channel = self.create({
'channel_partner_ids': [(4, partner_id) for partner_id in partners_to],
'public': 'private',
'channel_type': 'chat',
'email_send': False,
'name': ', '.join(self.env['res.partner'].sudo().browse(partners_to).mapped('name')),
})
# broadcast the channel header to the other partner (not me)
channel._broadcast(partners_to)
return channel.channel_info()[0]
return False
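    # Usage sketch (hypothetical ids): channel_get([7]) returns the header
    # dict of the private chat between partner 7 and the current user,
    # creating and broadcasting the channel on first use.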
@api.model
def channel_get_and_minimize(self, partners_to):
channel = self.channel_get(partners_to)
if channel:
self.channel_minimize(channel['uuid'])
return channel
@api.model
def channel_fold(self, uuid, state=None):
""" Update the fold_state of the given session. In order to syncronize web browser
tabs, the change will be broadcast to himself (the current user channel).
Note: the user need to be logged
:param state : the new status of the session for the current user.
"""
domain = [('partner_id', '=', self.env.user.partner_id.id), ('channel_id.uuid', '=', uuid)]
for session_state in self.env['mail.channel.partner'].search(domain):
if not state:
state = session_state.fold_state
if session_state.fold_state == 'open':
state = 'folded'
else:
state = 'open'
session_state.write({
'fold_state': state,
'is_minimized': bool(state != 'closed'),
})
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), session_state.channel_id.channel_info()[0])
@api.model
def channel_minimize(self, uuid, minimized=True):
values = {
'fold_state': minimized and 'open' or 'closed',
'is_minimized': minimized
}
domain = [('partner_id', '=', self.env.user.partner_id.id), ('channel_id.uuid', '=', uuid)]
channel_partners = self.env['mail.channel.partner'].search(domain)
channel_partners.write(values)
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_partners.channel_id.channel_info()[0])
@api.model
def channel_pin(self, uuid, pinned=False):
# add the person in the channel, and pin it (or unpin it)
channel = self.search([('uuid', '=', uuid)])
channel_partners = self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', '=', channel.id)])
if not pinned:
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel.channel_info('unsubscribe')[0])
if channel_partners:
channel_partners.write({'is_pinned': pinned})
@api.multi
def channel_seen(self):
self.ensure_one()
if self.channel_message_ids.ids:
last_message_id = self.channel_message_ids.ids[0] # zero is the index of the last message
self.env['mail.channel.partner'].search([('channel_id', 'in', self.ids), ('partner_id', '=', self.env.user.partner_id.id)]).write({'seen_message_id': last_message_id})
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), {'info': 'channel_seen', 'id': self.id, 'last_message_id': last_message_id})
return last_message_id
@api.multi
def channel_invite(self, partner_ids):
""" Add the given partner_ids to the current channels and broadcast the channel header to them.
            :param partner_ids : list of partner ids to add
"""
partners = self.env['res.partner'].browse(partner_ids)
# add the partner
for channel in self:
partners_to_add = partners - channel.channel_partner_ids
channel.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': partner_id}) for partner_id in partners_to_add.ids]})
for partner in partners_to_add:
                notification = _('<div class="o_mail_notification">joined <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (channel.id, channel.name,)
                channel.message_post(body=notification, message_type="notification", subtype="mail.mt_comment", author_id=partner.id)
# broadcast the channel header to the added partner
self._broadcast(partner_ids)
#------------------------------------------------------
# Instant Messaging View Specific (Slack Client Action)
#------------------------------------------------------
@api.model
def get_init_notifications(self):
""" Get unread messages and old messages received less than AWAY_TIMER
ago of minimized channel ONLY. This aims to set the minimized channel
when refreshing the page.
Note : the user need to be logged
"""
        # get current user's minimized channels
minimized_channels = self.env['mail.channel.partner'].search([('is_minimized', '=', True), ('partner_id', '=', self.env.user.partner_id.id)]).mapped('channel_id')
# get the message since the AWAY_TIMER
threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
threshold = threshold.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
domain = [('channel_ids', 'in', minimized_channels.ids), ('create_date', '>', threshold)]
# get the message since the last poll of the user
presence = self.env['bus.presence'].search([('user_id', '=', self._uid)], limit=1)
if presence:
domain.append(('create_date', '>', presence.last_poll))
# do the message search
message_values = self.env['mail.message'].message_fetch(domain=domain)
# create the notifications (channel infos first, then messages)
notifications = []
for channel_info in minimized_channels.channel_info():
notifications.append([(self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info])
for message_value in message_values:
for channel_id in message_value['channel_ids']:
if channel_id in minimized_channels.ids:
message_value['channel_ids'] = [channel_id]
notifications.append([(self._cr.dbname, 'mail.channel', channel_id), dict(message_value)])
return notifications
@api.model
def channel_fetch_slot(self):
""" Return the channels of the user grouped by 'slot' (channel, direct_message or private_group), and
the mapping between partner_id/channel_id for direct_message channels.
:returns dict : the grouped channels and the mapping
"""
values = {}
my_partner_id = self.env.user.partner_id.id
pinned_channels = self.env['mail.channel.partner'].search([('partner_id', '=', my_partner_id), ('is_pinned', '=', True)]).mapped('channel_id')
# get the group/public channels
values['channel_channel'] = self.search([('channel_type', '=', 'channel'), ('public', 'in', ['public', 'groups']), ('channel_partner_ids', 'in', [my_partner_id])]).channel_info()
# get the pinned 'direct message' channel
direct_message_channels = self.search([('channel_type', '=', 'chat'), ('id', 'in', pinned_channels.ids)])
values['channel_direct_message'] = direct_message_channels.channel_info()
# get the private group
values['channel_private_group'] = self.search([('channel_type', '=', 'channel'), ('public', '=', 'private'), ('channel_partner_ids', 'in', [my_partner_id])]).channel_info()
return values
@api.model
def channel_search_to_join(self, name=None, domain=None):
""" Return the channel info of the channel the current partner can join
:param name : the name of the researched channels
:param domain : the base domain of the research
:returns dict : channel dict
"""
if not domain:
domain = []
domain = expression.AND([
[('channel_type', '=', 'channel')],
[('channel_partner_ids', 'not in', [self.env.user.partner_id.id])],
[('public', '!=', 'private')],
domain
])
if name:
domain = expression.AND([domain, [('name', 'ilike', '%'+name+'%')]])
return self.search(domain).read(['name', 'public', 'uuid', 'channel_type'])
@api.multi
def channel_join_and_get_info(self):
self.ensure_one()
if self.channel_type == 'channel' and not self.email_send:
notification = _('<div class="o_mail_notification">joined <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
self.message_post(body=notification, message_type="notification", subtype="mail.mt_comment")
self.action_follow()
channel_info = self.channel_info()[0]
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info)
return channel_info
@api.model
def channel_create(self, name, privacy='public'):
""" Create a channel and add the current partner, broadcast it (to make the user directly
listen to it when polling)
:param name : the name of the channel to create
:param privacy : privacy of the channel. Should be 'public' or 'private'.
:return dict : channel header
"""
# create the channel
new_channel = self.create({
'name': name,
'public': privacy,
'email_send': False,
'channel_partner_ids': [(4, self.env.user.partner_id.id)]
})
channel_info = new_channel.channel_info('creation')[0]
notification = _('<div class="o_mail_notification">created <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (new_channel.id, new_channel.name,)
new_channel.message_post(body=notification, message_type="notification", subtype="mail.mt_comment")
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info)
return channel_info
@api.model
def get_mention_suggestions(self, search, limit=8):
""" Return 'limit'-first channels' id, name and public fields such that the name matches a
'search' string. Exclude channels of type chat (DM), and private channels the current
user isn't registered to. """
domain = expression.AND([
[('name', 'ilike', search)],
[('channel_type', '=', 'channel')],
expression.OR([
[('public', '!=', 'private')],
[('channel_partner_ids', 'in', [self.env.user.partner_id.id])]
])
])
return self.search_read(domain, ['id', 'name', 'public'], limit=limit)
@api.model
def channel_fetch_listeners(self, uuid):
""" Return the id, name and email of partners listening to the given channel """
self._cr.execute("""
SELECT P.id, P.name, P.email
FROM mail_channel_partner CP
INNER JOIN res_partner P ON CP.partner_id = P.id
INNER JOIN mail_channel C ON CP.channel_id = C.id
WHERE C.uuid = %s""", (uuid,))
return self._cr.dictfetchall()
@api.multi
def channel_fetch_preview(self):
""" Return the last message of the given channels """
self._cr.execute("""
SELECT mail_channel_id AS id, MAX(mail_message_id) AS message_id
FROM mail_message_mail_channel_rel
WHERE mail_channel_id IN %s
GROUP BY mail_channel_id
""", (tuple(self.ids),))
channels_preview = dict((r['message_id'], r) for r in self._cr.dictfetchall())
last_messages = self.env['mail.message'].browse(channels_preview.keys()).message_format()
for message in last_messages:
channel = channels_preview[message['id']]
del(channel['message_id'])
channel['last_message'] = message
return channels_preview.values()
| agpl-3.0 |
381426068/MissionPlanner | Lib/site-packages/numpy/core/getlimits.py | 54 | 8734 | """ Machine limits for Float32 and Float64 and (long double) if available...
"""
__all__ = ['finfo','iinfo']
from machar import MachAr
import numeric
import numerictypes as ntypes
from numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0: a.shape = (1,)
return a
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
class finfo(object):
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
eps : floating point number of the appropriate type
The smallest representable number such that ``1.0 + eps != 1.0``.
epsneg : floating point number of the appropriate type
The smallest representable number such that ``1.0 - epsneg != 1.0``.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more detailed
information.
machep : int
The exponent that yields ``eps``.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there being
no leading 0's in the mantissa.
negep : int
The exponent that yields ``epsneg``.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of float
is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.
``10**-precision``.
tiny : floating point number of the appropriate type
The smallest-magnitude usable number.
Parameters
----------
dtype : floating point type, dtype, or instance
The kind of floating point data type to get information about.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
Notes
-----
For developers of NumPy: do not instantiate this at the module level. The
initial calculation of these parameters is expensive and negatively impacts
import times. These objects are cached, so calling ``finfo()`` repeatedly
inside your functions is not a problem.
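    Examples
    --------
    An illustrative session (exact values depend on the platform)::

        f = finfo(ntypes.single)
        f.eps    # roughly 1.19e-07 for IEEE-754 single precision
        f.max    # roughly 3.40e+38 for IEEE-754 single precision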
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError, "data type %r not inexact" % (dtype)
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
if dtype is ntypes.double:
itype = ntypes.int64
fmt = '%24.16e'
precname = 'double'
elif dtype is ntypes.single:
itype = ntypes.int32
fmt = '%15.7e'
precname = 'single'
elif dtype is ntypes.longdouble:
itype = ntypes.longlong
fmt = '%s'
precname = 'long double'
else:
raise ValueError, repr(dtype)
machar = MachAr(lambda v:array([v], dtype),
lambda v:_frz(v.astype(itype))[0],
lambda v:array(_frz(v)[0], dtype),
lambda v: fmt % array(_frz(v)[0], dtype),
'numpy %s precision floating point number' % precname)
for word in ['precision', 'iexp',
'maxexp','minexp','negep',
'machep']:
setattr(self,word,getattr(machar, word))
for word in ['tiny','resolution','epsneg']:
setattr(self,word,getattr(machar, word).flat[0])
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self.machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
return self
def __str__(self):
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
precision=%(precision)3s resolution= %(_str_resolution)s
machep=%(machep)6s eps= %(_str_eps)s
negep =%(negep)6s epsneg= %(_str_epsneg)s
minexp=%(minexp)6s tiny= %(_str_tiny)s
maxexp=%(maxexp)6s max= %(_str_max)s
nexp =%(nexp)6s min= -max
---------------------------------------------------------------------
''' % self.__dict__
class iinfo:
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
        if self.kind not in 'iu':
raise ValueError("Invalid integer data type.")
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1L << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
min = property(min)
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1L << self.bits) - 1)
else:
val = int((1L << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
max = property(max)
def __str__(self):
"""String representation."""
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
min = %(min)s
max = %(max)s
---------------------------------------------------------------------
''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
if __name__ == '__main__':
f = finfo(ntypes.single)
print 'single epsilon:',f.eps
print 'single tiny:',f.tiny
f = finfo(ntypes.float)
print 'float epsilon:',f.eps
print 'float tiny:',f.tiny
f = finfo(ntypes.longfloat)
print 'longfloat epsilon:',f.eps
print 'longfloat tiny:',f.tiny
| gpl-3.0 |
alu042/edx-platform | openedx/core/lib/tests/test_courses.py | 13 | 2372 | """
Tests for functionality in openedx/core/lib/courses.py.
"""
import ddt
from django.test.utils import override_settings
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from ..courses import course_image_url
@ddt.ddt
class CourseImageTestCase(ModuleStoreTestCase):
"""Tests for course image URLs."""
def verify_url(self, expected_url, actual_url):
"""
Helper method for verifying the URL is as expected.
"""
if not expected_url.startswith("/"):
expected_url = "/" + expected_url
self.assertEquals(expected_url, actual_url)
def test_get_image_url(self):
"""Test image URL formatting."""
course = CourseFactory.create()
self.verify_url(
unicode(course.id.make_asset_key('asset', course.course_image)),
course_image_url(course)
)
def test_non_ascii_image_name(self):
""" Verify that non-ascii image names are cleaned """
course_image = u'before_\N{SNOWMAN}_after.jpg'
course = CourseFactory.create(course_image=course_image)
self.verify_url(
unicode(course.id.make_asset_key('asset', course_image.replace(u'\N{SNOWMAN}', '_'))),
course_image_url(course)
)
def test_spaces_in_image_name(self):
""" Verify that image names with spaces in them are cleaned """
course_image = u'before after.jpg'
course = CourseFactory.create(course_image=u'before after.jpg')
self.verify_url(
unicode(course.id.make_asset_key('asset', course_image.replace(" ", "_"))),
course_image_url(course)
)
@override_settings(DEFAULT_COURSE_ABOUT_IMAGE_URL='test.png')
@override_settings(STATIC_URL='static/')
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_empty_image_name(self, default_store):
"""
Verify that if a course has empty `course_image`, `course_image_url` returns
`DEFAULT_COURSE_ABOUT_IMAGE_URL` defined in the settings.
"""
course = CourseFactory.create(course_image='', default_store=default_store)
self.assertEquals(
'static/test.png',
course_image_url(course),
)
| agpl-3.0 |
ltilve/ChromiumGStreamerBackend | third_party/closure_linter/closure_linter/strict_test.py | 125 | 1964 | #!/usr/bin/env python
# Copyright 2013 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --strict.
Tests errors that can be thrown by gjslint when in strict mode.
"""
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
class StrictTest(unittest.TestCase):
"""Tests scenarios where strict generates warnings."""
def testUnclosedString(self):
"""Tests warnings are reported when nothing is disabled.
b/11450054.
"""
original = [
'bug = function() {',
' (\'foo\'\');',
'};',
'',
]
expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
errors.FILE_IN_BLOCK]
self._AssertErrors(original, expected)
def _AssertErrors(self, original, expected_errors):
"""Asserts that the error fixer corrects original to expected."""
    # Trap gjslint's output and parse it to get the messages added.
error_accumulator = erroraccumulator.ErrorAccumulator()
runner.Run('testing.js', error_accumulator, source=original)
error_nums = [e.code for e in error_accumulator.GetErrors()]
error_nums.sort()
expected_errors.sort()
self.assertListEqual(error_nums, expected_errors)
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
pearsontechnology/st2contrib | packs/jira/actions/lib/base.py | 12 | 1048 | from jira import JIRA
# from st2actions.runners.pythonrunner import Action
__all__ = [
'BaseJiraAction'
]
class Action(object):
def __init__(self, config):
self.config = config
class BaseJiraAction(Action):
def __init__(self, config):
super(BaseJiraAction, self).__init__(config=config)
self._client = self._get_client()
def _get_client(self):
config = self.config
options = {'server': config['url']}
rsa_cert_file = config['rsa_cert_file']
rsa_key_content = self._get_file_content(file_path=rsa_cert_file)
oauth_creds = {
'access_token': config['oauth_token'],
'access_token_secret': config['oauth_secret'],
'consumer_key': config['consumer_key'],
'key_cert': rsa_key_content
}
client = JIRA(options=options, oauth=oauth_creds)
return client
def _get_file_content(self, file_path):
with open(file_path, 'r') as fp:
content = fp.read()
return content
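# Usage sketch (hypothetical config values; the keys mirror those read in
# _get_client() above):
#
#   action = BaseJiraAction(config={
#       'url': 'https://jira.example.com',
#       'rsa_cert_file': '/path/to/jira.pem',
#       'oauth_token': '...', 'oauth_secret': '...', 'consumer_key': '...',
#   })
#   issue = action._client.issue('PROJ-1')  # standard jira-python call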
| apache-2.0 |
blueskyll/condor | condor-8.4.3/src/condor_contrib/condor_pigeon/src/condor_pigeon_client/skype_linux_tools/Skype4Py/Languages/et.py | 23 | 6754 | apiAttachAvailable = u'Leitud'
apiAttachNotAvailable = u'Kadunud'
apiAttachPendingAuthorization = u'Autoriseerimine'
apiAttachRefused = u'Keeldumine'
apiAttachSuccess = u'\xdchendatud'
apiAttachUnknown = u'M\xe4\xe4ramata'
budDeletedFriend = u'Kustutatud S\xf5ber'
budFriend = u'S\xf5ber'
budNeverBeenFriend = u'Pole Olnud S\xf5ber'
budPendingAuthorization = u'Ootab Autoriseerimist'
budUnknown = u'M\xe4\xe4ramata'
cfrBlockedByRecipient = u'Blokeeritud vastuv\xf5tja poolt'
cfrMiscError = u'M\xe4\xe4ramata viga'
cfrNoCommonCodec = u'pole \xfchist kodekit'
cfrNoProxyFound = u'Ei leitud l\xfc\xfcsi'
cfrNotAuthorizedByRecipient = u'Helistaja pole autoriseeritud'
cfrRecipientNotFriend = u'K\xf5ne vastuv\xf5tja pole s\xf5ber'
cfrRemoteDeviceError = u'Probleem teise poole heliseadmega'
cfrSessionTerminated = u'\xfchendus katkestatud'
cfrSoundIOError = u'Heli viga'
cfrSoundRecordingError = u'Helisalvestuse viga'
cfrUnknown = u'M\xe4\xe4ramata'
cfrUserDoesNotExist = u'Kasutajat v\xf5i numbrit pole olemas'
cfrUserIsOffline = u"Ta ei ole Skype'i sisse logitud"
chsAllCalls = u'Vana Dialoog'
chsDialog = u'Dialoog'
chsIncomingCalls = u'Multiaksept'
chsLegacyDialog = u'Vana Dialoog'
chsMissedCalls = u'Dialoog'
chsMultiNeedAccept = u'Multiaksept'
chsMultiSubscribed = u'Multiteenus'
chsOutgoingCalls = u'Multiteenus'
chsUnknown = u'M\xe4\xe4ramata'
chsUnsubscribed = u'Tellimata'
clsBusy = u'H\xf5ivatud'
clsCancelled = u'Katkestatud'
clsEarlyMedia = u'M\xe4ngib Muusikat'
clsFailed = u'K\xf5ne kahjuks eba\xf5nnestus!'
clsFinished = u'L\xf5petatud'
clsInProgress = u'Aktiivne k\xf5ne'
clsLocalHold = u'Peatatud Lokaalselt'
clsMissed = u'Vastamata k\xf5ne'
clsOnHold = u'Ootel'
clsRefused = u'Keeldutud'
clsRemoteHold = u'Peatatud Eemal'
clsRinging = u'Heliseb'
clsRouting = u'Ruutimine'
clsTransferred = u'M\xe4\xe4ramata'
clsTransferring = u'M\xe4\xe4ramata'
clsUnknown = u'M\xe4\xe4ramata'
clsUnplaced = u'Pole Helistatud'
clsVoicemailBufferingGreeting = u'Tervituse Laadimine'
clsVoicemailCancelled = u'Katkestatud'
clsVoicemailFailed = u'K\xd5nepost eba\xf5nnestus'
clsVoicemailPlayingGreeting = u'Tervituse M\xe4ngimine'
clsVoicemailRecording = u'K\xf5neposti salvestamine'
clsVoicemailSent = u'Saadetud'
clsVoicemailUploading = u'\xdcleslaadimine'
cltIncomingP2P = u'P2P Sisse'
cltIncomingPSTN = u'PSTN Sisse'
cltOutgoingP2P = u'P2P V\xe4lja'
cltOutgoingPSTN = u'PSTN V\xe4lja'
cltUnknown = u'M\xe4\xe4ramata'
cmeAddedMembers = u'Lisas Osalejad'
cmeCreatedChatWith = u'Tegi Jututoa'
cmeEmoted = u'M\xe4\xe4ramata'
cmeLeft = u'Lahkus'
cmeSaid = u'\xdctles'
cmeSawMembers = u'N\xe4gi Osalejaid'
cmeSetTopic = u'Tegi Pealkirja'
cmeUnknown = u'M\xe4\xe4ramata'
cmsRead = u'Loetud'
cmsReceived = u'Vastuv\xf5etud'
cmsSending = u'Saadab...'
cmsSent = u'Saadetud'
cmsUnknown = u'M\xe4\xe4ramata'
conConnecting = u'\xdchendan'
conOffline = u'V\xe4ljas'
conOnline = u'Sees'
conPausing = u'Paus'
conUnknown = u'M\xe4\xe4ramata'
cusAway = u'Eemal'
cusDoNotDisturb = u'H\xf5ivatud'
cusInvisible = u'N\xe4htamatu'
cusLoggedOut = u'V\xe4ljas'
cusNotAvailable = u'Kaua eemal'
cusOffline = u'V\xe4ljas'
cusOnline = u'Sees'
cusSkypeMe = u'Skype Me'
cusUnknown = u'M\xe4\xe4ramata'
cvsBothEnabled = u'Video Saatmine ja Vastuv\xf5tmine'
cvsNone = u'Video Puudub'
cvsReceiveEnabled = u'Video Vastuv\xf5tmine'
cvsSendEnabled = u'Video Saatmine'
cvsUnknown = u''
grpAllFriends = u'K\xf5ik S\xf5brad'
grpAllUsers = u'K\xf5ik Kasutajad'
grpCustomGroup = u'Kasutaja Grupp'
grpOnlineFriends = u'\xdchendatud S\xf5brad'
grpPendingAuthorizationFriends = u'Autoriseerimise Ootel'
grpProposedSharedGroup = u'Pakutud Jagatud Grupp'
grpRecentlyContactedUsers = u'Hiljutised S\xf5brad'
grpSharedGroup = u'Jagatud Grupp'
grpSkypeFriends = u'Skype S\xf5brad'
grpSkypeOutFriends = u'SkypeOut S\xf5brad'
grpUngroupedFriends = u'Grupeerimata'
grpUnknown = u'M\xe4\xe4ramata'
grpUsersAuthorizedByMe = u'Minu Poolt Autoriseeritud'
grpUsersBlockedByMe = u'Minu Poolt Blokeeritud'
grpUsersWaitingMyAuthorization = u'Ootavad Minu Luba'
leaAddDeclined = u'Tagasil\xfckatud'
leaAddedNotAuthorized = u'Pole Autoriseeritud'
leaAdderNotFriend = u'Pole S\xf5ber'
leaUnknown = u'M\xe4\xe4ramata'
leaUnsubscribe = u'Eemaldus'
leaUserIncapable = u'Kasutaja V\xf5imalused Piiratud'
leaUserNotFound = u'Kasutajat Ei Leitud'
olsAway = u'Eemal'
olsDoNotDisturb = u'H\xf5ivatud'
olsNotAvailable = u'Kaua eemal'
olsOffline = u'V\xe4ljas'
olsOnline = u'Sees'
olsSkypeMe = u'Skype Me'
olsSkypeOut = u'SkypeOut'
olsUnknown = u'M\xe4\xe4ramata'
smsMessageStatusComposing = u'Composing'
smsMessageStatusDelivered = u'Delivered'
smsMessageStatusFailed = u'Failed'
smsMessageStatusRead = u'Read'
smsMessageStatusReceived = u'Received'
smsMessageStatusSendingToServer = u'Sending to Server'
smsMessageStatusSentToServer = u'Sent to Server'
smsMessageStatusSomeTargetsFailed = u'Some Targets Failed'
smsMessageStatusUnknown = u'Unknown'
smsMessageTypeCCRequest = u'Confirmation Code Request'
smsMessageTypeCCSubmit = u'Confirmation Code Submit'
smsMessageTypeIncoming = u'Incoming'
smsMessageTypeOutgoing = u'Outgoing'
smsMessageTypeUnknown = u'Unknown'
smsTargetStatusAcceptable = u'Acceptable'
smsTargetStatusAnalyzing = u'Analyzing'
smsTargetStatusDeliveryFailed = u'Delivery Failed'
smsTargetStatusDeliveryPending = u'Delivery Pending'
smsTargetStatusDeliverySuccessful = u'Delivery Successful'
smsTargetStatusNotRoutable = u'Not Routable'
smsTargetStatusUndefined = u'Undefined'
smsTargetStatusUnknown = u'Unknown'
usexFemale = u'Naine'
usexMale = u'Mees'
usexUnknown = u'M\xe4\xe4ramata'
vmrConnectError = u'\xdchenduse Viga'
vmrFileReadError = u'Viga Lugemisel'
vmrFileWriteError = u'Viga Kirjutamisel'
vmrMiscError = u'M\xe4\xe4ramata Viga'
vmrNoError = u'Korras'
vmrNoPrivilege = u'Pole K\xf5neposti Privileegi'
vmrNoVoicemail = u'Pole Sellist K\xf5neposti'
vmrPlaybackError = u'Viga Esitamisel'
vmrRecordingError = u'Viga Salvestamisel'
vmrUnknown = u'M\xe4\xe4ramata'
vmsBlank = u'T\xfchi'
vmsBuffering = u'Kogumine'
vmsDeleting = u'Kustutamine'
vmsDownloading = u'Allalaadimine'
vmsFailed = u'Eba\xf5nnestus'
vmsNotDownloaded = u'Pole Laaditud'
vmsPlayed = u'Esitatud'
vmsPlaying = u'Esitamine'
vmsRecorded = u'Salvestatud'
vmsRecording = u'K\xf5neposti salvestamine'
vmsUnknown = u'M\xe4\xe4ramata'
vmsUnplayed = u'M\xe4ngimata'
vmsUploaded = u'\xdcleslaaditud'
vmsUploading = u'\xdcleslaadimine'
vmtCustomGreeting = u'Kasutaja Tervitus'
vmtDefaultGreeting = u'Vaikimisi Tervitus'
vmtIncoming = u'sissetulev k\xf5nepost'
vmtOutgoing = u'V\xe4ljaminev'
vmtUnknown = u'M\xe4\xe4ramata'
vssAvailable = u'Olemas'
vssNotAvailable = u'Puudub'
vssPaused = u'Peatatud'
vssRejected = u'Tagasil\xfckatud'
vssRunning = u'Kestev'
vssStarting = u'Algab'
vssStopping = u'Peatamine'
vssUnknown = u'M\xe4\xe4ramata'
| gpl-2.0 |
dajhorn/ps2binutils | gdb/testsuite/gdb.python/py-typeprint.py | 46 | 1128 | # Copyright (C) 2012-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
class Recognizer(object):
def __init__(self):
self.enabled = True
def recognize(self, type_obj):
if type_obj.tag == 'basic_string':
return 'string'
return None
class StringTypePrinter(object):
def __init__(self):
self.name = 'string'
self.enabled = True
def instantiate(self):
return Recognizer()
gdb.type_printers.append(StringTypePrinter())
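# With the printer registered, gdb should display a value whose type is tagged
# 'basic_string' as 'string' -- presumably what the accompanying
# py-typeprint.exp expect script verifies.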
| gpl-2.0 |
guorendong/iridium-browser-ubuntu | tools/boilerplate.py | 87 | 2132 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create files with copyright boilerplate and header include guards.
Usage: tools/boilerplate.py path/to/file.{h,cc}
"""
from datetime import date
import os
import os.path
import sys
LINES = [
'Copyright %d The Chromium Authors. All rights reserved.' %
date.today().year,
'Use of this source code is governed by a BSD-style license that can be',
'found in the LICENSE file.'
]
EXTENSIONS_TO_COMMENTS = {
'h': '//',
'cc': '//',
'mm': '//',
'js': '//',
'py': '#'
}
def _GetHeader(filename):
_, ext = os.path.splitext(filename)
ext = ext[1:]
comment = EXTENSIONS_TO_COMMENTS[ext] + ' '
return '\n'.join([comment + line for line in LINES])
def _CppHeader(filename):
guard = filename.replace('/', '_').replace('.', '_').upper() + '_'
return '\n'.join([
'',
'#ifndef ' + guard,
'#define ' + guard,
'',
'#endif // ' + guard,
''
])
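# For example, _CppHeader('foo/bar.h') produces an include guard named
# FOO_BAR_H_ wrapped in the #ifndef/#define/#endif lines above.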
def _CppImplementation(filename):
base, _ = os.path.splitext(filename)
include = '#include "' + base + '.h"'
return '\n'.join(['', include])
def _CreateFile(filename):
contents = _GetHeader(filename) + '\n'
if filename.endswith('.h'):
contents += _CppHeader(filename)
elif filename.endswith('.cc') or filename.endswith('.mm'):
contents += _CppImplementation(filename)
fd = open(filename, 'w')
fd.write(contents)
fd.close()
def Main():
files = sys.argv[1:]
if len(files) < 1:
print >> sys.stderr, 'Usage: boilerplate.py path/to/file.h path/to/file.cc'
return 1
# Perform checks first so that the entire operation is atomic.
for f in files:
_, ext = os.path.splitext(f)
if not ext[1:] in EXTENSIONS_TO_COMMENTS:
print >> sys.stderr, 'Unknown file type for %s' % f
return 2
if os.path.exists(f):
print >> sys.stderr, 'A file at path %s already exists' % f
return 2
for f in files:
_CreateFile(f)
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
tavaresdong/cs61a-projects | projects/hog/tests/utils.py | 3 | 13140 |
import hog
'J06F2o_c3W6o3_4d_191j4'
def w_1Fh30(N894_4hH_):
oA61__qf1 = {}
def P_S_7(*t_IY_0):
if (t_IY_0 not in oA61__qf1):
oA61__qf1[t_IY_0] = N894_4hH_(*t_IY_0)
return oA61__qf1[t_IY_0]
return P_S_7
def G17T(T4_xg9):
return ((((-61 + -75) + (66 + 25)) + ((-34 + -10) + (135 + -45))) - T4_xg9)
@w_1Fh30
def yx53d__(EDP9Y5_1P):
if (EDP9Y5_1P < (((-5 + 41) + (99 + -84)) + ((-50 + -4) + (-46 + 51)))):
return False
lj51207 = (((-129 + 32) + (-14 + 83)) + ((128 + -64) + (0 + -34)))
while ((lj51207 * lj51207) <= EDP9Y5_1P):
if ((EDP9Y5_1P % lj51207) == int((((-1.094295505045491 + 0.7613674982733807) + (0.153461372059721 + 0.7390648355218271)) * int(((0.14553767642704973 + 0.44049382935637826) * 0))))):
return False
lj51207 += (((128 + 31) + (-77 + -12)) + ((-187 + 88) + (-10 + 40)))
return True
@w_1Fh30
def f_h_(EDP9Y5_1P):
EDP9Y5_1P += (((-193 + 97) + (10 + 67)) + ((79 + -22) + (-69 + 32)))
while (not yx53d__(EDP9Y5_1P)):
EDP9Y5_1P += (((-49 + -47) + (129 + -48)) + ((41 + -78) + (16 + 37)))
return EDP9Y5_1P
def C47Q(Z8_4, CK0_s):
(Z8_4, CK0_s) = ((Z8_4 % (((334 + -69) + (-188 + 91)) + ((35 + -92) + (30 + -41)))), (CK0_s % (((218 + -76) + (-11 + 4)) + ((-103 + 40) + (-6 + 34)))))
return (Z8_4 == ((CK0_s // (((201 + -45) + (11 + -77)) + ((-27 + -75) + (-32 + 54)))) + ((CK0_s % (((-44 + 40) + (2 + 53)) + ((-91 + 64) + (-86 + 72)))) * (((7 + 63) + (-12 + -11)) + ((-68 + 96) + (-144 + 79))))))
def fG_7bDt8y(Kec95w):
'M4b45b_ASe68L5j_1lyq'
x28FV5I = sum([Kec95w[r0_1Ez_70] for r0_1Ez_70 in Kec95w])
for r0_1Ez_70 in Kec95w:
Kec95w[r0_1Ez_70] /= x28FV5I
def a__b4(M4KEJ9hn6, opponent_score):
return (((-150 + 71) + (-26 + 57)) + ((-4 + 27) + (-26 + 56)))
def wk6__t06(ky7J088, oK_0QBz_, T4_xg9):
return (ky7J088, oK_0QBz_, T4_xg9)
def H_55_(Js_f3X_):
return Js_f3X_[int((((-0.06895390620080366 + 0.23009749398538937) + (0.39836778995341304 + 0.40935770363685375)) * int((0.8568716050522642 * 0))))]
def bG16F(Js_f3X_):
return Js_f3X_[(((115 + -28) + (-66 + 32)) + ((-118 + 89) + (48 + -71)))]
def h_7_(Js_f3X_):
return Js_f3X_[(((-77 + -19) + (74 + -8)) + ((-5 + -49) + (53 + 33)))]
o_5Hw0 = int((((-0.6730609828505822 + 0.6510254571263283) + (0.5734714882188517 + 0.3947612869402677)) * int((0.4408764248870518 * 0))))
Qa0F__b = (- (((49 + 61) + (-130 + 34)) + ((-119 + 54) + (63 + -11))))
def ty4N3gtk(M4KEJ9hn6, H5_f__):
'lT_12h2bO_UR4__62_W_Em'
if ((M4KEJ9hn6 == Qa0F__b) or (H5_f__ == (((-44 + 46) + (-169 + 74)) + ((50 + 65) + (54 + -75))))):
return Qa0F__b
return (M4KEJ9hn6 + H5_f__)
def mYF00H(Js_f3X_, B_5544ZP1, jLh9d_O):
'P4FD_85_3L7_h22g799wUe8P4_2'
(Z8_4, CK0_s) = H_55_(Js_f3X_)
G87T = B_5544ZP1
T4_xg9 = h_7_(Js_f3X_)
if (B_5544ZP1 == Qa0F__b):
if (T4_xg9 == int((0.9546400643636819 * 0))):
CK0_s += jLh9d_O
else:
Z8_4 += jLh9d_O
G87T = int((((-0.33191170072113707 + 0.013157764274433492) + (-0.38770112705675186 + 0.7069694593628609)) * int(((-0.3678625894988602 + 0.864169177687381) * int((0.8482320142031017 * 0))))))
if (jLh9d_O == int((((-0.5454843748671404 + 0.06791389812235493) + (0.5162800052902131 + 0.09545992515631974)) * 0))):
if (T4_xg9 == int(((0.32761668848172676 + 0.578317831604538) * int((0.7082471480573831 * 0))))):
G87T = ((((-94 + 15) + (23 + -12)) + ((39 + -35) + (66 + -1))) + max((CK0_s % (((-86 + -25) + (74 + -46)) + ((35 + 51) + (44 + -37)))), (CK0_s // (((-54 + 15) + (82 + -98)) + ((-24 + 7) + (1 + 81))))))
else:
G87T = ((((-142 + 83) + (35 + -49)) + ((170 + -76) + (-86 + 66))) + max((Z8_4 % (((0 + -40) + (-74 + 98)) + ((50 + -79) + (118 + -63)))), (Z8_4 // (((194 + -94) + (-95 + 64)) + ((11 + -87) + (-13 + 30))))))
if yx53d__(G87T):
G87T = f_h_(G87T)
if (T4_xg9 == int(((0.42983549458075576 + 0.4682067678219757) * int((0.16032315669548503 * 0))))):
Z8_4 += G87T
else:
CK0_s += G87T
if C47Q(Z8_4, CK0_s):
(Z8_4, CK0_s) = (CK0_s, Z8_4)
return wk6__t06((Z8_4, CK0_s), bG16F(Js_f3X_), G17T(T4_xg9))
def b5t0(Js_f3X_):
'RN_x8gZ0_8h3zQ51i__W'
if ((sum(H_55_(Js_f3X_)) % (((26 + -47) + (109 + -34)) + ((-190 + 68) + (64 + 11)))) == int((((0.269067864970701 + 0.32259921447958395) + (-0.06819576870039001 + 0.33042353697347804)) * int(((-0.3268373704349704 + 0.962121454186533) * int((0.2499635171703265 * 0))))))):
return qF_Q4Cy
return J_Q_co3
def sJQ1(Hi_3):
'cW5_t2_30u2NL9_5QMB__5_7x19'
fG_7bDt8y(Hi_3)
return tuple(Hi_3.items())
J_Q_co3 = sJQ1({(((69 + 0) + (-54 + 7)) + ((0 + 72) + (-146 + 53))): (((-13 + 75) + (-40 + 13)) + ((60 + -58) + (-2 + -34))), (((-118 + -23) + (19 + 34)) + ((115 + 42) + (33 + -100))): (((-65 + 12) + (-40 + 43)) + ((85 + -96) + (137 + -75))), (((3 + -100) + (18 + -14)) + ((42 + 15) + (77 + -38))): (((75 + -79) + (81 + -47)) + ((-16 + 85) + (-56 + -42))), (((-214 + 71) + (124 + -34)) + ((4 + 15) + (135 + -97))): (((0 + 65) + (-8 + -53)) + ((-160 + 63) + (44 + 50))), (((11 + -99) + (59 + -16)) + ((7 + -21) + (36 + 28))): (((-72 + 54) + (-43 + -28)) + ((158 + 31) + (-120 + 21))), (((94 + -77) + (11 + 39)) + ((-72 + 89) + (-40 + -38))): (((16 + 90) + (-51 + -12)) + ((-188 + 98) + (101 + -53)))})
qF_Q4Cy = sJQ1({(((139 + 18) + (-142 + 64)) + ((-26 + -5) + (-106 + 59))): (((19 + -67) + (-46 + 84)) + ((-87 + 57) + (88 + -47))), (((-24 + 72) + (110 + -95)) + ((-152 + 17) + (124 + -50))): (((-38 + 9) + (-7 + 39)) + ((-41 + 83) + (-133 + 89))), (((-156 + 79) + (-21 + 41)) + ((232 + -92) + (-85 + 5))): (((58 + -50) + (-31 + 95)) + ((-115 + 14) + (114 + -84))), (((64 + 12) + (109 + -97)) + ((-227 + 45) + (181 + -83))): (((87 + -78) + (-60 + -3)) + ((122 + -3) + (-36 + -28)))})
@w_1Fh30
def pR869H(jLh9d_O, Hi_3):
'Z8A839k40Tz4_3_y9__6_304m88'
if (jLh9d_O == int((((-0.47772052324243386 + 0.30405462845221687) + (0.16575098267168764 + 0.3466600405965793)) * int(((0.338052492913136 + 0.4391511750119873) * 0))))):
return {o_5Hw0: (((59 + -84) + (101 + -16)) + ((-155 + -4) + (41 + 59)))}
dAbmO68m = pR869H((jLh9d_O - (((170 + -61) + (-40 + 27)) + ((-156 + 98) + (18 + -55)))), Hi_3)
r_2_ = {}
for (M4KEJ9hn6, v1b9062) in dAbmO68m.items():
for (H5_f__, f1H4mS) in Hi_3:
LD6_hY_ = ty4N3gtk(M4KEJ9hn6, H5_f__)
if (LD6_hY_ not in r_2_):
r_2_[LD6_hY_] = int(((0.2464584668127343 + 0.22883518523710322) * int((0.009911594198682927 * 0))))
r_2_[LD6_hY_] += (v1b9062 * f1H4mS)
fG_7bDt8y(r_2_)
return r_2_
def FKeM(c_8S5, Ia_X4s, E72708RVT=100):
'r343Wly4n6EU9U__N2O8_1k1_nmy'
q7g36YI8_ = M8t871C12
o743 = (c_8S5, Ia_X4s)
try:
return ((q7g36YI8_(wk6__t06((int((((-0.40158367373450565 + 0.5859739394473586) + (0.47934562629302513 + 0.3318195623167507)) * int(((0.586536985595748 + 0.0678137843299782) * 0)))), int((((-1.2738435749491268 + 0.9300296003610368) + (0.10925156187943774 + 0.6335950740379009)) * int((0.37761627045324764 * 0))))), o743, int(((0.4980686232689854 + 0.4239490482244015) * int((0.7944665247632448 * 0))))), E72708RVT) + q7g36YI8_(wk6__t06((int((0.36884748195910766 * 0)), int((((-0.8727551603504358 + 0.30883275015652334) + (0.8000015822093133 + 0.033095766788204584)) * int((0.15583395864785343 * 0))))), o743, (((-13 + 20) + (-95 + 49)) + ((54 + -95) + (155 + -74)))), E72708RVT)) / (((-93 + 81) + (4 + 55)) + ((-52 + -49) + (32 + 24))))
except Rtz669k as C_4o__JY:
T4_xg9 = h_7_(C_4o__JY.Js_f3X_)
m_K9 = bG16F(C_4o__JY.Js_f3X_)[T4_xg9]
print(C_4o__JY)
return ((((-0.22376849507732122 + 0.080301263296089) + (-0.40237421751922 + 0.7176222269207142)) + ((-1.137274150681609 + 0.9871974624798541) + (0.43843958026221164 + 0.5398563303192814))) * T4_xg9)
def teFn_8():
import datetime
D__0164 = datetime.datetime.now()
print(final_win_rate())
print((datetime.datetime.now() - D__0164))
def final_win_rate():
'X1_23QK82E1_8fM19w_v5_'
return FKeM(hog.final_strategy, a__b4)
def qS4Sz_n(Js_f3X_):
'j717y_ne06LT5z_7u2k_t2vCB'
(Z8_4, CK0_s) = H_55_(Js_f3X_)
try:
if (h_7_(Js_f3X_) == int(((0.5667886402036755 + 0.14312304003390575) * int((0.652482389682734 * 0))))):
jLh9d_O = bG16F(Js_f3X_)[int((((-0.09870906305021765 + 0.1862150960850123) + (-0.1938366069433911 + 0.7346547431044592)) * int(((0.01822552400723798 + 0.27551740382479806) * 0))))](Z8_4, CK0_s)
else:
jLh9d_O = bG16F(Js_f3X_)[(((152 + -90) + (-37 + 68)) + ((-173 + 91) + (39 + -49)))](CK0_s, Z8_4)
except Exception as C_4o__JY:
raise O18v8_t(Js_f3X_, C_4o__JY)
if (jLh9d_O not in range((((101 + 28) + (43 + -63)) + ((9 + -18) + (-151 + 62))))):
raise kG_w__L(Js_f3X_, jLh9d_O)
return jLh9d_O
def q7g36YI8_(Js_f3X_, E72708RVT=100):
'n36ft_z6Nj7_x9tE9qdyE'
(Z8_4, CK0_s) = H_55_(Js_f3X_)
if (min(Z8_4, CK0_s) >= E72708RVT):
return (((-0.34500682416839124 + 0.5370863398177268) + (-0.8653298646102134 + 0.9197823946321956)) + ((-1.3836038864321192 + 0.7445205970180926) + (0.16987042113511297 + 0.7226808226075958)))
elif (Z8_4 >= E72708RVT):
return (((-41 + 66) + (39 + -99)) + ((21 + -42) + (93 + -36)))
elif (CK0_s >= E72708RVT):
return 0
jLh9d_O = qS4Sz_n(Js_f3X_)
return C6y0Pzs1G(Js_f3X_, E72708RVT, jLh9d_O)
@w_1Fh30
def C6y0Pzs1G(Js_f3X_, E72708RVT, jLh9d_O):
'oOV1Z_063J0k_64FhAD_5UqW_zd2'
a96__5d_ = int(((-0.013700830960395427 + 0.9148298499489498) * int((0.6429395418099988 * 0))))
Z661_ = pR869H(jLh9d_O, b5t0(Js_f3X_))
for (LD6_hY_, s_80xu7_E) in Z661_.items():
a96__5d_ += (q7g36YI8_(mYF00H(Js_f3X_, LD6_hY_, jLh9d_O), E72708RVT) * s_80xu7_E)
return a96__5d_
K98__ = {}
def M8t871C12(Js_f3X_, E72708RVT):
'd_72537q_suO69C1uX9__d0'
x_3_ = Js_f3X_
JA249 = [Js_f3X_]
while JA249:
Js_f3X_ = JA249[(- (((-42 + 95) + (-99 + 70)) + ((-20 + 57) + (-65 + 5))))]
if (Js_f3X_ not in K98__):
(Z8_4, CK0_s) = H_55_(Js_f3X_)
try:
jLh9d_O = qS4Sz_n(Js_f3X_)
except Rtz669k as C_4o__JY:
for Js_f3X_ in JA249:
K98__[Js_f3X_] = C_4o__JY
raise C_4o__JY
Z661_ = pR869H(jLh9d_O, b5t0(Js_f3X_))
AEG7_O16 = [(mYF00H(Js_f3X_, LD6_hY_, jLh9d_O), x_h_Ur8) for (LD6_hY_, x_h_Ur8) in Z661_.items()]
for rP____ in AEG7_O16:
rP____ = rP____[int((((-0.565842222819887 + 0.16174852962683217) + (0.2090994414071361 + 0.3812745287972982)) * int(((-0.6903858917254032 + 0.8272905272300173) * int((0.25269866781807293 * 0))))))]
if ((rP____ not in K98__) and (max(H_55_(rP____)) < E72708RVT)):
JA249.append(rP____)
K98__[Js_f3X_] = AEG7_O16
continue
elif isinstance(K98__[Js_f3X_], list):
gn00_M5 = int(((-0.6506678467432071 + 0.9085470778385724) * int((0.30080868237529723 * 0))))
for (rP____, x_h_Ur8) in K98__[Js_f3X_]:
ky7J088 = H_55_(rP____)
if (min(ky7J088) >= E72708RVT):
gn00_M5 += ((((0.074736461318054 + 0.0987453311389368) + (-0.10502305211088026 + 0.2502917138961679)) + ((-0.27979497140209575 + 0.25660810263164413) + (-0.11599511250883032 + 0.3204315270370035))) * x_h_Ur8)
elif (ky7J088[int(((0.4077216191538975 + 0.4904910286076165) * int((0.25080661181547204 * 0))))] >= E72708RVT):
gn00_M5 += x_h_Ur8
elif (ky7J088[(((128 + -35) + (-138 + 84)) + ((-84 + -4) + (23 + 27)))] >= E72708RVT):
pass
else:
gn00_M5 += (K98__[rP____] * x_h_Ur8)
K98__[Js_f3X_] = gn00_M5
JA249.pop()
if isinstance(K98__[x_3_], Rtz669k):
raise K98__[x_3_]
return K98__[x_3_]
class Rtz669k(Exception):
def __init__(niD2, Js_f3X_):
niD2.Js_f3X_ = Js_f3X_
class kG_w__L(Rtz669k):
def __init__(niD2, Js_f3X_, jLh9d_O):
niD2.Js_f3X_ = Js_f3X_
niD2.jLh9d_O = jLh9d_O
def __str__(niD2):
return ((('Evalua' + 'tion ') + ('of Pla' + 'yer ')) + (('{' + "0}'s {1}{2} returned {3}, an invalid numbe") + ('r of rol' + 'ls'))).format(h_7_(niD2.Js_f3X_), bG16F(niD2.Js_f3X_)[h_7_(niD2.Js_f3X_)].__name__, H_55_(niD2.Js_f3X_)[::((- (((-97 + -29) + (186 + -93)) + ((41 + 73) + (-116 + 36)))) ** h_7_(niD2.Js_f3X_))], niD2.jLh9d_O)
class O18v8_t(Rtz669k):
def __init__(niD2, Js_f3X_, g7_0_7c):
niD2.Js_f3X_ = Js_f3X_
niD2.g7_0_7c = g7_0_7c
def __str__(niD2):
return (('' + ('E' + 'v')) + (('aluat' + 'ion of Player') + (" {0}'s {1}{2} raised the following err" + 'or:\n {3}: {4}'))).format(h_7_(niD2.Js_f3X_), bG16F(niD2.Js_f3X_)[h_7_(niD2.Js_f3X_)].__name__, H_55_(niD2.Js_f3X_)[::((- (((8 + -47) + (79 + 10)) + ((-12 + -44) + (65 + -58)))) ** h_7_(niD2.Js_f3X_))], type(niD2.g7_0_7c).__name__, niD2.g7_0_7c)
| mit |
egraba/vbox_openbsd | VirtualBox-5.0.0/src/VBox/ValidationKit/testmanager/webui/wuiadmintestcase.py | 2 | 10377 | # -*- coding: utf-8 -*-
# $Id: wuiadmintestcase.py $
"""
Test Manager WUI - Test Cases.
"""
__copyright__ = \
"""
Copyright (C) 2012-2015 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 100880 $"
# Validation Kit imports.
from common import utils, webutils;
from testmanager.webui.wuicontentbase import WuiFormContentBase, WuiListContentBase, WuiTmLink, WuiRawHtml;
from testmanager.core.db import isDbTimestampInfinity;
from testmanager.core.testcase import TestCaseDataEx, TestCaseData, TestCaseDependencyLogic;
from testmanager.core.globalresource import GlobalResourceData, GlobalResourceLogic;
class WuiTestCaseList(WuiListContentBase):
"""
WUI test case list content generator.
"""
def __init__(self, aoEntries, iPage, cItemsPerPage, tsEffective, fnDPrint, oDisp):
WuiListContentBase.__init__(self, aoEntries, iPage, cItemsPerPage, tsEffective,
sTitle = 'Test Cases', fnDPrint = fnDPrint, oDisp = oDisp);
self._asColumnHeaders = \
[
'Name', 'Active', 'Default Timeout', 'Base Command / Variations', 'Validation Kit Files',
'Test Case Prereqs', 'Global Resources', 'Actions'
];
self._asColumnAttribs = \
[
'', '', '', '', '',
'valign="top"', 'valign="top"', 'align="center"'
];
def _formatListEntry(self, iEntry):
oEntry = self._aoEntries[iEntry];
from testmanager.webui.wuiadmin import WuiAdmin;
aoRet = \
[
oEntry.sName.replace('-', u'\u2011'),
'Enabled' if oEntry.fEnabled else 'Disabled',
utils.formatIntervalSeconds(oEntry.cSecTimeout),
];
# Base command and variations.
fNoGang = True;
for oVar in oEntry.aoTestCaseArgs:
if oVar.cGangMembers > 1:
fNoGang = False;
break;
        sHtml = '   <table class="tminnertbl" width=100%>\n' \
                '    <tr>\n' \
                '     ';
        if not fNoGang:
            sHtml += '<th>Gang Size</th>';
        sHtml += '<th>Timeout</th><th>Additional Arguments</th>\n' \
                 '    </tr>\n'
for oTmp in oEntry.aoTestCaseArgs:
sHtml += '<tr>';
if not fNoGang:
sHtml += '<td>%d</td>' % (oTmp.cGangMembers,)
sHtml += '<td>%s</td><td>%s</td></tr>\n' \
% ( utils.formatIntervalSeconds(oTmp.cSecTimeout) if oTmp.cSecTimeout is not None else 'Default',
webutils.escapeElem(oTmp.sArgs.replace('-', u'\u2011')),)
sHtml += ' </table>'
aoRet.append([oEntry.sBaseCmd.replace('-', u'\u2011'), WuiRawHtml(sHtml)]);
# Next.
aoRet += [ oEntry.sValidationKitZips if oEntry.sValidationKitZips is not None else '', ];
# Show dependency on other testcases
if oEntry.aoDepTestCases not in (None, []):
sHtml = ' <ul class="tmshowall">\n'
for sTmp in oEntry.aoDepTestCases:
sHtml += ' <li class="tmshowall"><a href="%s?%s=%s&%s=%s">%s</a></li>\n' \
% (WuiAdmin.ksScriptName,
WuiAdmin.ksParamAction, WuiAdmin.ksActionTestCaseEdit,
TestCaseData.ksParam_idTestCase, sTmp.idTestCase,
sTmp.sName)
sHtml += ' </ul>\n'
else:
sHtml = '<ul class="tmshowall"><li class="tmshowall">None</li></ul>\n'
aoRet.append(WuiRawHtml(sHtml));
# Show dependency on global resources
if oEntry.aoDepGlobalResources not in (None, []):
sHtml = ' <ul class="tmshowall">\n'
for sTmp in oEntry.aoDepGlobalResources:
sHtml += ' <li class="tmshowall"><a href="%s?%s=%s&%s=%s">%s</a></li>\n' \
% (WuiAdmin.ksScriptName,
WuiAdmin.ksParamAction, WuiAdmin.ksActionGlobalRsrcShowEdit,
GlobalResourceData.ksParam_idGlobalRsrc, sTmp.idGlobalRsrc,
sTmp.sName)
sHtml += ' </ul>\n'
else:
sHtml = '<ul class="tmshowall"><li class="tmshowall">None</li></ul>\n'
aoRet.append(WuiRawHtml(sHtml));
# Show actions that can be taken.
aoActions = [ WuiTmLink('Details', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestCaseDetails,
TestCaseData.ksParam_idGenTestCase: oEntry.idGenTestCase }), ];
if isDbTimestampInfinity(oEntry.tsExpire):
aoActions.append(WuiTmLink('Modify', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestCaseEdit,
TestCaseData.ksParam_idTestCase: oEntry.idTestCase }));
aoActions.append(WuiTmLink('Clone', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestCaseClone,
TestCaseData.ksParam_idGenTestCase: oEntry.idGenTestCase }));
if isDbTimestampInfinity(oEntry.tsExpire):
aoActions.append(WuiTmLink('Remove', WuiAdmin.ksScriptName,
{ WuiAdmin.ksParamAction: WuiAdmin.ksActionTestCaseDoRemove,
TestCaseData.ksParam_idTestCase: oEntry.idTestCase },
sConfirm = 'Are you sure you want to remove test case #%d?' % (oEntry.idTestCase,)));
aoRet.append(aoActions);
return aoRet;
class WuiTestCase(WuiFormContentBase):
"""
WUI user account content generator.
"""
def __init__(self, oData, sMode, oDisp):
assert isinstance(oData, TestCaseDataEx);
if sMode == WuiFormContentBase.ksMode_Add:
sTitle = 'New Test Case';
elif sMode == WuiFormContentBase.ksMode_Edit:
sTitle = 'Edit Test Case - %s (#%s)' % (oData.sName, oData.idTestCase);
else:
assert sMode == WuiFormContentBase.ksMode_Show;
sTitle = 'Test Case - %s (#%s)' % (oData.sName, oData.idTestCase);
WuiFormContentBase.__init__(self, oData, sMode, 'TestCase', oDisp, sTitle);
# Read additional bits form the DB.
oDepLogic = TestCaseDependencyLogic(oDisp.getDb());
self._aoAllTestCases = oDepLogic.getApplicableDepTestCaseData(-1 if oData.idTestCase is None else oData.idTestCase);
self._aoAllGlobalRsrcs = GlobalResourceLogic(oDisp.getDb()).getAll();
def _populateForm(self, oForm, oData):
oForm.addIntRO (TestCaseData.ksParam_idTestCase, oData.idTestCase, 'Test Case ID')
oForm.addTimestampRO(TestCaseData.ksParam_tsEffective, oData.tsEffective, 'Last changed')
oForm.addTimestampRO(TestCaseData.ksParam_tsExpire, oData.tsExpire, 'Expires (excl)')
oForm.addIntRO (TestCaseData.ksParam_uidAuthor, oData.uidAuthor, 'Changed by UID')
oForm.addIntRO (TestCaseData.ksParam_idGenTestCase, oData.idGenTestCase, 'Test Case generation ID')
oForm.addText (TestCaseData.ksParam_sName, oData.sName, 'Name')
oForm.addText (TestCaseData.ksParam_sDescription, oData.sDescription, 'Description')
oForm.addCheckBox (TestCaseData.ksParam_fEnabled, oData.fEnabled, 'Enabled')
oForm.addLong (TestCaseData.ksParam_cSecTimeout,
utils.formatIntervalSeconds2(oData.cSecTimeout), 'Default timeout')
oForm.addWideText (TestCaseData.ksParam_sTestBoxReqExpr, oData.sTestBoxReqExpr, 'TestBox requirements (python)');
oForm.addWideText (TestCaseData.ksParam_sBuildReqExpr, oData.sBuildReqExpr, 'Build requirement (python)');
oForm.addWideText (TestCaseData.ksParam_sBaseCmd, oData.sBaseCmd, 'Base command')
oForm.addText (TestCaseData.ksParam_sValidationKitZips, oData.sValidationKitZips, 'Test suite files')
oForm.addListOfTestCaseArgs(TestCaseDataEx.ksParam_aoTestCaseArgs, oData.aoTestCaseArgs, 'Argument variations')
aoTestCaseDeps = [];
for oTestCase in self._aoAllTestCases:
if oTestCase.idTestCase == oData.idTestCase:
continue;
fSelected = False;
for oDep in oData.aoDepTestCases:
if oDep.idTestCase == oTestCase.idTestCase:
fSelected = True;
break;
aoTestCaseDeps.append([oTestCase.idTestCase, fSelected, oTestCase.sName]);
oForm.addListOfTestCases(TestCaseDataEx.ksParam_aoDepTestCases, aoTestCaseDeps, 'Depends on test cases')
aoGlobalResrcDeps = [];
for oGlobalRsrc in self._aoAllGlobalRsrcs:
fSelected = False;
for oDep in oData.aoDepGlobalResources:
if oDep.idGlobalRsrc == oGlobalRsrc.idGlobalRsrc:
fSelected = True;
break;
aoGlobalResrcDeps.append([oGlobalRsrc.idGlobalRsrc, fSelected, oGlobalRsrc.sName]);
oForm.addListOfResources(TestCaseDataEx.ksParam_aoDepGlobalResources, aoGlobalResrcDeps, 'Depends on resources')
oForm.addSubmit();
return True;
| mit |
googleapis/python-bigquery | google/cloud/bigquery/magics/magics.py | 1 | 28344 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IPython Magics
To use these magics, you must first register them. Run the ``%load_ext`` magic
in a Jupyter notebook cell.
.. code::
%load_ext google.cloud.bigquery
This makes the ``%%bigquery`` magic available.
.. function:: %%bigquery
IPython cell magic to run a query and display the result as a DataFrame
.. code-block:: python
%%bigquery [<destination_var>] [--project <project>] [--use_legacy_sql]
[--verbose] [--params <params>]
<query>
Parameters:
* ``<destination_var>`` (Optional[line argument]):
variable to store the query results. The results are not displayed if
this parameter is used. If an error occurs during the query execution,
the corresponding ``QueryJob`` instance (if available) is stored in
the variable instead.
* ``--destination_table`` (Optional[line argument]):
A dataset and table to store the query results. If the table does not exist,
it will be created. If the table already exists, its data will be overwritten.
The value should be in the format <dataset_id>.<table_id>.
* ``--project <project>`` (Optional[line argument]):
Project to use for running the query. Defaults to the context
:attr:`~google.cloud.bigquery.magics.Context.project`.
* ``--use_bqstorage_api`` (Optional[line argument]):
[Deprecated] Not used anymore, as BigQuery Storage API is used by default.
* ``--use_rest_api`` (Optional[line argument]):
Use the BigQuery REST API instead of the Storage API.
* ``--use_legacy_sql`` (Optional[line argument]):
Runs the query using Legacy SQL syntax. Defaults to Standard SQL if
this argument not used.
* ``--verbose`` (Optional[line argument]):
If this flag is used, information including the query job ID and the
amount of time for the query to complete will not be cleared after the
query is finished. By default, this information will be displayed but
will be cleared after the query is finished.
* ``--params <params>`` (Optional[line argument]):
If present, the argument following the ``--params`` flag must be
either:
* :class:`str` - A JSON string representation of a dictionary in the
format ``{"param_name": "param_value"}`` (ex. ``{"num": 17}``). Use
of the parameter in the query should be indicated with
``@param_name``. See ``In[5]`` in the Examples section below.
* :class:`dict` reference - A reference to a ``dict`` in the format
``{"param_name": "param_value"}``, where the value types must be JSON
serializable. The variable reference is indicated by a ``$`` before
the variable name (ex. ``$my_dict_var``). See ``In[6]`` and ``In[7]``
in the Examples section below.
* ``<query>`` (required, cell argument):
SQL query to run. If the query does not contain any whitespace (aside
from leading and trailing whitespace), it is assumed to represent a
fully-qualified table ID, and the latter's data will be fetched.
Returns:
A :class:`pandas.DataFrame` with the query results.
.. note::
All queries run using this magic will run using the context
:attr:`~google.cloud.bigquery.magics.Context.credentials`.
Examples:
The following examples can be run in an IPython notebook after loading
the bigquery IPython extension (see ``In[1]``) and setting up
Application Default Credentials.
.. code-block:: none
In [1]: %load_ext google.cloud.bigquery
In [2]: %%bigquery
...: SELECT name, SUM(number) as count
...: FROM `bigquery-public-data.usa_names.usa_1910_current`
...: GROUP BY name
...: ORDER BY count DESC
...: LIMIT 3
Out[2]: name count
...: -------------------
...: 0 James 4987296
...: 1 John 4866302
...: 2 Robert 4738204
In [3]: %%bigquery df --project my-alternate-project --verbose
...: SELECT name, SUM(number) as count
...: FROM `bigquery-public-data.usa_names.usa_1910_current`
...: WHERE gender = 'F'
...: GROUP BY name
...: ORDER BY count DESC
...: LIMIT 3
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 2.61s
Query complete after 2.92s
In [4]: df
Out[4]: name count
...: ----------------------
...: 0 Mary 3736239
...: 1 Patricia 1568495
...: 2 Elizabeth 1519946
In [5]: %%bigquery --params {"num": 17}
...: SELECT @num AS num
Out[5]: num
...: -------
...: 0 17
In [6]: params = {"num": 17}
In [7]: %%bigquery --params $params
...: SELECT @num AS num
Out[7]: num
...: -------
...: 0 17
"""
from __future__ import print_function
import re
import ast
import copy
import functools
import sys
import time
import warnings
from concurrent import futures
try:
import IPython
from IPython import display
from IPython.core import magic_arguments
except ImportError: # pragma: NO COVER
raise ImportError("This module can only be loaded in IPython.")
from google.api_core import client_info
from google.api_core import client_options
from google.api_core.exceptions import NotFound
import google.auth
from google.cloud import bigquery
import google.cloud.bigquery.dataset
from google.cloud.bigquery.dbapi import _helpers
from google.cloud.bigquery.magics import line_arg_parser as lap
IPYTHON_USER_AGENT = "ipython-{}".format(IPython.__version__)
class Context(object):
"""Storage for objects to be used throughout an IPython notebook session.
A Context object is initialized when the ``magics`` module is imported,
and can be found at ``google.cloud.bigquery.magics.context``.
"""
def __init__(self):
self._credentials = None
self._project = None
self._connection = None
self._default_query_job_config = bigquery.QueryJobConfig()
self._bigquery_client_options = client_options.ClientOptions()
self._bqstorage_client_options = client_options.ClientOptions()
self._progress_bar_type = "tqdm"
@property
def credentials(self):
"""google.auth.credentials.Credentials: Credentials to use for queries
performed through IPython magics.
Note:
These credentials do not need to be explicitly defined if you are
using Application Default Credentials. If you are not using
Application Default Credentials, manually construct a
:class:`google.auth.credentials.Credentials` object and set it as
the context credentials as demonstrated in the example below. See
`auth docs`_ for more information on obtaining credentials.
Example:
Manually setting the context credentials:
>>> from google.cloud.bigquery import magics
>>> from google.oauth2 import service_account
>>> credentials = (service_account
... .Credentials.from_service_account_file(
... '/path/to/key.json'))
>>> magics.context.credentials = credentials
.. _auth docs: http://google-auth.readthedocs.io
/en/latest/user-guide.html#obtaining-credentials
"""
if self._credentials is None:
self._credentials, _ = google.auth.default()
return self._credentials
@credentials.setter
def credentials(self, value):
self._credentials = value
@property
def project(self):
"""str: Default project to use for queries performed through IPython
magics.
Note:
The project does not need to be explicitly defined if you have an
environment default project set. If you do not have a default
project set in your environment, manually assign the project as
demonstrated in the example below.
Example:
Manually setting the context project:
>>> from google.cloud.bigquery import magics
>>> magics.context.project = 'my-project'
"""
if self._project is None:
_, self._project = google.auth.default()
return self._project
@project.setter
def project(self, value):
self._project = value
@property
def bigquery_client_options(self):
"""google.api_core.client_options.ClientOptions: client options to be
used through IPython magics.
Note::
The client options do not need to be explicitly defined if no
special network connections are required. Normally you would be
using the https://bigquery.googleapis.com/ end point.
Example:
Manually setting the endpoint:
>>> from google.cloud.bigquery import magics
>>> client_options = {}
>>> client_options['api_endpoint'] = "https://some.special.url"
>>> magics.context.bigquery_client_options = client_options
"""
return self._bigquery_client_options
@bigquery_client_options.setter
def bigquery_client_options(self, value):
self._bigquery_client_options = value
@property
def bqstorage_client_options(self):
"""google.api_core.client_options.ClientOptions: client options to be
used through IPython magics for the storage client.
Note::
The client options do not need to be explicitly defined if no
special network connections are required. Normally you would be
using the https://bigquerystorage.googleapis.com/ end point.
Example:
Manually setting the endpoint:
>>> from google.cloud.bigquery import magics
>>> client_options = {}
>>> client_options['api_endpoint'] = "https://some.special.url"
>>> magics.context.bqstorage_client_options = client_options
"""
return self._bqstorage_client_options
@bqstorage_client_options.setter
def bqstorage_client_options(self, value):
self._bqstorage_client_options = value
@property
def default_query_job_config(self):
"""google.cloud.bigquery.job.QueryJobConfig: Default job
configuration for queries.
The context's :class:`~google.cloud.bigquery.job.QueryJobConfig` is
used for queries. Some properties can be overridden with arguments to
the magics.
Example:
Manually setting the default value for ``maximum_bytes_billed``
to 100 MB:
>>> from google.cloud.bigquery import magics
>>> magics.context.default_query_job_config.maximum_bytes_billed = 100000000
"""
return self._default_query_job_config
@default_query_job_config.setter
def default_query_job_config(self, value):
self._default_query_job_config = value
@property
def progress_bar_type(self):
"""str: Default progress bar type to use to display progress bar while
executing queries through IPython magics.
Note::
Install the ``tqdm`` package to use this feature.
Example:
Manually setting the progress_bar_type:
>>> from google.cloud.bigquery import magics
>>> magics.context.progress_bar_type = "tqdm"
"""
return self._progress_bar_type
@progress_bar_type.setter
def progress_bar_type(self, value):
self._progress_bar_type = value
context = Context()
def _handle_error(error, destination_var=None):
"""Process a query execution error.
Args:
error (Exception):
An exception that occurred during the query execution.
destination_var (Optional[str]):
The name of the IPython session variable to store the query job.
"""
if destination_var:
query_job = getattr(error, "query_job", None)
if query_job is not None:
IPython.get_ipython().push({destination_var: query_job})
else:
# this is the case when previewing table rows by providing just
# table ID to cell magic
print(
"Could not save output to variable '{}'.".format(destination_var),
file=sys.stderr,
)
print("\nERROR:\n", str(error), file=sys.stderr)
def _run_query(client, query, job_config=None):
"""Runs a query while printing status updates
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
query (str):
SQL query to be executed. Defaults to the standard SQL dialect.
Use the ``job_config`` parameter to change dialects.
job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
Extra configuration options for the job.
Returns:
google.cloud.bigquery.job.QueryJob: the query job created
Example:
>>> client = bigquery.Client()
>>> _run_query(client, "SELECT 17")
Executing query with job ID: bf633912-af2c-4780-b568-5d868058632b
Query executing: 1.66s
Query complete after 2.07s
'bf633912-af2c-4780-b568-5d868058632b'
"""
start_time = time.time()
query_job = client.query(query, job_config=job_config)
if job_config and job_config.dry_run:
return query_job
print("Executing query with job ID: {}".format(query_job.job_id))
while True:
print("\rQuery executing: {:0.2f}s".format(time.time() - start_time), end="")
try:
query_job.result(timeout=0.5)
break
except futures.TimeoutError:
continue
print("\nQuery complete after {:0.2f}s".format(time.time() - start_time))
return query_job
def _create_dataset_if_necessary(client, dataset_id):
"""Create a dataset in the current project if it doesn't exist.
Args:
client (google.cloud.bigquery.client.Client):
Client to bundle configuration needed for API requests.
dataset_id (str):
Dataset id.
"""
dataset_reference = bigquery.dataset.DatasetReference(client.project, dataset_id)
try:
client.get_dataset(dataset_reference)
return
except NotFound:
pass
dataset = bigquery.Dataset(dataset_reference)
dataset.location = client.location
print("Creating dataset: {}".format(dataset_id))
dataset = client.create_dataset(dataset)
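# Hedged usage sketch (illustrative only, not part of the module's public
# API): ensuring a destination dataset exists before a query writes to it.
# The dataset id "magics_scratch" is an assumption made up for this example,
# and running it requires Application Default Credentials.
def _example_create_dataset_if_necessary():  # pragma: no cover
    client = bigquery.Client()
    _create_dataset_if_necessary(client, "magics_scratch")
    # Calling it again is a no-op: the dataset is found via
    # client.get_dataset() and the create step is skipped.
    _create_dataset_if_necessary(client, "magics_scratch")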
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"destination_var",
nargs="?",
help=("If provided, save the output to this variable instead of displaying it."),
)
@magic_arguments.argument(
"--destination_table",
type=str,
default=None,
help=(
"If provided, save the output of the query to a new BigQuery table. "
"Variable should be in a format <dataset_id>.<table_id>. "
"If table does not exists, it will be created. "
"If table already exists, its data will be overwritten."
),
)
@magic_arguments.argument(
"--project",
type=str,
default=None,
help=("Project to use for executing this query. Defaults to the context project."),
)
@magic_arguments.argument(
"--max_results",
default=None,
help=(
"Maximum number of rows in dataframe returned from executing the query."
"Defaults to returning all rows."
),
)
@magic_arguments.argument(
"--maximum_bytes_billed",
default=None,
help=(
"maximum_bytes_billed to use for executing this query. Defaults to "
"the context default_query_job_config.maximum_bytes_billed."
),
)
@magic_arguments.argument(
"--dry_run",
action="store_true",
default=False,
help=(
"Sets query to be a dry run to estimate costs. "
"Defaults to executing the query instead of dry run if this argument is not used."
),
)
@magic_arguments.argument(
"--use_legacy_sql",
action="store_true",
default=False,
help=(
"Sets query to use Legacy SQL instead of Standard SQL. Defaults to "
"Standard SQL if this argument is not used."
),
)
@magic_arguments.argument(
"--bigquery_api_endpoint",
type=str,
default=None,
help=(
"The desired API endpoint, e.g., bigquery.googlepis.com. Defaults to this "
"option's value in the context bigquery_client_options."
),
)
@magic_arguments.argument(
"--bqstorage_api_endpoint",
type=str,
default=None,
help=(
"The desired API endpoint, e.g., bigquerystorage.googlepis.com. Defaults to "
"this option's value in the context bqstorage_client_options."
),
)
@magic_arguments.argument(
"--use_bqstorage_api",
action="store_true",
default=None,
help=(
"[Deprecated] The BigQuery Storage API is already used by default to "
"download large query results, and this option has no effect. "
"If you want to switch to the classic REST API instead, use the "
"--use_rest_api option."
),
)
@magic_arguments.argument(
"--use_rest_api",
action="store_true",
default=False,
help=(
"Use the classic REST API instead of the BigQuery Storage API to "
"download query results."
),
)
@magic_arguments.argument(
"--verbose",
action="store_true",
default=False,
help=(
"If set, print verbose output, including the query job ID and the "
"amount of time for the query to finish. By default, this "
"information will be displayed as the query runs, but will be "
"cleared after the query is finished."
),
)
@magic_arguments.argument(
"--params",
nargs="+",
default=None,
help=(
"Parameters to format the query string. If present, the --params "
"flag should be followed by a string representation of a dictionary "
"in the format {'param_name': 'param_value'} (ex. {\"num\": 17}), "
"or a reference to a dictionary in the same format. The dictionary "
"reference can be made by including a '$' before the variable "
"name (ex. $my_dict_var)."
),
)
@magic_arguments.argument(
"--progress_bar_type",
type=str,
default=None,
help=(
"Sets progress bar type to display a progress bar while executing the query."
"Defaults to use tqdm. Install the ``tqdm`` package to use this feature."
),
)
def _cell_magic(line, query):
"""Underlying function for bigquery cell magic
Note:
This function contains the underlying logic for the 'bigquery' cell
magic. This function is not meant to be called directly.
Args:
line (str): "%%bigquery" followed by arguments as required
query (str): SQL query to run
Returns:
pandas.DataFrame: the query results.
"""
# The built-in parser does not recognize Python structures such as dicts, thus
# we extract the "--params" option and inteprpret it separately.
try:
params_option_value, rest_of_args = _split_args_line(line)
except lap.exceptions.QueryParamsParseError as exc:
rebranded_error = SyntaxError(
"--params is not a correctly formatted JSON string or a JSON "
"serializable dictionary"
)
raise rebranded_error from exc
except lap.exceptions.DuplicateQueryParamsError as exc:
rebranded_error = ValueError("Duplicate --params option.")
raise rebranded_error from exc
except lap.exceptions.ParseError as exc:
rebranded_error = ValueError(
"Unrecognized input, are option values correct? "
"Error details: {}".format(exc.args[0])
)
raise rebranded_error from exc
args = magic_arguments.parse_argstring(_cell_magic, rest_of_args)
if args.use_bqstorage_api is not None:
warnings.warn(
"Deprecated option --use_bqstorage_api, the BigQuery "
"Storage API is already used by default.",
category=DeprecationWarning,
)
use_bqstorage_api = not args.use_rest_api
params = []
if params_option_value:
# A non-existing params variable is not expanded and ends up in the input
# in its raw form, e.g. "$query_params".
if params_option_value.startswith("$"):
msg = 'Parameter expansion failed, undefined variable "{}".'.format(
params_option_value[1:]
)
raise NameError(msg)
params = _helpers.to_query_parameters(ast.literal_eval(params_option_value), {})
project = args.project or context.project
bigquery_client_options = copy.deepcopy(context.bigquery_client_options)
if args.bigquery_api_endpoint:
if isinstance(bigquery_client_options, dict):
bigquery_client_options["api_endpoint"] = args.bigquery_api_endpoint
else:
bigquery_client_options.api_endpoint = args.bigquery_api_endpoint
client = bigquery.Client(
project=project,
credentials=context.credentials,
default_query_job_config=context.default_query_job_config,
client_info=client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
client_options=bigquery_client_options,
)
if context._connection:
client._connection = context._connection
bqstorage_client_options = copy.deepcopy(context.bqstorage_client_options)
if args.bqstorage_api_endpoint:
if isinstance(bqstorage_client_options, dict):
bqstorage_client_options["api_endpoint"] = args.bqstorage_api_endpoint
else:
bqstorage_client_options.api_endpoint = args.bqstorage_api_endpoint
bqstorage_client = _make_bqstorage_client(
client, use_bqstorage_api, bqstorage_client_options,
)
close_transports = functools.partial(_close_transports, client, bqstorage_client)
try:
if args.max_results:
max_results = int(args.max_results)
else:
max_results = None
query = query.strip()
if not query:
error = ValueError("Query is missing.")
_handle_error(error, args.destination_var)
return
# Any query that does not contain whitespace (aside from leading and trailing whitespace)
# is assumed to be a table id
if not re.search(r"\s", query):
try:
rows = client.list_rows(query, max_results=max_results)
except Exception as ex:
_handle_error(ex, args.destination_var)
return
result = rows.to_dataframe(bqstorage_client=bqstorage_client)
if args.destination_var:
IPython.get_ipython().push({args.destination_var: result})
return
else:
return result
job_config = bigquery.job.QueryJobConfig()
job_config.query_parameters = params
job_config.use_legacy_sql = args.use_legacy_sql
job_config.dry_run = args.dry_run
if args.destination_table:
split = args.destination_table.split(".")
if len(split) != 2:
raise ValueError(
"--destination_table should be in a <dataset_id>.<table_id> format."
)
dataset_id, table_id = split
job_config.allow_large_results = True
dataset_ref = bigquery.dataset.DatasetReference(client.project, dataset_id)
destination_table_ref = dataset_ref.table(table_id)
job_config.destination = destination_table_ref
job_config.create_disposition = "CREATE_IF_NEEDED"
job_config.write_disposition = "WRITE_TRUNCATE"
_create_dataset_if_necessary(client, dataset_id)
if args.maximum_bytes_billed == "None":
job_config.maximum_bytes_billed = 0
elif args.maximum_bytes_billed is not None:
value = int(args.maximum_bytes_billed)
job_config.maximum_bytes_billed = value
try:
query_job = _run_query(client, query, job_config=job_config)
except Exception as ex:
_handle_error(ex, args.destination_var)
return
if not args.verbose:
display.clear_output()
if args.dry_run and args.destination_var:
IPython.get_ipython().push({args.destination_var: query_job})
return
elif args.dry_run:
print(
"Query validated. This query will process {} bytes.".format(
query_job.total_bytes_processed
)
)
return query_job
progress_bar = context.progress_bar_type or args.progress_bar_type
if max_results:
result = query_job.result(max_results=max_results).to_dataframe(
bqstorage_client=bqstorage_client, progress_bar_type=progress_bar
)
else:
result = query_job.to_dataframe(
bqstorage_client=bqstorage_client, progress_bar_type=progress_bar
)
if args.destination_var:
IPython.get_ipython().push({args.destination_var: result})
else:
return result
finally:
close_transports()
def _split_args_line(line):
"""Split out the --params option value from the input line arguments.
Args:
line (str): The line arguments passed to the cell magic.
Returns:
Tuple[str, str]
"""
lexer = lap.Lexer(line)
parser = lap.Parser(lexer)
tree = parser.input_line()
extractor = lap.QueryParamsExtractor()
params_option_value, rest_of_args = extractor.visit(tree)
return params_option_value, rest_of_args
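# Hedged sketch (illustrative only): how _split_args_line() peels the --params
# payload off the magic's argument line before the rest goes to argparse. The
# exact whitespace of the returned fragments is an assumption and may differ.
def _example_split_args_line():  # pragma: no cover
    params_value, rest = _split_args_line(
        '--params {"num": 17} --project my-project'
    )
    print(params_value)  # expected: the raw payload, e.g. '{"num": 17}'
    print(rest)          # expected: the remaining options, e.g. '--project my-project'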
def _make_bqstorage_client(client, use_bqstorage_api, client_options):
if not use_bqstorage_api:
return None
try:
from google.cloud import bigquery_storage # noqa: F401
except ImportError as err:
customized_error = ImportError(
"The default BigQuery Storage API client cannot be used, install "
"the missing google-cloud-bigquery-storage and pyarrow packages "
"to use it. Alternatively, use the classic REST API by specifying "
"the --use_rest_api magic option."
)
raise customized_error from err
try:
from google.api_core.gapic_v1 import client_info as gapic_client_info
except ImportError as err:
customized_error = ImportError(
"Install the grpcio package to use the BigQuery Storage API."
)
raise customized_error from err
return client._ensure_bqstorage_client(
client_options=client_options,
client_info=gapic_client_info.ClientInfo(user_agent=IPYTHON_USER_AGENT),
)
def _close_transports(client, bqstorage_client):
"""Close the given clients' underlying transport channels.
Closing the transport is needed to release system resources, namely open
sockets.
Args:
client (:class:`~google.cloud.bigquery.client.Client`):
bqstorage_client
(Optional[:class:`~google.cloud.bigquery_storage.BigQueryReadClient`]):
A client for the BigQuery Storage API.
"""
client.close()
if bqstorage_client is not None:
bqstorage_client._transport.grpc_channel.close()
| apache-2.0 |
defance/edx-platform | lms/lib/courseware_search/test/test_lms_filter_generator.py | 43 | 5770 | """
Tests for the lms_filter_generator
"""
from mock import patch, Mock
from xmodule.modulestore.tests.factories import ItemFactory, CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from lms.lib.courseware_search.lms_filter_generator import LmsSearchFilterGenerator
class LmsSearchFilterGeneratorTestCase(ModuleStoreTestCase):
""" Test case class to test search result processor """
def build_courses(self):
"""
Build up a course tree with multiple test courses
"""
self.courses = [
CourseFactory.create(
org='ElasticsearchFiltering',
course='ES101F',
run='test_run',
display_name='Elasticsearch Filtering test course',
),
CourseFactory.create(
org='FilterTest',
course='FT101',
run='test_run',
display_name='FilterTest test course',
)
]
self.chapter = ItemFactory.create(
parent_location=self.courses[0].location,
category='chapter',
display_name="Week 1",
publish_item=True,
)
self.chapter2 = ItemFactory.create(
parent_location=self.courses[1].location,
category='chapter',
display_name="Week 1",
publish_item=True,
)
def setUp(self):
super(LmsSearchFilterGeneratorTestCase, self).setUp()
self.build_courses()
self.user = UserFactory.create(username="jack", email="jack@fake.edx.org", password='test')
for course in self.courses:
CourseEnrollment.enroll(self.user, course.location.course_key)
def test_course_id_not_provided(self):
"""
Tests that we get the list of IDs of courses the user is enrolled in when the course ID is null or not provided
"""
field_dictionary, filter_dictionary, _ = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertTrue('start_date' in filter_dictionary)
self.assertIn(unicode(self.courses[0].id), field_dictionary['course'])
self.assertIn(unicode(self.courses[1].id), field_dictionary['course'])
def test_course_id_provided(self):
"""
Tests that we get the course ID when the course ID is provided
"""
field_dictionary, filter_dictionary, _ = LmsSearchFilterGenerator.generate_field_filters(
user=self.user,
course_id=unicode(self.courses[0].id)
)
self.assertTrue('start_date' in filter_dictionary)
self.assertEqual(unicode(self.courses[0].id), field_dictionary['course'])
def test_user_not_provided(self):
"""
Tests that we get empty list of courses in case the user is not provided
"""
field_dictionary, filter_dictionary, _ = LmsSearchFilterGenerator.generate_field_filters()
self.assertTrue('start_date' in filter_dictionary)
self.assertEqual(0, len(field_dictionary['course']))
def test_excludes_microsite(self):
"""
By default there is the test microsite and the microsite with logistration
to exclude
"""
_, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertIn('org', exclude_dictionary)
exclude_orgs = exclude_dictionary['org']
self.assertEqual(2, len(exclude_orgs))
self.assertEqual('LogistrationX', exclude_orgs[0])
self.assertEqual('TestMicrositeX', exclude_orgs[1])
@patch('microsite_configuration.microsite.get_all_orgs', Mock(return_value=[]))
def test_excludes_no_microsite(self):
""" Test when no microsite is present - nothing to exclude """
_, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertNotIn('org', exclude_dictionary)
@patch('microsite_configuration.microsite.get_value', Mock(return_value='TestMicrositeX'))
def test_excludes_microsite_within(self):
field_dictionary, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertNotIn('org', exclude_dictionary)
self.assertIn('org', field_dictionary)
self.assertEqual('TestMicrositeX', field_dictionary['org'])
@patch(
'microsite_configuration.microsite.get_all_orgs',
Mock(return_value=["TestMicrosite1", "TestMicrosite2", "TestMicrosite3", "TestMicrosite4"])
)
def test_excludes_multi_microsites(self):
_, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertIn('org', exclude_dictionary)
exclude_orgs = exclude_dictionary['org']
self.assertEqual(4, len(exclude_orgs))
self.assertIn('TestMicrosite1', exclude_orgs)
self.assertIn('TestMicrosite2', exclude_orgs)
self.assertIn('TestMicrosite3', exclude_orgs)
self.assertIn('TestMicrosite4', exclude_orgs)
@patch(
'microsite_configuration.microsite.get_all_orgs',
Mock(return_value=["TestMicrosite1", "TestMicrosite2", "TestMicrosite3", "TestMicrosite4"])
)
@patch('microsite_configuration.microsite.get_value', Mock(return_value='TestMicrosite3'))
def test_excludes_multi_microsites_within(self):
field_dictionary, _, exclude_dictionary = LmsSearchFilterGenerator.generate_field_filters(user=self.user)
self.assertNotIn('org', exclude_dictionary)
self.assertIn('org', field_dictionary)
self.assertEqual('TestMicrosite3', field_dictionary['org'])
| agpl-3.0 |
fredwilliam/PMO | tests/deployment/testRegisterAndSubmitData.py | 5 | 22763 | import sys
from datetime import datetime
import unittest
import os
import time
import uuid
import re
import subprocess
import socket
from subprocess import PIPE
import httplib
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPRedirectHandler
import urllib2
import urllib
from cookielib import *
from urlparse import urlparse
#serverhost = 'localhost' #for you local developers on apache
#serverhost = 'localhost:8000' #for you local developers using runserver
serverhost = 'test.commcarehq.org' #for the actual server
curl_command = 'curl' #make sure curl is in your path
#example post to a form
# -F file=@schemas\2_types.xsd --request POST http://test.commcarehq.org/xforms/
def getFiles(dirname, extension, prefix=None):
curdir = os.path.dirname(__file__)
targetdir = os.path.join(curdir, dirname)
targetfiles = os.listdir(targetdir)
retfiles = []
for f in targetfiles:
if f == ".svn":
continue
if not f.endswith(extension):
continue
if prefix != None:
if not f.startswith(prefix):
continue
retfiles.append(os.path.join(targetdir,f))
return retfiles
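# Hedged usage sketch (illustrative only; assumes the tests/deployment tree
# contains a brac-chw directory whose xform files are named after the prefix):
def _example_getFiles():
    xforms = getFiles('brac-chw', '.xml', prefix='brac-chw')
    for path in xforms:
        print path  # absolute path of each matching .xml file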
def getFilesFromList(dirname, filename):
fin = open(filename, 'r')
ret = []
line = fin.readline()
while line != '':
    # strip the trailing newline so the joined path is usable
    ret.append(os.path.join(dirname, line.strip()))
    line = fin.readline()  # advance to the next line so the loop terminates
fin.close()
return ret
#def postSchema(self, submit_user, submit_pw, schema_prefix):
#schemafiles = getFiles('xforms','.xml', prefix=schema_prefix)
class DomainTestCase(unittest.TestCase):
def setUp(self):
self.username = ''
self.password = ''
self.prefix = ''
self.domain_name = ''
self.xform_hash = {}
self.session_cookie=''
self.cookie_header = ''
self._establishSession()
def tearDown(self):
os.remove(self.session_cookie)
def _establishSession(self):
self.session_cookie = os.path.join(os.path.dirname(__file__),str(uuid.uuid1()) + "_cookie.txt")
p = subprocess.Popen([curl_command,'-c',self.session_cookie, '-F username=%s' % self.username, '-F password=%s' % self.password,'--request', 'POST', 'http://%s/accounts/login/' % serverhost],stdout=PIPE,stderr=PIPE,shell=False)
results = p.stdout.read()
def _loadDataFilesList(self, xform_filepath):
domain_dir = os.path.dirname(xform_filepath)
fname, ext = os.path.splitext(os.path.basename(xform_filepath))
if os.path.exists(os.path.join(domain_dir, fname+'.lst')):
fin = open(os.path.join(domain_dir, fname+'.lst'), 'r')
files = fin.readlines()
fin.close()
return files
else:
return []
def _loadFiles(self, dirname, extension):
curdir = os.path.dirname(__file__)
domain_dir = os.path.join(curdir, self.prefix)
targetfiles = os.listdir(domain_dir)
retfiles = []
for f in targetfiles:
if f == ".svn":
continue
if not f.endswith(extension):
continue
if self.prefix != None:
if not f.startswith(self.prefix):
continue
retfiles.append(os.path.join(domain_dir,f))
return retfiles
def _verifySchema(self, results, schema_name):
if results.count("Submit Error:") != 0:
#self.fail( "Verify Schema, submission errors")
print "Verify Schema, submission errors: \n%s" % results
return -1
if results.count(schema_name) != 1:
print "Verify Schema, schema did not save: \n%s" % results
return -1
#self.assertEqual(1, results.count(schema_name))
#get the schema id just created
if results.count("Registration successful for xform id:") != 1:
print "registration of xform id not successful: \n%s" % results
return -1
else:
idx = results.index("Registration successful for xform id:")
substr = results[idx+37:]
pidx = substr.index('</p>')
schema_idstr= substr[0:pidx]
try:
schema_id=int(schema_idstr)
return schema_id
except:
"Error, schema id could not be extracted: \n%s" % results
return -1
def _getMaxSchemaSubmitId(self, xform_id):
url = 'http://%s/api/xforms/%d/?format=csv' % (serverhost, xform_id)
p = subprocess.Popen([curl_command,'-b', self.session_cookie, url],stdout=PIPE,stderr=PIPE,shell=False)
data = p.stdout.read()
count = data.count('\n') - 1
return count
def _postXform2(self,submit_user, submit_pw, xformfile):
# reuse the authenticated session cookie established during login in setUp();
# it is passed along explicitly in the Cookie header below
fin = open(xformfile,'r')
schema = fin.read()
fin.close()
shortname = os.path.basename(xformfile)
shortname = shortname.replace('.xml','')
shortname = shortname + "-" + str(uuid.uuid1())
param_dict = {'file': schema, 'form_display_name': shortname}
p2 = urllib.urlencode(param_dict)
up = urlparse('http://%s/xforms/register/' % (serverhost))
try:
conn = httplib.HTTPConnection(up.netloc)
conn.request('POST', up.path, p2, {'Content-Type': 'multipart/form-data', 'User-Agent': 'CCHQ-testRegisterAndSubmit-python-v0.1', 'Cookie':self.cookie_header})
resp = conn.getresponse()
data = resp.read()
except:
return None
return self._verifySchema(data, shortname)
def _postXform(self, submit_user, submit_pw, xformfile):
"""Does an authenticated CURL post of an xform. Upon finishing the CURL, will do a GET off the server to see if the resultant xform is actually received correctly based upon a uuid displayname being picked"""
fin = open(xformfile,'r')
schema = fin.read()
fin.close()
shortname = os.path.basename(xformfile)
shortname = shortname.replace('.xml','')
shortname = shortname + "-" + str(uuid.uuid1())
print "Posting Xform: %s" % shortname
#print ' '.join([curl_command,'-N', '-b',self.session_cookie, '-F file=@%s' % xformfile, '-F form_display_name=%s' % shortname, '--request', 'POST', 'http://%s/xforms/register/' % serverhost])
p = subprocess.Popen([curl_command,'-b', self.session_cookie, '-F file=@%s' % xformfile, '-F form_display_name=%s' % shortname, '--request', 'POST', 'http://%s/xforms/register/' % serverhost],stdout=PIPE,stderr=PIPE,shell=False)
results = p.stdout.read()
return self._verifySchema(results, shortname)
def _verifySubmission(self, resultstring, num_attachments):
"""Verify that a raw xform submission is submitted and the correct reply comes back in. This also checks to make sure that the attachments are parsed out correctly"""
# this should == xformmanager.SUCCESSFUL_SUBMISSION
rescount = resultstring.count("Thanks!")
attachment_count = '[no attachment]'
#self.assertEqual(1,rescount)
if rescount != 1:
msg = "Data submission failed, not successful: " + str(rescount)
print msg
print "===== xform =====pn%s" % resultstring
self.fail(msg)
else:
attach_pattern = re.compile(r"<NumAttachments>(\d+)</NumAttachments>")
try:
matches = attach_pattern.search(resultstring)
attachment_count = int(matches.groups()[0])
self.assertEqual(attachment_count, num_attachments)
except Exception, ex:
msg = "Data submission error: attachment not found: " + attachment_count + " Exception: " + str(ex)
print "===== xform =====pn%s" % resultstring
self.fail(msg)
def _postSimpleData2(self, datafile,domain_name):
"""Pure python method to submit direct POSTs"""
if datafile == ".svn" or datafile.endswith('.py'):
return
fin = open(os.path.join(os.path.dirname(__file__),datafile),'r')
filestr= fin.read()
fin.close()
up = urlparse('http://%s/receiver/submit/%s' % (serverhost, domain_name))
try:
conn = httplib.HTTPConnection(up.netloc)
conn.request('POST', up.path, filestr, {'Content-Type': 'text/xml', 'User-Agent': 'CCHQ-submitfromfile-python-v0.1'})
resp = conn.getresponse()
results = resp.read()
except (httplib.HTTPException, socket.error):
return None
self._verifySubmission(results,1)
return results
def _postSimpleData(self, datafile, domain_name):
"""Curl method to do data POSTs"""
if datafile == ".svn" or datafile.endswith('.py'):
return
fin = open(datafile,'r')
filestr= fin.read()
fin.close()
print "Simple Submission: " + datafile
command_arr = [curl_command, '--header','Content-type:text/xml', '--header', 'Content-length:%s' % len(filestr), '--data-binary', '@%s' % datafile, '--request', 'POST', 'http://%s/receiver/submit/%s' % (serverhost, self.domain_name)] #
print ' '.join(command_arr)
p = subprocess.Popen(command_arr,stdout=PIPE,stderr=PIPE,shell=False,close_fds=True)
results, stderr = p.communicate()
if stderr.count("transfer closed") > 0:
self.fail("Curl Error with connection premature closure")
elif stderr.count("couldn't connect to host") > 0:
self.fail("Curl error, connection timeout, host not reachable")
self._verifySubmission(results,1)
def _verifySchemaSubmits(self, form_id, formname):
print '\n\n****************\nverifySchemaSubmits for ' + formname
datafiles = self._loadDataFilesList(formname)
if len(datafiles) == 0:
print "No instance data for " + formname
last_id = self._getMaxSchemaSubmitId(form_id)
new_id = -1
for file in datafiles:
data_file = os.path.join(self.prefix,'data',file.strip())
submitresults = self._postSimpleData2(data_file, self.domain_name)
new_id = self._getMaxSchemaSubmitId(form_id)
self.assertNotEqual(new_id, last_id)
print "Submitted file: " + data_file + " Row ID: " + str(new_id)
last_id = new_id
def _doTest0PostXFormsAndVerify(self):
"""DomainTestCase doTest0PostXFormsAndVerify"""
if self.username == '':
print 'self.username is null'
return
xforms = self._loadFiles(self.prefix,'.xml')
for xf in xforms:
print xf
form_id = self._postXform(self.username,self.password,xf)
print "xform registration done for: " + xf + " id: " + str(form_id)
if form_id > 0:
self.xform_hash[xf] = form_id
self._verifySchemaSubmits(form_id, xf)
else:
msg = "xform registration failed for: " + xf
self.fail(msg)
class TestDeployPathFinder(DomainTestCase):
def setUp(self):
self.username = 'pfadmin'
self.password = 'commcare123'
self.prefix = 'pf'
self.domain_name = 'pathfinder'
self.xform_hash = {}
self.cookie_header = ''
self.session_cookie = ''
self._establishSession()
def tearDown(self):
try:
os.remove(self.session_cookie)
except:
pass
def test0PostXformsAndVerify(self):
self._doTest0PostXFormsAndVerify()
class TestDeployPathBracCHW(DomainTestCase):
def setUp(self):
self.username = 'brian'
self.password = 'test'
self.prefix = 'brac-chw'
self.domain_name = 'BRAC'
self.xform_hash = {}
self.cookie_header = ''
self.session_cookie = ''
self._establishSession()
def tearDown(self):
try:
os.remove(self.session_cookie)
except:
pass
def test0PostXformsAndVerify(self):
self._doTest0PostXFormsAndVerify()
class TestDeployPathBracCHP(DomainTestCase):
def setUp(self):
self.username = 'brian'
self.password = 'test'
self.prefix = 'brac-chp'
self.domain_name = 'BRAC'
self.xform_hash = {}
self.cookie_header = ''
self.session_cookie = ''
self._establishSession()
def tearDown(self):
try:
os.remove(self.session_cookie)
except:
pass
def test0PostXformsAndVerify(self):
self._doTest0PostXFormsAndVerify()
class TestDeployPathGrameen(DomainTestCase):
def setUp(self):
self.username = 'gradmin'
self.password = 'commcare123'
self.prefix = 'mvp'
self.domain_name = 'grameen'
self.xform_hash = {}
self.cookie_header = ''
self.session_cookie = ''
self._establishSession()
def tearDown(self):
try:
os.remove(self.session_cookie)
except:
pass
def test0PostXformsAndVerify(self):
self._doTest0PostXFormsAndVerify()
class TestSimpleSubmits(unittest.TestCase):
def setUp(self):
pass
def _scanBlockForInt(self, results, startword,endtag):
try:
id_start = results.index(startword)
submit_len = len(startword)
sub_block = results[id_start:]
id_endtag = sub_block.index(endtag)
submission_id = sub_block[submit_len:id_endtag]
id = int(submission_id)
return id
except:
return -1
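    def _example_scanBlockForInt(self):
        # Hedged illustration (deliberately not named test* so unittest skips
        # it): _scanBlockForInt extracts the integer between a start marker
        # and an end tag, and returns -1 when either marker is missing.
        sample = "<p>submission id: 42</p>"
        assert self._scanBlockForInt(sample, "submission id:", "</p>") == 42
        assert self._scanBlockForInt(sample, "missing:", "</p>") == -1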
def _verifySubmission(self, resultstring, num_attachments):
"""Verify that a raw xform submission is submitted and the correct reply comes back in. This also checks to make sure that the attachments are parsed out correctly"""
rescount = resultstring.count("Submission received, thank you")
attachment_count = '[no attachment]'
#self.assertEqual(1,rescount)
if rescount != 1:
msg = "Data submission failed, not successful: " + str(rescount)
print msg
self.fail(msg)
#print resultstring
else:
idx = resultstring.index("<p>Attachments:")
attachment_count = resultstring[idx+15:].replace('</p>','')
try:
anum = int(attachment_count)
self.assertEqual(anum, num_attachments)
except:
print "Data submission error: attachment not found: " + attachment_count
self.assertFalse(True)
def testPostAndVerifyMultipart(self):
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir,'multipart')
datafiles = os.listdir(datadir)
for file in datafiles:
if file == ".svn":
continue
fullpath = os.path.join(datadir,file)
fin = open(fullpath,'rb')
filestr= fin.read()
fin.close()
# -F file=@schemas\2_types.xsd --request POST http://test.commcarehq.org/xforms/
p = subprocess.Popen([curl_command,'--header','Content-type:multipart/mixed; boundary=newdivider', '--header', 'Content-length:%s' % len(filestr), '--data-binary', '@%s' % fullpath, '--request', 'POST', 'http://%s/receiver/submit/Pathfinder/' % serverhost],stdout=PIPE,stderr=PIPE,shell=False)
results = p.stdout.read()
#self._verifySubmission(results,3)
p = subprocess.Popen([curl_command,'--header','Content-type:multipart/mixed; boundary=newdivider', '--header', 'Content-length:%s' % len(filestr), '--data-binary', '@%s' % fullpath, '--request', 'POST', 'http://%s/receiver/submit/BRAC/' % serverhost],stdout=PIPE,stderr=PIPE,shell=False)
results = p.stdout.read()
#self._verifySubmission(results,3)
class TestODKSubmit(unittest.TestCase):
def setUp(self):
pass
def testPostLegacyXForm(self):
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir,'odk')
fin = open(os.path.join(datadir,'xform.xml'),'r')
filestr = fin.read()
fin.close()
p = subprocess.Popen([curl_command,
'--header',
'Content-type:text/xml',
'--header',
'"Content-length:%s' % len(filestr),
'--data-binary',
'@%s' % os.path.join(datadir,'xform.xml'),
'--request',
'POST',
'http://%s/receiver/submit/BRAC/' % serverhost],
stdout=PIPE,stderr=PIPE,shell=False)
print "executing testPostLegacyXForm"
results = p.stdout.read()
print results
def testPostSingleXForm(self):
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir,'odk')
xformpath = os.path.join(datadir,'xform.xml')
command_arr = [
curl_command,
'-F xml_submission_file=@%s' % xformpath,
'--request', 'POST',
'http://%s/receiver/submit/BRAC/' % serverhost
]
print "Executing testPostSingleXForm"
print ' '.join(command_arr)
p = subprocess.Popen(command_arr, stdout=PIPE, stderr=PIPE, shell=False)
results = p.stdout.read()
errors = p.stderr.read()
print results
def testPostMultiPart(self):
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir,'odk')
xformpath = os.path.join(datadir,'xform.xml')
imagepath = os.path.join(datadir,'xform.xml')  # the xform file doubles as the attachment payload for file1.jpg
command_arr = [
curl_command,
'-F xml_submission_file=@%s' % xformpath,
'-F file1.jpg=@%s' % imagepath,
'--request', 'POST',
'http://%s/receiver/submit/BRAC/' % serverhost
]
print "Executing testPostMultipart"
print ' '.join(command_arr)
p = subprocess.Popen(command_arr, stdout=PIPE,stderr=PIPE,shell=False)
results = p.stdout.read()
errors = p.stderr.read()
print results
class TestBackupRestore(unittest.TestCase):
def setUp(self):
pass
def _postSimpleData(self, datafiles, domain_name):
for file in datafiles:
#time.sleep(.1)
if file == ".svn":
continue
fin = open(file,'r')
filestr= fin.read()
fin.close()
print "Backup/Restore Test: " + file
p = subprocess.Popen([curl_command,'--header','Content-type: text/xml', '--header', 'Content-length: %s' % len(filestr), '--data-binary', '@%s' % file, '--request', 'POST', 'http://%s/receiver/backup/%s' % (serverhost,domain_name)],stdout=PIPE,stderr=PIPE,shell=False)
results = p.stdout.read()
#print "BackupRestore: " + results
conn = httplib.HTTPConnection(serverhost)
res = conn.request("GET", "/receiver/restore/%s" % (results))
#print res
r2 = conn.getresponse()
#self.assertEquals(r2.status,200)
restored = r2.read()
if restored != filestr:
msg = "BackupRestore error failed for id: " + results
print msg
self.fail(msg)
self.assertEquals(restored,filestr)
def testPostFilesAsBackups(self):
return
# CZUE: was going to comment this out but i see it's already been hijacked
# TODO: add/fix test with new backup and restore workflow.
files = getFiles('brac-chw', '.xml')
self._postSimpleData(files, 'BRAC')
if __name__ == "__main__":
real_args = [sys.argv[0]]
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
argsplit = arg.split('=')
if len(argsplit) == 2:
if argsplit[0] == 'serverhost':
serverhost = argsplit[-1]
elif argsplit[0] == 'curlcommand':
curl_command = argsplit[-1]
else:
raise "Error, these arguments are wrong, it should only be\nt\tserverhost=<hostname>\n\tcurlcommand=<curl command>\n\t\tand they BOTH must be there!"
else:
#it's not an argument we want to parse, so put it into the actual args
real_args.append(arg)
print curl_command
unittest.main(argv=real_args)
| bsd-3-clause |
qpxu007/luigi | luigi/util.py | 37 | 8144 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import functools
import logging
from luigi import six
from luigi import task
from luigi import parameter
from luigi.deprecate_kwarg import deprecate_kwarg # removing this breaks code
if six.PY3:
xrange = range
logger = logging.getLogger('luigi-interface')
def common_params(task_instance, task_cls):
"""
Grab all the values in task_instance that are found in task_cls.
"""
if not isinstance(task_cls, task.Register):
raise TypeError("task_cls must be an uninstantiated Task")
task_instance_param_names = dict(task_instance.get_params()).keys()
task_cls_param_names = dict(task_cls.get_params()).keys()
common_param_names = list(set.intersection(set(task_instance_param_names), set(task_cls_param_names)))
common_param_vals = [(key, dict(task_cls.get_params())[key]) for key in common_param_names]
common_kwargs = dict([(key, task_instance.param_kwargs[key]) for key in common_param_names])
vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
return vals
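# Hedged usage sketch (illustrative only; TaskA and TaskB are made-up names):
# common_params() copies the parameter values two task classes share, which is
# handy when instantiating a related task by hand.
def _example_common_params():  # pragma: no cover
    import luigi

    class TaskA(luigi.Task):
        n = luigi.IntParameter()
        label = luigi.Parameter(default='x')

    class TaskB(luigi.Task):
        n = luigi.IntParameter()

    a = TaskA(n=5, label='y')
    # Only 'n' is defined on both classes, so only it is carried over.
    assert common_params(a, TaskB) == {'n': 5}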
def task_wraps(P):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
return functools.wraps(P, updated=[])
class inherits(object):
"""
Task inheritance.
Usage:
.. code-block:: python
class AnotherTask(luigi.Task):
n = luigi.IntParameter()
# ...
@inherits(AnotherTask):
class MyTask(luigi.Task):
def requires(self):
return self.clone_parent()
def run(self):
print self.n # this will be defined
# ...
"""
def __init__(self, task_to_inherit):
super(inherits, self).__init__()
self.task_to_inherit = task_to_inherit
def __call__(self, task_that_inherits):
for param_name, param_obj in self.task_to_inherit.get_params():
if not hasattr(task_that_inherits, param_name):
setattr(task_that_inherits, param_name, param_obj)
# Modify task_that_inherits by subclassing it and adding methods
@task_wraps(task_that_inherits)
class Wrapped(task_that_inherits):
def clone_parent(_self, **args):
return _self.clone(cls=self.task_to_inherit, **args)
return Wrapped
class requires(object):
"""
Same as @inherits, but also auto-defines the requires method.
"""
def __init__(self, task_to_require):
super(requires, self).__init__()
self.inherit_decorator = inherits(task_to_require)
def __call__(self, task_that_requires):
task_that_requires = self.inherit_decorator(task_that_requires)
# Modify task_that_requres by subclassing it and adding methods
@task_wraps(task_that_requires)
class Wrapped(task_that_requires):
def requires(_self):
return _self.clone_parent()
return Wrapped
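# Hedged usage sketch (illustrative only; the task names are made up): with
# @requires, MyTask gains AnotherTask's parameters and a requires() that
# clones the parent with the shared parameter values.
def _example_requires():  # pragma: no cover
    import luigi

    class AnotherTask(luigi.Task):
        n = luigi.IntParameter()

    @requires(AnotherTask)
    class MyTask(luigi.Task):
        def run(self):
            pass

    parent = MyTask(n=3).requires()
    assert isinstance(parent, AnotherTask) and parent.n == 3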
class copies(object):
"""
Auto-copies a task.
Usage:
.. code-block:: python
@copies(MyTask)
class CopyOfMyTask(luigi.Task):
def output(self):
return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
"""
def __init__(self, task_to_copy):
super(copies, self).__init__()
self.requires_decorator = requires(task_to_copy)
def __call__(self, task_that_copies):
task_that_copies = self.requires_decorator(task_that_copies)
# Modify task_that_copies by subclassing it and adding methods
@task_wraps(task_that_copies)
class Wrapped(task_that_copies):
def run(_self):
i, o = _self.input(), _self.output()
f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures
for line in i.open('r'):
f.write(line)
f.close()
return Wrapped
def delegates(task_that_delegates):
""" Lets a task call methods on subtask(s).
The way this works is that the subtask is run as a part of the task, but
the task itself doesn't have to care about the requirements of the subtasks.
The subtask doesn't exist from the scheduler's point of view, and
its dependencies are instead required by the main task.
Example:
.. code-block:: python
class PowersOfN(luigi.Task):
n = luigi.IntParameter()
def f(self, x): return x ** self.n
@delegates
class T(luigi.Task):
def subtasks(self): return PowersOfN(5)
def run(self): print self.subtasks().f(42)
"""
if not hasattr(task_that_delegates, 'subtasks'):
# This method can (optionally) define a couple of delegate tasks that
# will be accessible as interfaces, meaning that the task can access
# those tasks and run methods defined on them, etc
raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates)
@task_wraps(task_that_delegates)
class Wrapped(task_that_delegates):
def deps(self):
# Overrides method in base class
return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])
def run(self):
for t in task.flatten(self.subtasks()):
t.run()
task_that_delegates.run(self)
return Wrapped
def previous(task):
"""
Return a previous Task of the same family.
By default checks if this task family only has one non-global parameter and if
it is a DateParameter, DateHourParameter or DateIntervalParameter in which case
it returns with the time decremented by 1 (hour, day or interval)
"""
params = task.get_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateMinuteParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params)
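# Hedged usage sketch (illustrative only; DailyReport is a made-up task):
# previous() steps the single date-like parameter back by one unit, here a
# DateParameter, so the date moves back one day.
def _example_previous():  # pragma: no cover
    import datetime
    import luigi

    class DailyReport(luigi.Task):
        date = luigi.DateParameter()

    t = DailyReport(date=datetime.date(2015, 6, 2))
    assert previous(t).date == datetime.date(2015, 6, 1)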
def get_previous_completed(task, max_steps=10):
prev = task
for _ in xrange(max_steps):
prev = previous(prev)
logger.debug("Checking if %s is complete", prev.task_id)
if prev.complete():
return prev
return None
| apache-2.0 |
bertucho/epic-movie-quotes-quiz | dialogos/build/cryptography/src/cryptography/hazmat/bindings/openssl/x509name.py | 5 | 2167 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/x509.h>
/*
* See the comment above Cryptography_STACK_OF_X509 in x509.py
*/
typedef STACK_OF(X509_NAME) Cryptography_STACK_OF_X509_NAME;
typedef STACK_OF(X509_NAME_ENTRY) Cryptography_STACK_OF_X509_NAME_ENTRY;
"""
TYPES = """
typedef ... X509_NAME;
typedef ... X509_NAME_ENTRY;
typedef ... Cryptography_STACK_OF_X509_NAME;
typedef ... Cryptography_STACK_OF_X509_NAME_ENTRY;
"""
FUNCTIONS = """
X509_NAME *X509_NAME_new(void);
void X509_NAME_free(X509_NAME *);
int X509_NAME_entry_count(X509_NAME *);
X509_NAME_ENTRY *X509_NAME_get_entry(X509_NAME *, int);
ASN1_OBJECT *X509_NAME_ENTRY_get_object(X509_NAME_ENTRY *);
ASN1_STRING *X509_NAME_ENTRY_get_data(X509_NAME_ENTRY *);
unsigned long X509_NAME_hash(X509_NAME *);
int i2d_X509_NAME(X509_NAME *, unsigned char **);
int X509_NAME_add_entry_by_txt(X509_NAME *, const char *, int,
const unsigned char *, int, int, int);
int X509_NAME_add_entry_by_NID(X509_NAME *, int, int, unsigned char *,
int, int, int);
X509_NAME_ENTRY *X509_NAME_delete_entry(X509_NAME *, int);
void X509_NAME_ENTRY_free(X509_NAME_ENTRY *);
int X509_NAME_get_index_by_NID(X509_NAME *, int, int);
int X509_NAME_cmp(const X509_NAME *, const X509_NAME *);
char *X509_NAME_oneline(X509_NAME *, char *, int);
X509_NAME *X509_NAME_dup(X509_NAME *);
"""
MACROS = """
Cryptography_STACK_OF_X509_NAME *sk_X509_NAME_new_null(void);
int sk_X509_NAME_num(Cryptography_STACK_OF_X509_NAME *);
int sk_X509_NAME_push(Cryptography_STACK_OF_X509_NAME *, X509_NAME *);
X509_NAME *sk_X509_NAME_value(Cryptography_STACK_OF_X509_NAME *, int);
void sk_X509_NAME_free(Cryptography_STACK_OF_X509_NAME *);
int sk_X509_NAME_ENTRY_num(Cryptography_STACK_OF_X509_NAME_ENTRY *);
X509_NAME_ENTRY *sk_X509_NAME_ENTRY_value(
Cryptography_STACK_OF_X509_NAME_ENTRY *, int);
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
| mit |
stickystyle/pocketmine | yaml/resolver.py | 474 | 8972 |
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, basestring):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, basestring):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
|\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(ur'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
[u'<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(ur'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
[u'='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(ur'^(?:!|&|\*)$'),
list(u'!&*'))
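# For example (hedged): with the resolvers above installed, a plain scalar
# '42' resolves to tag:yaml.org,2002:int and 'yes' to tag:yaml.org,2002:bool;
# anything unmatched falls back to DEFAULT_SCALAR_TAG.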
| mit |
neilLasrado/erpnext | erpnext/healthcare/doctype/healthcare_service_unit_type/healthcare_service_unit_type.py | 7 | 3807 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
class HealthcareServiceUnitType(Document):
def validate(self):
if self.is_billable == 1:
if not self.uom or not self.item_group or not self.description or self.no_of_hours <= 0:
frappe.throw(_("Configure Item Fields like UOM, Item Group, Description and No of Hours."))
def after_insert(self):
if self.inpatient_occupancy and self.is_billable:
create_item(self)
def on_trash(self):
if(self.item):
try:
frappe.delete_doc("Item",self.item)
except Exception:
frappe.throw(_("""Not permitted. Please disable the Service Unit Type"""))
def on_update(self):
if(self.change_in_item and self.is_billable == 1 and self.item):
updating_item(self)
item_price = item_price_exist(self)
if not item_price:
if(self.rate != 0.0):
price_list_name = frappe.db.get_value("Price List", {"selling": 1})
if(self.rate):
make_item_price(self.item_code, price_list_name, self.rate)
else:
make_item_price(self.item_code, price_list_name, 0.0)
else:
frappe.db.set_value("Item Price", item_price, "price_list_rate", self.rate)
frappe.db.set_value(self.doctype,self.name,"change_in_item",0)
elif(self.is_billable == 0 and self.item):
frappe.db.set_value("Item",self.item,"disabled",1)
self.reload()
def item_price_exist(doc):
item_price = frappe.db.exists({
"doctype": "Item Price",
"item_code": doc.item_code})
if(item_price):
return item_price[0][0]
else:
return False
def updating_item(doc):
frappe.db.sql("""update `tabItem` set item_name=%s, item_group=%s, disabled=0, standard_rate=%s,
description=%s, modified=NOW() where item_code=%s""",
(doc.service_unit_type, doc.item_group , doc.rate, doc.description, doc.item))
def create_item(doc):
#insert item
item = frappe.get_doc({
"doctype": "Item",
"item_code": doc.item_code,
"item_name":doc.service_unit_type,
"item_group": doc.item_group,
"description":doc.description,
"is_sales_item": 1,
"is_service_item": 1,
"is_purchase_item": 0,
"is_stock_item": 0,
"show_in_website": 0,
"is_pro_applicable": 0,
"disabled": 0,
"stock_uom": doc.uom
}).insert(ignore_permissions=True)
#insert item price
#get item price list to insert item price
if(doc.rate != 0.0):
price_list_name = frappe.db.get_value("Price List", {"selling": 1})
if(doc.rate):
make_item_price(item.name, price_list_name, doc.rate)
item.standard_rate = doc.rate
else:
make_item_price(item.name, price_list_name, 0.0)
item.standard_rate = 0.0
item.save(ignore_permissions = True)
#Set item to the Doc
frappe.db.set_value("Healthcare Service Unit Type", doc.name, "item", item.name)
doc.reload() #refresh the doc after insert.
def make_item_price(item, price_list_name, item_price):
frappe.get_doc({
"doctype": "Item Price",
"price_list": price_list_name,
"item_code": item,
"price_list_rate": item_price
}).insert(ignore_permissions=True)
@frappe.whitelist()
def change_item_code(item, item_code, doc_name):
item_exist = frappe.db.exists({
"doctype": "Item",
"item_code": item_code})
if(item_exist):
frappe.throw(_("Code {0} already exist").format(item_code))
else:
frappe.rename_doc("Item", item, item_code, ignore_permissions = True)
frappe.db.set_value("Healthcare Service Unit Type", doc_name, "item_code", item_code)
@frappe.whitelist()
def disable_enable(status, doc_name, item=None, is_billable=None):
frappe.db.set_value("Healthcare Service Unit Type", doc_name, "disabled", status)
if(is_billable == 1):
frappe.db.set_value("Item", item, "disabled", status)
| gpl-3.0 |
shanemcd/ansible | contrib/inventory/jail.py | 196 | 1305 | #!/usr/bin/env python
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen, PIPE
import sys
import json
result = {}
result['all'] = {}
pipe = Popen(['jls', '-q', 'name'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
result['all']['vars']['ansible_connection'] = 'jail'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
print(json.dumps({'ansible_connection': 'jail'}))
else:
sys.stderr.write("Need an argument, either --list or --host <host>\n")
| gpl-3.0 |
beni55/edx-platform | lms/djangoapps/certificates/views.py | 6 | 22083 | """URL handlers related to certificate handling by LMS"""
from datetime import datetime
import dogstats_wrapper as dog_stats_api
import json
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from capa.xqueue_interface import XQUEUE_METRIC_NAME
from certificates.models import (
certificate_status_for_student,
CertificateStatuses,
GeneratedCertificate,
ExampleCertificate,
CertificateHtmlViewConfiguration
)
from certificates.queue import XQueueCertInterface
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from util.json_request import JsonResponse, JsonResponseBadRequest
from util.bad_request_rate_limiter import BadRequestRateLimiter
logger = logging.getLogger(__name__)
@csrf_exempt
def request_certificate(request):
"""Request the on-demand creation of a certificate for some user, course.
A request doesn't imply a guarantee that such a creation will take place.
We intentionally use the same machinery as is used for doing certification
at the end of a course run, so that we can be sure users get graded and
then if and only if they pass, do they get a certificate issued.
"""
if request.method == "POST":
if request.user.is_authenticated():
xqci = XQueueCertInterface()
username = request.user.username
student = User.objects.get(username=username)
course_key = SlashSeparatedCourseKey.from_deprecated_string(request.POST.get('course_id'))
course = modulestore().get_course(course_key, depth=2)
status = certificate_status_for_student(student, course_key)['status']
if status in [CertificateStatuses.unavailable, CertificateStatuses.notpassing, CertificateStatuses.error]:
log_msg = u'Grading and certification requested for user %s in course %s via /request_certificate call'
logger.info(log_msg, username, course_key)
status = xqci.add_cert(student, course_key, course=course)
return HttpResponse(json.dumps({'add_status': status}), mimetype='application/json')
return HttpResponse(json.dumps({'add_status': 'ERRORANONYMOUSUSER'}), mimetype='application/json')
@csrf_exempt
def update_certificate(request):
"""
Will update GeneratedCertificate for a new certificate or
modify an existing certificate entry.
See models.py for a state diagram of certificate states
This view should only ever be accessed by the xqueue server
"""
status = CertificateStatuses
if request.method == "POST":
xqueue_body = json.loads(request.POST.get('xqueue_body'))
xqueue_header = json.loads(request.POST.get('xqueue_header'))
try:
course_key = SlashSeparatedCourseKey.from_deprecated_string(xqueue_body['course_id'])
cert = GeneratedCertificate.objects.get(
user__username=xqueue_body['username'],
course_id=course_key,
key=xqueue_header['lms_key'])
except GeneratedCertificate.DoesNotExist:
logger.critical('Unable to lookup certificate\n'
'xqueue_body: {0}\n'
'xqueue_header: {1}'.format(
xqueue_body, xqueue_header))
return HttpResponse(json.dumps({
'return_code': 1,
'content': 'unable to lookup key'}),
mimetype='application/json')
if 'error' in xqueue_body:
cert.status = status.error
if 'error_reason' in xqueue_body:
# Hopefully we will record a meaningful error
# here if something bad happened during the
# certificate generation process
#
# example:
# (aamorm BerkeleyX/CS169.1x/2012_Fall)
# <class 'simples3.bucket.S3Error'>:
# HTTP error (reason=error(32, 'Broken pipe'), filename=None) :
# certificate_agent.py:175
cert.error_reason = xqueue_body['error_reason']
else:
if cert.status in [status.generating, status.regenerating]:
cert.download_uuid = xqueue_body['download_uuid']
cert.verify_uuid = xqueue_body['verify_uuid']
cert.download_url = xqueue_body['url']
cert.status = status.downloadable
elif cert.status in [status.deleting]:
cert.status = status.deleted
else:
logger.critical('Invalid state for cert update: {0}'.format(
cert.status))
return HttpResponse(
json.dumps({
'return_code': 1,
'content': 'invalid cert status'
}),
mimetype='application/json'
)
dog_stats_api.increment(XQUEUE_METRIC_NAME, tags=[
u'action:update_certificate',
u'course_id:{}'.format(cert.course_id)
])
cert.save()
return HttpResponse(json.dumps({'return_code': 0}),
mimetype='application/json')
@csrf_exempt
@require_POST
def update_example_certificate(request):
"""Callback from the XQueue that updates example certificates.
Example certificates are used to verify that certificate
generation is configured correctly for a course.
Unlike other certificates, example certificates
are not associated with a particular user or displayed
to students.
For this reason, we need a different end-point to update
the status of generated example certificates.
Arguments:
request (HttpRequest)
Returns:
HttpResponse (200): Status was updated successfully.
HttpResponse (400): Invalid parameters.
HttpResponse (403): Rate limit exceeded for bad requests.
HttpResponse (404): Invalid certificate identifier or access key.
"""
logger.info(u"Received response for example certificate from XQueue.")
rate_limiter = BadRequestRateLimiter()
# Check the parameters and rate limits
# If these are invalid, return an error response.
if rate_limiter.is_rate_limit_exceeded(request):
logger.info(u"Bad request rate limit exceeded for update example certificate end-point.")
return HttpResponseForbidden("Rate limit exceeded")
if 'xqueue_body' not in request.POST:
logger.info(u"Missing parameter 'xqueue_body' for update example certificate end-point")
rate_limiter.tick_bad_request_counter(request)
return JsonResponseBadRequest("Parameter 'xqueue_body' is required.")
if 'xqueue_header' not in request.POST:
logger.info(u"Missing parameter 'xqueue_header' for update example certificate end-point")
rate_limiter.tick_bad_request_counter(request)
return JsonResponseBadRequest("Parameter 'xqueue_header' is required.")
try:
xqueue_body = json.loads(request.POST['xqueue_body'])
xqueue_header = json.loads(request.POST['xqueue_header'])
except (ValueError, TypeError):
logger.info(u"Could not decode params to example certificate end-point as JSON.")
rate_limiter.tick_bad_request_counter(request)
return JsonResponseBadRequest("Parameters must be JSON-serialized.")
# Attempt to retrieve the example certificate record
# so we can update the status.
try:
uuid = xqueue_body.get('username')
access_key = xqueue_header.get('lms_key')
cert = ExampleCertificate.objects.get(uuid=uuid, access_key=access_key)
except ExampleCertificate.DoesNotExist:
# If we are unable to retrieve the record, it means the uuid or access key
# were not valid. This most likely means that the request is NOT coming
# from the XQueue. Return a 404 and increase the bad request counter
# to protect against a DDOS attack.
logger.info(u"Could not find example certificate with uuid '%s' and access key '%s'", uuid, access_key)
rate_limiter.tick_bad_request_counter(request)
raise Http404
if 'error' in xqueue_body:
# If an error occurs, save the error message so we can fix the issue.
error_reason = xqueue_body.get('error_reason')
cert.update_status(ExampleCertificate.STATUS_ERROR, error_reason=error_reason)
logger.warning(
(
u"Error occurred during example certificate generation for uuid '%s'. "
u"The error response was '%s'."
), uuid, error_reason
)
else:
# If the certificate generated successfully, save the download URL
# so we can display the example certificate.
download_url = xqueue_body.get('url')
if download_url is None:
rate_limiter.tick_bad_request_counter(request)
logger.warning(u"No download URL provided for example certificate with uuid '%s'.", uuid)
return JsonResponseBadRequest(
"Parameter 'download_url' is required for successfully generated certificates."
)
else:
cert.update_status(ExampleCertificate.STATUS_SUCCESS, download_url=download_url)
logger.info("Successfully updated example certificate with uuid '%s'.", uuid)
# Let the XQueue know that we handled the response
return JsonResponse({'return_code': 0})
# pylint: disable=too-many-statements, bad-continuation
@login_required
def render_html_view(request):
"""
This view generates an HTML representation of the specified student's certificate
If a certificate is not available, we display a "Sorry!" screen instead
"""
# Initialize the template context and bootstrap with default values from configuration
context = {}
configuration = CertificateHtmlViewConfiguration.get_config()
context = configuration.get('default', {})
invalid_template_path = 'certificates/invalid.html'
# Translators: This text is bound to the HTML 'title' element of the page and appears
# in the browser title bar when a requested certificate is not found or not recognized
context['document_title'] = _("Invalid Certificate")
# Feature Flag check
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return render_to_response(invalid_template_path, context)
course_id = request.GET.get('course', None)
context['course'] = course_id
if not course_id:
return render_to_response(invalid_template_path, context)
# Course Lookup
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
return render_to_response(invalid_template_path, context)
course = modulestore().get_course(course_key)
if not course:
return render_to_response(invalid_template_path, context)
# Certificate Lookup
try:
certificate = GeneratedCertificate.objects.get(
user=request.user,
course_id=course_key
)
except GeneratedCertificate.DoesNotExist:
return render_to_response(invalid_template_path, context)
# Override the defaults with any mode-specific static values
context.update(configuration.get(certificate.mode, {}))
# Override further with any course-specific static values
context.update(course.cert_html_view_overrides)
# Populate dynamic output values using the course/certificate data loaded above
user_fullname = request.user.profile.name
platform_name = context.get('platform_name')
context['accomplishment_copy_name'] = user_fullname
context['accomplishment_copy_course_org'] = course.org
context['accomplishment_copy_course_name'] = course.display_name
context['certificate_id_number'] = certificate.verify_uuid
context['certificate_verify_url'] = "{prefix}{uuid}{suffix}".format(
prefix=context.get('certificate_verify_url_prefix'),
uuid=certificate.verify_uuid,
suffix=context.get('certificate_verify_url_suffix')
)
context['logo_alt'] = platform_name
accd_course_org_html = '<span class="detail--xuniversity">{partner_name}</span>'.format(partner_name=course.org)
accd_platform_name_html = '<span class="detail--company">{platform_name}</span>'.format(platform_name=platform_name)
# Translators: This line appears on the certificate after the name of a course, and provides more
# information about the organizations providing the course material to platform users
context['accomplishment_copy_course_description'] = _('a course of study offered by {partner_name}, '
'through {platform_name}.').format(
partner_name=accd_course_org_html,
platform_name=accd_platform_name_html
)
context['accomplishment_more_title'] = _("More Information About {user_name}'s Certificate:").format(
user_name=user_fullname
)
# Translators: This line appears on the page just before the generation date for the certificate
context['certificate_date_issued_title'] = _("Issued On:")
# Translators: The format of the date includes the full name of the month
context['certificate_date_issued'] = _('{month} {day}, {year}').format(
month=certificate.modified_date.strftime("%B"),
day=certificate.modified_date.day,
year=certificate.modified_date.year
)
# Translators: The Certificate ID Number is an alphanumeric value unique to each individual certificate
context['certificate_id_number_title'] = _('Certificate ID Number')
context['certificate_info_title'] = _('About {platform_name} Certificates').format(
platform_name=platform_name
)
# Translators: This text describes the purpose (and therefore, value) of a course certificate
# 'verifying your identity' refers to the process for establishing the authenticity of the student
context['certificate_info_description'] = _("{platform_name} acknowledges achievements through certificates, which "
"are awarded for various activities {platform_name} students complete "
"under the <a href='{tos_url}'>{platform_name} Honor Code</a>. Some "
"certificates require completing additional steps, such as "
"<a href='{verified_cert_url}'> verifying your identity</a>.").format(
platform_name=platform_name,
tos_url=context.get('company_tos_url'),
verified_cert_url=context.get('company_verified_certificate_url')
)
# Translators: Certificate Types correspond to the different enrollment options available for a given course
context['certificate_type_title'] = _('{certificate_type} Certificate').format(
certificate_type=context.get('certificate_type')
)
context['certificate_verify_title'] = _("How {platform_name} Validates Student Certificates").format(
platform_name=platform_name
)
# Translators: This text describes the validation mechanism for a certificate file (known as GPG security)
context['certificate_verify_description'] = _('Certificates issued by {platform_name} are signed by a gpg key so '
'that they can be validated independently by anyone with the '
'{platform_name} public key. For independent verification, '
'{platform_name} uses what is called a '
'"detached signature""".').format(platform_name=platform_name)
context['certificate_verify_urltext'] = _("Validate this certificate for yourself")
# Translators: This text describes (at a high level) the mission and charter the edX platform and organization
context['company_about_description'] = _("{platform_name} offers interactive online classes and MOOCs from the "
"world's best universities, including MIT, Harvard, Berkeley, University "
"of Texas, and many others. {platform_name} is a non-profit online "
"initiative created by founding partners Harvard and MIT.").format(
platform_name=platform_name
)
context['company_about_title'] = _("About {platform_name}").format(platform_name=platform_name)
context['company_about_urltext'] = _("Learn more about {platform_name}").format(platform_name=platform_name)
context['company_courselist_urltext'] = _("Learn with {platform_name}").format(platform_name=platform_name)
context['company_careers_urltext'] = _("Work at {platform_name}").format(platform_name=platform_name)
context['company_contact_urltext'] = _("Contact {platform_name}").format(platform_name=platform_name)
context['company_privacy_urltext'] = _("Privacy Policy")
context['company_tos_urltext'] = _("Terms of Service & Honor Code")
# Translators: This text appears near the top of the certificate and describes the guarantee provided by edX
context['document_banner'] = _("{platform_name} acknowledges the following student accomplishment").format(
platform_name=platform_name
)
context['logo_subtitle'] = _("Certificate Validation")
if certificate.mode == 'honor':
# Translators: This text describes the 'Honor' course certificate type.
context['certificate_type_description'] = _("An {cert_type} Certificate signifies that an {platform_name} "
"learner has agreed to abide by {platform_name}'s honor code and "
"completed all of the required tasks for this course under its "
"guidelines.").format(
cert_type=context.get('certificate_type'),
platform_name=platform_name
)
elif certificate.mode == 'verified':
# Translators: This text describes the 'ID Verified' course certificate type, which is a higher level of
# verification offered by edX. This type of verification is useful for professional education/certifications
context['certificate_type_description'] = _("An {cert_type} Certificate signifies that an {platform_name} "
"learner has agreed to abide by {platform_name}'s honor code and "
"completed all of the required tasks for this course under its "
"guidelines, as well as having their photo ID checked to verify "
"their identity.").format(
cert_type=context.get('certificate_type'),
platform_name=platform_name
)
elif certificate.mode == 'xseries':
# Translators: This text describes the 'XSeries' course certificate type. An XSeries is a collection of
# courses related to each other in a meaningful way, such as a specific topic or theme, or even an organization
context['certificate_type_description'] = _("An {cert_type} Certificate demonstrates a high level of "
"achievement in a program of study, and includes verification of "
"the student's identity.").format(
cert_type=context.get('certificate_type')
)
# Translators: This is the copyright line which appears at the bottom of the certificate page/screen
context['copyright_text'] = _('© {year} {platform_name}. All rights reserved.').format(
year=datetime.now().year,
platform_name=platform_name
)
# Translators: This text represents the verification of the certificate
context['document_meta_description'] = _('This is a valid {platform_name} certificate for {user_name}, '
'who participated in {partner_name} {course_number}').format(
platform_name=platform_name,
user_name=user_fullname,
partner_name=course.org,
course_number=course.number
)
# Translators: This text is bound to the HTML 'title' element of the page and appears in the browser title bar
context['document_title'] = _("Valid {partner_name} {course_number} Certificate | {platform_name}").format(
partner_name=course.org,
course_number=course.number,
platform_name=platform_name
)
# Translators: This text fragment appears after the student's name (displayed in a large font) on the certificate
# screen. The text describes the accomplishment represented by the certificate information displayed to the user
context['accomplishment_copy_description_full'] = _("successfully completed, received a passing grade, and was "
"awarded a {platform_name} {certificate_type} "
"Certificate of Completion in ").format(
platform_name=platform_name,
certificate_type=context.get("certificate_type")
)
return render_to_response("certificates/valid.html", context)
| agpl-3.0 |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/encodings/iso8859_13.py | 1 | 1910 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: iso8859_13.py
""" Python Character Mapping Codec iso8859_13 generated from 'MAPPINGS/ISO8859/8859-13.TXT' with gencodec.py.
"""
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='iso8859-13', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0”¢£¤„¦§Ø©Ŗ«¬\xad®Æ°±²³“µ¶·ø¹ŗ»¼½¾æĄĮĀĆÄÅĘĒČÉŹĖĢĶĪĻŠŃŅÓŌÕÖ×ŲŁŚŪÜŻŽßąįāćäåęēčéźėģķīļšńņóōõö÷ųłśūüżž’'
encoding_table = codecs.charmap_build(decoding_table) | unlicense |
pdellaert/ansible | lib/ansible/modules/storage/ibm/ibm_sa_host.py | 61 | 3264 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 IBM CORPORATION
# Author(s): Tzur Eliyahu <tzure@il.ibm.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: ibm_sa_host
short_description: Adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems.
version_added: "2.7"
description:
- "This module adds hosts to or removes them from IBM Spectrum Accelerate Family storage systems."
options:
host:
description:
- Host name.
required: true
state:
description:
- Host state.
required: true
default: "present"
choices: [ "present", "absent" ]
cluster:
description:
- The name of the cluster to include the host.
required: false
domain:
description:
- The domains the cluster will be attached to.
To include more than one domain,
separate domain names with commas.
To include all existing domains, use an asterisk ("*").
required: false
iscsi_chap_name:
description:
- The host's CHAP name identifier
required: false
iscsi_chap_secret:
description:
- The password of the initiator used to
authenticate to the system when CHAP is enabled
required: false
extends_documentation_fragment:
- ibm_storage
author:
- Tzur Eliyahu (@tzure)
'''
EXAMPLES = '''
- name: Define new host.
ibm_sa_host:
host: host_name
state: present
username: admin
password: secret
endpoints: hostdev-system
- name: Delete host.
ibm_sa_host:
host: host_name
state: absent
username: admin
password: secret
endpoints: hostdev-system
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ibm_sa_utils import execute_pyxcli_command, \
connect_ssl, spectrum_accelerate_spec, is_pyxcli_installed
def main():
argument_spec = spectrum_accelerate_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
host=dict(required=True),
cluster=dict(),
domain=dict(),
iscsi_chap_name=dict(),
iscsi_chap_secret=dict()
)
)
module = AnsibleModule(argument_spec)
is_pyxcli_installed(module)
xcli_client = connect_ssl(module)
host = xcli_client.cmd.host_list(
host=module.params['host']).as_single_element
state = module.params['state']
state_changed = False
if state == 'present' and not host:
state_changed = execute_pyxcli_command(
module, 'host_define', xcli_client)
elif state == 'absent' and host:
state_changed = execute_pyxcli_command(
module, 'host_delete', xcli_client)
module.exit_json(changed=state_changed)
if __name__ == '__main__':
main()
| gpl-3.0 |
danielmitterdorfer/elasticsearch | dev-tools/prepare_release_update_documentation.py | 269 | 5009 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release: Update the documentation and commit
#
# USAGE:
#
# python3 ./dev-tools/prepare_release_update_documentation.py
#
# Note: Ensure the script is run from the root directory
# This script needs to be run and then pushed,
# before proceeding with prepare_release_create-release-version.py
# on your build VM
#
import fnmatch
import subprocess
import tempfile
import re
import os
import shutil
def run(command):
if os.system('%s' % (command)):
raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch: got:\n%s' % (s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout <branch>", "git reset --hard origin/<branch>" in this branch: got:\n%s' % (s))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
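# Hedged usage sketch: rewrite a file line by line; the callback returns the
# (possibly modified) line, and the file is replaced only if something changed.
#
#     changed = process_file('docs/index.asciidoc',
#                            lambda line: line.replace('coming[', 'added['))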
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already a release version.
# Returns the next version string ie. 0.90.7
def find_release_version():
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch')
# Stages the given files for the next git commit
def add_pending_files(*files):
for file in files:
if file:
# print("Adding file: %s" % (file))
run('git add %s' % (file))
# Updates documentation feature flags
def commit_feature_flags(release):
run('git commit -m "Update Documentation Feature Flags [%s]"' % release)
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
pattern = 'coming[%s' % (release_version)
replacement = 'added[%s' % (release_version)
pending_files = []
def callback(line):
return line.replace(pattern, replacement)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.asciidoc'):
full_path = os.path.join(root, file_name)
if process_file(full_path, callback):
pending_files.append(os.path.join(root, file_name))
return pending_files
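# For example (hedged), update_reference_docs('1.4.0') turns 'coming[1.4.0'
# into 'added[1.4.0' in every *.asciidoc file under docs/ and returns the
# list of files it touched.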
if __name__ == "__main__":
release_version = find_release_version()
print('*** Preparing release version documentation: [%s]' % release_version)
ensure_checkout_is_clean()
pending_files = update_reference_docs(release_version)
if pending_files:
add_pending_files(*pending_files) # expects var args use * to expand
commit_feature_flags(release_version)
else:
print('WARNING: no documentation references updates for release %s' % (release_version))
print('*** Done.')
| apache-2.0 |
hyperized/ansible | lib/ansible/utils/ssh_functions.py | 148 | 1600 | # (c) 2016, James Tanner
# (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import subprocess
from ansible.module_utils._text import to_bytes
_HAS_CONTROLPERSIST = {}
def check_for_controlpersist(ssh_executable):
try:
# If we've already checked this executable
return _HAS_CONTROLPERSIST[ssh_executable]
except KeyError:
pass
b_ssh_exec = to_bytes(ssh_executable, errors='surrogate_or_strict')
has_cp = True
try:
cmd = subprocess.Popen([b_ssh_exec, '-o', 'ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if b"Bad configuration option" in err or b"Usage:" in err:
has_cp = False
except OSError:
has_cp = False
_HAS_CONTROLPERSIST[ssh_executable] = has_cp
return has_cp
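# Minimal usage sketch (hedged; the bare 'ssh' executable name is an
# assumption):
#
#     if check_for_controlpersist('ssh'):
#         pass  # safe to pass -o ControlPersist=... on the command line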
| gpl-3.0 |
IL2HorusTeam/il2ds-middleware | il2fb/ds/middleware/device_link/client.py | 1 | 11816 | # coding: utf-8
import asyncio
import logging
from typing import Tuple, Awaitable, List
from il2fb.ds.middleware.device_link import requests
from il2fb.ds.middleware.device_link import messages as msg
from il2fb.ds.middleware.device_link import structures
LOG = logging.getLogger(__name__)
Address = Tuple[str, int]
class DeviceLinkClient(asyncio.DatagramProtocol):
def __init__(
self,
remote_address: Address,
trace: bool=False,
loop: asyncio.AbstractEventLoop=None,
):
self._loop = loop
self._trace = trace
self._remote_address = remote_address
self._requests = asyncio.Queue(loop=self._loop)
self._request = None
self._transport = None
self._log_message_prefix_format = self._make_log_message_prefix_format(
remote_address=remote_address,
)
self._messages = []
self._do_close = False
self._connected_ack = asyncio.Future(loop=self._loop)
self._closed_ack = asyncio.Future(loop=self._loop)
@property
def remote_address(self):
return self._remote_address
@staticmethod
def _make_log_message_prefix_format(remote_address) -> str:
addr, port = remote_address
return f"[device link@{addr}:{port}] {{}}"
def _prefix_log_message(self, s: str) -> str:
return self._log_message_prefix_format.format(s)
def connection_made(self, transport) -> None:
LOG.debug(self._prefix_log_message(
"transport was opened"
))
self._transport = transport
asyncio.ensure_future(self._dispatch_all_requests(), loop=self._loop)
self._connected_ack.set_result(None)
def wait_connected(self) -> Awaitable[None]:
return self._connected_ack
def wait_closed(self) -> Awaitable[None]:
return self._closed_ack
def connection_lost(self, e: Exception=None) -> None:
self._closed_ack.set_result(e)
LOG.debug(self._prefix_log_message(
f"transport was closed (details={e or 'N/A'})"
))
def close(self) -> None:
LOG.debug(self._prefix_log_message(
"ask dispatching of requests to stop"
))
if not self._do_close:
self._do_close = True
self._requests.put_nowait(None)
async def _dispatch_all_requests(self) -> None:
LOG.info(self._prefix_log_message(
"dispatching of requests was started"
))
while True:
try:
await self._dispatch_request()
except StopAsyncIteration:
break
except Exception:
LOG.exception(self._prefix_log_message(
"failed to dispatch a single request"
))
self._transport.close()
LOG.info(self._prefix_log_message(
"dispatching of requests was stopped"
))
async def _dispatch_request(self) -> None:
self._request = await self._requests.get()
if not self._request or self._do_close:
LOG.info(self._prefix_log_message(
"got request to stop dispatching of requests"
))
raise StopAsyncIteration
if self._trace:
LOG.debug(self._prefix_log_message(
f"req <-- {repr(self._request)}"
))
try:
await self._request.execute(self._write_bytes)
finally:
self._request = None
def _write_bytes(self, data: bytes) -> None:
self._transport.sendto(data)
if self._trace:
LOG.debug(self._prefix_log_message(
f"dat --> {repr(data)}"
))
def datagram_received(self, data: bytes, addr: Address) -> None:
if addr != self._remote_address:
if self._trace:
LOG.warning(self._prefix_log_message(
f"dat <-? unknown sender {addr}, skip"
))
return
if self._trace:
LOG.debug(self._prefix_log_message(
f"dat <-- {repr(data)}"
))
if not self._request:
if self._trace:
LOG.warning(self._prefix_log_message(
f"req N/A, skip"
))
return
try:
self._request.data_received(data)
except Exception:
LOG.exception(self._prefix_log_message(
"failed to handle response"
))
def error_received(self, e) -> None:
if self._trace:
LOG.error(self._prefix_log_message(
f"err <-- {e}"
))
if self._request:
self._request.set_exception(e)
def schedule_request(self, request: requests.DeviceLinkRequest) -> None:
if self._do_close:
raise ConnectionAbortedError(
"client is closed and does not accept requests"
)
self._requests.put_nowait(request)
def send_messages(
self,
messages: List[msg.DeviceLinkRequestMessage],
timeout: float=None,
) -> Awaitable[List[msg.DeviceLinkMessage]]:
r = requests.DeviceLinkRequest(
messages=messages,
loop=self._loop,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def refresh_radar(self) -> Awaitable[None]:
r = requests.RefreshRadarRequest(
loop=self._loop,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def get_moving_aircrafts_count(
self,
timeout: float=None,
) -> Awaitable[int]:
r = requests.GetMovingAircraftsCountRequest(
loop=self._loop,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def get_moving_aircraft_position(
self,
index: int,
timeout: float=None,
) -> Awaitable[structures.MovingAircraftPosition]:
r = requests.GetMovingAircraftsPositionsRequest(
loop=self._loop,
indices=[index, ],
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
async def get_all_moving_aircrafts_positions(
self,
timeout: float=None,
) -> Awaitable[List[structures.MovingAircraftPosition]]:
count = await self.get_moving_aircrafts_count()
if not count:
return []
indices = range(count)
r = requests.GetMovingAircraftsPositionsRequest(
loop=self._loop,
indices=indices,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return (await r.result())
def get_moving_ground_units_count(
self,
timeout: float=None,
) -> Awaitable[int]:
r = requests.GetMovingGroundUnitsCountRequest(
loop=self._loop,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def get_moving_ground_unit_position(
self,
index: int,
timeout: float=None,
) -> Awaitable[structures.MovingGroundUnitPosition]:
r = requests.GetMovingGroundUnitsPositionsRequest(
loop=self._loop,
indices=[index, ],
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
async def get_all_moving_ground_units_positions(
self,
timeout: float=None,
) -> Awaitable[List[structures.MovingGroundUnitPosition]]:
count = await self.get_moving_ground_units_count()
if not count:
return []
indices = range(count)
r = requests.GetMovingGroundUnitsPositionsRequest(
loop=self._loop,
indices=indices,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return (await r.result())
def get_ships_count(
self,
timeout: float=None,
) -> Awaitable[int]:
r = requests.GetShipsCountRequest(
loop=self._loop,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def get_ship_position(
self,
index: int,
timeout: float=None,
) -> Awaitable[structures.ShipPosition]:
r = requests.GetShipsPositionsRequest(
loop=self._loop,
indices=[index, ],
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
async def get_all_ships_positions(
self,
timeout: float=None,
) -> Awaitable[List[structures.ShipPosition]]:
count = await self.get_ships_count()
if not count:
return []
indices = range(count)
r = requests.GetShipsPositionsRequest(
loop=self._loop,
indices=indices,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return (await r.result())
def get_stationary_objects_count(
self,
timeout: float=None,
) -> Awaitable[int]:
r = requests.GetStationaryObjectsCountRequest(
loop=self._loop,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def get_stationary_object_position(
self,
index: int,
timeout: float=None,
) -> Awaitable[structures.StationaryObjectPosition]:
r = requests.GetStationaryObjectsPositionsRequest(
loop=self._loop,
indices=[index, ],
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
async def get_all_stationary_objects_positions(
self,
timeout: float=None,
) -> Awaitable[List[structures.StationaryObjectPosition]]:
count = await self.get_stationary_objects_count()
if not count:
return []
indices = range(count)
r = requests.GetStationaryObjectsPositionsRequest(
loop=self._loop,
indices=indices,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return (await r.result())
def get_houses_count(
self,
timeout: float=None,
) -> Awaitable[int]:
r = requests.GetHousesCountRequest(
loop=self._loop,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
def get_house_position(
self,
index: int,
timeout: float=None,
) -> Awaitable[structures.HousePosition]:
r = requests.GetHousesPositionsRequest(
loop=self._loop,
indices=[index, ],
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return r.result()
async def get_all_houses_positions(
self,
timeout: float=None,
) -> Awaitable[List[structures.HousePosition]]:
count = await self.get_houses_count()
if not count:
return []
indices = range(count)
r = requests.GetHousesPositionsRequest(
loop=self._loop,
indices=indices,
timeout=timeout,
trace=self._trace,
)
self.schedule_request(r)
return (await r.result())
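# Hedged usage sketch (not part of the module): the protocol is meant to be
# wired to a UDP endpoint by the caller; the address below is a placeholder.
#
#     loop = asyncio.get_event_loop()
#     client = DeviceLinkClient(remote_address=('127.0.0.1', 10000))
#     loop.run_until_complete(loop.create_datagram_endpoint(
#         lambda: client, remote_addr=client.remote_address))
#     loop.run_until_complete(client.wait_connected())
#     print(loop.run_until_complete(client.get_ships_count()))
#     client.close()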
| lgpl-3.0 |
ecederstrand/django | tests/delete_regress/models.py | 325 | 3172 | from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Award(models.Model):
name = models.CharField(max_length=25)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
content_object = GenericForeignKey()
class AwardNote(models.Model):
award = models.ForeignKey(Award, models.CASCADE)
note = models.CharField(max_length=100)
class Person(models.Model):
name = models.CharField(max_length=25)
awards = GenericRelation(Award)
class Book(models.Model):
pagecount = models.IntegerField()
class Toy(models.Model):
name = models.CharField(max_length=50)
class Child(models.Model):
name = models.CharField(max_length=50)
toys = models.ManyToManyField(Toy, through='PlayedWith')
class PlayedWith(models.Model):
child = models.ForeignKey(Child, models.CASCADE)
toy = models.ForeignKey(Toy, models.CASCADE)
date = models.DateField(db_column='date_col')
class PlayedWithNote(models.Model):
played = models.ForeignKey(PlayedWith, models.CASCADE)
note = models.TextField()
class Contact(models.Model):
label = models.CharField(max_length=100)
class Email(Contact):
email_address = models.EmailField(max_length=100)
class Researcher(models.Model):
contacts = models.ManyToManyField(Contact, related_name="research_contacts")
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
class Eaten(models.Model):
food = models.ForeignKey(Food, models.CASCADE, to_field="name")
meal = models.CharField(max_length=20)
# Models for #15776
class Policy(models.Model):
policy_number = models.CharField(max_length=10)
class Version(models.Model):
policy = models.ForeignKey(Policy, models.CASCADE)
class Location(models.Model):
version = models.ForeignKey(Version, models.SET_NULL, blank=True, null=True)
class Item(models.Model):
version = models.ForeignKey(Version, models.CASCADE)
location = models.ForeignKey(Location, models.SET_NULL, blank=True, null=True)
# Models for #16128
class File(models.Model):
pass
class Image(File):
class Meta:
proxy = True
class Photo(Image):
class Meta:
proxy = True
class FooImage(models.Model):
my_image = models.ForeignKey(Image, models.CASCADE)
class FooFile(models.Model):
my_file = models.ForeignKey(File, models.CASCADE)
class FooPhoto(models.Model):
my_photo = models.ForeignKey(Photo, models.CASCADE)
class FooFileProxy(FooFile):
class Meta:
proxy = True
class OrgUnit(models.Model):
name = models.CharField(max_length=64, unique=True)
class Login(models.Model):
description = models.CharField(max_length=32)
orgunit = models.ForeignKey(OrgUnit, models.CASCADE)
class House(models.Model):
address = models.CharField(max_length=32)
class OrderedPerson(models.Model):
name = models.CharField(max_length=32)
lives_in = models.ForeignKey(House, models.CASCADE)
class Meta:
ordering = ['name']
| bsd-3-clause |
mtils/ems | ems/listcomparator.py | 1 | 7301 | '''
Created on 02.10.2012
@author: michi
'''
from ems.util import isiterable
class EqualsComparator(object):
def compare(self, a, b):
return a == b
class PropertyComparator(object):
def __init__(self, propertyName):
self.propertyName = propertyName
def compare(self, a, b):
return a.__getattribute__(self.propertyName) == \
b.__getattribute__(self.propertyName)
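# e.g. (hedged): PropertyComparator('id') treats two objects as equal when
# their .id attributes match.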
class ItemComparator(object):
def __init__(self, keyName):
self.keyName = keyName
def compare(self, a, b):
return a[self.keyName] == b[self.keyName]
class MethodComparator(object):
def __init__(self, methodName):
self.methodName = methodName
def compare(self, a, b):
return a.__getattribute__(self.methodName)() == \
b.__getattribute__(self.methodName)()
class ListComparator(object):
def __init__(self, iterableA=None, iterableB=None, comparator=None):
if iterableA is not None:
self.setA(iterableA)
if iterableB is not None:
self.setB(iterableB)
self._reset()
if comparator is None:
comparator = EqualsComparator()
self.setComparator(comparator)
def _reset(self):
self._union = []
self._intersection = []
self._difference = []
self._onlyContainedInA = []
self._onlyContainedInB = []
self._symetricDifference = []
self._isParsed = False
self._onlyContainedInAParsed = False
self._onlyContainedInBParsed = False
self._unionParsed = False
self._symetricDifferenceParsed = False
def getA(self):
return self._iterableA
def setA(self, iterableA):
if not isiterable(iterableA):
raise TypeError("A and B has to be iterable and countable by len()")
self._iterableA = iterableA
self._reset()
return self
a = property(getA, setA)
def getB(self):
return self._iterableB
def setB(self, iterableB):
if not isiterable(iterableB):
raise TypeError("A and B has to be iterable and countable by len()")
self._iterableB = iterableB
self._reset()
return self
b = property(getB, setB)
def getComparator(self):
'''
Returns the object which makes the comparison
@rtype: EqualsComparator
'''
return self._comparator
def setComparator(self, comparator):
if not hasattr(comparator, 'compare'):
raise TypeError("")
self._comparator = comparator
return self
comparator = property(getComparator, setComparator)
def getIntersection(self):
'''
Returns the Intersection of a and b
@return: The values which are in a and b
@rtype: list
'''
if not self._onlyContainedInAParsed:
self.getOnlyContainedInA()
return self._intersection
intersection = property(getIntersection)
def getUnion(self):
'''
Returns the union of a and b
@return: All Values of a and b (unique)
@rtype: list
'''
if not self._unionParsed:
inter = self.getIntersection()
sym = self.getSymetricDifference()
interLen = len(inter)
symLen = len(sym)
fullLength = max(interLen, symLen)
for i in range(fullLength):
if i < symLen:
self._union.append(sym[i])
if i < interLen:
self._union.append(inter[i])
return self._union
union = property(getUnion)
def getOnlyContainedInA(self):
'''
Returns all values which are only in a
@return: All values of a which are not contained in b
@rtype: list
'''
if not self._onlyContainedInAParsed:
for aVal in self._iterableA:
aIsInB = False
for bVal in self._iterableB:
if self._comparator.compare(aVal, bVal):
aIsInB = True
self._intersection.append(aVal)
if not aIsInB:
self._onlyContainedInA.append(aVal)
self._onlyContainedInAParsed = True
return self._onlyContainedInA
onlyContainedInA = property(getOnlyContainedInA)
def getOnlyContainedInB(self):
'''
Returns all values which are only in b
@return: All values in b which are not contained in a
@rtype: list
'''
if not self._onlyContainedInBParsed:
for bVal in self._iterableB:
bIsInA = False
for aVal in self._iterableA:
if self._comparator.compare(aVal, bVal):
bIsInA = True
#self._intersection.append(aVal)
if not bIsInA:
self._onlyContainedInB.append(bVal)
self._onlyContainedInBParsed = True
return self._onlyContainedInB
onlyContainedInB = property(getOnlyContainedInB)
def getDifference(self):
return self.getOnlyContainedInA()
difference = property(getDifference)
def getSymetricDifference(self):
if not self._symetricDifferenceParsed:
self._symetricDifference = self.getOnlyContainedInA() + \
self.getOnlyContainedInB()
self._symetricDifferenceParsed = True
return self._symetricDifference
symetricDifference = property(getSymetricDifference)
def getComplement(self):
return self.getOnlyContainedInB()
complement = property(getComplement)
def _parse(self):
if self._isParsed:
return
self._isParsed = True
if __name__ == '__main__':
simpleList = []
a = {
'Title':'Simple Lists',
'a' : [1,2,3,4,5,6,7,8,9],
'b' : [2,4,6,8,10,12,14,16],
}
a['c'] = ListComparator(a['a'], a['b'])
b = {
'Title':'Dicts',
'a': [{'testItem':1},{'testItem':2},{'testItem':3},{'testItem':4},
{'testItem':5},{'testItem':6},{'testItem':7},{'testItem':8},
{'testItem':9}],
'b': [{'testItem':2},{'testItem':4},{'testItem':6},{'testItem':8},
{'testItem':10},{'testItem':12},{'testItem':14},{'testItem':16}]
}
b['c'] = ListComparator(b['a'], b['b'], ItemComparator('testItem'))
for test in a,b:
print "------------------------------------------"
print "Title:", test['Title']
print "a", test['a']
print "b", test['b']
print "Intersection:", test['c'].intersection
print "Only in A:", test['c'].onlyContainedInA
print "Only in B:", test['c'].onlyContainedInB
print "Union", test['c'].union
print "SymetricDifference", test['c'].symetricDifference
| mit |
hesseltuinhof/mxnet | example/gluon/data.py | 4 | 4691 | # pylint: skip-file
""" data iterator for mnist """
import os
import random
import sys
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
import get_data
import mxnet as mx
def mnist_iterator(batch_size, input_shape):
"""return train and val iterators for mnist"""
# download data
get_data.GetMNIST_ubyte()
flat = False if len(input_shape) == 3 else True
train_dataiter = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
input_shape=input_shape,
batch_size=batch_size,
shuffle=True,
flat=flat)
val_dataiter = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
input_shape=input_shape,
batch_size=batch_size,
flat=flat)
return (train_dataiter, val_dataiter)
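# e.g. (hedged): train_iter, val_iter = mnist_iterator(100, (784,)) yields
# flat 784-vectors, while input_shape=(1, 28, 28) keeps the image layout.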
def cifar10_iterator(batch_size, data_shape, resize=-1):
get_data.GetCifar10()
train = mx.io.ImageRecordIter(
path_imgrec = "data/cifar/train.rec",
# mean_img = "data/cifar/mean.bin",
resize = resize,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True)
val = mx.io.ImageRecordIter(
path_imgrec = "data/cifar/test.rec",
# mean_img = "data/cifar/mean.bin",
resize = resize,
rand_crop = False,
rand_mirror = False,
data_shape = data_shape,
batch_size = batch_size)
return train, val
class DummyIter(mx.io.DataIter):
def __init__(self, batch_size, data_shape, batches = 5):
super(DummyIter, self).__init__(batch_size)
self.data_shape = (batch_size,) + data_shape
self.label_shape = (batch_size,)
self.provide_data = [('data', self.data_shape)]
self.provide_label = [('softmax_label', self.label_shape)]
self.batch = mx.io.DataBatch(data=[mx.nd.zeros(self.data_shape)],
label=[mx.nd.zeros(self.label_shape)])
self._batches = 0
self.batches = batches
def next(self):
if self._batches < self.batches:
self._batches += 1
return self.batch
else:
self._batches = 0
raise StopIteration
def dummy_iterator(batch_size, data_shape):
return DummyIter(batch_size, data_shape), DummyIter(batch_size, data_shape)
class ImagePairIter(mx.io.DataIter):
def __init__(self, path, data_shape, label_shape, batch_size=64, flag=0, input_aug=None, target_aug=None):
super(ImagePairIter, self).__init__(batch_size)
self.data_shape = (batch_size,) + data_shape
self.label_shape = (batch_size,) + label_shape
self.input_aug = input_aug
self.target_aug = target_aug
self.provide_data = [('data', self.data_shape)]
self.provide_label = [('label', self.label_shape)]
is_image_file = lambda fn: any(fn.endswith(ext) for ext in [".png", ".jpg", ".jpeg"])
self.filenames = [os.path.join(path, x) for x in os.listdir(path) if is_image_file(x)]
self.count = 0
self.flag = flag
random.shuffle(self.filenames)
def next(self):
from PIL import Image
if self.count + self.batch_size <= len(self.filenames):
data = []
label = []
for i in range(self.batch_size):
fn = self.filenames[self.count]
self.count += 1
image = Image.open(fn).convert('YCbCr').split()[0]
if image.size[0] > image.size[1]:
image = image.transpose(Image.TRANSPOSE)
image = mx.nd.expand_dims(mx.nd.array(image), axis=2)
target = image.copy()
for aug in self.input_aug:
image = aug(image)[0]
for aug in self.target_aug:
target = aug(target)[0]
data.append(image)
label.append(target)
data = mx.nd.concat(*[mx.nd.expand_dims(d, axis=0) for d in data], dim=0)
label = mx.nd.concat(*[mx.nd.expand_dims(d, axis=0) for d in label], dim=0)
data = [mx.nd.transpose(data, axes=(0, 3, 1, 2)).astype('float32')/255]
label = [mx.nd.transpose(label, axes=(0, 3, 1, 2)).astype('float32')/255]
return mx.io.DataBatch(data=data, label=label)
else:
raise StopIteration
def reset(self):
self.count = 0
random.shuffle(self.filenames)
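# Hedged usage sketch (not part of the original example): drives the MNIST
# iterators for one batch. Assumes get_data.GetMNIST_ubyte() can fetch or
# locate the MNIST files under ./data.
if __name__ == '__main__':
    train, val = mnist_iterator(batch_size=100, input_shape=(784,))
    for batch in train:
        # each DataBatch carries one data NDArray of shape (100, 784)
        # and one label NDArray of shape (100,)
        print(batch.data[0].shape, batch.label[0].shape)
        break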
| apache-2.0 |
serviceagility/boto | boto/route53/record.py | 136 | 14689 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
"""
A list of resource records.
:ivar hosted_zone_id: The ID of the hosted zone.
:ivar comment: A comment that will be stored with the change.
:ivar changes: A list of changes.
"""
ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
<ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
<ChangeBatch>
<Comment>%(comment)s</Comment>
<Changes>%(changes)s</Changes>
</ChangeBatch>
</ChangeResourceRecordSetsRequest>"""
ChangeXML = """<Change>
<Action>%(action)s</Action>
%(record)s
</Change>"""
def __init__(self, connection=None, hosted_zone_id=None, comment=None):
self.connection = connection
self.hosted_zone_id = hosted_zone_id
self.comment = comment
self.changes = []
self.next_record_name = None
self.next_record_type = None
self.next_record_identifier = None
super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)])
def __repr__(self):
if self.changes:
record_list = ','.join([c.__repr__() for c in self.changes])
else:
record_list = ','.join([record.__repr__() for record in self])
return '<ResourceRecordSets:%s [%s]>' % (self.hosted_zone_id,
record_list)
def add_change(self, action, name, type, ttl=600,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
"""
Add a change request to the set.
:type action: str
:param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')
:type name: str
:param name: The name of the domain you want to perform the action on.
:type type: str
:param type: The DNS record type. Valid values are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
:type ttl: int
:param ttl: The resource record cache time to live (TTL), in seconds.
:type alias_hosted_zone_id: str
:param alias_hosted_zone_id: *Alias resource record sets only* The value
of the hosted zone ID, CanonicalHostedZoneNameId, for
the LoadBalancer.
:type alias_dns_name: str
:param alias_dns_name: *Alias resource record sets only*
Information about the domain to which you are redirecting traffic.
:type identifier: str
:param identifier: *Weighted and latency-based resource record sets
only* An identifier that differentiates among multiple resource
record sets that have the same combination of DNS name and type.
:type weight: int
:param weight: *Weighted resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines what portion of traffic for the current
resource record set is routed to the associated location
:type region: str
:param region: *Latency-based resource record sets only* Among resource
record sets that have the same combination of DNS name and type,
a value that determines which region this should be associated with
for the latency-based routing
:type alias_evaluate_target_health: bool
:param alias_evaluate_target_health: *Required for alias resource record
sets* Indicates whether this Resource Record Set should respect the
health status of any health checks associated with the ALIAS target
record which it is linked to.
:type health_check: str
:param health_check: Health check to associate with this record
:type failover: str
:param failover: *Failover resource record sets only* Whether this is the
primary or secondary resource record set.
"""
change = Record(name, type, ttl,
alias_hosted_zone_id=alias_hosted_zone_id,
alias_dns_name=alias_dns_name, identifier=identifier,
weight=weight, region=region,
alias_evaluate_target_health=alias_evaluate_target_health,
health_check=health_check, failover=failover)
self.changes.append([action, change])
return change
def add_change_record(self, action, change):
"""Add an existing record to a change set with the specified action"""
self.changes.append([action, change])
return
def to_xml(self):
"""Convert this ResourceRecordSet into XML
to be saved via the ChangeResourceRecordSetsRequest"""
changesXML = ""
for change in self.changes:
changeParams = {"action": change[0], "record": change[1].to_xml()}
changesXML += self.ChangeXML % changeParams
params = {"comment": self.comment, "changes": changesXML}
return self.ChangeResourceRecordSetsBody % params
def commit(self):
"""Commit this change"""
if not self.connection:
import boto
self.connection = boto.connect_route53()
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
def endElement(self, name, value, connection):
"""Overwritten to also add the NextRecordName,
NextRecordType and NextRecordIdentifier to the base object"""
if name == 'NextRecordName':
self.next_record_name = value
elif name == 'NextRecordType':
self.next_record_type = value
elif name == 'NextRecordIdentifier':
self.next_record_identifier = value
else:
return super(ResourceRecordSets, self).endElement(name, value, connection)
def __iter__(self):
"""Override the next function to support paging"""
results = super(ResourceRecordSets, self).__iter__()
truncated = self.is_truncated
while results:
for obj in results:
yield obj
if self.is_truncated:
self.is_truncated = False
results = self.connection.get_all_rrsets(self.hosted_zone_id, name=self.next_record_name,
type=self.next_record_type,
identifier=self.next_record_identifier)
else:
results = None
self.is_truncated = truncated
class Record(object):
"""An individual ResourceRecordSet"""
HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""
XMLBody = """<ResourceRecordSet>
<Name>%(name)s</Name>
<Type>%(type)s</Type>
%(weight)s
%(body)s
%(health_check)s
</ResourceRecordSet>"""
WRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Weight>%(weight)s</Weight>
"""
RRRBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Region>%(region)s</Region>
"""
FailoverBody = """
<SetIdentifier>%(identifier)s</SetIdentifier>
<Failover>%(failover)s</Failover>
"""
ResourceRecordsBody = """
<TTL>%(ttl)s</TTL>
<ResourceRecords>
%(records)s
</ResourceRecords>"""
ResourceRecordBody = """<ResourceRecord>
<Value>%s</Value>
</ResourceRecord>"""
AliasBody = """<AliasTarget>
<HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
<DNSName>%(dns_name)s</DNSName>
%(eval_target_health)s
</AliasTarget>"""
EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""
def __init__(self, name=None, type=None, ttl=600, resource_records=None,
alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
weight=None, region=None, alias_evaluate_target_health=None,
health_check=None, failover=None):
self.name = name
self.type = type
self.ttl = ttl
if resource_records is None:
resource_records = []
self.resource_records = resource_records
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.identifier = identifier
self.weight = weight
self.region = region
self.alias_evaluate_target_health = alias_evaluate_target_health
self.health_check = health_check
self.failover = failover
def __repr__(self):
return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())
def add_value(self, value):
"""Add a resource record value"""
self.resource_records.append(value)
def set_alias(self, alias_hosted_zone_id, alias_dns_name,
alias_evaluate_target_health=False):
"""Make this an alias resource record set"""
self.alias_hosted_zone_id = alias_hosted_zone_id
self.alias_dns_name = alias_dns_name
self.alias_evaluate_target_health = alias_evaluate_target_health
def to_xml(self):
"""Spit this resource record set out as XML"""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Use alias
if self.alias_evaluate_target_health is not None:
eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
else:
eval_target_health = ""
body = self.AliasBody % {"hosted_zone_id": self.alias_hosted_zone_id,
"dns_name": self.alias_dns_name,
"eval_target_health": eval_target_health}
else:
# Use resource record(s)
records = ""
for r in self.resource_records:
records += self.ResourceRecordBody % r
body = self.ResourceRecordsBody % {
"ttl": self.ttl,
"records": records,
}
weight = ""
if self.identifier is not None and self.weight is not None:
weight = self.WRRBody % {"identifier": self.identifier,
"weight": self.weight}
elif self.identifier is not None and self.region is not None:
weight = self.RRRBody % {"identifier": self.identifier,
"region": self.region}
elif self.identifier is not None and self.failover is not None:
weight = self.FailoverBody % {"identifier": self.identifier,
"failover": self.failover}
health_check = ""
if self.health_check is not None:
health_check = self.HealthCheckBody % (self.health_check)
params = {
"name": self.name,
"type": self.type,
"weight": weight,
"body": body,
"health_check": health_check
}
return self.XMLBody % params
def to_print(self):
rr = ""
if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
# Show alias
rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
if self.alias_evaluate_target_health is not None:
rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
else:
# Show resource record(s)
rr = ",".join(self.resource_records)
if self.identifier is not None and self.weight is not None:
rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
elif self.identifier is not None and self.region is not None:
rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
elif self.identifier is not None and self.failover is not None:
rr += ' (FAILOVER id=%s, failover=%s)' % (self.identifier, self.failover)
return rr
def endElement(self, name, value, connection):
if name == 'Name':
self.name = value
elif name == 'Type':
self.type = value
elif name == 'TTL':
self.ttl = value
elif name == 'Value':
self.resource_records.append(value)
elif name == 'HostedZoneId':
self.alias_hosted_zone_id = value
elif name == 'DNSName':
self.alias_dns_name = value
elif name == 'SetIdentifier':
self.identifier = value
elif name == 'EvaluateTargetHealth':
self.alias_evaluate_target_health = value.lower() == 'true'
elif name == 'Weight':
self.weight = value
elif name == 'Region':
self.region = value
elif name == 'Failover':
self.failover = value
elif name == 'HealthCheckId':
self.health_check = value
def startElement(self, name, attrs, connection):
return None
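# Hedged usage sketch (ZONEID and the record values are placeholders):
# build a change set, add an A record, and commit it. Committing requires
# valid AWS credentials, so the sketch is guarded behind __main__.
if __name__ == '__main__':
    import boto
    conn = boto.connect_route53()
    rrsets = ResourceRecordSets(conn, 'ZONEID', comment='add www record')
    change = rrsets.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
    change.add_value('192.0.2.1')
    print(rrsets.to_xml())  # inspect the ChangeResourceRecordSetsRequest body
    rrsets.commit()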
| mit |
Yelp/pootle | pootle/apps/pootle_app/migrations/0002_mark_empty_dirs_as_obsolete.py | 5 | 1301 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def make_dir_obsolete(directory):
"""Make directory and its parents obsolete if a parent contains one empty
directory only
"""
p = directory.parent
if p is not None and p.child_dirs.filter(obsolete=False).count() == 1:
make_dir_obsolete(p)
directory.obsolete = True
directory.save()
def make_empty_directories_obsolete(apps, schema_editor):
Directory = apps.get_model("pootle_app", "Directory")
from pootle.core.url_helpers import split_pootle_path
for d in Directory.objects.filter(child_stores__isnull=True,
child_dirs__isnull=True,
obsolete=False):
lang_code, prj_code, dir_path, fname = split_pootle_path(d.pootle_path)
# make translation project directories and below obsolete,
# but do not touch language and project directories
if lang_code and prj_code:
make_dir_obsolete(d)
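# For illustration (hedged; exact return values depend on
# pootle.core.url_helpers): split_pootle_path('/ru/proj/sub/f.po') yields
# roughly ('ru', 'proj', 'sub/', 'f.po'), while a bare language directory
# like '/ru/' yields an empty project code, so only directories at or below
# the translation project level are made obsolete above.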
class Migration(migrations.Migration):
dependencies = [
('pootle_app', '0001_initial'),
('pootle_store', '0001_initial'),
]
operations = [
migrations.RunPython(make_empty_directories_obsolete),
]
| gpl-3.0 |
esthermm/odoomrp-wip | base_partner_references/models/res_partner.py | 31 | 1381 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp.osv import orm, fields
class ResPartner(orm.Model):
_inherit = 'res.partner'
_columns = {
'ref_customer': fields.char('Customer Reference', size=64,
help='This is my customer reference for'
' the supplier'),
'ref_supplier': fields.char('Supplier Reference', size=64,
help='This is my supplier reference for'
' the customer'),
}
| agpl-3.0 |
jgors/duecredit | duecredit/tests/test_injections.py | 1 | 8520 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import gc
import sys
from six import viewvalues, PY2
if PY2:
import __builtin__
else:
import builtins as __builtin__
_orig__import__ = __builtin__.__import__
from duecredit.collector import DueCreditCollector, InactiveDueCreditCollector
from duecredit.entries import BibTeX, Doi
from ..injections.injector import DueCreditInjector, find_object, get_modules_for_injection
from .. import __version__
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
try:
import mvpa2
_have_mvpa2 = True
except ImportError:
_have_mvpa2 = False
class TestActiveInjector(object):
def setup(self):
self._cleanup_modules()
self.due = DueCreditCollector()
self.injector = DueCreditInjector(collector=self.due)
self.injector.activate(retrospect=False) # numpy might be already loaded...
def teardown(self):
# gc might not pick up inj after some tests complete
# so we will always deactivate explicitly
self.injector.deactivate()
assert_true(__builtin__.__import__ is _orig__import__)
self._cleanup_modules()
def _cleanup_modules(self):
if 'duecredit.tests.mod' in sys.modules:
sys.modules.pop('duecredit.tests.mod')
def _test_simple_injection(self, func, import_stmt, func_call=None):
assert_false('duecredit.tests.mod' in sys.modules)
self.injector.add('duecredit.tests.mod', func,
Doi('1.2.3.4'),
description="Testing %s" % func,
min_version='0.1', max_version='1.0',
tags=["implementation", "very custom"])
assert_false('duecredit.tests.mod' in sys.modules) # no import happening
assert_equal(len(self.due._entries), 0)
assert_equal(len(self.due.citations), 0)
exec(import_stmt)
assert_equal(len(self.due._entries), 1) # we should get an entry now
assert_equal(len(self.due.citations), 0) # but not yet a citation
import duecredit.tests.mod as mod
_, _, obj = find_object(mod, func)
assert_true(obj.__duecredited__) # we wrapped
assert_false(obj.__duecredited__ is obj) # and it is not pointing to the same func
assert_equal(obj.__doc__, "custom docstring") # we preserved docstring
# TODO: test decoration features -- preserve __doc__ etc.
exec('ret = %s(None, "somevalue")' % (func_call or func))
# XXX: awkwardly 'ret' is not found in the scope while running nosetests
# under python3.4, although present in locals()... WTF?
assert_equal(locals()['ret'], "%s: None, somevalue" % func)
assert_equal(len(self.due._entries), 1)
assert_equal(len(self.due.citations), 1)
# TODO: there must be a cleaner way to get first value
citation = list(viewvalues(self.due.citations))[0]
# TODO: ATM we don't allow versioning of the submodules -- we should
# assert_equal(citation.version, '0.5')
# ATM it will be the duecredit's version
assert_equal(citation.version, __version__)
assert(citation.tags == ['implementation', 'very custom'])
def test_simple_injection(self):
yield self._test_simple_injection, "testfunc1", 'from duecredit.tests.mod import testfunc1'
yield self._test_simple_injection, "TestClass1.testmeth1", \
'from duecredit.tests.mod import TestClass1; c = TestClass1()', 'c.testmeth1'
yield self._test_simple_injection, "TestClass12.Embed.testmeth1", \
'from duecredit.tests.mod import TestClass12; c = TestClass12.Embed()', 'c.testmeth1'
def test_delayed_entries(self):
# verify that addition of delayed injections happened
modules_for_injection = get_modules_for_injection()
assert_equal(len(self.injector._delayed_injections), len(modules_for_injection))
assert_equal(self.injector._entry_records, {}) # but no entries were added
assert('scipy' in self.injector._delayed_injections) # We must have it ATM
try:
# We do have injections for scipy
import scipy
except ImportError as e:
raise SkipTest("scipy was not found: %s" % (e,))
def test_import_mvpa2_suite(self):
if not _have_mvpa2:
raise SkipTest("no mvpa2 found")
# just a smoke test for now
import mvpa2.suite as mv
def _test_incorrect_path(self, mod, obj):
ref = Doi('1.2.3.4')
# none of them should lead to a failure
self.injector.add(mod, obj, ref)
# now cause the import handling -- it must not fail
# TODO: catch/analyze warnings
exec('from duecredit.tests.mod import testfunc1')
def test_incorrect_path(self):
yield self._test_incorrect_path, "nonexistingmodule", None
yield self._test_incorrect_path, "duecredit.tests.mod.nonexistingmodule", None
yield self._test_incorrect_path, "duecredit.tests.mod", "nonexisting"
yield self._test_incorrect_path, "duecredit.tests.mod", "nonexisting.whocares"
def _test_find_object(mod, path, parent, obj_name, obj):
assert_equal(find_object(mod, path), (parent, obj_name, obj))
def test_find_object():
import duecredit.tests.mod as mod
yield _test_find_object, mod, 'testfunc1', mod, 'testfunc1', mod.testfunc1
yield _test_find_object, mod, 'TestClass1', mod, 'TestClass1', mod.TestClass1
yield _test_find_object, mod, 'TestClass1.testmeth1', mod.TestClass1, 'testmeth1', mod.TestClass1.testmeth1
yield _test_find_object, mod, 'TestClass12.Embed.testmeth1', \
mod.TestClass12.Embed, 'testmeth1', mod.TestClass12.Embed.testmeth1
def test_no_double_activation():
orig__import__ = __builtin__.__import__
try:
due = DueCreditCollector()
injector = DueCreditInjector(collector=due)
injector.activate()
assert_false(__builtin__.__import__ is orig__import__)
duecredited__import__ = __builtin__.__import__
# TODO: catch/analyze/swallow warning
injector.activate()
assert_true(__builtin__.__import__ is duecredited__import__) # we didn't decorate again
finally:
injector.deactivate()
__builtin__.__import__ = orig__import__
def test_get_modules_for_injection():
assert_equal(get_modules_for_injection(), [
'mod_biosig',
'mod_dipy',
'mod_mdp',
'mod_mne',
'mod_nibabel',
'mod_nipy',
'mod_nipype',
'mod_numpy',
'mod_pandas',
'mod_psychopy',
'mod_scipy',
'mod_skimage',
'mod_sklearn'])
def test_cover_our_injections():
# this one tests only import/syntax/api for the injections
due = DueCreditCollector()
inj = DueCreditInjector(collector=due)
for modname in get_modules_for_injection():
mod = __import__('duecredit.injections.' + modname, fromlist=["duecredit.injections"])
mod.inject(inj)
def test_no_harm_from_deactivate():
# if we have not activated one -- shouldn't blow if we deactivate it
# TODO: catch warning being spitted out
DueCreditInjector().deactivate()
def test_injector_del():
orig__import__ = __builtin__.__import__
try:
due = DueCreditCollector()
inj = DueCreditInjector(collector=due)
del inj # delete inactive
assert_true(__builtin__.__import__ is orig__import__)
inj = DueCreditInjector(collector=due)
inj.activate(retrospect=False)
assert_false(__builtin__.__import__ is orig__import__)
assert_false(inj._orig_import is None)
del inj # delete active but not used
inj = None
__builtin__.__import__ = None # We need to do that since otherwise gc will not pick up inj
gc.collect() # To cause __del__
assert_true(__builtin__.__import__ is orig__import__)
import abc # and new imports work just fine
finally:
__builtin__.__import__ = orig__import__
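# Hedged usage sketch of the injector API exercised by the tests above:
# register a citation for a function (the DOI here is hypothetical), then
# activate import interception and trigger the citation by calling it.
if __name__ == '__main__':
    due = DueCreditCollector()
    injector = DueCreditInjector(collector=due)
    injector.add('duecredit.tests.mod', 'testfunc1', Doi('10.1000/xyz'),
                 description='hypothetical citation for testfunc1')
    injector.activate(retrospect=False)
    try:
        from duecredit.tests.mod import testfunc1
        testfunc1(None, 'value')  # calling the wrapped function cites it
        print(due.citations)
    finally:
        injector.deactivate()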
| bsd-2-clause |
anryko/ansible | lib/ansible/modules/network/f5/bigip_firewall_dos_vector.py | 38 | 47849 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_dos_vector
short_description: Manage attack vector configuration in an AFM DoS profile
description:
- Manage attack vector configuration in an AFM DoS profile. In addition to the normal
AFM DoS profile vectors, this module can manage the device-configuration vectors.
See the module documentation for details about this method.
version_added: 2.8
options:
name:
description:
- Specifies the name of the vector to modify.
- Vectors that ship with the device are "hard-coded" so-to-speak in that the list
of vectors is known to the system and users cannot add new vectors. Users only
manipulate the existing vectors; all of which are disabled by default.
- When C(ext-hdr-too-large), configures the "IPv6 extension header too large"
Network Security vector.
- When C(hop-cnt-low), configures the "IPv6 hop count <= <tunable>" Network
Security vector.
- When C(host-unreachable), configures the "Host Unreachable" Network Security
vector.
- When C(icmp-frag), configures the "ICMP Fragment" Network Security vector.
- When C(icmpv4-flood), configures the "ICMPv4 flood" Network Security vector.
- When C(icmpv6-flood), configures the "ICMPv6 flood" Network Security vector.
- When C(ip-frag-flood), configures the "IP Fragment Flood" Network Security vector.
- When C(ip-low-ttl), configures the "TTL <= <tunable>" Network Security vector.
- When C(ip-opt-frames), configures the "IP Option Frames" Network Security vector.
- When C(ipv6-ext-hdr-frames), configures the "IPv6 Extended Header Frames"
Network Security vector.
- When C(ipv6-frag-flood), configures the "IPv6 Fragment Flood" Network Security
vector.
- When C(opt-present-with-illegal-len), configures the "Option Present With Illegal
Length" Network Security vector.
- When C(sweep), configures the "Sweep" Network Security vector.
- When C(tcp-bad-urg), configures the "TCP Flags-Bad URG" Network Security vector.
- When C(tcp-half-open), configures the "TCP Half Open" Network Security vector.
- When C(tcp-opt-overruns-tcp-hdr), configures the "TCP Option Overruns TCP Header"
Network Security vector.
- When C(tcp-psh-flood), configures the "TCP PUSH Flood" Network Security vector.
- When C(tcp-rst-flood), configures the "TCP RST Flood" Network Security vector.
- When C(tcp-syn-flood), configures the "TCP SYN Flood" Network Security vector.
- When C(tcp-syn-oversize), configures the "TCP SYN Oversize" Network Security
vector.
- When C(tcp-synack-flood), configures the "TCP SYN ACK Flood" Network Security
vector.
- When C(tcp-window-size), configures the "TCP Window Size" Network Security
vector.
- When C(tidcmp), configures the "TIDCMP" Network Security vector.
- When C(too-many-ext-hdrs), configures the "Too Many Extension Headers" Network
Security vector.
- When C(udp-flood), configures the "UDP Flood" Network Security vector.
- When C(unk-tcp-opt-type), configures the "Unknown TCP Option Type" Network
Security vector.
- When C(a), configures the "DNS A Query" DNS Protocol Security vector.
- When C(aaaa), configures the "DNS AAAA Query" DNS Protocol Security vector.
- When C(any), configures the "DNS ANY Query" DNS Protocol Security vector.
- When C(axfr), configures the "DNS AXFR Query" DNS Protocol Security vector.
- When C(cname), configures the "DNS CNAME Query" DNS Protocol Security vector.
- When C(dns-malformed), configures the "dns-malformed" DNS Protocol Security vector.
- When C(ixfr), configures the "DNS IXFR Query" DNS Protocol Security vector.
- When C(mx), configures the "DNS MX Query" DNS Protocol Security vector.
- When C(ns), configures the "DNS NS Query" DNS Protocol Security vector.
- When C(other), configures the "DNS OTHER Query" DNS Protocol Security vector.
- When C(ptr), configures the "DNS PTR Query" DNS Protocol Security vector.
- When C(qdcount), configures the "DNS QDCOUNT Query" DNS Protocol Security vector.
- When C(soa), configures the "DNS SOA Query" DNS Protocol Security vector.
- When C(srv), configures the "DNS SRV Query" DNS Protocol Security vector.
- When C(txt), configures the "DNS TXT Query" DNS Protocol Security vector.
- When C(ack), configures the "SIP ACK Method" SIP Protocol Security vector.
- When C(bye), configures the "SIP BYE Method" SIP Protocol Security vector.
- When C(cancel), configures the "SIP CANCEL Method" SIP Protocol Security vector.
- When C(invite), configures the "SIP INVITE Method" SIP Protocol Security vector.
- When C(message), configures the "SIP MESSAGE Method" SIP Protocol Security vector.
- When C(notify), configures the "SIP NOTIFY Method" SIP Protocol Security vector.
- When C(options), configures the "SIP OPTIONS Method" SIP Protocol Security vector.
- When C(other), configures the "SIP OTHER Method" SIP Protocol Security vector.
- When C(prack), configures the "SIP PRACK Method" SIP Protocol Security vector.
- When C(publish), configures the "SIP PUBLISH Method" SIP Protocol Security vector.
- When C(register), configures the "SIP REGISTER Method" SIP Protocol Security vector.
- When C(sip-malformed), configures the "sip-malformed" SIP Protocol Security vector.
- When C(subscribe), configures the "SIP SUBSCRIBE Method" SIP Protocol Security vector.
- When C(uri-limit), configures the "uri-limit" SIP Protocol Security vector.
type: str
choices:
- ext-hdr-too-large
- hop-cnt-low
- host-unreachable
- icmp-frag
- icmpv4-flood
- icmpv6-flood
- ip-frag-flood
- ip-low-ttl
- ip-opt-frames
- ipv6-frag-flood
- opt-present-with-illegal-len
- sweep
- tcp-bad-urg
- tcp-half-open
- tcp-opt-overruns-tcp-hdr
- tcp-psh-flood
- tcp-rst-flood
- tcp-syn-flood
- tcp-syn-oversize
- tcp-synack-flood
- tcp-window-size
- tidcmp
- too-many-ext-hdrs
- udp-flood
- unk-tcp-opt-type
- a
- aaaa
- any
- axfr
- cname
- dns-malformed
- ixfr
- mx
- ns
- other
- ptr
- qdcount
- soa
- srv
- txt
- ack
- bye
- cancel
- invite
- message
- notify
- options
- other
- prack
- publish
- register
- sip-malformed
- subscribe
- uri-limit
profile:
description:
- Specifies the name of the profile to manage vectors in.
- The name C(device-config) is reserved for use by this module.
- Vectors can be managed in either DoS Profiles, or Device Configuration. By
specifying a profile of 'device-config', this module will specifically tailor
configuration of the provided vectors to the Device Configuration.
type: str
required: True
auto_blacklist:
description:
- Automatically blacklists detected bad actors.
- To enable this parameter, the C(bad_actor_detection) must also be enabled.
- This parameter is not supported by the C(dns-malformed) vector.
- This parameter is not supported by the C(qdcount) vector.
type: bool
bad_actor_detection:
description:
- Whether Bad Actor detection is enabled or disabled for a vector, if available.
- This parameter must be enabled to enable the C(auto_blacklist) parameter.
- This parameter is not supported by the C(dns-malformed) vector.
- This parameter is not supported by the C(qdcount) vector.
type: bool
attack_ceiling:
description:
- Specifies the absolute maximum allowable for packets of this type.
- This setting rate limits packets to the packets per second setting, when
specified.
- To set no hard limit and allow automatic thresholds to manage all rate limiting,
set this to C(infinite).
type: str
attack_floor:
description:
- Specifies packets per second to identify an attack.
- These settings provide an absolute minimum of packets to allow before the attack
is identified.
- As the automatic detection thresholds adjust to traffic and CPU usage on the
system over time, this attack floor becomes less relevant.
- This value may not exceed the value in C(attack_ceiling).
type: str
allow_advertisement:
description:
- Specifies that addresses that are identified for blacklisting are advertised to
BGP routers.
type: bool
simulate_auto_threshold:
description:
- Specifies that results of the current automatic thresholds are logged, though
manual thresholds are enforced, and no action is taken on automatic thresholds.
- The C(sweep) vector does not support this parameter.
type: bool
blacklist_detection_seconds:
description:
- Detection, in seconds, before blacklisting occurs.
type: int
blacklist_duration:
description:
- Duration, in seconds, that the blacklist will last.
type: int
per_source_ip_detection_threshold:
description:
- Specifies the number of packets per second to identify an IP address as a bad
actor.
type: str
per_source_ip_mitigation_threshold:
description:
- Specifies the rate limit applied to a source IP that is identified as a bad
actor.
type: str
detection_threshold_percent:
description:
- Lists the threshold percent increase over time that the system must detect in
traffic in order to detect this attack.
- The C(tcp-half-open) vector does not support this parameter.
type: str
aliases:
- rate_increase
detection_threshold_eps:
description:
- Lists how many packets per second the system must discover in traffic in order
to detect this attack.
type: str
aliases:
- rate_threshold
mitigation_threshold_eps:
description:
- Specify the maximum number of this type of packet per second the system allows
for a vector.
- The system drops packets once the traffic level exceeds the rate limit.
type: str
aliases:
- rate_limit
threshold_mode:
description:
- Specifies how the mitigation threshold for this vector is determined.
- The C(dns-malformed) vector does not support C(fully-automatic), or C(stress-based-mitigation)
for this parameter.
- The C(qdcount) vector does not support C(fully-automatic), or C(stress-based-mitigation)
for this parameter.
- The C(sip-malformed) vector does not support C(fully-automatic), or C(stress-based-mitigation)
for this parameter.
type: str
choices:
- manual
- stress-based-mitigation
- fully-automatic
state:
description:
- When C(state) is C(mitigate), ensures that the vector enforces limits and
thresholds.
- When C(state) is C(detect-only), ensures that the vector does not enforce limits
and thresholds (rate limiting, dropping, etc.), but is still tracked in logs and statistics.
- When C(state) is C(disabled), ensures that the vector does not enforce limits
and thresholds, but is still tracked in logs and statistics.
- When C(state) is C(learn-only), ensures that the vector does not "detect" any attacks.
Only learning and stat collecting is performed.
type: str
choices:
- mitigate
- detect-only
- learn-only
- disabled
required: True
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5
requirements:
- BIG-IP >= v13.0.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Enable DNS AAAA vector mitigation
bigip_firewall_dos_vector:
name: aaaa
state: mitigate
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
allow_advertisement:
description: The new Allow External Advertisement setting.
returned: changed
type: bool
sample: yes
auto_blacklist:
description: The new Auto Blacklist setting.
returned: changed
type: bool
sample: no
bad_actor_detection:
description: The new Bad Actor Detection setting.
returned: changed
type: bool
sample: no
blacklist_detection_seconds:
description: The new Sustained Attack Detection Time setting.
returned: changed
type: int
sample: 60
blacklist_duration:
description: The new Category Duration Time setting.
returned: changed
type: int
sample: 14400
attack_ceiling:
description: The new Attack Ceiling EPS setting.
returned: changed
type: str
sample: infinite
attack_floor:
description: The new Attack Floor EPS setting.
returned: changed
type: str
sample: infinite
blacklist_category:
description: The new Category Name setting.
returned: changed
type: str
sample: /Common/cloud_provider_networks
per_source_ip_detection_threshold:
description: The new Per Source IP Detection Threshold EPS setting.
returned: changed
type: str
sample: 23
per_source_ip_mitigation_threshold:
description: The new Per Source IP Mitigation Threshold EPS setting.
returned: changed
type: str
sample: infinite
detection_threshold_percent:
description: The new Detection Threshold Percent setting.
returned: changed
type: str
sample: infinite
detection_threshold_eps:
description: The new Detection Threshold EPS setting.
returned: changed
type: str
sample: infinite
mitigation_threshold_eps:
description: The new Mitigation Threshold EPS setting.
returned: changed
type: str
sample: infinite
threshold_mode:
description: The new Mitigation Threshold EPS setting.
returned: changed
type: str
sample: infinite
simulate_auto_threshold:
description: The new Simulate Auto Threshold setting.
returned: changed
type: bool
sample: no
state:
description: The new state of the vector.
returned: changed
type: str
sample: mitigate
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.six import iteritems
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import flatten_boolean
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import flatten_boolean
NETWORK_SECURITY_VECTORS = [
'ext-hdr-too-large', # IPv6 extension header too large
'hop-cnt-low', # IPv6 hop count <= <tunable>
'host-unreachable', # Host Unreachable
'icmp-frag', # ICMP Fragment
'icmpv4-flood', # ICMPv4 flood
'icmpv6-flood', # ICMPv6 flood
'ip-frag-flood', # IP Fragment Flood
'ip-low-ttl', # TTL <= <tunable>
'ip-opt-frames', # IP Option Frames
'ipv6-ext-hdr-frames', # IPv6 Extended Header Frames
'ipv6-frag-flood', # IPv6 Fragment Flood
'opt-present-with-illegal-len', # Option Present With Illegal Length
'sweep', # Sweep
'tcp-bad-urg', # TCP Flags-Bad URG
'tcp-half-open', # TCP Half Open
'tcp-opt-overruns-tcp-hdr', # TCP Option Overruns TCP Header
'tcp-psh-flood', # TCP PUSH Flood
'tcp-rst-flood', # TCP RST Flood
'tcp-syn-flood', # TCP SYN Flood
'tcp-syn-oversize', # TCP SYN Oversize
'tcp-synack-flood', # TCP SYN ACK Flood
'tcp-window-size', # TCP Window Size
'tidcmp', # TIDCMP
'too-many-ext-hdrs', # Too Many Extension Headers
'udp-flood', # UDP Flood
'unk-tcp-opt-type', # Unknown TCP Option Type
]
PROTOCOL_SIP_VECTORS = [
'ack', # SIP ACK Method
'bye', # SIP BYE Method
'cancel', # SIP CANCEL Method
'invite', # SIP INVITE Method
'message', # SIP MESSAGE Method
'notify', # SIP NOTIFY Method
'options', # SIP OPTIONS Method
'other', # SIP OTHER Method
'prack', # SIP PRACK Method
'publish', # SIP PUBLISH Method
'register', # SIP REGISTER Method
'sip-malformed', # sip-malformed
'subscribe', # SIP SUBSCRIBE Method
'uri-limit', # uri-limit
]
PROTOCOL_DNS_VECTORS = [
'a', # DNS A Query
'aaaa', # DNS AAAA Query
'any', # DNS ANY Query
'axfr', # DNS AXFR Query
'cname', # DNS CNAME Query
'dns-malformed', # dns-malformed
'ixfr', # DNS IXFR Query
'mx', # DNS MX Query
'ns', # DNS NS Query
'other', # DNS OTHER Query
'ptr', # DNS PTR Query
'qdcount', # DNS QDCOUNT LIMIT
'soa', # DNS SOA Query
'srv', # DNS SRV Query
'txt', # DNS TXT Query
]
class Parameters(AnsibleF5Parameters):
api_map = {
'allowAdvertisement': 'allow_advertisement',
'autoBlacklisting': 'auto_blacklist',
# "autoThreshold": "disabled",
# This is a deprecated parameter in 13.1.0. Use threshold_mode instead
'badActor': 'bad_actor_detection',
'blacklistCategory': 'blacklist_category',
'blacklistDetectionSeconds': 'blacklist_detection_seconds',
'blacklistDuration': 'blacklist_duration',
'ceiling': 'attack_ceiling',
# "enforce": "enabled",
'floor': 'attack_floor',
'perSourceIpDetectionPps': 'per_source_ip_detection_threshold',
'perSourceIpLimitPps': 'per_source_ip_mitigation_threshold',
'rateIncrease': 'detection_threshold_percent',
'rateLimit': 'mitigation_threshold_eps',
'rateThreshold': 'detection_threshold_eps',
'simulateAutoThreshold': 'simulate_auto_threshold',
'thresholdMode': 'threshold_mode',
# device-config specific settings
'scrubbingDetectionSeconds': 'sustained_attack_detection_time',
'scrubbingDuration': 'category_detection_time',
'perDstIpDetectionPps': 'per_dest_ip_detection_threshold',
'perDstIpLimitPps': 'per_dest_ip_mitigation_threshold',
# The following are not enabled for device-config because I
# do not know what parameters in TMUI they map to. Additionally,
# they do not appear to have any "help" documentation available
# in ``tmsh help security dos device-config``.
#
# "allowUpstreamScrubbing": "disabled",
# "attackedDst": "disabled",
# "autoScrubbing": "disabled",
'defaultInternalRateLimit': 'mitigation_threshold_eps',
'detectionThresholdPercent': 'detection_threshold_percent',
'detectionThresholdPps': 'detection_threshold_eps',
}
api_attributes = [
'allowAdvertisement',
'autoBlacklisting',
'autoThreshold',
'badActor',
'blacklistCategory',
'blacklistDetectionSeconds',
'blacklistDuration',
'ceiling',
'enforce',
'floor',
'perSourceIpDetectionPps',
'perSourceIpLimitPps',
'rateIncrease',
'rateLimit',
'rateThreshold',
'simulateAutoThreshold',
'state',
'thresholdMode',
# device-config specific
'scrubbingDetectionSeconds',
'scrubbingDuration',
'perDstIpDetectionPps',
'perDstIpLimitPps',
'defaultInternalRateLimit',
'detectionThresholdPercent',
'detectionThresholdPps',
# Attributes on the DoS profiles that hold the different vectors
#
# Each of these attributes is a list of dictionaries. Each dictionary
# contains the settings that affect the way the vector works.
#
# The vectors appear to all have the same attributes even if those
# attributes are not used. There may be cases where this is not true,
# however, and for those vectors we should either include specific
# error detection, or pass the unfiltered values through to mcpd and
# handle any unintuitive error messages that mcpd returns.
'dosDeviceVector',
'dnsQueryVector',
'networkAttackVector',
'sipAttackVector',
]
returnables = [
'allow_advertisement',
'auto_blacklist',
'bad_actor_detection',
'blacklist_detection_seconds',
'blacklist_duration',
'attack_ceiling',
'attack_floor',
'blacklist_category',
'per_source_ip_detection_threshold',
'per_source_ip_mitigation_threshold',
'detection_threshold_percent',
'detection_threshold_eps',
'mitigation_threshold_eps',
'threshold_mode',
'simulate_auto_threshold',
'state',
]
updatables = [
'allow_advertisement',
'auto_blacklist',
'bad_actor_detection',
'blacklist_detection_seconds',
'blacklist_duration',
'attack_ceiling',
'attack_floor',
'blacklist_category',
'per_source_ip_detection_threshold',
'per_source_ip_mitigation_threshold',
'detection_threshold_percent',
'detection_threshold_eps',
'mitigation_threshold_eps',
'threshold_mode',
'simulate_auto_threshold',
'state',
]
@property
def allow_advertisement(self):
return flatten_boolean(self._values['allow_advertisement'])
@property
def auto_blacklist(self):
return flatten_boolean(self._values['auto_blacklist'])
@property
def simulate_auto_threshold(self):
return flatten_boolean(self._values['simulate_auto_threshold'])
@property
def bad_actor_detection(self):
return flatten_boolean(self._values['bad_actor_detection'])
@property
def detection_threshold_percent(self):
if self._values['detection_threshold_percent'] in [None, "infinite"]:
return self._values['detection_threshold_percent']
return int(self._values['detection_threshold_percent'])
@property
def per_source_ip_mitigation_threshold(self):
if self._values['per_source_ip_mitigation_threshold'] in [None, "infinite"]:
return self._values['per_source_ip_mitigation_threshold']
return int(self._values['per_source_ip_mitigation_threshold'])
@property
def per_dest_ip_mitigation_threshold(self):
if self._values['per_dest_ip_mitigation_threshold'] in [None, "infinite"]:
return self._values['per_dest_ip_mitigation_threshold']
return int(self._values['per_dest_ip_mitigation_threshold'])
@property
def mitigation_threshold_eps(self):
if self._values['mitigation_threshold_eps'] in [None, "infinite"]:
return self._values['mitigation_threshold_eps']
return int(self._values['mitigation_threshold_eps'])
@property
def detection_threshold_eps(self):
if self._values['detection_threshold_eps'] in [None, "infinite"]:
return self._values['detection_threshold_eps']
return int(self._values['detection_threshold_eps'])
@property
def attack_ceiling(self):
if self._values['attack_ceiling'] in [None, "infinite"]:
return self._values['attack_ceiling']
return int(self._values['attack_ceiling'])
@property
def blacklist_category(self):
if self._values['blacklist_category'] is None:
return None
return fq_name(self.partition, self._values['blacklist_category'])
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def allow_advertisement(self):
if self._values['allow_advertisement'] is None:
return None
if self._values['allow_advertisement'] == 'yes':
return 'enabled'
return 'disabled'
@property
def auto_blacklist(self):
if self._values['auto_blacklist'] is None:
return None
if self._values['auto_blacklist'] == 'yes':
return 'enabled'
return 'disabled'
@property
def simulate_auto_threshold(self):
if self._values['simulate_auto_threshold'] is None:
return None
if self._values['simulate_auto_threshold'] == 'yes':
return 'enabled'
return 'disabled'
@property
def bad_actor_detection(self):
if self._values['bad_actor_detection'] is None:
return None
if self._values['bad_actor_detection'] == 'yes':
return 'enabled'
return 'disabled'
class ReportableChanges(Changes):
@property
def allow_advertisement(self):
return flatten_boolean(self._values['allow_advertisement'])
@property
def auto_blacklist(self):
return flatten_boolean(self._values['auto_blacklist'])
@property
def simulate_auto_threshold(self):
return flatten_boolean(self._values['simulate_auto_threshold'])
@property
def bad_actor_detection(self):
return flatten_boolean(self._values['bad_actor_detection'])
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
want = getattr(self.want, param)
try:
have = getattr(self.have, param)
if want != have:
return want
except AttributeError:
return want
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
# A mapping of vector name to vector config, populated from the API when
# reading current info from the device. It is needed when updating the API
# because the value that must be PATCHed is the full list of vectors, and
# PATCHing a partial list would override any default settings.
self.vectors = dict()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
return self.update()
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def format_vectors(self, vectors):
result = None
for x in vectors:
vector = ApiParameters(params=x)
self.vectors[vector.name] = x
if vector.name == self.want.name:
result = vector
if not result:
return ApiParameters()
return result
def _update(self, vtype):
self.have = self.format_vectors(self.read_current_from_device())
if not self.should_update():
return False
if self.module.check_mode:
return True
# A disabled vector does not appear in the list of existing vectors
if self.want.state == 'disabled':
if self.want.profile == 'device-config' and self.have.state == 'disabled':
return False
# For non-device-config
if self.want.name not in self.vectors:
return False
# At this point we know the existing vector is not disabled, so we need
# to change it in some way.
#
# First, if we see that the vector is in the current list of vectors,
# we are going to update it
changes = dict(self.changes.api_params())
if self.want.name in self.vectors:
self.vectors[self.want.name].update(changes)
else:
# else, we are going to add it to the list of vectors
self.vectors[self.want.name] = changes
# Since the name attribute is not a parameter tracked in the Parameter
# classes, we will add the name to the list of attributes so that when
# we update the API, it creates the correct vector
self.vectors[self.want.name].update({'name': self.want.name})
# Finally, the disabled state forces us to remove the vector from the
# list. However, items are only removed from the list if the profile
# being configured is not a device-config
if self.want.state == 'disabled':
if self.want.profile != 'device-config':
del self.vectors[self.want.name]
# All of the vectors must be re-assembled into a list of dictionaries
# so that when we PATCH the API endpoint, the vectors list is filled
# correctly.
#
# There are **not** individual API endpoints for the individual vectors.
# Instead, the endpoint includes a list of vectors that is part of the
# DoS profile
result = [v for k, v in iteritems(self.vectors)]
self.changes = Changes(params={vtype: result})
self.update_on_device()
return True
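# Illustrative sketch of the resulting PATCH body (values hypothetical):
# after _update() merges the desired vector into self.vectors, a
# dos-network update would send roughly
#
#   {"networkAttackVector": [
#       {"name": "tcp-syn-flood", "state": "mitigate", "rateLimit": 10000},
#       {"name": "udp-flood", "state": "detect-only"},
#   ]}
#
# i.e. the full vector list is always written back, never a single entry.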
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
def exec_module(self):
if self.module.params['profile'] == 'device-config':
manager = self.get_manager('v1')
elif self.module.params['name'] in NETWORK_SECURITY_VECTORS:
manager = self.get_manager('v2')
elif self.module.params['name'] in PROTOCOL_DNS_VECTORS:
manager = self.get_manager('v3')
elif self.module.params['name'] in PROTOCOL_SIP_VECTORS:
manager = self.get_manager('v4')
else:
raise F5ModuleError(
"Unknown vector type specified."
)
return manager.exec_module()
def get_manager(self, type):
if type == 'v1':
return DeviceConfigManager(**self.kwargs)
elif type == 'v2':
return NetworkSecurityManager(**self.kwargs)
elif type == 'v3':
return ProtocolDnsManager(**self.kwargs)
elif type == 'v4':
return ProtocolSipManager(**self.kwargs)
class DeviceConfigManager(BaseManager):
"""Manages AFM DoS Device Configuration settings.
DeviceConfiguration is a special type of profile that is specific to the
BIG-IP device's management interface; not the data plane interfaces.
There are many similar vectors that can be managed here. This configuration
is a super-set of the base DoS profile vector configuration and includes
several attributes per-vector that are not found in the DoS profile configuration.
These include,
* allowUpstreamScrubbing
* attackedDst
* autoScrubbing
* defaultInternalRateLimit
* detectionThresholdPercent
* detectionThresholdPps
* perDstIpDetectionPps
* perDstIpLimitPps
* scrubbingDetectionSeconds
* scrubbingDuration
"""
def __init__(self, *args, **kwargs):
super(DeviceConfigManager, self).__init__(**kwargs)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def update(self):
name = self.normalize_names_in_device_config(self.want.name)
self.want.update({'name': name})
return self._update('dosDeviceVector')
def normalize_names_in_device_config(self, name):
# Overwrite specific names because they do not align with DoS Profile names
#
# The following names (on the right) differ from the functionally equivalent
# names (on the left) found in DoS Profiles. This seems like a bug to me,
# but I do not expect it to be fixed, so this works around it in the meantime.
name_map = {
'hop-cnt-low': 'hop-cnt-leq-one',
'ip-low-ttl': 'ttl-leq-one',
}
# Attempt to normalize, else just return the name. This handles the default
# case where the name is actually correct and would not be found in the
# ``name_map`` above.
result = name_map.get(name, name)
return result
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/security/dos/device-config/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name('Common', 'dos-device-config')
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/security/dos/device-config/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name('Common', 'dos-device-config')
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
result = response.get('dosDeviceVector', [])
return result
class NetworkSecurityManager(BaseManager):
"""Manages AFM DoS Profile Network Security settings.
Network Security settings are a sub-collection attached to each profile.
There are many similar vectors that can be managed here. This configuration
is a sub-set of the device-config DoS vector configuration and excludes
several attributes per-vector that are found in the device-config configuration.
These include,
* rateIncrease
* rateLimit
* rateThreshold
"""
def __init__(self, *args, **kwargs):
super(NetworkSecurityManager, self).__init__(**kwargs)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def update(self):
return self._update('networkAttackVector')
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/security/dos/profile/{2}/dos-network/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.profile),
self.want.profile
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/security/dos/profile/{2}/dos-network/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.profile),
self.want.profile
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response.get('networkAttackVector', [])
class ProtocolDnsManager(BaseManager):
"""Manages AFM DoS Profile Protocol DNS settings.
Protocol DNS settings are a sub-collection attached to each profile.
There are many similar vectors that can be managed here. This configuration
is a sub-set of the device-config DoS vector configuration and excludes
several attributes per-vector that are found in the device-config configuration.
These include,
* rateIncrease
* rateLimit
* rateThreshold
"""
def __init__(self, *args, **kwargs):
super(ProtocolDnsManager, self).__init__(**kwargs)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def update(self):
return self._update('dnsQueryVector')
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/security/dos/profile/{2}/protocol-dns/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.profile),
self.want.profile
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/security/dos/profile/{2}/protocol-dns/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.profile),
self.want.profile
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response.get('dnsQueryVector', [])
class ProtocolSipManager(BaseManager):
"""Manages AFM DoS Profile Protocol SIP settings.
Protocol SIP settings are a sub-collection attached to each profile.
There are many similar vectors that can be managed here. This configuration
is a sub-set of the device-config DoS vector configuration and excludes
several attributes per-vector that are found in the device-config configuration.
These include:
* rateIncrease
* rateLimit
* rateThreshold
"""
def __init__(self, *args, **kwargs):
super(ProtocolSipManager, self).__init__(**kwargs)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def update(self):
return self._update('sipAttackVector')
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/security/dos/profile/{2}/protocol-sip/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.profile),
self.want.profile
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/security/dos/profile/{2}/protocol-sip/{3}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.profile),
self.want.profile
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response.get('sipAttackVector', [])
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(
required=True,
choices=[
'ext-hdr-too-large',
'hop-cnt-low',
'host-unreachable',
'icmp-frag',
'icmpv4-flood',
'icmpv6-flood',
'ip-frag-flood',
'ip-low-ttl',
'ip-opt-frames',
'ipv6-frag-flood',
'opt-present-with-illegal-len',
'sweep',
'tcp-bad-urg',
'tcp-half-open',
'tcp-opt-overruns-tcp-hdr',
'tcp-psh-flood',
'tcp-rst-flood',
'tcp-syn-flood',
'tcp-syn-oversize',
'tcp-synack-flood',
'tcp-window-size',
'tidcmp',
'too-many-ext-hdrs',
'udp-flood',
'unk-tcp-opt-type',
'a',
'aaaa',
'any',
'axfr',
'cname',
'dns-malformed',
'ixfr',
'mx',
'ns',
'other',
'ptr',
'qdcount',
'soa',
'srv',
'txt',
'ack',
'bye',
'cancel',
'invite',
'message',
'notify',
'options',
'other',
'prack',
'publish',
'register',
'sip-malformed',
'subscribe',
'uri-limit',
]
),
profile=dict(required=True),
allow_advertisement=dict(type='bool'),
auto_blacklist=dict(type='bool'),
simulate_auto_threshold=dict(type='bool'),
bad_actor_detection=dict(type='bool'),
blacklist_detection_seconds=dict(type='int'),
blacklist_duration=dict(type='int'),
attack_ceiling=dict(),
attack_floor=dict(),
per_source_ip_detection_threshold=dict(),
per_source_ip_mitigation_threshold=dict(),
# sustained_attack_detection_time=dict(),
# category_detection_time=dict(),
# per_dest_ip_detection_threshold=dict(),
# per_dest_ip_mitigation_threshold=dict(),
detection_threshold_percent=dict(
aliases=['rate_increase']
),
detection_threshold_eps=dict(
aliases=['rate_threshold']
),
mitigation_threshold_eps=dict(
aliases=['rate_limit']
),
threshold_mode=dict(
choices=['manual', 'stress-based-mitigation', 'fully-automatic']
),
state=dict(
choices=['mitigate', 'detect-only', 'learn-only', 'disabled'],
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
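# Illustrative playbook usage (a sketch; the module name below is assumed from
# F5's naming conventions, since the file name is not shown in this fragment --
# only the option names come from ArgumentSpec above):
#
#   - name: Mitigate the tcp-syn-flood vector on a DoS profile
#     bigip_firewall_dos_vector:
#       name: tcp-syn-flood
#       profile: dos_profile
#       state: mitigate
#       threshold_mode: manual
#       mitigation_threshold_eps: 10000
#       provider:
#         server: lb.mydomain.com
#         server_port: 443
#         user: admin
#         password: secret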
| gpl-3.0 |
CenterForOpenScience/osf.io | api_tests/schemas/views/test_registration_schemas_detail.py | 6 | 3903 | import pytest
from api.base.settings.defaults import API_BASE
from osf.models import RegistrationSchema
from osf_tests.factories import (
AuthUserFactory,
)
pytestmark = pytest.mark.django_db
SCHEMA_VERSION = 2
@pytest.fixture()
def user():
return AuthUserFactory()
@pytest.fixture()
def schema():
return RegistrationSchema.objects.filter(
name='Prereg Challenge',
schema_version=SCHEMA_VERSION
).first()
class TestDeprecatedMetaSchemaDetail:
def test_deprecated_metaschemas_routes(self, app, user, schema):
# test base /metaschemas/ GET with min version
url = '/{}metaschemas/?version=2.7'.format(API_BASE)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test GET with higher version
url = '/{}metaschemas/?version=2.8'.format(API_BASE)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'This route has been deprecated. It was last available in version 2.7'
# test /metaschemas/registrations/
url = '/{}metaschemas/registrations/{}/?version=2.8'.format(API_BASE, schema._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test /metaschemas/registrations/ deprecated version
url = '/{}metaschemas/registrations/{}/?version=2.9'.format(API_BASE, schema._id)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
assert res.json['errors'][0]['detail'] == 'This route has been deprecated. It was last available in version 2.8'
@pytest.mark.django_db
class TestRegistrationSchemaDetail:
def test_schemas_detail_visibility(self, app, user, schema):
# test_pass_authenticated_user_can_retrieve_schema
url = '/{}schemas/registrations/{}/'.format(API_BASE, schema._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
data = res.json['data']['attributes']
assert data['name'] == 'Prereg Challenge'
assert data['schema_version'] == 2
assert res.json['data']['id'] == schema._id
# test_pass_unauthenticated_user_can_view_schemas
res = app.get(url)
assert res.status_code == 200
# test_inactive_metaschema_returned
inactive_schema = RegistrationSchema.objects.get(
name='Election Research Preacceptance Competition', active=False)
url = '/{}schemas/registrations/{}/'.format(API_BASE, inactive_schema._id)
res = app.get(url)
assert res.status_code == 200
assert res.json['data']['attributes']['name'] == 'Election Research Preacceptance Competition'
assert res.json['data']['attributes']['active'] is False
# test_invalid_metaschema_not_found
url = '/{}schemas/registrations/garbage/'.format(API_BASE)
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 404
def test_registration_schema_schema_blocks(self, app, user, schema):
# test_authenticated_user_can_retrieve_schema_schema_blocks
url = '/{}schemas/registrations/{}/schema_blocks/'.format(API_BASE, schema._id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
# test_unauthenticated_user_can_retrieve_schema_schema_blocks
url = '/{}schemas/registrations/{}/schema_blocks/'.format(API_BASE, schema._id)
res = app.get(url)
assert res.status_code == 200
# test_schema_blocks_detail
schema_block_id = schema.schema_blocks.first()._id
url = '/{}schemas/registrations/{}/schema_blocks/{}/'.format(API_BASE, schema._id, schema_block_id)
res = app.get(url, auth=user.auth)
assert res.status_code == 200
assert res.json['data']['id'] == schema_block_id
| apache-2.0 |
realsaiko/odoo | openerp/addons/base/ir/ir_sequence.py | 83 | 14810 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import time
import openerp
from openerp.osv import osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class ir_sequence_type(openerp.osv.osv.osv):
_name = 'ir.sequence.type'
_order = 'name'
_columns = {
'name': openerp.osv.fields.char('Name', required=True),
'code': openerp.osv.fields.char('Code', size=32, required=True),
}
_sql_constraints = [
('code_unique', 'unique(code)', '`code` must be unique.'),
]
def _code_get(self, cr, uid, context=None):
cr.execute('select code, name from ir_sequence_type')
return cr.fetchall()
class ir_sequence(openerp.osv.osv.osv):
""" Sequence model.
The sequence model allows one to define and use so-called sequence objects.
Such objects are used to generate unique identifiers in a transaction-safe
way.
"""
_name = 'ir.sequence'
_order = 'name'
def _get_number_next_actual(self, cr, user, ids, field_name, arg, context=None):
'''Return the number from the ir_sequence row for the 'no_gap'
implementation, and from the PostgreSQL sequence for the 'standard' implementation.'''
res = dict.fromkeys(ids)
for element in self.browse(cr, user, ids, context=context):
if element.implementation != 'standard':
res[element.id] = element.number_next
else:
# get number from postgres sequence. Cannot use
# currval, because that might give an error when
# not having used nextval before.
statement = (
"SELECT last_value, increment_by, is_called"
" FROM ir_sequence_%03d"
% element.id)
cr.execute(statement)
(last_value, increment_by, is_called) = cr.fetchone()
if is_called:
res[element.id] = last_value + increment_by
else:
res[element.id] = last_value
return res
def _set_number_next_actual(self, cr, uid, id, name, value, args=None, context=None):
return self.write(cr, uid, id, {'number_next': value or 0}, context=context)
_columns = {
'name': openerp.osv.fields.char('Name', size=64, required=True),
'code': openerp.osv.fields.selection(_code_get, 'Sequence Type', size=64),
'implementation': openerp.osv.fields.selection( # TODO update the view
[('standard', 'Standard'), ('no_gap', 'No gap')],
'Implementation', required=True,
help="Two sequence object implementations are offered: Standard "
"and 'No gap'. The later is slower than the former but forbids any"
" gap in the sequence (while they are possible in the former)."),
'active': openerp.osv.fields.boolean('Active'),
'prefix': openerp.osv.fields.char('Prefix', help="Prefix value of the record for the sequence"),
'suffix': openerp.osv.fields.char('Suffix', help="Suffix value of the record for the sequence"),
'number_next': openerp.osv.fields.integer('Next Number', required=True, help="Next number of this sequence"),
'number_next_actual': openerp.osv.fields.function(_get_number_next_actual, fnct_inv=_set_number_next_actual, type='integer', required=True, string='Next Number', help='Next number that will be used. This number can be incremented frequently so the displayed value might already be obsolete'),
'number_increment': openerp.osv.fields.integer('Increment Number', required=True, help="The next number of the sequence will be incremented by this number"),
'padding' : openerp.osv.fields.integer('Number Padding', required=True, help="Odoo will automatically add some '0' on the left of the 'Next Number' to get the required padding size."),
'company_id': openerp.osv.fields.many2one('res.company', 'Company'),
}
_defaults = {
'implementation': 'standard',
'active': True,
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.sequence', context=c),
'number_increment': 1,
'number_next': 1,
'number_next_actual': 1,
'padding' : 0,
}
def init(self, cr):
return # Don't do the following index yet.
# CONSTRAINT/UNIQUE INDEX on (code, company_id)
# /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
# only support field names in constraint definitions, and we need a function here:
# we need to special-case company_id to treat all NULL company_id as equal, otherwise
# we would allow duplicate (code, NULL) ir_sequences.
cr.execute("""
SELECT indexname FROM pg_indexes WHERE indexname =
'ir_sequence_unique_code_company_id_idx'""")
if not cr.fetchone():
cr.execute("""
CREATE UNIQUE INDEX ir_sequence_unique_code_company_id_idx
ON ir_sequence (code, (COALESCE(company_id,-1)))""")
def _create_sequence(self, cr, id, number_increment, number_next):
""" Create a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
assert isinstance(id, (int, long))
sql = "CREATE SEQUENCE ir_sequence_%03d INCREMENT BY %%s START WITH %%s" % id
cr.execute(sql, (number_increment, number_next))
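# e.g. for id=5, increment=1, start=1 this executes (illustrative values):
#   CREATE SEQUENCE ir_sequence_005 INCREMENT BY 1 START WITH 1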
def _drop_sequence(self, cr, ids):
""" Drop the PostreSQL sequence if it exists.
There is no access rights check.
"""
ids = ids if isinstance(ids, (list, tuple)) else [ids]
assert all(isinstance(i, (int, long)) for i in ids), \
"Only ids in (int, long) allowed."
names = ','.join('ir_sequence_%03d' % i for i in ids)
# RESTRICT is the default; it prevents dropping the sequence if an
# object depends on it.
cr.execute("DROP SEQUENCE IF EXISTS %s RESTRICT " % names)
def _alter_sequence(self, cr, id, number_increment, number_next=None):
""" Alter a PostreSQL sequence.
There is no access rights check.
"""
if number_increment == 0:
raise osv.except_osv(_('Warning!'),_("Increment number must not be zero."))
assert isinstance(id, (int, long))
seq_name = 'ir_sequence_%03d' % (id,)
cr.execute("SELECT relname FROM pg_class WHERE relkind = %s AND relname=%s", ('S', seq_name))
if not cr.fetchone():
# sequence is not created yet, we're inside create() so ignore it, will be set later
return
statement = "ALTER SEQUENCE %s INCREMENT BY %d" % (seq_name, number_increment)
if number_next is not None:
statement += " RESTART WITH %d" % (number_next, )
cr.execute(statement)
def create(self, cr, uid, values, context=None):
""" Create a sequence, in implementation == standard a fast gaps-allowed PostgreSQL sequence is used.
"""
values = self._add_missing_default_values(cr, uid, values, context)
values['id'] = super(ir_sequence, self).create(cr, uid, values, context)
if values['implementation'] == 'standard':
self._create_sequence(cr, values['id'], values['number_increment'], values['number_next'])
return values['id']
def unlink(self, cr, uid, ids, context=None):
super(ir_sequence, self).unlink(cr, uid, ids, context)
self._drop_sequence(cr, ids)
return True
def write(self, cr, uid, ids, values, context=None):
if not isinstance(ids, (list, tuple)):
ids = [ids]
new_implementation = values.get('implementation')
rows = self.read(cr, uid, ids, ['implementation', 'number_increment', 'number_next'], context)
super(ir_sequence, self).write(cr, uid, ids, values, context)
for row in rows:
# 4 cases: we test the previous impl. against the new one.
i = values.get('number_increment', row['number_increment'])
n = values.get('number_next', row['number_next'])
if row['implementation'] == 'standard':
if new_implementation in ('standard', None):
# Implementation has NOT changed.
# Only change sequence if really requested.
if row['number_next'] != n:
self._alter_sequence(cr, row['id'], i, n)
else:
# Just in case only increment changed
self._alter_sequence(cr, row['id'], i)
else:
self._drop_sequence(cr, row['id'])
else:
if new_implementation in ('no_gap', None):
pass
else:
self._create_sequence(cr, row['id'], i, n)
return True
def _interpolate(self, s, d):
if s:
return s % d
return ''
def _interpolation_dict(self):
t = time.localtime() # Actually, the server is always in UTC.
return {
'year': time.strftime('%Y', t),
'month': time.strftime('%m', t),
'day': time.strftime('%d', t),
'y': time.strftime('%y', t),
'doy': time.strftime('%j', t),
'woy': time.strftime('%W', t),
'weekday': time.strftime('%w', t),
'h24': time.strftime('%H', t),
'h12': time.strftime('%I', t),
'min': time.strftime('%M', t),
'sec': time.strftime('%S', t),
}
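# These keys drive prefix/suffix interpolation in _next(); e.g. a prefix of
# 'INV/%(year)s/%(month)s/' would render as 'INV/2015/07/' (illustrative date).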
def _next(self, cr, uid, ids, context=None):
if not ids:
return False
if context is None:
context = {}
force_company = context.get('force_company')
if not force_company:
force_company = self.pool.get('res.users').browse(cr, uid, uid).company_id.id
sequences = self.read(cr, uid, ids, ['name','company_id','implementation','number_next','prefix','suffix','padding'])
preferred_sequences = [s for s in sequences if s['company_id'] and s['company_id'][0] == force_company ]
seq = preferred_sequences[0] if preferred_sequences else sequences[0]
if seq['implementation'] == 'standard':
cr.execute("SELECT nextval('ir_sequence_%03d')" % seq['id'])
seq['number_next'] = cr.fetchone()
else:
cr.execute("SELECT number_next FROM ir_sequence WHERE id=%s FOR UPDATE NOWAIT", (seq['id'],))
cr.execute("UPDATE ir_sequence SET number_next=number_next+number_increment WHERE id=%s ", (seq['id'],))
self.invalidate_cache(cr, uid, ['number_next'], [seq['id']], context=context)
d = self._interpolation_dict()
try:
interpolated_prefix = self._interpolate(seq['prefix'], d)
interpolated_suffix = self._interpolate(seq['suffix'], d)
except ValueError:
raise osv.except_osv(_('Warning'), _('Invalid prefix or suffix for sequence \'%s\'') % (seq.get('name')))
return interpolated_prefix + '%%0%sd' % seq['padding'] % seq['number_next'] + interpolated_suffix
def next_by_id(self, cr, uid, sequence_id, context=None):
""" Draw an interpolated string using the specified sequence."""
self.check_access_rights(cr, uid, 'read')
company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
ids = self.search(cr, uid, ['&',('id','=', sequence_id),('company_id','in',company_ids)])
return self._next(cr, uid, ids, context)
def next_by_code(self, cr, uid, sequence_code, context=None):
""" Draw an interpolated string using a sequence with the requested code.
If several sequences with the correct code are available to the user
(multi-company cases), the one from the user's current company will
be used.
:param dict context: context dictionary may contain a
``force_company`` key with the ID of the company to
use instead of the user's current company for the
sequence selection. A matching sequence for that
specific company will get higher priority.
"""
self.check_access_rights(cr, uid, 'read')
company_ids = self.pool.get('res.company').search(cr, uid, [], context=context) + [False]
ids = self.search(cr, uid, ['&', ('code', '=', sequence_code), ('company_id', 'in', company_ids)])
return self._next(cr, uid, ids, context)
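# Usage sketch (hypothetical sequence code): drawing the next value for an
# invoice-numbering sequence from model code would look like
#   self.pool.get('ir.sequence').next_by_code(cr, uid, 'account.invoice', context=context)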
def get_id(self, cr, uid, sequence_code_or_id, code_or_id='id', context=None):
""" Draw an interpolated string using the specified sequence.
The sequence to use is specified by the ``sequence_code_or_id``
argument, which can be a code or an id (as controlled by the
``code_or_id`` argument). This method is deprecated.
"""
# TODO: bump up to warning after 6.1 release
_logger.debug("ir_sequence.get() and ir_sequence.get_id() are deprecated. "
"Please use ir_sequence.next_by_code() or ir_sequence.next_by_id().")
if code_or_id == 'id':
return self.next_by_id(cr, uid, sequence_code_or_id, context)
else:
return self.next_by_code(cr, uid, sequence_code_or_id, context)
def get(self, cr, uid, code, context=None):
""" Draw an interpolated string using the specified sequence.
The sequence to use is specified by its code. This method is
deprecated.
"""
return self.get_id(cr, uid, code, 'code', context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hendradarwin/VTK | IO/NetCDF/Testing/Python/NetCDFCFSphericalCoords.py | 20 | 1675 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# This test checks the netCDF reader. It uses the CF convention.
# Open the file.
reader = vtk.vtkNetCDFCFReader()
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/tos_O1_2001-2002.nc")
# Set the arrays we want to load.
reader.UpdateMetaData()
reader.SetVariableArrayStatus("tos",1)
reader.SetSphericalCoordinates(1)
aa = vtk.vtkAssignAttribute()
aa.SetInputConnection(reader.GetOutputPort())
aa.Assign("tos","SCALARS","CELL_DATA")
thresh = vtk.vtkThreshold()
thresh.SetInputConnection(aa.GetOutputPort())
thresh.ThresholdByLower(10000)
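# 'tos' is sea surface temperature in Kelvin (cf. the 270-310 scalar range
# below); thresholding below 10000 presumably discards cells holding the large
# _FillValue over land, keeping only valid ocean temperatures.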
surface = vtk.vtkDataSetSurfaceFilter()
surface.SetInputConnection(thresh.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(surface.GetOutputPort())
mapper.SetScalarRange(270,310)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
renWin = vtk.vtkRenderWindow()
renWin.SetSize(200,200)
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.Render()
# # Setup a lookup table.
# vtkLookupTable lut
# lut SetTableRange 270 310
# lut SetHueRange 0.66 0.0
# lut SetRampToLinear
# # Make pretty colors
# vtkImageMapToColors map
# map SetInputConnection [asinine GetOutputPort]
# map SetLookupTable lut
# map SetOutputFormatToRGB
# # vtkImageViewer viewer
# # viewer SetInputConnection [map GetOutputPort]
# # viewer SetColorWindow 256
# # viewer SetColorLevel 127.5
# # viewer Render
# vtkImageViewer2 viewer
# viewer SetInputConnection [map GetOutputPort]
# viewer Render
# --- end of script --
| bsd-3-clause |
tsdmgz/ansible | lib/ansible/modules/network/avi/avi_controllerproperties.py | 27 | 13790 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_controllerproperties
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of ControllerProperties Avi RESTful Object
description:
- This module is used to configure ControllerProperties object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
allow_ip_forwarding:
description:
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
allow_unauthenticated_apis:
description:
- Allow unauthenticated access for special apis.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
allow_unauthenticated_nodes:
description:
- Boolean flag to set allow_unauthenticated_nodes.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
api_idle_timeout:
description:
- Allowed values are 0-1440.
- Default value when not specified in API or module is interpreted by Avi Controller as 15.
appviewx_compat_mode:
description:
- Export configuration in appviewx compatibility mode.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
attach_ip_retry_interval:
description:
- Number of attach_ip_retry_interval.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
attach_ip_retry_limit:
description:
- Number of attach_ip_retry_limit.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
cluster_ip_gratuitous_arp_period:
description:
- Number of cluster_ip_gratuitous_arp_period.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
crashed_se_reboot:
description:
- Number of crashed_se_reboot.
- Default value when not specified in API or module is interpreted by Avi Controller as 900.
dead_se_detection_timer:
description:
- Number of dead_se_detection_timer.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
dns_refresh_period:
description:
- Number of dns_refresh_period.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
dummy:
description:
- Number of dummy.
fatal_error_lease_time:
description:
- Number of fatal_error_lease_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
max_dead_se_in_grp:
description:
- Number of max_dead_se_in_grp.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
max_pcap_per_tenant:
description:
- Maximum number of pcap files stored per tenant.
- Default value when not specified in API or module is interpreted by Avi Controller as 4.
max_seq_vnic_failures:
description:
- Number of max_seq_vnic_failures.
- Default value when not specified in API or module is interpreted by Avi Controller as 3.
persistence_key_rotate_period:
description:
- Allowed values are 1-1051200.
- Special values are 0 - 'disabled'.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
portal_token:
description:
- Token used for uploading tech-support to portal.
- Field introduced in 16.4.6,17.1.2.
version_added: "2.4"
query_host_fail:
description:
- Number of query_host_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
se_create_timeout:
description:
- Number of se_create_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 900.
se_failover_attempt_interval:
description:
- Interval between attempting failovers to an se.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
se_offline_del:
description:
- Number of se_offline_del.
- Default value when not specified in API or module is interpreted by Avi Controller as 172000.
se_vnic_cooldown:
description:
- Number of se_vnic_cooldown.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
secure_channel_cleanup_timeout:
description:
- Number of secure_channel_cleanup_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
secure_channel_controller_token_timeout:
description:
- Number of secure_channel_controller_token_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
secure_channel_se_token_timeout:
description:
- Number of secure_channel_se_token_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
seupgrade_fabric_pool_size:
description:
- Pool size used for all fabric commands during se upgrade.
- Default value when not specified in API or module is interpreted by Avi Controller as 20.
seupgrade_segroup_min_dead_timeout:
description:
- Time to wait before marking segroup upgrade as stuck.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
ssl_certificate_expiry_warning_days:
description:
- Number of days for ssl certificate expiry warning.
unresponsive_se_reboot:
description:
- Number of unresponsive_se_reboot.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
upgrade_dns_ttl:
description:
- Time to account for dns ttl during upgrade.
- This is in addition to vs_scalein_timeout_for_upgrade in se_group.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as 5.
upgrade_lease_time:
description:
- Number of upgrade_lease_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vnic_op_fail_time:
description:
- Number of vnic_op_fail_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 180.
vs_apic_scaleout_timeout:
description:
- Time to wait for the scaled out se to become ready before marking the scaleout done, applies to apic configuration only.
- Default value when not specified in API or module is interpreted by Avi Controller as 360.
vs_awaiting_se_timeout:
description:
- Number of vs_awaiting_se_timeout.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
vs_key_rotate_period:
description:
- Allowed values are 1-1051200.
- Special values are 0 - 'disabled'.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
vs_se_bootup_fail:
description:
- Number of vs_se_bootup_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
vs_se_create_fail:
description:
- Number of vs_se_create_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
vs_se_ping_fail:
description:
- Number of vs_se_ping_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 60.
vs_se_vnic_fail:
description:
- Number of vs_se_vnic_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
vs_se_vnic_ip_fail:
description:
- Number of vs_se_vnic_ip_fail.
- Default value when not specified in API or module is interpreted by Avi Controller as 120.
warmstart_se_reconnect_wait_time:
description:
- Number of warmstart_se_reconnect_wait_time.
- Default value when not specified in API or module is interpreted by Avi Controller as 300.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create ControllerProperties object
avi_controllerproperties:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_controllerproperties
"""
RETURN = '''
obj:
description: ControllerProperties (api/controllerproperties) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
allow_ip_forwarding=dict(type='bool',),
allow_unauthenticated_apis=dict(type='bool',),
allow_unauthenticated_nodes=dict(type='bool',),
api_idle_timeout=dict(type='int',),
appviewx_compat_mode=dict(type='bool',),
attach_ip_retry_interval=dict(type='int',),
attach_ip_retry_limit=dict(type='int',),
cluster_ip_gratuitous_arp_period=dict(type='int',),
crashed_se_reboot=dict(type='int',),
dead_se_detection_timer=dict(type='int',),
dns_refresh_period=dict(type='int',),
dummy=dict(type='int',),
fatal_error_lease_time=dict(type='int',),
max_dead_se_in_grp=dict(type='int',),
max_pcap_per_tenant=dict(type='int',),
max_seq_vnic_failures=dict(type='int',),
persistence_key_rotate_period=dict(type='int',),
portal_token=dict(type='str', no_log=True,),
query_host_fail=dict(type='int',),
se_create_timeout=dict(type='int',),
se_failover_attempt_interval=dict(type='int',),
se_offline_del=dict(type='int',),
se_vnic_cooldown=dict(type='int',),
secure_channel_cleanup_timeout=dict(type='int',),
secure_channel_controller_token_timeout=dict(type='int',),
secure_channel_se_token_timeout=dict(type='int',),
seupgrade_fabric_pool_size=dict(type='int',),
seupgrade_segroup_min_dead_timeout=dict(type='int',),
ssl_certificate_expiry_warning_days=dict(type='list',),
unresponsive_se_reboot=dict(type='int',),
upgrade_dns_ttl=dict(type='int',),
upgrade_lease_time=dict(type='int',),
url=dict(type='str',),
uuid=dict(type='str',),
vnic_op_fail_time=dict(type='int',),
vs_apic_scaleout_timeout=dict(type='int',),
vs_awaiting_se_timeout=dict(type='int',),
vs_key_rotate_period=dict(type='int',),
vs_se_bootup_fail=dict(type='int',),
vs_se_create_fail=dict(type='int',),
vs_se_ping_fail=dict(type='int',),
vs_se_vnic_fail=dict(type='int',),
vs_se_vnic_ip_fail=dict(type='int',),
warmstart_se_reconnect_wait_time=dict(type='int',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'controllerproperties',
set(['portal_token']))
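# The final set argument marks fields for avi_ansible_api to treat as
# sensitive; presumably this keeps 'portal_token' out of logs and diffs.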
if __name__ == '__main__':
main()
| gpl-3.0 |
DavidLP/home-assistant | homeassistant/components/cisco_webex_teams/notify.py | 7 | 1843 | """Cisco Webex Teams notify component."""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
PLATFORM_SCHEMA, BaseNotificationService, ATTR_TITLE)
from homeassistant.const import (CONF_TOKEN)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ROOM_ID = 'room_id'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_ROOM_ID): cv.string,
})
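# Example configuration.yaml entry (illustrative values; only 'platform',
# 'token' and 'room_id' are defined by the schema above):
#
# notify:
#   - platform: cisco_webex_teams
#     token: ABCDEF...
#     room_id: Y2lzY29zcGFyazovL3VzL1JPT00v...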
def get_service(hass, config, discovery_info=None):
"""Get the CiscoWebexTeams notification service."""
from webexteamssdk import WebexTeamsAPI, exceptions
client = WebexTeamsAPI(access_token=config[CONF_TOKEN])
try:
# Validate the token & room_id
client.rooms.get(config[CONF_ROOM_ID])
except exceptions.ApiError as error:
_LOGGER.error(error)
return None
return CiscoWebexTeamsNotificationService(
client,
config[CONF_ROOM_ID])
class CiscoWebexTeamsNotificationService(BaseNotificationService):
"""The Cisco Webex Teams Notification Service."""
def __init__(self, client, room):
"""Initialize the service."""
self.room = room
self.client = client
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
from webexteamssdk import ApiError
title = ""
if kwargs.get(ATTR_TITLE) is not None:
title = "{}{}".format(kwargs.get(ATTR_TITLE), "<br>")
try:
self.client.messages.create(roomId=self.room,
html="{}{}".format(title, message))
except ApiError as api_error:
_LOGGER.error("Could not send CiscoWebexTeams notification. "
"Error: %s",
api_error)
| apache-2.0 |
tonybenny2004/or-tools | examples/python/assignment.py | 34 | 3407 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Assignment problem in Google CP Solver.
Winston 'Operations Research', Assignment Problems, page 393f
(generalized version with added test column)
Compare with the following models:
* Comet : http://www.hakank.org/comet/assignment.co
* ECLiPSE : http://www.hakank.org/eclipse/assignment.ecl
* Gecode : http://www.hakank.org/gecode/assignment.cpp
* MiniZinc: http://www.hakank.org/minizinc/assignment.mzn
* Tailor/Essence': http://www.hakank.org/tailor/assignment.eprime
* SICStus: http://hakank.org/sicstus/assignment.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
from ortools.constraint_solver import pywrapcp
def main(cost, rows, cols):
# Create the solver.
solver = pywrapcp.Solver("n-queens")
#
# data
#
# declare variables
total_cost = solver.IntVar(0, 100, "total_cost")
x = []
for i in range(rows):
t = []
for j in range(cols):
t.append(solver.IntVar(0, 1, "x[%i,%i]" % (i, j)))
x.append(t)
x_flat = [x[i][j] for i in range(rows) for j in range(cols)]
#
# constraints
#
# total_cost
solver.Add(
total_cost == solver.Sum(
[solver.ScalProd(x_row, cost_row) for (x_row, cost_row) in zip(
x, cost)]))
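# i.e. total_cost == sum over all (i, j) of cost[i][j] * x[i][j], expressed as
# a scalar product of each row of assignment variables with its cost row.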
# exacly one assignment per row, all rows must be assigned
[solver.Add(solver.Sum([x[row][j] for j in range(cols)]) == 1)
for row in range(rows)]
# zero or one assignments per column
[solver.Add(solver.Sum([x[i][col] for i in range(rows)]) <= 1)
for col in range(cols)]
objective = solver.Minimize(total_cost, 1)
#
# solution and search
#
solution = solver.Assignment()
solution.Add(x_flat)
solution.Add(total_cost)
# db: DecisionBuilder
db = solver.Phase(x_flat,
solver.INT_VAR_SIMPLE,
solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
print "total_cost:", total_cost.Value()
for i in range(rows):
for j in range(cols):
print x[i][j].Value(),
print
print
for i in range(rows):
print "Task:", i,
for j in range(cols):
if x[i][j].Value() == 1:
print " is done by ", j
print
num_solutions += 1
solver.EndSearch()
print
print "num_solutions:", num_solutions
print "failures:", solver.Failures()
print "branches:", solver.Branches()
print "WallTime:", solver.WallTime()
# Problem instance
# hakank: I added the fifth column to make it more
# interesting
rows = 4
cols = 5
cost = [[14, 5, 8, 7, 15],
[2, 12, 6, 5, 3],
[7, 8, 3, 9, 7],
[2, 4, 6, 10, 1]]
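# cost[i][j]: cost of having task i carried out by assignee j (cf. the
# "Task: i ... is done by j" printout in main above).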
if __name__ == "__main__":
main(cost, rows, cols)
| apache-2.0 |
loriab/qcdb | databases/BBI.py | 2 | 151122 | #
# @BEGIN LICENSE
#
# QCDB: quantum chemistry common driver and databases
#
# Copyright (c) 2011-2017 The QCDB Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of QCDB.
#
# QCDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# QCDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with QCDB; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Merz) of protein backbone-backbone interactions.
| Geometries from Kenneth Merz Group, Univ. of Florida.
| Reference interaction energies from Sherrill group, Georgia Tech.
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'``
"""
import re
import qcdb
# <<< BBI Database Module >>>
dbse = 'BBI'
# <<< Database Members >>>
HRXN = ['004GLU-063LEU-2', '005ASP-008SER-2', '005LEU-008VAL-2', '007GLN-010ASN-1', '007VAL-041VAL-1',
'008ILE-012LEU-1', '008TRP-251HIE-2', '011ILE-014PHE-1', '012LEU-085ASP-1', '014LYS-018GLU-1',
'014VAL-017GLU-1', '015GLN-018LEU-1', '015LEU-026LEU-1', '017ASN-021LYS-2', '017LEU-025ILE-1',
'019LEU-022MET-1', '023LYS-146SER-2', '024PHE-028ALA-1', '024THR-041GLU-2', '025GLY-029SER-2',
'025ILE-070TRP-1', '027VAL-031LEU-1', '027VAL-068GLU-2', '029GLU-032VAL-1', '030TRP-178GLY-2',
'033ASN-036TRP-2', '033VAL-115VAL-1', '035TRP-056ILE-1', '037PRO-041ALA-1', '038ALA-041GLY-1',
'038GLU-047GLU-2', '039SER-046MET-1', '040THR-048VAL-2', '040THR-049GLN-2', '041GLY-045LEU-2',
'041LEU-045LEU-1', '042LEU-047ILE-2', '044SER-047GLU-1', '044TRP-054LEU-1', '048GLU-052LYS-1',
'051ALA-054VAL-1', '051ALA-055ASN-1', '052CYS-056ALA-1', '054ARG-062GLN-1', '055ASN-058GLU-2',
'060LEU-064TYR-1', '060TYR-064LEU-2', '061VAL-064TYR-2', '062LEU-066LYS-2', '064GLN-067ARG-1',
'064TYR-067GLU-1', '066PHE-072THR-1', '068ASP-072GLN-1', '071GLU-075ASP-1', '072ASN-075ARG-2',
'072THR-075PRO-1', '081ASN-084GLY-1', '081LEU-084LYS-1', '082LEU-106LEU-2', '084LEU-088MET-2',
'087ALA-171PRO-2', '087LEU-090TYR-1', '088PHE-091ALA-1', '089MET-093GLY-1', '092SER-096ARG-2',
'095GLN-183ILE-1', '095LYS-107ILE-2', '097GLU-100THR-2', '102GLN-106ILE-1', '104VAL-108ILE-1',
'108LYS-112TYR-1', '108TYR-129TRP-1', '112SER-115ALA-2', '112TRP-115ARG-1', '113TRP-124HIE-2',
'115GLN-118ARG-2', '119MET-122VAL-1', '121LYS-125ALA-1', '121VAL-155THR-2', '126VAL-129ALA-1',
'133LYS-137ASN-1', '134GLU-138ARG-1', '135ARG-152ALA-2', '137SER-144LYS-1', '140SER-144THR-2',
'142ALA-146PHE-1', '143VAL-147GLU-1', '146PHE-150LEU-1', '150LYS-158LEU-2', '157LYS-160VAL-1',
'162ALA-176GLY-1', '167GLY-232GLY-1', '168GLY-172LYS-2', '171ALA-175GLU-1', '172GLY-175TRP-1',
'197TYR-201LEU-2', '199SER-202TYR-1', '205THR-208GLU-1', '205THR-209LEU-2', '228ARG-232GLU-1']
HRXN_SHB = [
'004GLU-063LEU-2', '005LEU-008VAL-2', '007VAL-041VAL-1', '008ILE-012LEU-1', '008TRP-251HIE-2',
'012LEU-085ASP-1', '014LYS-018GLU-1', '014VAL-017GLU-1', '015LEU-026LEU-1', '017ASN-021LYS-2',
'017LEU-025ILE-1', '019LEU-022MET-1', '023LYS-146SER-2', '024PHE-028ALA-1', '024THR-041GLU-2',
'025GLY-029SER-2', '025ILE-070TRP-1', '027VAL-031LEU-1', '027VAL-068GLU-2', '030TRP-178GLY-2',
'033ASN-036TRP-2', '033VAL-115VAL-1', '035TRP-056ILE-1', '037PRO-041ALA-1', '038GLU-047GLU-2',
'039SER-046MET-1', '040THR-048VAL-2', '040THR-049GLN-2', '041GLY-045LEU-2', '041LEU-045LEU-1',
'042LEU-047ILE-2', '044TRP-054LEU-1', '048GLU-052LYS-1', '051ALA-054VAL-1', '051ALA-055ASN-1',
'052CYS-056ALA-1', '054ARG-062GLN-1', '055ASN-058GLU-2', '060LEU-064TYR-1', '060TYR-064LEU-2',
'061VAL-064TYR-2', '062LEU-066LYS-2', '064TYR-067GLU-1', '068ASP-072GLN-1', '071GLU-075ASP-1',
'081ASN-084GLY-1', '082LEU-106LEU-2', '084LEU-088MET-2', '087ALA-171PRO-2', '087LEU-090TYR-1',
'089MET-093GLY-1', '092SER-096ARG-2', '095GLN-183ILE-1', '095LYS-107ILE-2', '097GLU-100THR-2',
'102GLN-106ILE-1', '104VAL-108ILE-1', '108LYS-112TYR-1', '108TYR-129TRP-1', '113TRP-124HIE-2',
'115GLN-118ARG-2', '121LYS-125ALA-1', '121VAL-155THR-2', '133LYS-137ASN-1', '134GLU-138ARG-1',
'135ARG-152ALA-2', '137SER-144LYS-1', '140SER-144THR-2', '142ALA-146PHE-1', '143VAL-147GLU-1',
'146PHE-150LEU-1', '150LYS-158LEU-2', '157LYS-160VAL-1', '162ALA-176GLY-1', '167GLY-232GLY-1',
'168GLY-172LYS-2', '172GLY-175TRP-1', '197TYR-201LEU-2', '205THR-209LEU-2', '228ARG-232GLU-1']
HRXN_UA = [
'005ASP-008SER-2', '007GLN-010ASN-1', '011ILE-014PHE-1', '015GLN-018LEU-1', '029GLU-032VAL-1',
'038ALA-041GLY-1', '044SER-047GLU-1', '064GLN-067ARG-1', '066PHE-072THR-1', '072ASN-075ARG-2',
'072THR-075PRO-1', '081LEU-084LYS-1', '088PHE-091ALA-1', '112SER-115ALA-2', '112TRP-115ARG-1',
'119MET-122VAL-1', '126VAL-129ALA-1', '171ALA-175GLU-1', '199SER-202TYR-1', '205THR-208GLU-1']
HRXN_BBI25 = [
'007VAL-041VAL-1', '008TRP-251HIE-2', '012LEU-085ASP-1', '015LEU-026LEU-1', '017ASN-021LYS-2',
'030TRP-178GLY-2', '033ASN-036TRP-2', '033VAL-115VAL-1', '038ALA-041GLY-1', '040THR-049GLN-2',
'041GLY-045LEU-2', '044SER-047GLU-1', '044TRP-054LEU-1', '061VAL-064TYR-2', '072THR-075PRO-1',
'081LEU-084LYS-1', '084LEU-088MET-2', '087LEU-090TYR-1', '108LYS-112TYR-1', '108TYR-129TRP-1',
'112SER-115ALA-2', '121VAL-155THR-2', '171ALA-175GLU-1', '197TYR-201LEU-2', '228ARG-232GLU-1']
# <<< Chemical Systems Involved >>>
RXNM = {} # reaction matrix of reagent contributions per reaction
ACTV = {} # order of active reagents per reaction
ACTV_CP = {} # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {} # order of active reagents for non-supermolecular calculations
for rxn in HRXN:
RXNM[ '%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
'%s-%s-monoA-CP' % (dbse, rxn) : -1,
'%s-%s-monoB-CP' % (dbse, rxn) : -1,
'%s-%s-monoA-unCP' % (dbse, rxn) : -1,
'%s-%s-monoB-unCP' % (dbse, rxn) : -1 }
ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]
ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-CP' % (dbse, rxn),
'%s-%s-monoB-CP' % (dbse, rxn) ]
ACTV[ '%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
'%s-%s-monoA-unCP' % (dbse, rxn),
'%s-%s-monoB-unCP' % (dbse, rxn) ]
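# i.e. each interaction energy is assembled supermolecularly as
#   IE = E(dimer) - E(monoA) - E(monoB),
# where, by the usual counterpoise convention, the -CP monomers are evaluated
# in the full dimer basis and the -unCP monomers in their own monomer basis.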
# <<< Reference Values [kcal/mol] >>>
BIND = {}
# Bronze
BIND_BRONZE = {}
BIND_BRONZE['%s-%s' % (dbse, '004GLU-063LEU-2')] = -8.393
BIND_BRONZE['%s-%s' % (dbse, '005ASP-008SER-2')] = -1.518
BIND_BRONZE['%s-%s' % (dbse, '005LEU-008VAL-2')] = -6.233
BIND_BRONZE['%s-%s' % (dbse, '007GLN-010ASN-1')] = -1.442
BIND_BRONZE['%s-%s' % (dbse, '007VAL-041VAL-1')] = -6.114
BIND_BRONZE['%s-%s' % (dbse, '008ILE-012LEU-1')] = -7.172
BIND_BRONZE['%s-%s' % (dbse, '008TRP-251HIE-2')] = -7.615
BIND_BRONZE['%s-%s' % (dbse, '011ILE-014PHE-1')] = -1.098
BIND_BRONZE['%s-%s' % (dbse, '012LEU-085ASP-1')] = -8.072
BIND_BRONZE['%s-%s' % (dbse, '014LYS-018GLU-1')] = -7.073
BIND_BRONZE['%s-%s' % (dbse, '014VAL-017GLU-1')] = -7.550
BIND_BRONZE['%s-%s' % (dbse, '015GLN-018LEU-1')] = -1.272
BIND_BRONZE['%s-%s' % (dbse, '015LEU-026LEU-1')] = -7.406
BIND_BRONZE['%s-%s' % (dbse, '017ASN-021LYS-2')] = -6.291
BIND_BRONZE['%s-%s' % (dbse, '017LEU-025ILE-1')] = -6.664
BIND_BRONZE['%s-%s' % (dbse, '019LEU-022MET-1')] = -5.842
BIND_BRONZE['%s-%s' % (dbse, '023LYS-146SER-2')] = -7.712
BIND_BRONZE['%s-%s' % (dbse, '024PHE-028ALA-1')] = -7.167
BIND_BRONZE['%s-%s' % (dbse, '024THR-041GLU-2')] = -7.294
BIND_BRONZE['%s-%s' % (dbse, '025GLY-029SER-2')] = -6.834
BIND_BRONZE['%s-%s' % (dbse, '025ILE-070TRP-1')] = -7.910
BIND_BRONZE['%s-%s' % (dbse, '027VAL-031LEU-1')] = -7.772
BIND_BRONZE['%s-%s' % (dbse, '027VAL-068GLU-2')] = -7.712
BIND_BRONZE['%s-%s' % (dbse, '029GLU-032VAL-1')] = -1.399
BIND_BRONZE['%s-%s' % (dbse, '030TRP-178GLY-2')] = -8.039
BIND_BRONZE['%s-%s' % (dbse, '033ASN-036TRP-2')] = 1.328
BIND_BRONZE['%s-%s' % (dbse, '033VAL-115VAL-1')] = -7.858
BIND_BRONZE['%s-%s' % (dbse, '035TRP-056ILE-1')] = -7.840
BIND_BRONZE['%s-%s' % (dbse, '037PRO-041ALA-1')] = -6.672
BIND_BRONZE['%s-%s' % (dbse, '038ALA-041GLY-1')] = -1.134
BIND_BRONZE['%s-%s' % (dbse, '038GLU-047GLU-2')] = -4.753
BIND_BRONZE['%s-%s' % (dbse, '039SER-046MET-1')] = -7.258
BIND_BRONZE['%s-%s' % (dbse, '040THR-048VAL-2')] = -7.407
BIND_BRONZE['%s-%s' % (dbse, '040THR-049GLN-2')] = -8.172
BIND_BRONZE['%s-%s' % (dbse, '041GLY-045LEU-2')] = -7.460
BIND_BRONZE['%s-%s' % (dbse, '041LEU-045LEU-1')] = -7.310
BIND_BRONZE['%s-%s' % (dbse, '042LEU-047ILE-2')] = -7.191
BIND_BRONZE['%s-%s' % (dbse, '044SER-047GLU-1')] = -2.585
BIND_BRONZE['%s-%s' % (dbse, '044TRP-054LEU-1')] = -7.819
BIND_BRONZE['%s-%s' % (dbse, '048GLU-052LYS-1')] = -6.567
BIND_BRONZE['%s-%s' % (dbse, '051ALA-054VAL-1')] = 2.681
BIND_BRONZE['%s-%s' % (dbse, '051ALA-055ASN-1')] = -6.170
BIND_BRONZE['%s-%s' % (dbse, '052CYS-056ALA-1')] = -6.957
BIND_BRONZE['%s-%s' % (dbse, '054ARG-062GLN-1')] = -6.344
BIND_BRONZE['%s-%s' % (dbse, '055ASN-058GLU-2')] = -5.542
BIND_BRONZE['%s-%s' % (dbse, '060LEU-064TYR-1')] = -6.335
BIND_BRONZE['%s-%s' % (dbse, '060TYR-064LEU-2')] = -7.322
BIND_BRONZE['%s-%s' % (dbse, '061VAL-064TYR-2')] = -5.629
BIND_BRONZE['%s-%s' % (dbse, '062LEU-066LYS-2')] = -7.169
BIND_BRONZE['%s-%s' % (dbse, '064GLN-067ARG-1')] = -1.442
BIND_BRONZE['%s-%s' % (dbse, '064TYR-067GLU-1')] = -6.343
BIND_BRONZE['%s-%s' % (dbse, '066PHE-072THR-1')] = -1.684
BIND_BRONZE['%s-%s' % (dbse, '068ASP-072GLN-1')] = -3.610
BIND_BRONZE['%s-%s' % (dbse, '071GLU-075ASP-1')] = -7.049
BIND_BRONZE['%s-%s' % (dbse, '072ASN-075ARG-2')] = -1.244
BIND_BRONZE['%s-%s' % (dbse, '072THR-075PRO-1')] = -2.038
BIND_BRONZE['%s-%s' % (dbse, '081ASN-084GLY-1')] = -5.923
BIND_BRONZE['%s-%s' % (dbse, '081LEU-084LYS-1')] = -1.442
BIND_BRONZE['%s-%s' % (dbse, '082LEU-106LEU-2')] = -7.618
BIND_BRONZE['%s-%s' % (dbse, '084LEU-088MET-2')] = -7.225
BIND_BRONZE['%s-%s' % (dbse, '087ALA-171PRO-2')] = -8.151
BIND_BRONZE['%s-%s' % (dbse, '087LEU-090TYR-1')] = -6.068
BIND_BRONZE['%s-%s' % (dbse, '088PHE-091ALA-1')] = -1.387
BIND_BRONZE['%s-%s' % (dbse, '089MET-093GLY-1')] = -6.694
BIND_BRONZE['%s-%s' % (dbse, '092SER-096ARG-2')] = -7.368
BIND_BRONZE['%s-%s' % (dbse, '095GLN-183ILE-1')] = -8.222
BIND_BRONZE['%s-%s' % (dbse, '095LYS-107ILE-2')] = -7.781
BIND_BRONZE['%s-%s' % (dbse, '097GLU-100THR-2')] = -5.213
BIND_BRONZE['%s-%s' % (dbse, '102GLN-106ILE-1')] = -6.443
BIND_BRONZE['%s-%s' % (dbse, '104VAL-108ILE-1')] = -7.408
BIND_BRONZE['%s-%s' % (dbse, '108LYS-112TYR-1')] = -7.210
BIND_BRONZE['%s-%s' % (dbse, '108TYR-129TRP-1')] = -7.356
BIND_BRONZE['%s-%s' % (dbse, '112SER-115ALA-2')] = -1.716
BIND_BRONZE['%s-%s' % (dbse, '112TRP-115ARG-1')] = -1.971
BIND_BRONZE['%s-%s' % (dbse, '113TRP-124HIE-2')] = -7.921
BIND_BRONZE['%s-%s' % (dbse, '115GLN-118ARG-2')] = -6.439
BIND_BRONZE['%s-%s' % (dbse, '119MET-122VAL-1')] = -1.319
BIND_BRONZE['%s-%s' % (dbse, '121LYS-125ALA-1')] = -5.983
BIND_BRONZE['%s-%s' % (dbse, '121VAL-155THR-2')] = -7.257
BIND_BRONZE['%s-%s' % (dbse, '126VAL-129ALA-1')] = -1.269
BIND_BRONZE['%s-%s' % (dbse, '133LYS-137ASN-1')] = -7.240
BIND_BRONZE['%s-%s' % (dbse, '134GLU-138ARG-1')] = -7.140
BIND_BRONZE['%s-%s' % (dbse, '135ARG-152ALA-2')] = -7.124
BIND_BRONZE['%s-%s' % (dbse, '137SER-144LYS-1')] = -7.506
BIND_BRONZE['%s-%s' % (dbse, '140SER-144THR-2')] = -7.447
BIND_BRONZE['%s-%s' % (dbse, '142ALA-146PHE-1')] = -7.431
BIND_BRONZE['%s-%s' % (dbse, '143VAL-147GLU-1')] = -7.213
BIND_BRONZE['%s-%s' % (dbse, '146PHE-150LEU-1')] = -7.044
BIND_BRONZE['%s-%s' % (dbse, '150LYS-158LEU-2')] = -8.308
BIND_BRONZE['%s-%s' % (dbse, '157LYS-160VAL-1')] = 2.056
BIND_BRONZE['%s-%s' % (dbse, '162ALA-176GLY-1')] = -8.225
BIND_BRONZE['%s-%s' % (dbse, '167GLY-232GLY-1')] = -6.892
BIND_BRONZE['%s-%s' % (dbse, '168GLY-172LYS-2')] = -7.259
BIND_BRONZE['%s-%s' % (dbse, '171ALA-175GLU-1')] = -1.671
BIND_BRONZE['%s-%s' % (dbse, '172GLY-175TRP-1')] = -5.146
BIND_BRONZE['%s-%s' % (dbse, '197TYR-201LEU-2')] = -6.637
BIND_BRONZE['%s-%s' % (dbse, '199SER-202TYR-1')] = -1.087
BIND_BRONZE['%s-%s' % (dbse, '205THR-208GLU-1')] = -1.028
BIND_BRONZE['%s-%s' % (dbse, '205THR-209LEU-2')] = -5.627
BIND_BRONZE['%s-%s' % (dbse, '228ARG-232GLU-1')] = -7.350
# Silver
BIND_SILVER = {}
BIND_SILVER['%s-%s' % (dbse, '004GLU-063LEU-2')] = -8.623
BIND_SILVER['%s-%s' % (dbse, '005ASP-008SER-2')] = -1.589
BIND_SILVER['%s-%s' % (dbse, '005LEU-008VAL-2')] = -6.388
BIND_SILVER['%s-%s' % (dbse, '007GLN-010ASN-1')] = -1.510
BIND_SILVER['%s-%s' % (dbse, '007VAL-041VAL-1')] = -6.366
BIND_SILVER['%s-%s' % (dbse, '008ILE-012LEU-1')] = -7.410
BIND_SILVER['%s-%s' % (dbse, '008TRP-251HIE-2')] = -7.771
BIND_SILVER['%s-%s' % (dbse, '011ILE-014PHE-1')] = -1.180
BIND_SILVER['%s-%s' % (dbse, '012LEU-085ASP-1')] = -8.281
BIND_SILVER['%s-%s' % (dbse, '014LYS-018GLU-1')] = -7.312
BIND_SILVER['%s-%s' % (dbse, '014VAL-017GLU-1')] = -7.767
BIND_SILVER['%s-%s' % (dbse, '015GLN-018LEU-1')] = -1.348
BIND_SILVER['%s-%s' % (dbse, '015LEU-026LEU-1')] = -7.651
BIND_SILVER['%s-%s' % (dbse, '017ASN-021LYS-2')] = -6.497
BIND_SILVER['%s-%s' % (dbse, '017LEU-025ILE-1')] = -6.884
BIND_SILVER['%s-%s' % (dbse, '019LEU-022MET-1')] = -5.970
BIND_SILVER['%s-%s' % (dbse, '023LYS-146SER-2')] = -7.922
BIND_SILVER['%s-%s' % (dbse, '024PHE-028ALA-1')] = -7.448
BIND_SILVER['%s-%s' % (dbse, '024THR-041GLU-2')] = -7.496
BIND_SILVER['%s-%s' % (dbse, '025GLY-029SER-2')] = -7.075
BIND_SILVER['%s-%s' % (dbse, '025ILE-070TRP-1')] = -8.120
BIND_SILVER['%s-%s' % (dbse, '027VAL-031LEU-1')] = -8.041
BIND_SILVER['%s-%s' % (dbse, '027VAL-068GLU-2')] = -7.943
BIND_SILVER['%s-%s' % (dbse, '029GLU-032VAL-1')] = -1.472
BIND_SILVER['%s-%s' % (dbse, '030TRP-178GLY-2')] = -8.247
BIND_SILVER['%s-%s' % (dbse, '033ASN-036TRP-2')] = 1.229
BIND_SILVER['%s-%s' % (dbse, '033VAL-115VAL-1')] = -8.073
BIND_SILVER['%s-%s' % (dbse, '035TRP-056ILE-1')] = -8.082
BIND_SILVER['%s-%s' % (dbse, '037PRO-041ALA-1')] = -6.917
BIND_SILVER['%s-%s' % (dbse, '038ALA-041GLY-1')] = -1.191
BIND_SILVER['%s-%s' % (dbse, '038GLU-047GLU-2')] = -4.865
BIND_SILVER['%s-%s' % (dbse, '039SER-046MET-1')] = -7.468
BIND_SILVER['%s-%s' % (dbse, '040THR-048VAL-2')] = -7.639
BIND_SILVER['%s-%s' % (dbse, '040THR-049GLN-2')] = -8.407
BIND_SILVER['%s-%s' % (dbse, '041GLY-045LEU-2')] = -7.697
BIND_SILVER['%s-%s' % (dbse, '041LEU-045LEU-1')] = -7.561
BIND_SILVER['%s-%s' % (dbse, '042LEU-047ILE-2')] = -7.426
BIND_SILVER['%s-%s' % (dbse, '044SER-047GLU-1')] = -2.638
BIND_SILVER['%s-%s' % (dbse, '044TRP-054LEU-1')] = -8.043
BIND_SILVER['%s-%s' % (dbse, '048GLU-052LYS-1')] = -6.828
BIND_SILVER['%s-%s' % (dbse, '051ALA-054VAL-1')] = 2.680
BIND_SILVER['%s-%s' % (dbse, '051ALA-055ASN-1')] = -6.380
BIND_SILVER['%s-%s' % (dbse, '052CYS-056ALA-1')] = -7.166
BIND_SILVER['%s-%s' % (dbse, '054ARG-062GLN-1')] = -6.516
BIND_SILVER['%s-%s' % (dbse, '055ASN-058GLU-2')] = -5.672
BIND_SILVER['%s-%s' % (dbse, '060LEU-064TYR-1')] = -6.531
BIND_SILVER['%s-%s' % (dbse, '060TYR-064LEU-2')] = -7.576
BIND_SILVER['%s-%s' % (dbse, '061VAL-064TYR-2')] = -5.751
BIND_SILVER['%s-%s' % (dbse, '062LEU-066LYS-2')] = -7.396
BIND_SILVER['%s-%s' % (dbse, '064GLN-067ARG-1')] = -1.502
BIND_SILVER['%s-%s' % (dbse, '064TYR-067GLU-1')] = -6.519
BIND_SILVER['%s-%s' % (dbse, '066PHE-072THR-1')] = -1.963
BIND_SILVER['%s-%s' % (dbse, '068ASP-072GLN-1')] = -3.899
BIND_SILVER['%s-%s' % (dbse, '071GLU-075ASP-1')] = -7.283
BIND_SILVER['%s-%s' % (dbse, '072ASN-075ARG-2')] = -1.300
BIND_SILVER['%s-%s' % (dbse, '072THR-075PRO-1')] = -2.100
BIND_SILVER['%s-%s' % (dbse, '081ASN-084GLY-1')] = -5.985
BIND_SILVER['%s-%s' % (dbse, '081LEU-084LYS-1')] = -1.507
BIND_SILVER['%s-%s' % (dbse, '082LEU-106LEU-2')] = -7.836
BIND_SILVER['%s-%s' % (dbse, '084LEU-088MET-2')] = -7.448
BIND_SILVER['%s-%s' % (dbse, '087ALA-171PRO-2')] = -8.365
BIND_SILVER['%s-%s' % (dbse, '087LEU-090TYR-1')] = -6.187
BIND_SILVER['%s-%s' % (dbse, '088PHE-091ALA-1')] = -1.445
BIND_SILVER['%s-%s' % (dbse, '089MET-093GLY-1')] = -6.946
BIND_SILVER['%s-%s' % (dbse, '092SER-096ARG-2')] = -7.589
BIND_SILVER['%s-%s' % (dbse, '095GLN-183ILE-1')] = -8.448
BIND_SILVER['%s-%s' % (dbse, '095LYS-107ILE-2')] = -8.007
BIND_SILVER['%s-%s' % (dbse, '097GLU-100THR-2')] = -5.303
BIND_SILVER['%s-%s' % (dbse, '102GLN-106ILE-1')] = -6.676
BIND_SILVER['%s-%s' % (dbse, '104VAL-108ILE-1')] = -7.628
BIND_SILVER['%s-%s' % (dbse, '108LYS-112TYR-1')] = -7.397
BIND_SILVER['%s-%s' % (dbse, '108TYR-129TRP-1')] = -7.591
BIND_SILVER['%s-%s' % (dbse, '112SER-115ALA-2')] = -1.785
BIND_SILVER['%s-%s' % (dbse, '112TRP-115ARG-1')] = -2.022
BIND_SILVER['%s-%s' % (dbse, '113TRP-124HIE-2')] = -8.153
BIND_SILVER['%s-%s' % (dbse, '115GLN-118ARG-2')] = -6.585
BIND_SILVER['%s-%s' % (dbse, '119MET-122VAL-1')] = -1.388
BIND_SILVER['%s-%s' % (dbse, '121LYS-125ALA-1')] = -6.188
BIND_SILVER['%s-%s' % (dbse, '121VAL-155THR-2')] = -7.480
BIND_SILVER['%s-%s' % (dbse, '126VAL-129ALA-1')] = -1.340
BIND_SILVER['%s-%s' % (dbse, '133LYS-137ASN-1')] = -7.522
BIND_SILVER['%s-%s' % (dbse, '134GLU-138ARG-1')] = -7.375
BIND_SILVER['%s-%s' % (dbse, '135ARG-152ALA-2')] = -7.377
BIND_SILVER['%s-%s' % (dbse, '137SER-144LYS-1')] = -7.731
BIND_SILVER['%s-%s' % (dbse, '140SER-144THR-2')] = -7.668
BIND_SILVER['%s-%s' % (dbse, '142ALA-146PHE-1')] = -7.653
BIND_SILVER['%s-%s' % (dbse, '143VAL-147GLU-1')] = -7.454
BIND_SILVER['%s-%s' % (dbse, '146PHE-150LEU-1')] = -7.290
BIND_SILVER['%s-%s' % (dbse, '150LYS-158LEU-2')] = -8.530
BIND_SILVER['%s-%s' % (dbse, '157LYS-160VAL-1')] = 1.996
BIND_SILVER['%s-%s' % (dbse, '162ALA-176GLY-1')] = -8.443
BIND_SILVER['%s-%s' % (dbse, '167GLY-232GLY-1')] = -7.120
BIND_SILVER['%s-%s' % (dbse, '168GLY-172LYS-2')] = -7.440
BIND_SILVER['%s-%s' % (dbse, '171ALA-175GLU-1')] = -1.912
BIND_SILVER['%s-%s' % (dbse, '172GLY-175TRP-1')] = -5.297
BIND_SILVER['%s-%s' % (dbse, '197TYR-201LEU-2')] = -6.934
BIND_SILVER['%s-%s' % (dbse, '199SER-202TYR-1')] = -1.140
BIND_SILVER['%s-%s' % (dbse, '205THR-208GLU-1')] = -1.083
BIND_SILVER['%s-%s' % (dbse, '205THR-209LEU-2')] = -5.867
BIND_SILVER['%s-%s' % (dbse, '228ARG-232GLU-1')] = -7.561
# Set default reference binding energies to the silver-standard set
BIND = BIND_SILVER
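# BIND maps '<dbse>-<rxn>' keys to reference interaction energies (presumably
# in kcal/mol, the usual convention for these databases; negative values are
# attractive). A minimal lookup sketch, assuming dbse and HRXN are defined
# earlier in this module:
#
#     ie = BIND['%s-%s' % (dbse, '038ALA-041GLY-1')]  # -1.191 in the silver set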
# Reference information
BINDINFO_BRONZE = {}
BINDINFO_SILVER = {}
for rxn in HRXN:
BINDINFO_BRONZE['%s-%s' % (dbse, rxn)] = {'citation': 'merz3', 'method': 'MP2CF12', 'mode': 'SA', 'basis': 'adz'}
BINDINFO_SILVER['%s-%s' % (dbse, rxn)] = {'citation': 'merz3', 'method': 'DWCCSDTF12', 'mode': 'CP', 'basis': 'adz'} # addz?
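# Each BINDINFO entry records the provenance of the corresponding reference
# value: a literature citation key, the electronic-structure method, the
# basis-set label, and a 'mode' flag ('CP' conventionally denotes a
# counterpoise-corrected interaction energy; 'SA' follows the qcdb convention).
# Example (key built the same way as for BIND):
#
#     BINDINFO_SILVER['%s-%s' % (dbse, '038ALA-041GLY-1')]
#     # -> {'citation': 'merz3', 'method': 'DWCCSDTF12', 'mode': 'CP', 'basis': 'adz'}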
# <<< Comment Lines >>>
TAGL = {}
rxnpattern = re.compile(r'^(.+)-(.+)-(.+)$')
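# rxnpattern splits a reaction label such as '038ALA-041GLY-1' into its three
# components: residue A ('038ALA'), residue B ('041GLY'), and the interaction
# index ('1'), which the TAGL strings below interpolate in that order.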
for rxn in HRXN:
molname = rxnpattern.match(rxn)
TAGL['%s-%s' % (dbse, rxn)] = """Residue %s and %s interaction No. %s""" % (molname.group(1), molname.group(2), molname.group(3))
TAGL['%s-%s-dimer' % (dbse, rxn)] = """Dimer from %s""" % (rxn)
TAGL['%s-%s-monoA-CP' % (dbse, rxn)] = """Monomer A from %s""" % (rxn)
TAGL['%s-%s-monoB-CP' % (dbse, rxn)] = """Monomer B from %s""" % (rxn)
TAGL['%s-%s-monoA-unCP' % (dbse, rxn)] = """Monomer A from %s""" % (rxn)
TAGL['%s-%s-monoB-unCP' % (dbse, rxn)] = """Monomer B from %s""" % (rxn)
TAGL['dbse'] = 'interaction energies for peptide backbone-backbone complexes'
TAGL['shb'] = 'singly hydrogen-bonded complexes'
TAGL['ua'] = 'unaligned backbone structures'
TAGL['bbi25'] = 'representative sample of 25 complexes'
TAGL['default'] = 'entire database'
# <<< Geometry Specification Strings >>>
GEOS = {}
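# Each GEOS entry is a qcdb.Molecule built from a PSI4-style geometry string:
# a charge/multiplicity line ('0 1') for each fragment, element symbols with
# Cartesian coordinates, a '--' separator between the two monomers, and a
# closing 'units angstrom' directive. Geometries are keyed like BIND, with a
# '-dimer' suffix, e.g.:
#
#     mol = GEOS['%s-%s-dimer' % (dbse, '004GLU-063LEU-2')]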
GEOS['%s-%s-dimer' % (dbse, '004GLU-063LEU-2')] = qcdb.Molecule("""
0 1
C 31.01400000 -3.84200000 7.91700000
C 32.77900000 -5.82000000 10.65500000
C 32.37100000 -4.63300000 9.78100000
O 32.83000000 -3.47700000 9.99000000
N 31.53500000 -4.90600000 8.77100000
H 31.20900000 -5.85900000 8.62600000
H 31.29400000 -2.86400000 8.30900000
H 32.22100000 -6.71200000 10.37200000
H 31.43191000 -3.93711000 6.90393200
H 29.91523000 -3.88500500 7.88761400
H 33.86081000 -5.95898200 10.51226000
H 32.57115000 -5.57809500 11.70775000
--
0 1
C 29.64500000 -8.60100000 6.64000000
C 31.49500000 -10.17100000 9.56100000
C 30.48100000 -8.70100000 7.90600000
O 30.84300000 -7.68500000 8.51100000
N 30.69600000 -9.92800000 8.36400000
H 29.35100000 -9.59600000 6.30500000
H 30.32100000 -10.72500000 7.85800000
H 32.01200000 -9.26900000 9.87800000
H 28.75868000 -8.00110100 6.89409300
H 30.21876000 -8.10158000 5.84540300
H 30.83944000 -10.49806000 10.38153000
H 32.23175000 -10.94459000 9.29880200
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '005ASP-008SER-2')] = qcdb.Molecule("""
0 1
C 26.24300000 -6.23800000 -1.26400000
C 23.28800000 -8.18300000 0.13400000
C 25.18600000 -6.71300000 -0.27500000
O 25.16800000 -6.30000000 0.88000000
N 24.31100000 -7.61000000 -0.72800000
H 27.04400000 -5.75900000 -0.69800000
H 24.33700000 -7.87600000 -1.70700000
H 22.67200000 -7.36900000 0.51900000
H 26.66298000 -7.09677700 -1.80816800
H 25.82028000 -5.49661700 -1.95801400
H 22.64828000 -8.87143800 -0.43766880
H 23.74797000 -8.69263800 0.99347450
--
0 1
C 27.48200000 -7.33500000 3.49700000
C 25.64400000 -10.39800000 2.15900000
C 26.29300000 -9.42100000 3.13600000
O 26.27700000 -9.65100000 4.34600000
N 26.85700000 -8.33100000 2.62700000
H 26.86500000 -8.18600000 1.62300000
H 28.19300000 -7.85100000 4.14400000
H 24.90900000 -10.95900000 2.73100000
H 28.05180000 -6.59511600 2.91571300
H 26.74688000 -6.83531700 4.14500400
H 25.11716000 -9.88318200 1.34205500
H 26.38917000 -11.10225000 1.76057000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '005LEU-008VAL-2')] = qcdb.Molecule("""
0 1
C 14.16900000 62.02000000 7.59200000
C 12.42900000 58.71800000 8.29000000
C 13.07200000 61.01200000 7.90200000
O 11.95100000 61.39900000 8.24000000
N 13.36700000 59.71700000 7.80200000
H 14.58400000 62.25800000 8.57100000
H 14.29300000 59.43200000 7.51100000
H 11.51200000 58.78500000 7.70400000
H 14.95972000 61.61525000 6.94320200
H 13.74934000 62.95496000 7.19232400
H 12.84693000 57.70447000 8.20008400
H 12.16339000 58.94186000 9.33371500
--
0 1
C 10.16200000 62.96600000 11.02600000
C 10.21400000 59.16000000 11.38700000
C 10.05400000 60.64000000 11.75100000
O 9.52600000 60.96500000 12.81800000
N 10.48900000 61.54600000 10.88600000
H 10.93600000 61.23700000 10.03200000
H 10.21200000 63.26400000 12.07400000
H 9.18900000 58.78200000 11.38300000
H 10.88144000 63.57503000 10.45900000
H 9.13582400 63.11971000 10.66085000
H 10.63054000 58.95248000 10.39029000
H 10.74514000 58.61961000 12.18442000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '007GLN-010ASN-1')] = qcdb.Molecule("""
0 1
C 2.71600000 0.82000000 22.90400000
C 6.42600000 1.29500000 22.11000000
C 4.16200000 0.46200000 22.57800000
O 4.51600000 -0.71700000 22.51100000
N 4.99800000 1.48000000 22.38400000
H 2.22900000 -0.07200000 23.30100000
H 4.62300000 2.42400000 22.39500000
H 6.53500000 0.63900000 21.24300000
H 2.66950800 1.62761900 23.64938000
H 2.21975900 1.11228600 21.96682000
H 6.89564800 2.25983500 21.86808000
H 6.93538300 0.79349740 22.94608000
--
0 1
C 5.29400000 -2.80800000 25.39800000
C 7.40700000 0.34000000 25.70300000
C 7.05100000 -1.14600000 25.69200000
O 7.88300000 -2.00700000 25.99000000
N 5.79600000 -1.44000000 25.36200000
H 5.14500000 -0.68000000 25.19200000
H 5.59200000 -3.26200000 26.34600000
H 8.49600000 0.41700000 25.70700000
H 4.19525100 -2.81731800 25.34640000
H 5.73195000 -3.44291900 24.61373000
H 6.99946900 0.84474080 24.81466000
H 7.00958500 0.77023540 26.63411000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '007VAL-041VAL-1')] = qcdb.Molecule("""
0 1
C 22.55700000 0.21000000 5.45400000
C 19.77500000 1.94900000 3.54200000
C 21.87500000 0.97900000 4.30300000
O 22.41500000 1.19200000 3.20900000
N 20.64000000 1.38900000 4.57200000
H 21.83700000 -0.53400000 5.80400000
H 20.24700000 1.19900000 5.49200000
H 19.82400000 1.35600000 2.62700000
H 23.47928000 -0.28719010 5.11903500
H 22.73621000 0.90317170 6.28910400
H 18.76701000 1.96809900 3.98198100
H 20.08484000 2.97985500 3.31541500
--
0 1
C 23.51900000 1.36800000 -0.32800000
C 25.20800000 3.08800000 2.65500000
C 25.00500000 2.50600000 1.27600000
O 25.94600000 2.48800000 0.50700000
N 23.81700000 1.99500000 0.97100000
H 23.10500000 1.94600000 1.69300000
H 24.38600000 1.40300000 -0.99000000
H 24.55300000 2.55600000 3.34200000
H 23.23073000 0.31617780 -0.18456970
H 22.69658000 1.94062500 -0.78157200
H 26.25647000 2.90498000 2.93286400
H 24.94375000 4.15420700 2.71309200
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '008ILE-012LEU-1')] = qcdb.Molecule("""
0 1
C 32.59400000 -4.62600000 57.97600000
C 33.24900000 -3.95000000 54.28700000
C 32.35300000 -4.27000000 56.51000000
O 31.20800000 -4.13900000 56.08400000
N 33.42400000 -4.16200000 55.71600000
H 31.64400000 -4.97300000 58.38600000
H 34.36000000 -4.23300000 56.10400000
H 32.67300000 -3.03700000 54.12300000
H 33.33891000 -5.43143000 58.05595000
H 32.92266000 -3.75266200 58.55846000
H 34.21676000 -3.84151300 53.77546000
H 32.68009000 -4.76497700 53.81566000
--
0 1
C 28.25400000 -3.45600000 53.71400000
C 28.74800000 -6.84300000 55.36600000
C 28.07800000 -5.67300000 54.64800000
O 26.91000000 -5.77200000 54.26300000
N 28.79000000 -4.56300000 54.47700000
H 29.72100000 -4.49600000 54.87400000
H 27.34700000 -3.13500000 54.22400000
H 28.21100000 -7.74000000 55.06900000
H 28.99593000 -2.64388400 53.71543000
H 27.96856000 -3.80399000 52.71029000
H 29.80490000 -6.93581600 55.07558000
H 28.63932000 -6.73144500 56.45492000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '008TRP-251HIE-2')] = qcdb.Molecule("""
0 1
C 27.44300000 25.35600000 25.79500000
C 27.24800000 25.85300000 29.56200000
C 27.86800000 25.37900000 27.24700000
O 29.06200000 25.29900000 27.53400000
N 26.91100000 25.57500000 28.15500000
H 26.36200000 25.50200000 25.73300000
H 25.94500000 25.63300000 27.85800000
H 28.24300000 26.30000000 29.59100000
H 27.96718000 26.17629000 25.28279000
H 27.68820000 24.38239000 25.34559000
H 26.56993000 26.61156000 29.98010000
H 27.32039000 24.95969000 30.19977000
--
0 1
C 32.16000000 24.06100000 28.81600000
C 32.25400000 23.97000000 25.01000000
C 32.66900000 23.70100000 26.45000000
O 33.74600000 23.16600000 26.71700000
N 31.82800000 24.10900000 27.39500000
H 30.93300000 24.51700000 27.13900000
H 33.24000000 24.03100000 28.96600000
H 31.17300000 24.10700000 24.98500000
H 31.74934000 24.96112000 29.29678000
H 31.70477000 23.16073000 29.25450000
H 32.76357000 24.88119000 24.66349000
H 32.49434000 23.11411000 24.36216000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '011ILE-014PHE-1')] = qcdb.Molecule("""
0 1
C 6.60700000 -3.99700000 21.99700000
C 10.15300000 -3.37000000 23.27900000
C 8.08100000 -4.24000000 22.31700000
O 8.60500000 -5.33100000 22.08800000
N 8.75100000 -3.23200000 22.87000000
H 6.12500000 -4.97500000 21.96500000
H 8.27600000 -2.34700000 23.01200000
H 10.74700000 -3.64000000 22.40400000
H 6.14211700 -3.40302500 22.79767000
H 6.50064600 -3.53968000 21.00224000
H 10.53565000 -2.41231400 23.66165000
H 10.27309000 -4.17774200 24.01597000
--
0 1
C 7.88900000 -8.33900000 24.05700000
C 9.43800000 -5.63700000 26.29300000
C 9.15700000 -7.00300000 25.66800000
O 9.81200000 -7.98900000 26.01000000
N 8.19800000 -7.07600000 24.74200000
H 7.63600000 -6.25400000 24.53500000
H 7.71400000 -9.11400000 24.80200000
H 10.42600000 -5.68600000 26.75200000
H 6.99501800 -8.24895900 23.42242000
H 8.73123000 -8.69944300 23.44812000
H 9.42975600 -4.82836500 25.54732000
H 8.69941400 -5.44013900 27.08403000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '012LEU-085ASP-1')] = qcdb.Molecule("""
0 1
C 12.00300000 7.41700000 2.33500000
C 12.21000000 3.98800000 0.65700000
C 11.69600000 5.96800000 1.99600000
O 10.80700000 5.37900000 2.61200000
N 12.39600000 5.38800000 1.02300000
H 12.14400000 7.43500000 3.41600000
H 13.08200000 5.93800000 0.52300000
H 11.83700000 3.44500000 1.52600000
H 12.89158000 7.82131400 1.82809900
H 11.13620000 8.05029000 2.09498600
H 11.49247000 3.90083100 -0.17219030
H 13.20110000 3.59431700 0.38729420
--
0 1
C 8.25300000 5.74700000 5.50500000
C 10.05100000 2.62600000 4.39500000
C 9.22000000 3.53900000 5.27200000
O 8.58800000 3.10900000 6.24200000
N 9.15200000 4.80000000 4.87400000
H 9.67500000 5.08600000 4.05200000
H 8.49500000 5.82400000 6.56600000
H 10.55200000 3.23300000 3.64500000
H 8.38614300 6.72893200 5.02742100
H 7.21057300 5.41751000 5.38341900
H 10.79572000 2.06500400 4.97867700
H 9.39303400 1.92446000 3.86121900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '014LYS-018GLU-1')] = qcdb.Molecule("""
0 1
C 16.22000000 16.91300000 11.79900000
C 13.19600000 14.75900000 12.64600000
C 15.38800000 15.68000000 12.13200000
O 15.91800000 14.56800000 12.19500000
N 14.08300000 15.86700000 12.31500000
H 17.16900000 16.55200000 11.39500000
H 13.71300000 16.81100000 12.28100000
H 13.61500000 14.21200000 13.49400000
H 15.71695000 17.53278000 11.04216000
H 16.42626000 17.49011000 12.71246000
H 12.20434000 15.13931000 12.93231000
H 13.12258000 14.03341000 11.82251000
--
0 1
C 16.14000000 10.66600000 12.71800000
C 16.84400000 12.38900000 9.37400000
C 16.87000000 11.29200000 10.43600000
O 17.36600000 10.19000000 10.19800000
N 16.30600000 11.60200000 11.60300000
H 15.98200000 12.55500000 11.73500000
H 17.12100000 10.35800000 13.08500000
H 16.92200000 11.90300000 8.41300000
H 15.61356000 11.20605000 13.51876000
H 15.58750000 9.76263000 12.42023000
H 15.88699000 12.93126000 9.36500000
H 17.68895000 13.07583000 9.52996800
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '014VAL-017GLU-1')] = qcdb.Molecule("""
0 1
C 10.38200000 95.07800000 19.05400000
C 8.57800000 91.68400000 19.21700000
C 8.78800000 93.16800000 18.93600000
O 7.87500000 93.82900000 18.40500000
N 9.98900000 93.67000000 19.26700000
H 10.67400000 93.05100000 19.69900000
H 9.65700000 95.48200000 18.34400000
H 9.47700000 91.31100000 19.70800000
H 10.33263000 95.69007000 19.96665000
H 11.40499000 95.05660000 18.65023000
H 7.70540900 91.52917000 19.86862000
H 8.44270900 91.12988000 18.27644000
--
0 1
C 13.71000000 90.61200000 18.69700000
C 11.31900000 89.13100000 21.24900000
C 12.56400000 90.50900000 19.69300000
O 11.86900000 91.48700000 19.96800000
N 12.37100000 89.32200000 20.25300000
H 13.86200000 89.61600000 18.27900000
H 12.96900000 88.52900000 20.01900000
H 10.45500000 89.73900000 20.98700000
H 13.50555000 91.34077000 17.89882000
H 14.62401000 90.92047000 19.22560000
H 11.01579000 88.07482000 21.29953000
H 11.63536000 89.40124000 22.26727000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '015GLN-018LEU-1')] = qcdb.Molecule("""
0 1
C 1.80300000 4.53500000 38.27400000
C -1.68700000 5.80100000 37.42600000
C 0.52100000 4.83600000 37.53700000
O 0.44900000 4.49100000 36.35200000
N -0.44700000 5.49800000 38.15700000
H 2.58300000 4.47900000 37.51100000
H -0.37100000 5.70900000 39.14900000
H -2.00700000 4.88500000 36.94100000
H 2.05480900 5.34169200 38.97816000
H 1.76967900 3.57664900 38.81294000
H -2.44452800 6.17223900 38.13193000
H -1.48605000 6.52935500 36.62655000
--
0 1
C 2.02300000 6.20200000 33.62800000
C -0.13900000 8.69500000 35.49200000
C 0.50300000 7.92500000 34.35300000
O 0.20900000 8.18000000 33.18000000
N 1.39500000 6.99100000 34.68200000
H 1.65600000 6.85500000 35.65300000
H 2.47200000 6.89800000 32.92200000
H -1.01700000 9.21100000 35.09700000
H 2.81022800 5.56530300 34.05799000
H 1.26451300 5.61665300 33.08757000
H -0.42638430 7.99309000 36.28870000
H 0.57226140 9.43715500 35.88355000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '015LEU-026LEU-1')] = qcdb.Molecule("""
0 1
C 39.68500000 9.51600000 9.12800000
C 36.88800000 12.06900000 8.71200000
C 37.76100000 10.85200000 8.43800000
O 37.42800000 10.03800000 7.57400000
N 38.85000000 10.70600000 9.18500000
H 39.06900000 11.42500000 9.86700000
H 39.66700000 9.09800000 8.12100000
H 37.33900000 12.65900000 9.50900000
H 40.72381000 9.76042500 9.39471100
H 39.32454000 8.72053000 9.79680300
H 36.78938000 12.68195000 7.80394700
H 35.89571000 11.71329000 9.02636800
--
0 1
C 39.89400000 12.81400000 13.62800000
C 39.65000000 15.12100000 10.62300000
C 39.63600000 13.23400000 12.18600000
O 39.13800000 12.44900000 11.37300000
N 39.97100000 14.48000000 11.88800000
H 40.31600000 13.66100000 14.17300000
H 40.36900000 15.06400000 12.62000000
H 39.08400000 14.44300000 9.98800000
H 38.93213000 12.54785000 14.09057000
H 40.60493000 11.97693000 13.69035000
H 39.04096000 16.01985000 10.79947000
H 40.58788000 15.39983000 10.12038000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '017ASN-021LYS-2')] = qcdb.Molecule("""
0 1
C 18.31000000 53.33500000 13.55200000
C 15.15800000 51.25200000 13.89900000
C 17.40800000 52.16400000 13.91800000
O 17.89100000 51.12400000 14.35300000
N 16.11200000 52.31800000 13.67200000
H 19.32100000 52.93600000 13.43500000
H 15.77000000 53.22700000 13.37500000
H 15.29600000 50.87600000 14.91300000
H 18.00289000 53.79188000 12.59966000
H 18.32156000 54.09524000 14.34692000
H 14.12871000 51.63311000 13.82610000
H 15.32087000 50.40273000 13.21914000
--
0 1
C 18.14100000 47.14800000 15.07000000
C 19.40300000 48.50500000 11.74600000
C 19.20000000 47.57500000 12.93200000
O 19.69100000 46.45100000 12.94400000
N 18.42200000 48.01000000 13.91900000
H 18.05200000 48.95500000 13.89000000
H 19.08200000 46.77900000 15.48700000
H 19.70200000 47.88000000 10.90300000
H 17.61988000 47.71872000 15.85276000
H 17.56658000 46.26118000 14.76408000
H 18.45753000 49.00950000 11.49786000
H 20.20791000 49.23261000 11.92684000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '017LEU-025ILE-1')] = qcdb.Molecule("""
0 1
C 7.17000000 24.28800000 3.37300000
C 4.69000000 27.02900000 2.57900000
C 5.88700000 25.09500000 3.42500000
O 4.98800000 24.82000000 4.21500000
N 5.80200000 26.09500000 2.57000000
H 7.14200000 23.76600000 2.41800000
H 6.52900000 26.20600000 1.87300000
H 3.86600000 26.65200000 3.18100000
H 8.05329200 24.94218000 3.41595900
H 7.20100600 23.54889000 4.18710000
H 5.00782900 27.99179000 3.00563500
H 4.32016700 27.16506000 1.55200900
--
0 1
C 2.15600000 22.62800000 3.98800000
C 1.72200000 26.09700000 5.49600000
C 1.39900000 24.65900000 5.10600000
O 0.33200000 24.13600000 5.44200000
N 2.32800000 24.01700000 4.40300000
H 3.20500000 24.48400000 4.20000000
H 1.13100000 22.45900000 3.65100000
H 2.57900000 26.42800000 4.91000000
H 2.84211400 22.38362000 3.16366600
H 2.34216800 21.96301000 4.84423000
H 0.86013660 26.74641000 5.28279400
H 2.01418400 26.14415000 6.55543600
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '019LEU-022MET-1')] = qcdb.Molecule("""
0 1
C 15.07200000 3.59400000 7.20300000
C 18.65000000 4.93300000 7.58500000
C 16.44300000 3.91200000 7.78300000
O 16.73400000 3.48300000 8.90700000
N 17.30400000 4.65300000 7.06700000
H 14.42400000 3.43700000 8.06600000
H 17.04400000 4.96100000 6.13800000
H 19.11400000 3.97900000 7.84500000
H 14.65841000 4.41967300 6.60533400
H 15.10208000 2.66939900 6.60784600
H 19.27722000 5.41750600 6.82220700
H 18.61226000 5.54113500 8.50083200
--
0 1
C 15.95900000 4.70400000 12.62200000
C 17.33800000 7.33700000 10.22600000
C 16.99400000 6.62000000 11.52800000
O 17.01400000 7.26900000 12.58600000
N 16.59900000 5.34800000 11.47400000
H 16.61400000 4.85500000 10.58600000
H 15.93900000 5.37200000 13.48600000
H 18.28000000 7.86400000 10.38200000
H 14.91518000 4.44913000 12.38646000
H 16.50130000 3.79414900 12.91878000
H 17.43170000 6.66521900 9.36001500
H 16.55190000 8.08422700 10.04243000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '023LYS-146SER-2')] = qcdb.Molecule("""
0 1
C 0.57400000 -11.81000000 5.51200000
C 2.98900000 -11.59600000 8.42900000
C 1.66800000 -11.55200000 7.67900000
O 0.59700000 -11.48100000 8.28100000
N 1.74900000 -11.62500000 6.35700000
H 2.66800000 -11.56900000 5.92300000
H -0.29900000 -12.08100000 6.10800000
H 3.79700000 -11.38500000 7.73300000
H 0.34273930 -10.88784000 4.95870300
H 0.77769330 -12.62477000 4.80161000
H 2.99807600 -10.85854000 9.24512700
H 3.16340300 -12.60725000 8.82517500
--
0 1
C 5.24800000 -11.18400000 3.29500000
C 6.86400000 -9.73600000 6.40300000
C 5.38400000 -10.70700000 4.72800000
O 4.46400000 -10.86500000 5.52700000
N 6.52500000 -10.10900000 5.04000000
H 6.24800000 -11.24000000 2.86400000
H 7.25300000 -10.02600000 4.34000000
H 6.01600000 -9.92500000 7.03800000
H 4.76020600 -12.16937000 3.26187800
H 4.67695500 -10.44496000 2.71386000
H 7.67955300 -10.37048000 6.78023800
H 7.13044300 -8.67023000 6.45905500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '024PHE-028ALA-1')] = qcdb.Molecule("""
0 1
C 13.10800000 32.54900000 28.14000000
C 12.74900000 35.41500000 25.67100000
C 12.61200000 33.37600000 26.94100000
O 11.68000000 32.95400000 26.24100000
N 13.25400000 34.52700000 26.71000000
H 13.63300000 33.17300000 28.85800000
H 13.97900000 34.85100000 27.34800000
H 11.78900000 35.82100000 25.99100000
H 12.21265000 32.12384000 28.61706000
H 13.75589000 31.72402000 27.80886000
H 13.45014000 36.25053000 25.52858000
H 12.57856000 34.88148000 24.72427000
--
0 1
C 8.18500000 33.56600000 24.48600000
C 10.66300000 30.69400000 23.84300000
C 9.37100000 31.51800000 23.84600000
O 8.40500000 31.23100000 23.11500000
N 9.36100000 32.65500000 24.56900000
H 10.15900000 32.90600000 25.14600000
H 7.28900000 33.04600000 24.83300000
H 10.61500000 30.03600000 22.97200000
H 8.36338800 34.44931000 25.11683000
H 8.02420200 33.89262000 23.44799000
H 11.53755000 31.35534000 23.75472000
H 10.73578000 30.06750000 24.74422000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '024THR-041GLU-2')] = qcdb.Molecule("""
0 1
C 1.19400000 22.61400000 8.20600000
C 3.82700000 20.42100000 9.80300000
C 2.28100000 21.57600000 8.29300000
O 2.65400000 20.99200000 7.30100000
N 2.78900000 21.39200000 9.48900000
H 1.21100000 23.22600000 9.10700000
H 2.41600000 21.94700000 10.25500000
H 4.18100000 19.95300000 8.89000000
H 1.38331300 23.25161000 7.32986100
H 0.20395010 22.13755000 8.15314000
H 4.67854600 20.86907000 10.33601000
H 3.34354000 19.68787000 10.46541000
--
0 1
C 2.44000000 18.11300000 4.50400000
C 5.77900000 19.46400000 5.76700000
C 4.73000000 18.81000000 4.86600000
O 5.06900000 18.22100000 3.82800000
N 3.44900000 18.88600000 5.22500000
H 3.19000000 19.39500000 6.06700000
H 2.64600000 17.04700000 4.62100000
H 5.29700000 20.08800000 6.52100000
H 1.44433900 18.32759000 4.91946100
H 2.44285400 18.33914000 3.42750100
H 6.31251500 18.62228000 6.23269500
H 6.45927300 20.07380000 5.15432300
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '025GLY-029SER-2')] = qcdb.Molecule("""
0 1
C 7.13200000 67.30800000 -0.96500000
C 10.74700000 68.19700000 -0.17500000
C 8.63500000 67.16300000 -0.80700000
O 9.18300000 66.07800000 -1.00400000
N 9.30600000 68.25100000 -0.43700000
H 6.67100000 66.32300000 -0.88400000
H 6.93600000 67.69600000 -1.96500000
H 8.81500000 69.13900000 -0.37500000
H 11.25300000 67.75600000 -1.03600000
H 6.68160500 67.99151000 -0.23018490
H 11.14583000 69.21118000 -0.02543690
H 10.93559000 67.53729000 0.68477580
--
0 1
C 11.47700000 62.99700000 -0.75400000
C 8.62100000 63.50200000 1.75700000
C 9.68600000 62.81600000 0.91100000
O 9.91400000 61.62300000 1.08200000
N 10.39200000 63.56600000 0.05400000
H 10.10600000 64.52400000 -0.12400000
H 11.09600000 62.13800000 -1.30800000
H 8.51800000 62.91900000 2.66800000
H 11.85387000 63.73133000 -1.48113600
H 12.30145000 62.64109000 -0.11869770
H 8.90534800 64.52731000 2.03608300
H 7.65143200 63.48974000 1.23758900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '025ILE-070TRP-1')] = qcdb.Molecule("""
0 1
C 6.01900000 -13.17600000 -7.12800000
C 4.25900000 -14.74800000 -4.14500000
C 5.53000000 -14.25700000 -6.16900000
O 5.68200000 -15.44700000 -6.45200000
N 4.95200000 -13.83800000 -5.04500000
H 5.96200000 -12.20800000 -6.62800000
H 4.83400000 -12.84400000 -4.88600000
H 4.82600000 -15.66800000 -3.99700000
H 7.05334900 -13.39867000 -7.42890100
H 5.36553800 -13.16252000 -8.01276300
H 4.07119100 -14.25464000 -3.17994900
H 3.26892500 -14.96352000 -4.57314000
--
0 1
C 6.66100000 -17.72800000 -9.49500000
C 7.06900000 -18.48300000 -5.78500000
C 6.92400000 -18.78600000 -7.26900000
O 7.08500000 -19.93800000 -7.69900000
N 6.63300000 -17.73500000 -8.03700000
H 6.44800000 -16.84900000 -7.57700000
H 6.98900000 -18.69800000 -9.86000000
H 6.58900000 -17.52900000 -5.60200000
H 5.65266600 -17.51183000 -9.87779400
H 7.37585900 -16.95810000 -9.82092300
H 6.58467900 -19.26304000 -5.17922200
H 8.13849500 -18.40056000 -5.54130900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '027VAL-031LEU-1')] = qcdb.Molecule("""
0 1
C 10.66300000 30.69400000 23.84300000
C 8.18500000 33.56600000 24.48600000
C 9.37100000 31.51800000 23.84600000
O 8.40500000 31.23100000 23.11500000
N 9.36100000 32.65500000 24.56900000
H 10.61500000 30.03600000 22.97200000
H 10.15900000 32.90600000 25.14600000
H 7.28900000 33.04600000 24.83300000
H 11.53755000 31.35534000 23.75472000
H 10.73578000 30.06750000 24.74422000
H 8.36338800 34.44931000 25.11683000
H 8.02420200 33.89262000 23.44799000
--
0 1
C 4.81000000 31.03100000 21.67200000
C 7.95800000 31.28100000 19.57600000
C 6.48000000 31.14800000 19.92900000
O 5.62100000 31.06800000 19.03100000
N 6.19900000 31.16400000 21.23000000
H 6.95100000 31.23600000 21.91100000
H 4.38000000 30.11100000 21.27900000
H 8.03200000 31.47500000 18.50400000
H 4.75122500 31.05824000 22.77009000
H 4.19712700 31.85536000 21.27852000
H 8.43335500 32.10899000 20.12233000
H 8.46768900 30.33135000 19.79598000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '027VAL-068GLU-2')] = qcdb.Molecule("""
0 1
C 1.24900000 -16.76400000 -5.34400000
C 4.25900000 -14.74800000 -4.14500000
C 2.89000000 -15.04600000 -4.73700000
O 2.16200000 -14.11100000 -5.10300000
N 2.51800000 -16.32300000 -4.77000000
H 3.20200000 -17.03400000 -4.52000000
H 0.57800000 -15.91500000 -5.48500000
H 4.82600000 -15.66800000 -3.99700000
H 1.41459600 -17.23520000 -6.32407400
H 0.77053450 -17.47086000 -4.65015300
H 4.78275400 -14.06024000 -4.82520000
H 4.07119100 -14.25464000 -3.17994900
--
0 1
C 4.49800000 -20.56900000 -3.98500000
C 7.06900000 -18.48300000 -5.78500000
C 5.18700000 -19.34500000 -4.54600000
O 4.58900000 -18.26300000 -4.62200000
N 6.42700000 -19.51700000 -4.98200000
H 5.26800000 -21.09500000 -3.41800000
H 6.85900000 -20.43400000 -4.92500000
H 6.58900000 -17.52900000 -5.60200000
H 3.70499100 -20.25497000 -3.29036000
H 4.15245500 -21.27809000 -4.75167800
H 8.13849500 -18.40056000 -5.54130900
H 6.96417200 -18.70205000 -6.85785900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '029GLU-032VAL-1')] = qcdb.Molecule("""
0 1
C 5.68300000 7.08200000 2.74100000
C 2.70200000 8.99900000 1.32500000
C 4.86500000 8.21300000 2.12400000
O 5.38800000 9.30900000 1.92800000
N 3.58100000 7.97000000 1.86200000
H 6.49700000 7.55500000 3.29400000
H 3.21700000 7.03400000 2.00200000
H 3.17500000 9.43700000 0.44400000
H 5.07648700 6.49132900 3.44331800
H 6.12600800 6.44481700 1.96142200
H 1.73981700 8.56334900 1.01773600
H 2.55311400 9.80169100 2.06223800
--
0 1
C 6.04200000 11.71500000 4.59100000
C 2.33600000 10.75500000 4.70200000
C 3.59600000 11.62200000 4.73100000
O 3.50700000 12.85200000 4.80600000
N 4.76800000 10.99100000 4.63400000
H 4.77300000 9.97500000 4.59500000
H 6.09800000 12.36300000 5.46600000
H 1.49600000 11.41900000 4.49500000
H 6.90442800 11.03316000 4.62726000
H 6.09301700 12.37176000 3.71005400
H 2.40736900 10.00750000 3.89816100
H 2.15798000 10.27384000 5.67503500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '030TRP-178GLY-2')] = qcdb.Molecule("""
0 1
C 48.55800000 20.43700000 27.72000000
C 49.42900000 23.30200000 25.41700000
C 49.29300000 22.44600000 26.64900000
O 49.66600000 22.89100000 27.73100000
N 48.73300000 21.25500000 26.53200000
H 48.43900000 20.89900000 25.62600000
H 49.13700000 20.85700000 28.53100000
H 49.02700000 22.76300000 24.55500000
H 47.50412000 20.44059000 28.03516000
H 48.94433000 19.41797000 27.57057000
H 48.86150000 24.22684000 25.59757000
H 50.49582000 23.50705000 25.24422000
--
0 1
C 46.91000000 18.45700000 22.92300000
C 46.99900000 22.00400000 21.65200000
C 47.26200000 19.92500000 22.86800000
O 47.97300000 20.45000000 23.72500000
N 46.74800000 20.58800000 21.84400000
H 47.35100000 17.96200000 22.05600000
H 45.82600000 18.39600000 22.83300000
H 46.17900000 20.09300000 21.16600000
H 47.24700000 22.46000000 22.60400000
H 47.23510000 17.94775000 23.84223000
H 47.83769000 22.16152000 20.95789000
H 46.11238000 22.51262000 21.24554000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '033ASN-036TRP-2')] = qcdb.Molecule("""
0 1
C -15.84200000 -16.31900000 11.52700000
C -14.66100000 -17.23500000 8.03600000
C -15.06100000 -16.27300000 10.22700000
O -14.11900000 -15.49600000 10.09500000
N -15.43600000 -17.11400000 9.26800000
H -16.62900000 -17.07200000 11.44700000
H -16.19400000 -17.76200000 9.44200000
H -13.61300000 -17.40400000 8.29400000
H -15.14067000 -16.58153000 12.33274000
H -16.33130000 -15.35410000 11.72589000
H -15.01129000 -18.09848000 7.45145600
H -14.69713000 -16.32606000 7.41751600
--
0 1
C -14.81600000 -11.35000000 9.08200000
C -11.55600000 -11.57000000 11.05000000
C -13.82900000 -11.74800000 10.16200000
O -14.18600000 -12.49300000 11.07300000
N -12.59200000 -11.26400000 10.06400000
H -14.31300000 -10.68000000 8.38500000
H -12.36700000 -10.60800000 9.32200000
H -11.85500000 -12.42300000 11.66100000
H -15.16270000 -12.26473000 8.57893800
H -15.66921000 -10.82007000 9.53056500
H -10.60806000 -11.83272000 10.55767000
H -11.39262000 -10.72477000 11.73475000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '033VAL-115VAL-1')] = qcdb.Molecule("""
0 1
C 3.50000000 -15.42200000 -8.51000000
C 6.13100000 -14.90800000 -5.84500000
C 4.74700000 -15.67800000 -7.69300000
O 5.48800000 -16.63000000 -7.94400000
N 4.98700000 -14.79900000 -6.73400000
H 2.98500000 -14.54900000 -8.11300000
H 4.29600000 -14.08000000 -6.53900000
H 6.82200000 -15.64400000 -6.24400000
H 3.82476300 -15.22699000 -9.54271500
H 2.79753700 -16.26812000 -8.48486100
H 6.68485000 -13.96160000 -5.75792600
H 5.79784000 -15.27092000 -4.86148900
--
0 1
C 6.70200000 -18.82500000 -10.87800000
C 8.96800000 -16.86100000 -8.52700000
C 8.55800000 -17.77100000 -9.67700000
O 9.41400000 -18.41500000 -10.29300000
N 7.26400000 -17.85600000 -9.94300000
H 6.61100000 -17.32000000 -9.37700000
H 7.48300000 -19.33800000 -11.44000000
H 8.07700000 -16.42400000 -8.07400000
H 6.11234300 -19.56737000 -10.32016000
H 6.04861200 -18.28619000 -11.57997000
H 9.49428800 -17.49996000 -7.80259900
H 9.62625800 -16.05438000 -8.88205600
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '035TRP-056ILE-1')] = qcdb.Molecule("""
0 1
C 26.94200000 3.32900000 60.85700000
C 23.36400000 4.46100000 60.67500000
C 25.64000000 3.92200000 61.37000000
O 25.42900000 4.11100000 62.57300000
N 24.76500000 4.19500000 60.41200000
H 27.22000000 3.86700000 59.95300000
H 25.02900000 3.97900000 59.45500000
H 23.17300000 4.49000000 61.74800000
H 27.73424000 3.40537800 61.61629000
H 26.76701000 2.27547500 60.59344000
H 23.07798000 5.43057000 60.24126000
H 22.78204000 3.63957300 60.23163000
--
0 1
C 25.57300000 1.32400000 65.13200000
C 23.05800000 4.13700000 65.27600000
C 23.71400000 2.82900000 65.62100000
O 23.23400000 2.07500000 66.47400000
N 24.83200000 2.56700000 64.96700000
H 25.16900000 3.22600000 64.27100000
H 25.22900000 0.77300000 66.00000000
H 23.34500000 4.41800000 64.26200000
H 25.44655000 0.69534950 64.23824000
H 26.64101000 1.56951800 65.22732000
H 21.96725000 4.01821300 65.35444000
H 23.48034000 4.88360800 65.96463000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '037PRO-041ALA-1')] = qcdb.Molecule("""
0 1
C 1.45400000 4.71900000 34.53000000
C 3.13000000 7.08600000 32.05200000
C 1.84000000 5.40400000 33.22100000
O 1.32400000 5.02600000 32.16500000
N 2.69900000 6.41100000 33.26800000
H 1.21000000 3.67600000 34.32800000
H 3.04100000 6.73500000 34.16200000
H 2.25900000 7.29900000 31.42900000
H 2.25703800 4.78781000 35.27860000
H 0.59452870 5.20371900 35.01617000
H 3.61697200 8.04335100 32.28935000
H 3.79117200 6.41541400 31.48352000
--
0 1
C 1.18400000 4.22900000 28.45700000
C 3.04300000 1.89200000 30.88400000
C 2.30300000 2.39200000 29.64800000
O 2.12700000 1.63200000 28.69600000
N 1.86300000 3.66100000 29.62200000
H 1.95300000 4.23600000 30.45200000
H 0.36500000 3.56500000 28.17100000
H 3.62500000 1.01900000 30.57900000
H 0.75336240 5.20646700 28.71989000
H 1.86135100 4.31048600 27.59412000
H 3.71345000 2.67305600 31.27188000
H 2.30265300 1.59170400 31.64011000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '038ALA-041GLY-1')] = qcdb.Molecule("""
0 1
C 17.65300000 1.60500000 -0.70200000
C 15.39900000 -1.42800000 -0.29600000
C 16.38900000 0.75300000 -0.62700000
O 15.27600000 1.25100000 -0.74500000
N 16.54900000 -0.54500000 -0.39900000
H 17.36000000 2.64900000 -0.58200000
H 17.48800000 -0.93600000 -0.36800000
H 14.79900000 -1.35800000 -1.20500000
H 18.36375000 1.32388100 0.08907477
H 18.14824000 1.48370200 -1.67669100
H 15.74769000 -2.46461400 -0.17831850
H 14.74806000 -1.12601200 0.53771800
--
0 1
C 13.83700000 3.27200000 2.04300000
C 14.33400000 -0.32700000 3.19500000
C 13.71500000 1.04600000 3.00400000
O 12.57400000 1.27300000 3.44000000
N 14.41100000 1.96700000 2.33500000
H 15.37200000 1.77400000 2.06400000
H 13.55000000 3.77000000 2.97000000
H 14.59000000 3.85800000 1.55800000
H 13.50500000 -1.02800000 3.31800000
H 12.95501000 3.19940900 1.38967700
H 14.91210000 -0.61979280 2.30613800
H 15.01510000 -0.47284690 4.04637200
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '038GLU-047GLU-2')] = qcdb.Molecule("""
0 1
C -16.11100000 27.50200000 -23.68000000
C -19.81800000 28.30500000 -23.42700000
C -17.57900000 27.62700000 -24.05100000
O -17.97700000 27.26800000 -25.14600000
N -18.39800000 28.13100000 -23.14300000
H -16.07300000 27.42500000 -22.59400000
H -18.03600000 28.47400000 -22.25800000
H -20.06200000 28.01900000 -24.45000000
H -15.66274000 26.61151000 -24.14486000
H -15.56511000 28.41564000 -23.95797000
H -20.44833000 27.71065000 -22.74918000
H -20.08475000 29.36545000 -23.30747000
--
0 1
C -17.09400000 26.75600000 -28.46700000
C -19.69700000 24.69900000 -26.61100000
C -18.90800000 25.25800000 -27.78600000
O -19.13600000 24.89500000 -28.94000000
N -17.96900000 26.13900000 -27.48500000
H -17.81700000 26.37700000 -26.52100000
H -17.21900000 26.27900000 -29.44000000
H -19.53500000 25.34800000 -25.75200000
H -17.31758000 27.82730000 -28.57807000
H -16.04417000 26.62794000 -28.16457000
H -20.77290000 24.68317000 -26.83945000
H -19.31530000 23.70215000 -26.34532000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '039SER-046MET-1')] = qcdb.Molecule("""
0 1
C 37.13200000 14.35600000 -9.09000000
C 37.95400000 10.90500000 -10.49300000
C 36.97200000 13.07700000 -9.89500000
O 35.97000000 12.91200000 -10.57200000
N 37.96700000 12.20500000 -9.82700000
H 38.12900000 14.38400000 -8.64600000
H 38.74600000 12.40400000 -9.21500000
H 36.96600000 10.70700000 -10.89800000
H 36.95649000 15.24861000 -9.70842300
H 36.39719000 14.34813000 -8.27147300
H 38.70130000 10.84719000 -11.29811000
H 38.20371000 10.13707000 -9.74605600
--
0 1
C 32.75800000 11.46200000 -10.10200000
C 34.57500000 10.81700000 -13.39000000
C 33.45800000 10.81100000 -12.34800000
O 32.36000000 10.30800000 -12.60000000
N 33.73400000 11.39100000 -11.18300000
H 34.65700000 11.78700000 -11.03800000
H 32.02300000 10.66300000 -10.19700000
H 35.46500000 11.27400000 -12.95300000
H 33.26581000 11.34828000 -9.13287600
H 32.21483000 12.41853000 -10.10563000
H 34.81162000 9.78669800 -13.69412000
H 34.25590000 11.43015000 -14.24570000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '040THR-048VAL-2')] = qcdb.Molecule("""
0 1
C 7.24400000 -6.35700000 9.43600000
C 8.35500000 -4.20300000 12.36200000
C 7.42800000 -5.90300000 10.88200000
O 6.82800000 -6.48300000 11.80000000
N 8.22400000 -4.85900000 11.07700000
H 7.92400000 -5.79500000 8.79500000
H 8.75800000 -4.49000000 10.29500000
H 7.63700000 -4.61500000 13.06400000
H 7.43872200 -7.43736800 9.36608100
H 6.21398500 -6.12580100 9.12678100
H 9.36929100 -4.33457600 12.76685000
H 8.15464700 -3.12719300 12.25021000
--
0 1
C 3.78500000 -6.44400000 13.96100000
C 7.27600000 -6.83600000 15.43900000
C 5.75700000 -6.85100000 15.34700000
O 5.07200000 -7.13400000 16.32800000
N 5.23100000 -6.59400000 14.15400000
H 5.86000000 -6.39800000 13.38200000
H 3.27500000 -6.67300000 14.89800000
H 7.68900000 -6.46100000 14.49800000
H 3.49436200 -5.41141400 13.71749000
H 3.33258800 -7.14564000 13.24474000
H 7.57248100 -6.17648200 16.26794000
H 7.65234400 -7.85801800 15.59342000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '040THR-049GLN-2')] = qcdb.Molecule("""
0 1
C 7.24400000 -6.35700000 9.43600000
C 8.61000000 -9.79500000 8.59000000
C 8.47400000 -8.29300000 8.59900000
O 9.25100000 -7.59500000 7.94800000
N 7.50300000 -7.79400000 9.34300000
H 6.90800000 -8.42600000 9.87200000
H 7.92400000 -5.79500000 8.79500000
H 7.76400000 -10.23900000 9.11900000
H 6.21398500 -6.12580100 9.12678100
H 7.37657100 -6.02989500 10.47784000
H 8.64695400 -10.12382000 7.54094700
H 9.54550000 -10.07275000 9.09763600
--
0 1
C 3.46200000 -9.09800000 11.28000000
C 5.20700000 -12.30500000 10.21500000
C 4.60200000 -10.05000000 10.92500000
O 5.76500000 -9.63700000 10.81600000
N 4.27000000 -11.31900000 10.73100000
H 2.67700000 -9.66500000 11.78600000
H 3.31500000 -11.61700000 10.88600000
H 5.92400000 -11.79500000 9.57100000
H 3.85491800 -8.33492000 11.96799000
H 3.03655100 -8.60627500 10.39276000
H 5.77137800 -12.82757000 11.00138000
H 4.65710300 -13.03771000 9.60610800
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '041GLY-045LEU-2')] = qcdb.Molecule("""
0 1
C 13.83700000 3.27200000 2.04300000
C 11.50000000 2.16700000 -0.76900000
C 12.62200000 3.17200000 1.14300000
O 11.61700000 3.85600000 1.36000000
N 12.67500000 2.32900000 0.10300000
H 13.55000000 3.77000000 2.97000000
H 14.59000000 3.85800000 1.55800000
H 13.53700000 1.81900000 -0.09100000
H 11.16200000 3.15200000 -1.10700000
H 14.27088000 2.28557400 2.26371700
H 11.72368000 1.55384000 -1.65443800
H 10.66302000 1.74269900 -0.19503180
--
0 1
C 8.04700000 5.34400000 1.30800000
C 9.26600000 3.02800000 4.07400000
C 8.37800000 4.00900000 3.30800000
O 7.28400000 4.35600000 3.74500000
N 8.84600000 4.45000000 2.14200000
H 9.77500000 4.17200000 1.83800000
H 7.77900000 6.22500000 1.89300000
H 8.64000000 2.57400000 4.84500000
H 8.62430600 5.66496500 0.42839820
H 7.09836000 4.87435500 1.00880800
H 9.63134800 2.22771300 3.41365000
H 10.10993000 3.52005200 4.57964100
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '041LEU-045LEU-1')] = qcdb.Molecule("""
0 1
C 36.48200000 37.02700000 7.19500000
C 32.85400000 36.41300000 8.20500000
C 35.01500000 36.60900000 7.07000000
O 34.63600000 35.94700000 6.10200000
N 34.20500000 36.96500000 8.06700000
H 36.88100000 37.13600000 6.18500000
H 34.60600000 37.44000000 8.86500000
H 32.96600000 35.34500000 8.38900000
H 36.56172000 37.99564000 7.71015400
H 37.06618000 36.25545000 7.71791600
H 32.33727000 36.83410000 9.08002200
H 32.23854000 36.49906000 7.29736300
--
0 1
C 33.35500000 32.73500000 4.49900000
C 33.56700000 36.12000000 2.79500000
C 33.30900000 34.62700000 2.97400000
O 32.88400000 33.92500000 2.05200000
N 33.53300000 34.14500000 4.19400000
H 33.86100000 34.77300000 4.92000000
H 33.88700000 32.15100000 3.74800000
H 33.12500000 36.42700000 1.84600000
H 33.78742000 32.52023000 5.48737800
H 32.29491000 32.44398000 4.46000500
H 33.08971000 36.66644000 3.62180000
H 34.64620000 36.33014000 2.76080800
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '042LEU-047ILE-2')] = qcdb.Molecule("""
0 1
C 27.98600000 25.44300000 -1.06900000
C 27.41300000 29.03600000 0.02400000
C 27.55500000 26.63500000 -0.22400000
O 26.97700000 26.46100000 0.84900000
N 27.82400000 27.84600000 -0.70100000
H 27.25800000 24.65500000 -0.87100000
H 28.38000000 27.94300000 -1.54400000
H 27.65500000 28.92900000 1.08400000
H 27.98148000 25.67219000 -2.14484900
H 28.98297000 25.09248000 -0.76373210
H 27.93231000 29.92357000 -0.36656000
H 26.32263000 29.15515000 -0.05904148
--
0 1
C 27.12400000 24.69000000 4.01500000
C 24.61500000 27.52600000 3.59800000
C 25.41000000 26.42500000 4.28700000
O 25.25600000 26.17700000 5.48300000
N 26.25800000 25.75500000 3.51300000
H 26.29600000 25.99600000 2.52600000
H 26.68400000 24.21300000 4.89200000
H 25.26200000 28.40300000 3.54400000
H 27.25983000 23.93366000 3.22791800
H 28.07774000 25.14166000 4.32546900
H 24.32018000 27.24857000 2.57520400
H 23.72691000 27.78661000 4.19245400
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '044SER-047GLU-1')] = qcdb.Molecule("""
0 1
C -4.41100000 -1.31100000 3.89100000
C -2.56600000 0.77800000 1.28300000
C -4.08700000 -0.59800000 2.56700000
O -4.87600000 -0.61100000 1.62300000
N -2.93400000 0.07100000 2.50000000
H -5.14900000 -2.08900000 3.68800000
H -2.41400000 0.23300000 3.35700000
H -3.40000000 1.38500000 0.94800000
H -3.53373000 -1.76463100 4.37537300
H -4.87483900 -0.58014620 4.56975300
H -1.66523100 1.39371600 1.42267600
H -2.38752400 0.02507657 0.50117360
--
0 1
C -5.30200000 -3.26000000 -0.85800000
C -1.62000000 -2.49700000 -0.34800000
C -2.92200000 -2.98600000 -1.01900000
O -2.96700000 -3.33800000 -2.19900000
N -4.00500000 -2.98100000 -0.26300000
H -3.95000000 -2.69900000 0.70600000
H -5.22600000 -4.10500000 -1.54700000
H -0.97900000 -2.11000000 -1.13900000
H -6.02596200 -3.53589800 -0.07712933
H -5.68418700 -2.41475100 -1.44917400
H -1.77867100 -1.68076000 0.37212200
H -1.10755600 -3.31988200 0.17187120
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '044TRP-054LEU-1')] = qcdb.Molecule("""
0 1
C 10.95000000 -17.77800000 14.61300000
C 8.09500000 -17.57500000 17.10300000
C 9.73000000 -18.22000000 15.39800000
O 9.33700000 -19.39000000 15.34600000
N 9.15700000 -17.28500000 16.14700000
H 11.04400000 -16.70100000 14.73600000
H 9.54800000 -16.34800000 16.13300000
H 7.88900000 -18.64300000 17.12000000
H 11.83458000 -18.28799000 15.02219000
H 10.81699000 -17.96678000 13.53751000
H 8.42178300 -17.28645000 18.11293000
H 7.16770200 -17.04780000 16.83434000
--
0 1
C 10.86300000 -22.79600000 14.63400000
C 8.21700000 -22.12500000 17.25000000
C 9.09500000 -22.92200000 16.29800000
O 9.07200000 -24.15500000 16.27100000
N 9.88700000 -22.20000000 15.52000000
H 9.81900000 -21.18700000 15.55200000
H 10.78000000 -23.87900000 14.66100000
H 8.35200000 -21.07100000 17.01400000
H 10.68926000 -22.44134000 13.60734000
H 11.86068000 -22.50299000 14.99285000
H 7.15596500 -22.39611000 17.14654000
H 8.57411400 -22.27476000 18.27958000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '048GLU-052LYS-1')] = qcdb.Molecule("""
0 1
C 29.19600000 -4.52700000 -8.01800000
C 30.56800000 -5.20200000 -4.49700000
C 29.56100000 -5.28400000 -6.71900000
O 29.21100000 -6.46300000 -6.56200000
N 30.18000000 -4.58900000 -5.75600000
H 28.33500000 -5.03100000 -8.46100000
H 30.48000000 -3.64400000 -5.95100000
H 31.12100000 -6.11400000 -4.73200000
H 28.93132000 -3.49150700 -7.75781500
H 30.02741000 -4.54496500 -8.73802700
H 31.25723000 -4.56627900 -3.92182400
H 29.69681000 -5.52679000 -3.90918000
--
0 1
C 28.28100000 -9.83500000 -5.01600000
C 25.58300000 -7.16200000 -5.57000000
C 26.25400000 -8.54400000 -5.36800000
O 25.56900000 -9.57900000 -5.29400000
N 27.58200000 -8.56000000 -5.20700000
H 28.11800000 -7.70900000 -5.35700000
H 27.81800000 -10.58500000 -5.66300000
H 24.54500000 -7.26700000 -5.25000000
H 29.34249000 -9.78675100 -5.30045600
H 28.17246000 -10.21160000 -3.98818900
H 26.06876000 -6.39287400 -4.95155100
H 25.58721000 -6.87263200 -6.63124900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '051ALA-054VAL-1')] = qcdb.Molecule("""
0 1
C 11.19000000 25.57800000 16.41500000
C 11.03900000 29.37100000 16.32400000
C 10.62200000 26.98500000 16.41900000
O 9.40700000 27.16400000 16.50900000
N 11.48500000 27.98400000 16.29700000
H 10.43800000 24.94100000 15.95400000
H 12.47400000 27.79300000 16.18300000
H 10.24500000 29.45700000 17.07000000
H 12.10912000 25.48662000 15.81761000
H 11.34264000 25.22586000 17.44587000
H 11.84798000 30.03823000 16.65618000
H 10.58828000 29.70976000 15.37949000
--
0 1
C 6.39400000 27.35400000 13.71200000
C 3.59400000 27.50000000 16.23200000
C 5.69100000 27.63400000 15.02000000
O 6.30800000 28.04200000 16.02200000
N 4.39500000 27.37700000 15.02700000
H 5.79400000 27.77400000 12.90900000
H 3.92500000 27.14300000 14.15800000
H 4.23000000 27.46900000 17.12000000
H 7.38353000 27.83373000 13.73832000
H 6.45338400 26.26317000 13.58333000
H 3.05653700 28.45976000 16.23055000
H 2.88687400 26.65965000 16.29349000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '051ALA-055ASN-1')] = qcdb.Molecule("""
0 1
C 15.43500000 31.69400000 28.98700000
C 18.02000000 32.43800000 26.28000000
C 16.54400000 32.41200000 28.22200000
O 17.05600000 33.43400000 28.66700000
N 16.94500000 31.87500000 27.07200000
H 15.02000000 32.40600000 29.70300000
H 16.49700000 31.01700000 26.75900000
H 18.89500000 32.58600000 26.91400000
H 14.63110000 31.37813000 28.30585000
H 15.83188000 30.83962000 29.55490000
H 18.28187000 31.71785000 25.49082000
H 17.73736000 33.42688000 25.88983000
--
0 1
C 18.69200000 36.91200000 28.75800000
C 14.96700000 36.73500000 27.93400000
C 16.29200000 37.30600000 28.41700000
O 16.37800000 38.47000000 28.76200000
N 17.35400000 36.50400000 28.36800000
H 17.21000000 35.53100000 28.11800000
H 18.64700000 37.38300000 29.74400000
H 14.36000000 37.58000000 27.59900000
H 19.32477000 36.01379000 28.81109000
H 19.15022000 37.62747000 28.05934000
H 15.13268000 36.04503000 27.09347000
H 14.44896000 36.21332000 28.75222000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '052CYS-056ALA-1')] = qcdb.Molecule("""
0 1
C 26.72000000 17.60100000 -3.75600000
C 26.45300000 15.64600000 -7.03100000
C 26.60100000 16.44500000 -4.74700000
O 26.50400000 15.29500000 -4.32700000
N 26.55200000 16.73200000 -6.05900000
H 27.09600000 17.17500000 -2.82400000
H 26.55000000 17.70000000 -6.36300000
H 25.59500000 15.01900000 -6.78400000
H 27.40527000 18.39530000 -4.08689700
H 25.72790000 18.02497000 -3.54153100
H 26.29104000 16.06583000 -8.03474900
H 27.33872000 14.99450000 -6.99856900
--
0 1
C 26.29500000 11.22500000 -4.33200000
C 29.47700000 13.05300000 -3.31400000
C 28.49500000 11.91700000 -3.58300000
O 28.77600000 10.75200000 -3.29800000
N 27.31000000 12.25000000 -4.09700000
H 27.09800000 13.22300000 -4.29400000
H 26.10000000 10.69600000 -3.39600000
H 30.46400000 12.61500000 -3.15100000
H 25.36511000 11.71534000 -4.65583800
H 26.63023000 10.48275000 -5.07138000
H 29.52301000 13.76051000 -4.15501800
H 29.15797000 13.56143000 -2.39219900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '054ARG-062GLN-1')] = qcdb.Molecule("""
0 1
C -5.93600000 14.71300000 11.54100000
C -7.26700000 11.13900000 11.56900000
C -5.94500000 13.19300000 11.45500000
O -4.90300000 12.57200000 11.30100000
N -7.11800000 12.58900000 11.57200000
H -6.95800000 15.07200000 11.52700000
H -7.94600000 13.16400000 11.70600000
H -6.33700000 10.65900000 11.27000000
H -5.45554200 15.04857000 12.47189000
H -5.42263200 15.16711000 10.68063000
H -7.55862900 10.74068000 12.55200000
H -8.02788900 10.89217000 10.81394000
--
0 1
C -1.35200000 12.46600000 11.67100000
C -3.58200000 9.31000000 11.71300000
C -2.31700000 10.18000000 11.90600000
O -1.24100000 9.68200000 12.23900000
N -2.45400000 11.49100000 11.70900000
H -3.37700000 11.84900000 11.50300000
H -0.42700000 11.95300000 11.40400000
H -4.39300000 9.97900000 11.43200000
H -1.55580100 13.25626000 10.93347000
H -1.22149900 12.98224000 12.63353000
H -3.45255300 8.55740200 10.92127000
H -3.85167500 8.81225000 12.65614000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '055ASN-058GLU-2')] = qcdb.Molecule("""
0 1
C 3.19200000 17.07400000 -9.88600000
C 5.08000000 20.00600000 -8.31000000
C 4.31100000 18.03600000 -9.50600000
O 5.45500000 17.83700000 -9.93200000
N 4.03100000 19.04300000 -8.66900000
H 3.23300000 16.30400000 -9.11900000
H 3.09700000 19.15000000 -8.29000000
H 5.40500000 20.48900000 -9.23400000
H 2.17964000 17.50367000 -9.86338600
H 3.40369000 16.59269000 -10.85219000
H 4.70588400 20.79572000 -7.64188500
H 5.97005700 19.52724000 -7.87573800
--
0 1
C 9.03100000 16.38600000 -9.35400000
C 7.29200000 17.70300000 -6.23800000
C 8.40400000 17.23400000 -7.16100000
O 9.55900000 17.14700000 -6.74400000
N 8.06200000 16.93000000 -8.41100000
H 7.09600000 17.02900000 -8.69800000
H 9.54200000 15.53700000 -8.89500000
H 7.70000000 18.51200000 -5.63400000
H 8.51880300 16.03279000 -10.26114000
H 9.80257100 17.12296000 -9.62152600
H 6.43562400 18.10098000 -6.80212100
H 7.02139000 16.89545000 -5.54184000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '060LEU-064TYR-1')] = qcdb.Molecule("""
0 1
C 26.99500000 5.04200000 -5.25200000
C 30.19300000 4.10900000 -7.06200000
C 28.06600000 4.21300000 -5.93800000
O 27.96100000 2.98900000 -6.01300000
N 29.12300000 4.85700000 -6.41300000
H 26.49100000 4.37700000 -4.55500000
H 29.12500000 5.87000000 -6.44800000
H 29.77000000 3.49600000 -7.86000000
H 27.43791000 5.87827900 -4.69122800
H 26.25463000 5.41182500 -5.97662900
H 30.89245000 4.82784200 -7.51370000
H 30.71154000 3.43933300 -6.36010300
--
0 1
C 27.08700000 -0.08000000 -5.13600000
C 29.22300000 1.16000000 -2.22300000
C 28.29800000 0.31100000 -3.06400000
O 27.86700000 -0.74100000 -2.57100000
N 28.02000000 0.70600000 -4.31100000
H 28.33800000 1.60500000 -4.65100000
H 26.87700000 -1.04800000 -4.68000000
H 29.87500000 0.45300000 -1.70700000
H 26.12878000 0.44577620 -5.25996300
H 27.48949000 -0.29528570 -6.13682600
H 29.80564000 1.88508100 -2.81018700
H 28.62863000 1.67276100 -1.45241500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '060TYR-064LEU-2')] = qcdb.Molecule("""
0 1
C 0.74400000 20.88200000 9.46700000
C 3.82900000 19.04800000 8.16100000
C 1.69600000 19.73500000 9.12300000
O 1.44700000 18.57500000 9.47800000
N 2.82000000 20.06000000 8.47800000
H 0.03500000 20.51800000 10.21200000
H 2.96300000 21.01600000 8.16900000
H 3.36900000 18.28200000 7.53800000
H 1.30436300 21.72894000 9.88971700
H 0.18058210 21.20498000 8.57916800
H 4.65839300 19.49515000 7.59340600
H 4.20918500 18.55045000 9.06537900
--
0 1
C 1.86700000 14.55400000 9.81100000
C 1.62200000 17.04600000 12.67600000
C 1.63700000 15.71000000 11.95000000
O 1.54000000 14.64800000 12.57400000
N 1.78300000 15.76400000 10.62500000
H 1.82800000 16.67400000 10.17600000
H 0.95000000 13.97500000 9.93200000
H 1.73100000 16.86900000 13.74700000
H 0.65600000 17.52100000 12.49900000
H 2.00216700 14.80708000 8.74907700
H 2.69819000 13.91906000 10.15156000
H 2.41953600 17.71840000 12.32698000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '061VAL-064TYR-2')] = qcdb.Molecule("""
0 1
C 27.19000000 8.05900000 14.90900000
C 30.05500000 6.38400000 13.02700000
C 28.60200000 7.63200000 14.54300000
O 29.56400000 7.99700000 15.22000000
N 28.74700000 6.84300000 13.48900000
H 26.49400000 7.64200000 14.18100000
H 27.92900000 6.62000000 12.93000000
H 30.58500000 5.91700000 13.86500000
H 26.96984000 7.69081000 15.92190000
H 27.09454000 9.15434600 14.87576000
H 29.91740000 5.63481800 12.23341000
H 30.66991000 7.21521400 12.65154000
--
0 1
C 31.65200000 11.35800000 15.03100000
C 33.02500000 8.66700000 12.69000000
C 32.80300000 9.95400000 13.45100000
O 33.58700000 10.87600000 13.23500000
N 31.82100000 10.08800000 14.34800000
H 31.21400000 9.31100000 14.57600000
H 31.75600000 12.16100000 14.29800000
H 34.03700000 8.31700000 12.89800000
H 32.94400000 8.86800000 11.62100000
H 30.63884000 11.44243000 15.45098000
H 32.39121000 11.52107000 15.82911000
H 32.30761000 7.89313400 13.00061000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '062LEU-066LYS-2')] = qcdb.Molecule("""
0 1
C 17.65400000 16.07700000 -4.70500000
C 21.17800000 15.82600000 -3.21600000
C 18.76700000 15.83400000 -3.68000000
O 18.49900000 15.43400000 -2.54700000
N 20.02000000 16.06100000 -4.08500000
H 17.53500000 17.15800000 -4.78700000
H 20.17300000 16.38000000 -5.03300000
H 21.16100000 14.77700000 -2.92300000
H 17.92643000 15.67964000 -5.69388200
H 16.70656000 15.64038000 -4.35613000
H 22.11540000 16.07129000 -3.73669600
H 21.14046000 16.40209000 -2.27967200
--
0 1
C 19.20600000 13.81200000 0.82900000
C 17.65800000 17.31000000 0.68300000
C 18.11400000 15.96100000 1.23700000
O 17.91100000 15.67800000 2.41500000
N 18.72500000 15.12600000 0.39600000
H 18.82400000 15.39200000 -0.58000000
H 18.40600000 13.30400000 1.37200000
H 17.51800000 17.96400000 1.54500000
H 19.48015000 13.19193000 -0.03723072
H 20.05264000 13.88387000 1.52759000
H 18.40326000 17.75670000 0.00843105
H 16.69298000 17.18229000 0.17071660
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '064GLN-067ARG-1')] = qcdb.Molecule("""
0 1
C 10.81800000 -25.83400000 -6.60000000
C 14.08600000 -27.13200000 -5.21000000
C 11.83700000 -26.33500000 -5.59400000
O 11.54600000 -26.46100000 -4.39700000
N 13.03400000 -26.64400000 -6.08700000
H 9.98800000 -25.39600000 -6.04100000
H 13.19300000 -26.62000000 -7.08800000
H 13.69600000 -27.96300000 -4.62000000
H 11.25422000 -25.07493000 -7.26597200
H 10.44391000 -26.68721000 -7.18487700
H 14.94122000 -27.49924000 -5.79629000
H 14.40518000 -26.34161000 -4.51472000
--
0 1
C 11.57400000 -23.68400000 -1.98900000
C 14.96000000 -23.69900000 -3.79900000
C 13.93100000 -23.61200000 -2.67200000
O 14.30600000 -23.41800000 -1.51800000
N 12.64300000 -23.71300000 -3.00300000
H 12.39500000 -23.79800000 -3.98800000
H 11.64600000 -22.75300000 -1.41700000
H 15.93300000 -23.88700000 -3.34000000
H 10.58182000 -23.72648000 -2.46204900
H 11.68063000 -24.50391000 -1.26347700
H 14.71306000 -24.52340000 -4.48411700
H 15.01139000 -22.74833000 -4.34998800
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '064TYR-067GLU-1')] = qcdb.Molecule("""
0 1
C 31.49500000 -10.17100000 9.56100000
C 34.83300000 -11.75500000 8.70300000
C 32.51500000 -11.24200000 9.19800000
O 32.14300000 -12.38700000 8.95000000
N 33.79300000 -10.86500000 9.14600000
H 32.01200000 -9.26900000 9.87800000
H 34.02500000 -9.90000000 9.35500000
H 35.81500000 -11.28900000 8.79700000
H 34.80600000 -12.69700000 9.25100000
H 30.89282000 -9.98786000 8.65886700
H 30.83944000 -10.49806000 10.38153000
H 34.59261000 -11.90902000 7.64069500
--
0 1
C 30.20800000 -14.32300000 6.30500000
C 33.86000000 -13.48900000 5.38200000
C 32.37800000 -13.83100000 5.15300000
O 31.98700000 -14.10000000 3.99900000
N 31.59000000 -13.83200000 6.24500000
H 31.99200000 -13.51000000 7.11600000
H 30.03400000 -14.98100000 5.45600000
H 34.05100000 -12.60000000 4.78200000
H 30.04722000 -14.86880000 7.24640700
H 29.53462000 -13.45661000 6.22794000
H 34.11625000 -13.28766000 6.43261900
H 34.45950000 -14.29445000 4.93272600
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '066PHE-072THR-1')] = qcdb.Molecule("""
0 1
C 38.12400000 19.12600000 10.45700000
C 41.27800000 17.83300000 12.11600000
C 39.19300000 18.88700000 11.51900000
O 39.06900000 19.36600000 12.67200000
N 40.24100000 18.16000000 11.15200000
H 37.95200000 18.18900000 9.92300000
H 40.33900000 17.83600000 10.19900000
H 40.81700000 17.65000000 13.08300000
H 37.19343000 19.45829000 10.94034000
H 38.45213000 19.89211000 9.73908200
H 41.82961000 16.91696000 11.85792000
H 41.95229000 18.69250000 12.24478000
--
0 1
C 38.70100000 16.94300000 15.29800000
C 35.73100000 19.33300000 15.56700000
C 37.41600000 17.65600000 14.93500000
O 36.80400000 17.37900000 13.92800000
N 36.99200000 18.60300000 15.75300000
H 39.39200000 17.17500000 14.49400000
H 37.52600000 18.79100000 16.59100000
H 35.02500000 18.70900000 15.02300000
H 35.30000000 19.53400000 16.54900000
H 39.13196000 17.25018000 16.26232000
H 38.52783000 15.85691000 15.27771000
H 35.82698000 20.29205000 15.03690000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '068ASP-072GLN-1')] = qcdb.Molecule("""
0 1
C -12.64500000 14.92300000 -14.97700000
C -16.34600000 14.13400000 -14.69400000
C -14.15200000 15.06000000 -15.06700000
O -14.65100000 16.10000000 -15.49400000
N -14.89300000 14.05000000 -14.62400000
H -12.29300000 15.94900000 -15.06800000
H -14.45500000 13.20100000 -14.28000000
H -16.65500000 14.38200000 -15.70700000
H -12.33835000 14.54139000 -13.99194000
H -12.25022000 14.30387000 -15.79604000
H -16.81552000 13.18993000 -14.38051000
H -16.72844000 14.97696000 -14.09973000
--
0 1
C -17.32700000 18.82000000 -17.09600000
C -15.11100000 19.30700000 -14.01600000
C -15.87100000 19.62100000 -15.30300000
O -15.80500000 20.74200000 -15.80400000
N -16.59700000 18.64600000 -15.84000000
H -16.64500000 17.75100000 -15.37100000
H -16.67500000 19.35400000 -17.79000000
H -15.29900000 20.13200000 -13.32900000
H -17.54602000 17.83440000 -17.53260000
H -18.24433000 19.41767000 -16.98968000
H -15.42652000 18.35970000 -13.55440000
H -14.02744000 19.29478000 -14.20510000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '071GLU-075ASP-1')] = qcdb.Molecule("""
0 1
C 15.57200000 63.34800000 -3.02500000
C 15.98300000 65.88800000 -0.20200000
C 15.59700000 64.69300000 -2.29900000
O 15.32100000 65.73700000 -2.89600000
N 15.95100000 64.67600000 -1.01400000
H 15.75800000 63.53700000 -4.08400000
H 16.16700000 63.79100000 -0.57200000
H 15.00000000 66.35600000 -0.26900000
H 16.33731000 62.66589000 -2.62622700
H 14.56696000 62.91645000 -2.90815100
H 16.15603000 65.64604000 0.85701650
H 16.71665000 66.62310000 -0.56448320
--
0 1
C 13.57000000 68.34300000 -4.88600000
C 17.29200000 67.52600000 -5.37000000
C 15.97900000 68.31700000 -5.39800000
O 15.95400000 69.45600000 -5.87200000
N 14.89100000 67.71000000 -4.92400000
H 15.00600000 66.83100000 -4.43400000
H 13.70300000 69.39800000 -4.65200000
H 18.05100000 68.16800000 -5.81900000
H 13.06464000 68.25306000 -5.85889300
H 12.95575000 67.90806000 -4.08380200
H 17.60412000 67.24020000 -4.35466700
H 17.23709000 66.62674000 -6.00112200
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '072ASN-075ARG-2')] = qcdb.Molecule("""
0 1
C 33.89600000 6.61200000 3.83600000
C 34.69000000 4.61600000 6.97000000
C 34.42900000 5.50500000 4.73100000
O 35.00900000 4.53800000 4.23900000
N 34.23500000 5.64000000 6.03900000
H 33.70400000 6.16000000 2.86000000
H 33.83900000 6.50000000 6.40300000
H 35.72900000 4.36500000 6.75300000
H 32.95879000 7.03835000 4.22311000
H 34.65839000 7.39666800 3.72174900
H 34.60598000 4.97937900 8.00484100
H 34.07659000 3.71393200 6.82855600
--
0 1
C 32.89300000 1.83500000 2.75400000
C 31.64600000 2.43700000 6.29600000
C 32.07900000 1.69100000 5.04400000
O 32.07800000 0.45900000 5.01100000
N 32.45500000 2.44000000 4.00800000
H 32.35000000 3.45000000 4.07300000
H 32.11400000 1.16700000 2.37800000
H 31.68400000 1.74500000 7.13900000
H 33.09690000 2.61038500 2.00087100
H 33.79963000 1.23396200 2.91765600
H 32.32449000 3.28473700 6.47204500
H 30.61180000 2.78413400 6.15484200
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '072THR-075PRO-1')] = qcdb.Molecule("""
0 1
C 24.87800000 30.59800000 37.00200000
C 23.98600000 30.69500000 40.66600000
C 24.70900000 30.14000000 38.42400000
O 25.02000000 28.99400000 38.74700000
N 24.27500000 31.03500000 39.28700000
H 25.20800000 31.63500000 37.01100000
H 24.01800000 31.95400000 38.95000000
H 24.23600000 29.65300000 40.87700000
H 25.65233000 29.97188000 36.53468000
H 23.88452000 30.55410000 36.53182000
H 24.56616000 31.33691000 41.34524000
H 22.90592000 30.80656000 40.84203000
--
0 1
C 20.51900000 27.60800000 41.28700000
C 24.09000000 26.46200000 40.66400000
C 21.87700000 27.50400000 40.59800000
O 22.12000000 28.12800000 39.56400000
N 22.75700000 26.70500000 41.19600000
H 20.67700000 27.73700000 42.35700000
H 22.47800000 26.20800000 42.03200000
H 24.52600000 27.41400000 40.38900000
H 19.88837000 28.40174000 40.86006000
H 19.91999000 26.69611000 41.14682000
H 24.71285000 25.99288000 41.43987000
H 24.03805000 25.80829000 39.78084000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '081ASN-084GLY-1')] = qcdb.Molecule("""
0 1
C 13.82900000 2.70400000 76.43400000
C 13.74200000 6.48100000 76.11400000
C 13.94200000 4.07700000 75.80300000
O 14.21100000 4.22600000 74.61600000
N 13.73500000 5.11200000 76.60900000
H 12.78400000 2.62700000 76.73200000
H 13.50700000 4.96200000 77.58400000
H 13.09800000 6.53000000 75.23300000
H 14.06479000 1.88174600 75.74241000
H 14.39647000 2.61560100 77.37217000
H 13.31125000 7.15543700 76.86871000
H 14.73520000 6.83877900 75.80488000
--
0 1
C 16.99800000 4.23000000 72.56000000
C 17.53300000 6.51800000 75.52300000
C 17.78400000 5.81300000 74.20700000
O 18.82500000 5.98600000 73.58000000
N 16.81400000 5.03000000 73.76300000
H 15.95700000 4.92100000 74.29200000
H 16.01600000 3.95900000 72.17000000
H 17.51700000 4.81600000 71.81200000
H 17.64000000 7.58500000 75.31700000
H 17.56474000 3.30234800 72.72814000
H 16.52165000 6.31557900 75.90535000
H 18.29329000 6.23713200 76.26669000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '081LEU-084LYS-1')] = qcdb.Molecule("""
0 1
C 34.35100000 -7.61400000 4.94900000
C 33.84100000 -8.35400000 1.25300000
C 34.10200000 -8.39000000 3.66300000
O 33.98400000 -9.61500000 3.68700000
N 34.03300000 -7.68800000 2.53300000
H 34.86500000 -8.27200000 5.65200000
H 34.05500000 -6.67200000 2.57800000
H 32.97500000 -9.01300000 1.33800000
H 34.99681000 -6.74889700 4.73799900
H 33.40068000 -7.27977600 5.39080700
H 33.67216000 -7.61640600 0.45459590
H 34.71946000 -8.97837600 1.03284400
--
0 1
C 36.79800000 -11.79300000 4.08200000
C 37.46400000 -9.52200000 1.09100000
C 37.42500000 -10.78000000 1.95200000
O 37.85300000 -11.85300000 1.52300000
N 36.89700000 -10.65500000 3.17100000
H 36.63200000 -9.73000000 3.49300000
H 37.78100000 -12.25100000 4.21000000
H 37.51400000 -9.83900000 0.05000000
H 36.42100000 -11.44903000 5.05645300
H 36.12353000 -12.54555000 3.64753300
H 36.54672000 -8.93834700 1.25818800
H 38.35989000 -8.93906600 1.35095700
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '082LEU-106LEU-2')] = qcdb.Molecule("""
0 1
C 21.00100000 3.07600000 2.09400000
C 23.62700000 5.79100000 2.46200000
C 22.97000000 4.43000000 2.56200000
O 23.55000000 3.50500000 3.13600000
N 21.76000000 4.31500000 2.03200000
H 21.33700000 5.11600000 1.57200000
H 21.65400000 2.23300000 2.32300000
H 22.86400000 6.54500000 2.28500000
H 20.22093000 3.15573100 2.86545000
H 20.51025000 2.86982600 1.13137100
H 24.11967000 5.97366700 3.42839000
H 24.35603000 5.81690800 1.63869000
--
0 1
C 19.26900000 7.51000000 -0.68000000
C 21.81600000 9.19600000 1.55000000
C 20.33800000 7.72700000 0.37400000
O 20.75800000 6.76700000 1.03200000
N 20.79800000 8.95300000 0.54500000
H 19.22500000 8.35300000 -1.37100000
H 20.46300000 9.71700000 -0.03400000
H 21.81500000 8.41000000 2.30200000
H 22.79900000 9.21800000 1.08000000
H 19.63525000 6.59249100 -1.16377800
H 18.30339000 7.33972600 -0.18139080
H 21.66437000 10.13711000 2.09892200
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '084LEU-088MET-2')] = qcdb.Molecule("""
0 1
C 29.89300000 14.63600000 9.04800000
C 31.80200000 14.09700000 5.81700000
C 30.72900000 14.90100000 7.81300000
O 30.99800000 16.05300000 7.49900000
N 31.11900000 13.86900000 7.07800000
H 29.32900000 15.54900000 9.23200000
H 30.92400000 12.92200000 7.38200000
H 32.67500000 14.72400000 6.00500000
H 29.19122000 13.80820000 8.86840500
H 30.52296000 14.42170000 9.92391000
H 32.11987000 13.15770000 5.34090500
H 31.18783000 14.67492000 5.11073800
--
0 1
C 31.99300000 19.25900000 5.79300000
C 28.32400000 18.23400000 6.10200000
C 29.56400000 19.07200000 5.76300000
O 29.46100000 20.20900000 5.31300000
N 30.75400000 18.52500000 6.01700000
H 30.79600000 17.59500000 6.41900000
H 31.92800000 20.19700000 6.33900000
H 27.46500000 18.71500000 5.63300000
H 32.86196000 18.71177000 6.18726400
H 32.14801000 19.50673000 4.73252900
H 28.42025000 17.21390000 5.70184100
H 28.15506000 18.20895000 7.18866100
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '087ALA-171PRO-2')] = qcdb.Molecule("""
0 1
C 24.55400000 -8.66200000 -7.54600000
C 22.97800000 -5.22600000 -8.00400000
C 24.01800000 -6.29500000 -7.69600000
O 25.16100000 -5.97900000 -7.36900000
N 23.63300000 -7.56300000 -7.82200000
H 22.67300000 -7.78000000 -8.08100000
H 24.92500000 -8.56000000 -6.52500000
H 21.98900000 -5.65100000 -7.84900000
H 24.02034000 -9.62042000 -7.62749100
H 25.42861000 -8.66200000 -8.21312500
H 23.12578000 -4.37475800 -7.32315700
H 23.06799000 -4.89555600 -9.04932700
--
0 1
C 19.11500000 -10.06900000 -8.07800000
C 20.12900000 -7.30800000 -5.66900000
C 19.85500000 -8.85000000 -7.54200000
O 20.74200000 -8.30100000 -8.19900000
N 19.48000000 -8.42900000 -6.34300000
H 18.05200000 -9.88400000 -7.98300000
H 18.72600000 -8.91600000 -5.86100000
H 21.11400000 -7.12200000 -6.09600000
H 19.44272000 -10.35012000 -9.08971800
H 19.32640000 -10.97752000 -7.49499500
H 19.53978000 -6.38066000 -5.72250000
H 20.26252000 -7.57213400 -4.60956300
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '087LEU-090TYR-1')] = qcdb.Molecule("""
0 1
C 16.60300000 4.96400000 15.18700000
C 17.39400000 1.45500000 13.87700000
C 17.06600000 3.84500000 14.25000000
O 17.51400000 4.11600000 13.13000000
N 16.95600000 2.59300000 14.69100000
H 16.35700000 5.82500000 14.56300000
H 16.62200000 2.42300000 15.63700000
H 18.46200000 1.55600000 13.67500000
H 15.70522000 4.71654300 15.77245000
H 17.41544000 5.25344000 15.86976000
H 17.23827000 0.51629330 14.42888000
H 16.89844000 1.39674200 12.89668000
--
0 1
C 17.12600000 4.55000000 9.52800000
C 14.78000000 2.00000000 11.15000000
C 15.51000000 2.78300000 10.05500000
O 15.30400000 2.54800000 8.86000000
N 16.37800000 3.70700000 10.45900000
H 16.52900000 3.83900000 11.45300000
H 16.58700000 4.62100000 8.58400000
H 14.65700000 0.97600000 10.79500000
H 17.19936000 5.56695400 9.94082300
H 18.13360000 4.17566500 9.29431100
H 15.34760000 1.99321600 12.09223000
H 13.78416000 2.43425000 11.32241000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '088PHE-091ALA-1')] = qcdb.Molecule("""
0 1
C 8.88700000 97.91800000 24.54500000
C 7.01100000 100.71100000 26.37800000
C 7.70500000 98.73200000 25.14100000
O 6.52300000 98.34800000 25.07500000
N 8.03300000 99.87300000 25.76400000
H 8.51900000 97.09500000 23.92500000
H 9.00800000 100.11800000 25.90600000
H 6.44000000 100.08900000 27.06600000
H 9.54203400 98.58050000 23.96017000
H 9.47528500 97.49463000 25.37245000
H 7.46875000 101.54240000 26.93414000
H 6.26045700 101.06570000 25.65630000
--
0 1
C 4.57800000 98.59500000 21.82800000
C 5.46800000 102.10100000 23.16500000
C 4.66900000 100.93200000 22.55600000
O 3.47300000 101.09900000 22.25300000
N 5.28300000 99.74400000 22.40700000
H 6.25700000 99.63200000 22.66800000
H 4.14500000 98.89200000 20.87200000
H 4.71700000 102.75200000 23.61500000
H 5.24695200 97.73820000 21.65951000
H 3.75337300 98.29375000 22.49075000
H 6.17450700 101.76120000 23.93662000
H 5.99833600 102.66930000 22.38670000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '089MET-093GLY-1')] = qcdb.Molecule("""
0 1
C 10.15500000 28.16600000 14.94000000
C 8.42100000 30.63000000 12.60000000
C 9.71300000 29.47500000 14.29900000
O 10.00700000 30.54700000 14.83800000
N 8.95800000 29.40800000 13.20400000
H 10.34700000 28.38800000 15.99200000
H 8.77300000 28.50600000 12.77000000
H 9.24600000 31.30200000 12.35200000
H 9.36884600 27.39869000 14.88347000
H 11.09582000 27.80197000 14.50143000
H 7.86333900 30.40004000 11.68015000
H 7.77766200 31.14956000 13.32537000
--
0 1
C 9.76000000 34.38600000 15.64000000
C 8.28300000 31.69800000 17.90400000
C 8.92900000 33.03900000 17.51800000
O 9.03200000 33.95700000 18.33700000
N 9.40100000 33.13300000 16.28000000
H 9.34800000 32.30800000 15.69300000
H 9.66400000 34.25100000 14.56200000
H 9.03300000 35.14400000 15.93400000
H 7.65100000 31.85800000 18.77800000
H 10.76051000 34.80161000 15.83037000
H 7.66408600 31.29681000 17.08792000
H 9.06646600 30.96557000 18.14838000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '092SER-096ARG-2')] = qcdb.Molecule("""
0 1
C 20.72500000 2.77400000 15.80200000
C 22.42300000 0.67100000 13.12000000
C 21.05600000 2.18900000 14.44300000
O 20.40600000 2.51900000 13.45700000
N 22.05700000 1.30800000 14.38000000
H 19.64600000 2.88300000 15.86900000
H 22.58100000 1.08300000 15.21400000
H 22.56400000 1.44400000 12.36100000
H 21.06547000 2.09683400 16.59920000
H 21.17889000 3.77111700 15.90070000
H 23.36780000 0.11837040 13.22937000
H 21.61754000 0.00530976 12.77635000
--
0 1
C 18.67300000 2.67700000 10.22100000
C 16.97100000 1.13700000 13.25700000
C 17.14200000 1.63400000 11.82800000
O 16.20400000 1.61900000 11.03600000
N 18.35800000 2.05700000 11.50300000
H 19.07400000 2.05600000 12.22600000
H 17.84100000 3.31800000 9.91700000
H 15.98600000 0.67700000 13.32300000
H 19.57535000 3.29629000 10.33164000
H 18.81920000 1.97051000 9.39064000
H 17.73756000 0.39736570 13.53146000
H 17.00310000 2.00729400 13.92898000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '095GLN-183ILE-1')] = qcdb.Molecule("""
0 1
C 12.33100000 5.30600000 -10.12800000
C 15.66800000 3.88400000 -11.25800000
C 14.34800000 4.62700000 -11.27400000
O 13.99100000 5.23600000 -12.30100000
N 13.62400000 4.61400000 -10.15500000
H 13.95200000 4.12300000 -9.32700000
H 12.11500000 5.75900000 -11.09500000
H 15.74200000 3.31400000 -10.33200000
H 11.49792000 4.62935000 -9.88690300
H 12.32161000 6.12967300 -9.39897700
H 15.69438000 3.20422600 -12.12241000
H 16.51174000 4.58604700 -11.33028000
--
0 1
C 15.18400000 3.83200000 -5.59000000
C 16.79300000 1.28500000 -7.88000000
C 15.47000000 3.00800000 -6.82200000
O 14.80600000 3.17100000 -7.84900000
N 16.48200000 2.14700000 -6.75500000
H 16.06900000 3.81600000 -4.95000000
H 17.01500000 2.05300000 -5.89900000
H 16.73600000 1.86300000 -8.80400000
H 14.94688000 4.85878500 -5.90541300
H 14.34217000 3.38624000 -5.03989800
H 17.81981000 0.90047880 -7.79170300
H 16.03889000 0.48457490 -7.90533000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '095LYS-107ILE-2')] = qcdb.Molecule("""
0 1
C 16.07000000 16.05100000 25.83200000
C 18.67100000 13.26400000 25.80500000
C 17.34300000 13.97600000 26.07900000
O 16.40700000 13.39700000 26.63500000
N 17.26500000 15.23200000 25.65600000
H 18.11300000 15.66400000 25.29800000
H 15.29700000 15.51300000 26.38300000
H 19.39800000 14.01400000 25.49000000
H 16.33112000 16.97462000 26.36934000
H 15.61791000 16.31006000 24.86324000
H 19.05395000 12.74713000 26.69730000
H 18.55279000 12.54318000 24.98254000
--
0 1
C 20.35100000 18.72600000 24.18900000
C 21.45900000 16.82300000 27.28600000
C 20.41400000 17.64700000 25.24500000
O 19.66200000 16.67700000 25.17100000
N 21.30700000 17.80200000 26.21600000
H 21.18100000 19.40800000 24.34400000
H 21.86700000 18.64900000 26.23700000
H 20.60400000 16.16500000 27.27600000
H 20.41973000 18.27131000 23.18973000
H 19.43017000 19.31796000 24.29695000
H 22.38030000 16.23033000 27.18626000
H 21.53046000 17.30278000 28.27327000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '097GLU-100THR-2')] = qcdb.Molecule("""
0 1
C 9.39000000 -7.30300000 29.75300000
C 11.89300000 -4.60400000 28.68400000
C 9.94400000 -6.02200000 29.10700000
O 9.23700000 -5.38500000 28.30400000
N 11.21300000 -5.68900000 29.38900000
H 8.81200000 -7.81300000 28.97900000
H 11.73900000 -6.21700000 30.07300000
H 11.35200000 -3.67100000 28.83400000
H 10.16424000 -8.00472900 30.09669000
H 8.70615500 -7.07017100 30.58255000
H 12.92471000 -4.48928400 29.04788000
H 11.93164000 -4.81763100 27.60564000
--
0 1
C 8.28800000 -6.21100000 24.73100000
C 12.02100000 -6.57700000 25.44600000
C 10.70500000 -6.50700000 24.70400000
O 10.70400000 -6.65400000 23.48200000
N 9.60400000 -6.26900000 25.39600000
H 9.64500000 -6.19000000 26.40600000
H 8.39900000 -6.31300000 23.66100000
H 12.67400000 -5.86700000 24.93100000
H 7.63027500 -7.02978600 25.05809000
H 7.80957600 -5.23635300 24.90756000
H 11.97940000 -6.28878400 26.50675000
H 12.45965000 -7.57595600 25.30575000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '102GLN-106ILE-1')] = qcdb.Molecule("""
0 1
C 15.81800000 -10.94900000 7.38500000
C 15.16100000 -12.86700000 10.61500000
C 15.07000000 -11.52800000 8.59900000
O 13.88700000 -11.25200000 8.80700000
N 15.74900000 -12.35700000 9.38900000
H 15.34400000 -10.00200000 7.11800000
H 16.72300000 -12.54500000 9.20000000
H 14.20200000 -13.33800000 10.39200000
H 16.88595000 -10.75380000 7.56211300
H 15.74583000 -11.63969000 6.53192300
H 15.81762000 -13.61630000 11.08126000
H 14.98669000 -12.01714000 11.29127000
--
0 1
C 10.49500000 -10.10800000 10.72100000
C 13.37800000 -7.68100000 10.07700000
C 11.98600000 -8.21800000 10.41200000
O 11.07200000 -7.44000000 10.67500000
N 11.80700000 -9.53900000 10.39700000
H 12.56900000 -10.15000000 10.12400000
H 9.73000000 -9.44900000 10.32500000
H 13.45100000 -6.68400000 10.51600000
H 10.31506000 -11.09194000 10.26330000
H 10.31658000 -10.16052000 11.80516000
H 14.18648000 -8.29659100 10.49819000
H 13.50531000 -7.57943600 8.98912300
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '104VAL-108ILE-1')] = qcdb.Molecule("""
0 1
C 37.23300000 -8.66000000 10.91900000
C 38.92400000 -12.07000000 10.79300000
C 38.24200000 -9.74400000 10.55300000
O 39.19900000 -9.49700000 9.81500000
N 38.01900000 -10.95400000 11.06000000
H 37.41800000 -7.80600000 10.26900000
H 37.23800000 -11.09100000 11.69500000
H 39.94200000 -11.78700000 11.07200000
H 36.21551000 -9.04550400 10.75743000
H 37.39077000 -8.35867900 11.96510000
H 38.62482000 -12.95752000 11.36989000
H 38.93919000 -12.32176000 9.72230500
--
0 1
C 42.35200000 -9.76400000 7.69200000
C 38.97900000 -9.48700000 5.99100000
C 40.48600000 -9.52100000 6.18000000
O 41.24800000 -9.41500000 5.22100000
N 40.92600000 -9.67400000 7.42400000
H 40.26700000 -9.74200000 8.19500000
H 42.85600000 -8.86000000 7.34500000
H 38.74500000 -9.62600000 4.94300000
H 42.52499000 -9.89266800 8.77066600
H 42.75928000 -10.61663000 7.12884700
H 38.48049000 -10.27524000 6.57423600
H 38.57739000 -8.49842400 6.25826000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '108LYS-112TYR-1')] = qcdb.Molecule("""
0 1
C -0.54400000 1.81600000 15.86000000
C 1.42400000 4.71400000 14.31800000
C -0.26800000 3.09400000 15.09800000
O -1.17400000 3.63000000 14.49200000
N 0.98300000 3.56000000 15.11100000
H -1.49400000 1.41800000 15.49300000
H 1.66300000 3.07500000 15.68900000
H 0.84600000 5.59700000 14.59600000
H 0.25127980 1.07268500 15.70185000
H -0.65763980 2.09831300 16.91706000
H 2.48742400 4.91111600 14.51869000
H 1.25912900 4.53704800 13.24492000
--
0 1
C -2.78600000 6.27300000 12.11600000
C -2.35400000 2.69900000 10.91500000
C -2.83200000 4.14500000 10.91300000
O -3.57300000 4.57400000 10.03300000
N -2.48700000 4.85700000 11.97400000
H -1.93800000 4.40900000 12.70200000
H -3.86500000 6.41500000 12.17000000
H -2.52100000 2.29300000 9.91500000
H -2.32373200 6.62150100 13.05134000
H -2.41530500 6.85011400 11.25604000
H -1.28375700 2.61886300 11.15616000
H -2.96919000 2.12759800 11.62566000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '108TYR-129TRP-1')] = qcdb.Molecule("""
0 1
C 21.45900000 16.82300000 27.28600000
C 21.42500000 17.10300000 31.02900000
C 21.55700000 17.48100000 28.64000000
O 21.94300000 18.63900000 28.77400000
N 21.23200000 16.69900000 29.65300000
H 20.60400000 16.16500000 27.27600000
H 20.85600000 15.77500000 29.47000000
H 22.00800000 18.02200000 31.07100000
H 21.34434000 17.56150000 26.47886000
H 22.38030000 16.23033000 27.18626000
H 20.48909000 17.27499000 31.58080000
H 21.98440000 16.31516000 31.55472000
--
0 1
C 24.53000000 20.73600000 27.37500000
C 23.10500000 21.74000000 30.73700000
C 24.01800000 21.66700000 29.53600000
O 25.04200000 22.35200000 29.51200000
N 23.66300000 20.86900000 28.53500000
H 22.83600000 20.28600000 28.59800000
H 25.31900000 21.48700000 27.39300000
H 22.39100000 20.91900000 30.65600000
H 25.01147000 19.74856000 27.31878000
H 23.98691000 20.83089000 26.42313000
H 23.70773000 21.66240000 31.65390000
H 22.56577000 22.69806000 30.70023000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '112SER-115ALA-2')] = qcdb.Molecule("""
0 1
C 19.26500000 -11.56600000 27.68600000
C 17.01200000 -9.19000000 29.53700000
C 18.85500000 -10.36300000 28.48800000
O 19.68000000 -9.66900000 29.06000000
N 17.55000000 -10.18600000 28.61100000
H 18.62000000 -11.61400000 26.80700000
H 16.91200000 -10.76800000 28.07800000
H 17.33900000 -8.18900000 29.24300000
H 20.30707000 -11.47286000 27.34628000
H 19.08773000 -12.47159000 28.28474000
H 15.91356000 -9.24237600 29.51081000
H 17.37671000 -9.36836500 30.55934000
--
0 1
C 21.68800000 -10.28800000 32.04500000
C 18.01100000 -11.01800000 32.71300000
C 19.43000000 -10.50600000 32.92100000
O 19.73200000 -9.87600000 33.94800000
N 20.31500000 -10.75400000 31.95100000
H 20.03400000 -11.27900000 31.12800000
H 22.10200000 -10.61400000 32.99900000
H 17.36000000 -10.52800000 33.43800000
H 22.30271000 -10.72446000 31.24397000
H 21.74199000 -9.18956700 32.02197000
H 17.67236000 -10.77590000 31.69481000
H 17.95706000 -12.10543000 32.86979000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '112TRP-115ARG-1')] = qcdb.Molecule("""
0 1
C 12.99700000 107.58400000 30.66100000
C 11.31500000 109.73200000 33.35200000
C 12.09900000 108.66300000 31.29500000
O 11.42800000 109.42400000 30.60000000
N 12.10600000 108.74900000 32.62400000
H 13.16400000 107.85700000 29.61600000
H 12.64500000 108.07400000 33.15800000
H 10.26900000 109.63600000 33.05700000
H 13.95973000 107.56090000 31.19262000
H 12.50602000 106.59990000 30.68299000
H 11.39995000 109.52640000 34.42927000
H 11.62295000 110.76470000 33.13142000
--
0 1
C 12.56800000 112.51500000 29.00500000
C 13.50400000 112.80200000 32.70700000
C 13.05700000 113.20500000 31.29500000
O 12.82300000 114.38400000 31.01500000
N 12.99300000 112.23500000 30.37600000
H 13.27900000 111.29200000 30.61500000
H 12.88800000 113.52300000 28.73400000
H 13.17300000 113.46900000 33.50200000
H 13.00543000 111.79710000 28.29560000
H 11.47648000 112.52440000 28.86901000
H 13.16244000 111.78340000 32.94300000
H 14.59802000 112.89490000 32.64005000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '113TRP-124HIE-2')] = qcdb.Molecule("""
0 1
C 15.69200000 18.90600000 -3.72600000
C 16.61100000 15.27700000 -4.31600000
C 15.75100000 16.46500000 -3.89100000
O 14.57900000 16.29400000 -3.54500000
N 16.36500000 17.63700000 -3.91800000
H 17.33700000 17.65900000 -4.20600000
H 14.70500000 18.77000000 -3.28900000
H 17.65500000 15.58800000 -4.31600000
H 16.28129000 19.57541000 -3.08208300
H 15.59310000 19.41205000 -4.69766700
H 16.48905000 14.39645000 -3.66811600
H 16.33723000 15.03294000 -5.35305800
--
0 1
C 20.52300000 19.52100000 -4.04000000
C 21.15600000 15.79500000 -3.64800000
C 20.27400000 18.02900000 -4.10900000
O 19.16700000 17.59400000 -4.43900000
N 21.30000000 17.24200000 -3.78000000
H 21.54400000 19.73700000 -4.35300000
H 22.15300000 17.66300000 -3.43100000
H 20.10700000 15.57500000 -3.49900000
H 19.79850000 20.04400000 -4.68153200
H 20.37331000 19.85075000 -3.00132000
H 21.47959000 15.24341000 -4.54300700
H 21.70552000 15.47354000 -2.75095500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '115GLN-118ARG-2')] = qcdb.Molecule("""
0 1
C 30.32800000 52.65700000 61.95600000
C 26.93700000 54.24800000 61.33000000
C 29.13200000 53.22000000 61.20800000
O 29.09000000 53.17900000 59.97700000
N 28.15300000 53.72900000 61.94300000
H 30.06900000 52.54100000 63.01000000
H 28.25300000 53.77900000 62.95000000
H 26.36300000 53.39700000 60.95800000
H 30.52267000 51.68065000 61.48820000
H 31.19440000 53.32616000 61.84842000
H 26.32112000 54.75007000 62.09067000
H 27.12494000 54.93424000 60.49110000
--
0 1
C 29.73400000 54.20900000 56.40600000
C 26.60100000 55.80500000 57.88100000
C 27.69900000 55.43600000 56.90000000
O 27.69000000 55.91800000 55.76600000
N 28.63800000 54.58200000 57.29900000
H 28.59600000 54.18400000 58.23300000
H 30.02600000 55.10000000 55.83800000
H 25.65200000 55.75600000 57.34500000
H 26.75300000 56.84300000 58.18000000
H 30.61510000 53.88913000 56.98163000
H 29.47935000 53.42987000 55.67244000
H 26.51774000 55.17149000 58.77639000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '119MET-122VAL-1')] = qcdb.Molecule("""
0 1
C 45.95400000 -1.61800000 9.16300000
C 45.36600000 0.73800000 6.24400000
C 45.23200000 -0.89200000 8.03600000
O 44.03200000 -1.09900000 7.82500000
N 45.95700000 -0.02600000 7.33100000
H 45.29500000 -2.41400000 9.51700000
H 46.93200000 0.11700000 7.56300000
H 44.64800000 1.45400000 6.65000000
H 46.15100000 1.29100000 5.72700000
H 46.91743000 -2.05935100 8.86801200
H 46.12011000 -0.91022000 9.98850300
H 44.85330000 0.09929351 5.50970500
--
0 1
C 42.46700000 -3.99300000 6.27200000
C 44.76000000 -2.15600000 3.84700000
C 43.55000000 -2.89700000 4.40500000
O 42.57800000 -3.13000000 3.68800000
N 43.59800000 -3.27700000 5.67900000
H 44.44200000 -3.10200000 6.21900000
H 42.17700000 -4.81600000 5.61600000
H 44.42000000 -1.60800000 2.96500000
H 42.71491000 -4.41473300 7.25723200
H 41.59705000 -3.32680800 6.36882200
H 45.18522000 -1.45155800 4.57703100
H 45.53226000 -2.87443100 3.53479500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '121LYS-125ALA-1')] = qcdb.Molecule("""
0 1
C 44.76000000 -2.15600000 3.84700000
C 42.46700000 -3.99300000 6.27200000
C 43.55000000 -2.89700000 4.40500000
O 42.57800000 -3.13000000 3.68800000
N 43.59800000 -3.27700000 5.67900000
H 44.42000000 -1.60800000 2.96500000
H 44.44200000 -3.10200000 6.21900000
H 42.17700000 -4.81600000 5.61600000
H 45.18522000 -1.45155800 4.57703100
H 45.53226000 -2.87443100 3.53479500
H 42.71491000 -4.41473300 7.25723200
H 41.59705000 -3.32680800 6.36882200
--
0 1
C 38.70400000 -3.61700000 2.64900000
C 39.95800000 -0.09600000 3.37500000
C 39.08000000 -1.21600000 2.83200000
O 38.06400000 -0.96800000 2.18100000
N 39.46300000 -2.45900000 3.11500000
H 40.33800000 -2.61300000 3.60100000
H 38.33600000 -3.42200000 1.63900000
H 39.36800000 0.82000000 3.44100000
H 40.77600000 0.06800000 2.67200000
H 39.36231000 -4.49666000 2.59581800
H 37.83042000 -3.84151200 3.27864100
H 40.38072000 -0.34283620 4.36008000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '121VAL-155THR-2')] = qcdb.Molecule("""
0 1
C 13.19700000 20.16700000 11.57000000
C 12.05000000 18.43000000 8.37400000
C 12.94300000 18.78300000 9.55300000
O 14.05300000 18.25200000 9.66900000
N 12.45100000 19.68400000 10.40300000
H 11.54000000 20.08900000 10.21300000
H 14.18900000 19.71600000 11.58000000
H 11.24900000 19.16500000 8.31400000
H 13.34590000 21.25582000 11.52204000
H 12.74725000 19.91914000 12.54278000
H 12.64959000 18.47294000 7.45277600
H 11.58365000 17.44310000 8.51019800
--
0 1
C 7.45000000 21.05700000 10.59900000
C 10.02700000 21.93000000 7.94900000
C 8.76700000 21.13800000 9.84500000
O 9.77800000 20.57900000 10.29100000
N 8.78200000 21.82200000 8.70900000
H 6.64500000 21.36600000 9.93700000
H 7.94500000 22.27900000 8.36300000
H 10.75500000 21.22300000 8.33200000
H 7.29745200 20.01786000 10.92600000
H 7.47500000 21.74773000 11.45473000
H 9.88999300 21.63518000 6.89813700
H 10.50478000 22.91227000 8.07891000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '126VAL-129ALA-1')] = qcdb.Molecule("""
0 1
C 19.00600000 31.54700000 21.22500000
C 18.52800000 32.86900000 24.78400000
C 19.26400000 31.80100000 22.71300000
O 20.15100000 31.19300000 23.30700000
N 18.47800000 32.67900000 23.33400000
H 19.39900000 30.55300000 21.01900000
H 17.84800000 33.25700000 22.78700000
H 19.55400000 33.11700000 25.06000000
H 17.92475000 31.55751000 21.02302000
H 19.55905000 32.25316000 20.58824000
H 17.87341000 33.70335000 25.07620000
H 18.25515000 31.94650000 25.31743000
--
0 1
C 19.81200000 27.80600000 24.20100000
C 16.75000000 29.60300000 25.63200000
C 17.94900000 28.64700000 25.54100000
O 18.21800000 27.87900000 26.47100000
N 18.64100000 28.66500000 24.39800000
H 18.30500000 29.22600000 23.62300000
H 19.54200000 26.77300000 24.42800000
H 16.50600000 29.72800000 26.68900000
H 20.14170000 27.85783000 23.15285000
H 20.62281000 28.09665000 24.88519000
H 17.00434000 30.59025000 25.21889000
H 15.87302000 29.16771000 25.13057000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '133LYS-137ASN-1')] = qcdb.Molecule("""
0 1
C 0.71300000 5.33600000 27.76300000
C 1.38700000 1.63800000 28.30600000
C 1.13800000 4.03500000 28.42900000
O 1.68100000 4.05300000 29.54200000
N 0.88400000 2.90600000 27.78200000
H 1.36800000 6.11000000 28.16200000
H 0.36300000 2.94300000 26.91100000
H 1.01500000 1.50900000 29.32400000
H 0.89068970 5.33301400 26.67745000
H -0.31956720 5.59768800 28.03745000
H 1.03141800 0.78489000 27.70954000
H 2.48548500 1.63657500 28.36370000
--
0 1
C 3.67700000 2.99200000 32.71200000
C 4.91700000 5.31200000 29.93200000
C 4.85700000 4.57500000 31.26000000
O 5.70900000 4.82000000 32.13100000
N 3.84800000 3.70000000 31.42100000
H 3.15200000 3.60500000 30.69000000
H 3.69600000 3.71900000 33.52600000
H 5.90400000 5.76700000 29.84700000
H 2.72434100 2.44288800 32.74227000
H 4.51397500 2.29830400 32.88010000
H 4.76365400 4.63428600 29.07925000
H 4.15324200 6.10359500 29.93914000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '134GLU-138ARG-1')] = qcdb.Molecule("""
0 1
C -5.47100000 -3.30200000 72.20800000
C -4.40800000 -0.33600000 74.43800000
C -5.04100000 -1.87800000 72.63200000
O -4.79900000 -1.01800000 71.77900000
N -4.88300000 -1.63800000 73.94100000
H -5.07000000 -3.43500000 71.20200000
H -5.10300000 -2.37300000 74.61000000
H -4.99500000 0.45700000 73.96800000
H -4.99341800 -4.03843000 72.87101000
H -6.55733600 -3.46127200 72.14086000
H -4.53425400 -0.26426490 75.52837000
H -3.35791000 -0.17223760 74.15429000
--
0 1
C -3.32900000 2.19200000 70.15700000
C -1.36500000 -1.07900000 70.06500000
C -1.77100000 0.34700000 69.75100000
O -1.13700000 1.00200000 68.92000000
N -2.85400000 0.81700000 70.37700000
H -3.37000000 0.20500000 71.00600000
H -3.57800000 2.32900000 69.10300000
H -0.33300000 -1.20700000 69.73000000
H -4.22387700 2.37881900 70.76880000
H -2.53485900 2.91558200 70.39315000
H -1.43198700 -1.27996200 71.14441000
H -2.02256900 -1.75743300 69.50168000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '135ARG-152ALA-2')] = qcdb.Molecule("""
0 1
C 14.42800000 5.43400000 10.89000000
C 14.29100000 4.73900000 7.15400000
C 13.98300000 5.53900000 9.44500000
O 13.08900000 6.31900000 9.12500000
N 14.60500000 4.75100000 8.57800000
H 15.37200000 4.89300000 10.92900000
H 15.32500000 4.12200000 8.92100000
H 13.58900000 5.53700000 6.91700000
H 14.55926000 6.44487500 11.30340000
H 13.67904000 4.88107900 11.47595000
H 15.19575000 4.88384600 6.54536100
H 13.82916000 3.78414200 6.86254000
--
0 1
C 9.61000000 5.47000000 9.33500000
C 10.59400000 8.22700000 6.89600000
C 9.73200000 7.33500000 7.77800000
O 8.50400000 7.47400000 7.78600000
N 10.34800000 6.40400000 8.49400000
H 11.36100000 6.37100000 8.49300000
H 8.54400000 5.56600000 9.15300000
H 11.65700000 8.05600000 7.07800000
H 9.91149400 4.44145900 9.08760200
H 9.74431000 5.66352200 10.40948000
H 10.28615000 7.90699200 5.88960800
H 10.33999000 9.28405700 7.06366100
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '137SER-144LYS-1')] = qcdb.Molecule("""
0 1
C 26.86200000 49.40800000 54.49800000
C 26.15800000 50.39700000 58.07800000
C 26.32700000 49.33100000 55.90900000
O 25.67300000 48.35500000 56.28300000
N 26.61500000 50.35300000 56.70000000
H 27.59300000 50.21400000 54.43300000
H 27.16500000 51.12800000 56.33800000
H 25.72700000 49.44200000 58.37900000
H 27.33000000 48.44171000 54.25870000
H 26.04270000 49.61534000 53.79390000
H 27.01124000 50.60527000 58.74027000
H 25.38427000 51.17435000 58.16204000
--
0 1
C 24.84400000 44.87100000 56.13900000
C 25.45200000 46.36600000 59.60100000
C 25.08800000 45.34100000 58.53500000
O 24.73600000 44.20500000 58.85600000
N 25.16400000 45.74300000 57.27000000
H 25.45500000 46.69700000 57.06600000
H 24.41200000 43.93000000 56.48500000
H 25.70900000 47.30500000 59.11300000
H 24.13207000 45.35928000 55.45728000
H 25.75743000 44.63939000 55.57155000
H 24.59013000 46.50324000 60.27059000
H 26.33313000 46.02721000 60.16565000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '140SER-144THR-2')] = qcdb.Molecule("""
0 1
C -27.66700000 11.00300000 -22.84300000
C -25.41500000 13.54300000 -24.57200000
C -26.71400000 11.61700000 -23.86800000
O -26.35100000 10.97300000 -24.85200000
N -26.29700000 12.86100000 -23.63700000
H -27.71400000 9.92900000 -23.03700000
H -26.62500000 13.36600000 -22.81800000
H -25.87100000 13.53300000 -25.56000000
H -27.30905000 11.15781000 -21.81446000
H -28.68080000 11.41427000 -22.95732000
H -25.26131000 14.58709000 -24.26173000
H -24.45083000 13.01972000 -24.65295000
--
0 1
C -24.65800000 10.06900000 -28.09100000
C -23.49800000 8.40600000 -24.86900000
C -23.63000000 8.68500000 -26.36100000
O -23.00500000 8.02200000 -27.18500000
N -24.44400000 9.68100000 -26.70100000
H -24.99800000 10.13100000 -25.97900000
H -25.05200000 9.21300000 -28.64100000
H -22.60600000 7.78500000 -24.74500000
H -25.39211000 10.88515000 -28.16163000
H -23.71053000 10.36369000 -28.56582000
H -23.37525000 9.34731400 -24.31325000
H -24.37686000 7.85338700 -24.50538000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '142ALA-146PHE-1')] = qcdb.Molecule("""
0 1
C 33.55000000 94.86800000 46.73100000
C 35.97100000 97.65100000 45.54600000
C 34.40300000 95.74100000 45.79100000
O 34.35300000 95.53500000 44.57400000
N 35.18700000 96.67900000 46.34300000
H 33.80500000 93.83000000 46.52600000
H 35.16900000 96.78300000 47.35400000
H 35.30500000 98.17100000 44.85500000
H 33.70052000 94.99519000 47.81321000
H 32.48246000 95.00923000 46.50648000
H 36.43341000 98.39401000 46.21242000
H 36.72009000 97.21313000 44.86988000
--
0 1
C 34.76000000 95.95100000 40.63600000
C 36.62700000 93.16800000 42.46700000
C 36.01800000 94.02700000 41.34400000
O 36.18100000 93.72900000 40.15200000
N 35.32400000 95.12000000 41.69400000
H 35.06000000 95.30900000 42.65700000
H 34.32600000 95.29100000 39.88900000
H 37.39000000 92.50800000 42.05100000
H 33.97288000 96.62404000 41.00675000
H 35.52326000 96.46537000 40.03362000
H 37.12414000 93.80579000 43.21270000
H 35.81858000 92.54917000 42.88354000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '143VAL-147GLU-1')] = qcdb.Molecule("""
0 1
C -1.86200000 9.06900000 -7.73200000
C -1.72900000 5.24100000 -7.46700000
C -1.43000000 7.67500000 -7.27000000
O -0.51700000 7.55000000 -6.45200000
N -2.05900000 6.63000000 -7.81800000
H -1.07700000 9.76500000 -7.43100000
H -2.84200000 6.81500000 -8.44200000
H -1.81000000 5.11300000 -6.38400000
H -1.95655300 9.09526500 -8.82761400
H -2.79855100 9.37478300 -7.24274600
H -2.42071900 4.53998200 -7.95699800
H -0.69671890 4.97302500 -7.73641600
--
0 1
C 1.54100000 5.71700000 -4.03200000
C 3.10700000 7.59300000 -6.97000000
C 3.01800000 6.83000000 -5.64900000
O 4.03100000 6.57600000 -5.00000000
N 1.80200000 6.44000000 -5.27500000
H 1.00400000 6.73000000 -5.83200000
H 1.80100000 6.35800000 -3.19200000
H 4.15000000 7.55900000 -7.29000000
H 0.46699580 5.48783100 -3.96878100
H 2.13326100 4.79419200 -3.94452300
H 2.48037000 7.11195900 -7.73546400
H 2.83065700 8.64838900 -6.82932900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '146PHE-150LEU-1')] = qcdb.Molecule("""
0 1
C 35.29100000 3.15000000 19.07200000
C 32.79300000 2.00300000 21.70800000
C 34.58400000 3.01000000 20.42000000
O 35.10400000 3.49300000 21.41900000
N 33.44500000 2.30600000 20.44100000
H 36.34200000 3.36700000 19.26800000
H 33.03500000 1.97800000 19.57100000
H 32.55100000 2.94400000 22.20500000
H 35.22787000 2.22328600 18.48275000
H 34.86745000 3.99197100 18.50482000
H 31.85335000 1.46779100 21.50649000
H 33.49182000 1.45675800 22.35860000
--
0 1
C 36.08300000 4.39300000 24.97900000
C 38.17900000 2.13700000 22.73200000
C 37.81100000 3.06600000 23.90200000
O 38.61900000 3.32300000 24.79600000
N 36.58400000 3.58100000 23.89100000
H 35.97900000 3.40000000 23.09600000
H 36.81400000 5.16900000 25.19500000
H 39.05100000 1.55200000 23.02500000
H 35.12747000 4.86644900 24.70918000
H 35.98399000 3.81770400 25.91133000
H 37.36631000 1.44696500 22.46110000
H 38.44675000 2.74302700 21.85391000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '150LYS-158LEU-2')] = qcdb.Molecule("""
0 1
C 9.85300000 2.84200000 -3.55200000
C 10.78000000 4.84700000 -0.46600000
C 10.33700000 4.01700000 -2.71400000
O 10.59100000 5.10800000 -3.23200000
N 10.47800000 3.78100000 -1.41700000
H 9.31000000 2.16500000 -2.88600000
H 10.26800000 2.86500000 -1.04700000
H 11.01900000 5.76500000 -0.99800000
H 9.16822400 3.17310000 -4.34664100
H 10.71315000 2.29424100 -3.96443900
H 9.91530300 5.05294900 0.18198460
H 11.64222000 4.62279400 0.17922190
--
0 1
C 13.85500000 7.24900000 -3.77000000
C 10.43300000 8.49100000 -2.65800000
C 11.93400000 8.51000000 -2.95700000
O 12.59900000 9.54200000 -2.81700000
N 12.46000000 7.35400000 -3.35700000
H 11.86100000 6.53400000 -3.41500000
H 14.48900000 7.68800000 -2.99900000
H 10.06600000 7.46700000 -2.60500000
H 14.13178000 6.19365900 -3.91018500
H 14.04924000 7.81293900 -4.69425300
H 10.17089000 9.01371600 -1.72630000
H 9.87438900 9.03877200 -3.43124000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '157LYS-160VAL-1')] = qcdb.Molecule("""
0 1
C 3.73200000 8.86900000 8.77600000
C 5.41400000 11.67200000 6.81700000
C 4.49300000 10.13500000 8.44200000
O 5.07500000 10.73200000 9.33700000
N 4.55300000 10.54300000 7.17100000
H 3.43200000 8.95100000 9.82500000
H 4.08200000 10.01800000 6.44700000
H 6.35000000 11.53500000 7.36400000
H 2.84034700 8.73392400 8.14615100
H 4.40318400 8.00483200 8.66319000
H 5.71915900 11.69125000 5.76035100
H 5.02919900 12.64014000 7.17003500
--
0 1
C 4.97300000 13.32400000 11.83600000
C 8.48500000 13.69100000 13.21600000
C 6.48000000 13.17800000 11.91200000
O 7.07600000 12.38500000 11.18200000
N 7.09100000 13.89700000 12.85200000
H 4.68700000 14.23400000 12.35700000
H 6.55200000 14.58800000 13.36600000
H 8.86800000 12.79900000 12.72700000
H 4.63646700 13.38184000 10.79034000
H 4.50454700 12.47876000 12.36148000
H 9.10910400 14.52937000 12.87303000
H 8.55369600 13.58398000 14.30862000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '162ALA-176GLY-1')] = qcdb.Molecule("""
0 1
C -17.74000000 15.19200000 -3.17800000
C -19.41700000 14.10800000 -6.43200000
C -18.74600000 14.52100000 -4.10900000
O -19.69500000 13.88200000 -3.65200000
N -18.52100000 14.65400000 -5.41600000
H -17.74200000 14.64900000 -2.23200000
H -17.71400000 15.18400000 -5.72800000
H -20.44200000 14.37700000 -6.19400000
H -16.72386000 15.16183000 -3.59818400
H -18.04092000 16.23043000 -2.97522200
H -19.16579000 14.51369000 -7.42311700
H -19.37444000 13.01229000 -6.51928100
--
0 1
C -22.69100000 11.53800000 -5.04700000
C -22.98100000 14.60100000 -2.82700000
C -23.41300000 13.34400000 -3.56700000
O -24.58600000 12.97500000 -3.58500000
N -22.43900000 12.70700000 -4.21300000
H -21.49200000 13.06500000 -4.13100000
H -21.75700000 10.99000000 -5.17000000
H -23.40600000 10.88100000 -4.54900000
H -21.89300000 14.64500000 -2.88500000
H -23.08232000 11.77048000 -6.04840900
H -23.36436000 15.52015000 -3.29412100
H -23.21900000 14.58874000 -1.75312500
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '167GLY-232GLY-1')] = qcdb.Molecule("""
0 1
C -21.57600000 9.57000000 -2.09200000
C -20.37700000 7.34000000 0.75300000
C -21.61500000 8.53700000 -0.97500000
O -22.67400000 8.01200000 -0.64600000
N -20.45500000 8.23400000 -0.39400000
H -22.44500000 10.20800000 -2.05100000
H -21.61900000 9.02700000 -3.03400000
H -19.59500000 8.65100000 -0.73300000
H -20.89200000 6.40600000 0.52200000
H -20.66074000 10.18017000 -2.09124500
H -19.32348000 7.11306700 0.97346980
H -20.86994000 7.79696100 1.62374400
--
0 1
C -25.40700000 7.36400000 1.57700000
C -24.12100000 4.80400000 -0.91800000
C -24.99400000 5.41800000 0.16600000
O -26.03300000 4.87100000 0.52800000
N -24.57900000 6.57100000 0.67700000
H -23.73400000 6.99300000 0.30800000
H -24.86300000 8.27300000 1.83500000
H -26.32700000 7.64800000 1.06300000
H -23.18500000 5.36400000 -0.95400000
H -25.67084000 6.85081200 2.51349600
H -23.90490000 3.74081100 -0.73653500
H -24.61164000 4.94192400 -1.89280900
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '168GLY-172LYS-2')] = qcdb.Molecule("""
0 1
C 29.97500000 85.51300000 43.09100000
C 28.46200000 89.04000000 43.07000000
C 29.20500000 86.74000000 43.54900000
O 28.70500000 86.74100000 44.67100000
N 29.09600000 87.76800000 42.69800000
H 30.09400000 84.84500000 43.94300000
H 30.95700000 85.82400000 42.74100000
H 29.51400000 87.69400000 41.77500000
H 29.03000000 89.47400000 43.89100000
H 29.49880000 84.93974000 42.28192000
H 28.46200000 89.74031000 42.22173000
H 27.43080000 88.94035000 43.43973000
--
0 1
C 27.48200000 87.78600000 48.05100000
C 24.81700000 85.78900000 46.24600000
C 25.56100000 86.52100000 47.36000000
O 25.06000000 86.62900000 48.49100000
N 26.74200000 87.04900000 47.03900000
H 27.17300000 86.89600000 46.13100000
H 27.44300000 87.23500000 48.98800000
H 23.74900000 85.90300000 46.42100000
H 28.53329000 87.90968000 47.75186000
H 27.02877000 88.75824000 48.29460000
H 25.00271000 86.14087000 45.22047000
H 25.05560000 84.71708000 46.18223000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '171ALA-175GLU-1')] = qcdb.Molecule("""
0 1
C 14.98600000 -3.67500000 22.14700000
C 11.18700000 -3.38600000 22.07700000
C 13.59900000 -3.04200000 22.08200000
O 13.46500000 -1.81600000 22.03600000
N 12.55600000 -3.86700000 22.09600000
H 15.68700000 -2.89400000 22.45000000
H 12.72000000 -4.86800000 22.08900000
H 11.03100000 -2.78000000 21.18400000
H 15.02070000 -4.47983800 22.89602000
H 15.30987000 -4.05657500 21.16746000
H 10.51184000 -4.25334700 22.03381000
H 10.98391000 -2.76218200 22.95995000
--
0 1
C 11.85300000 1.81500000 22.39000000
C 13.73600000 0.17000000 25.31900000
C 13.40300000 1.16000000 24.21100000
O 14.09000000 2.16100000 24.06300000
N 12.33900000 0.90900000 23.44600000
H 11.81400000 0.06000000 23.61600000
H 11.02500000 1.29900000 21.90200000
H 13.58800000 0.69400000 26.26400000
H 11.44898000 2.75557700 22.79260000
H 12.58831000 1.99522200 21.59198000
H 13.09472000 -0.72358300 25.30248000
H 14.78968000 -0.14379990 25.28301000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '172GLY-175TRP-1')] = qcdb.Molecule("""
0 1
C -28.21300000 14.47800000 -2.71000000
C -27.17200000 17.90900000 -3.95200000
C -27.38700000 15.73300000 -2.89500000
O -26.25800000 15.81800000 -2.40600000
N -27.94100000 16.72000000 -3.59600000
H -28.85200000 14.31300000 -3.57700000
H -28.84900000 14.61400000 -1.83400000
H -28.87700000 16.61500000 -3.97600000
H -27.76600000 18.53800000 -4.61600000
H -27.55033000 13.61155000 -2.56810700
H -26.93740000 18.49120000 -3.04866600
H -26.27170000 17.58541000 -4.49491100
--
0 1
C -22.98100000 14.60100000 -2.82700000
C -23.52700000 17.62200000 -5.09600000
C -23.03600000 16.25600000 -4.61900000
O -22.24200000 15.61000000 -5.30200000
N -23.48900000 15.81900000 -3.44600000
H -24.22100000 16.32600000 -2.96900000
H -21.89300000 14.64500000 -2.88500000
H -23.72300000 17.53900000 -6.16600000
H -23.21900000 14.58874000 -1.75312500
H -23.29337000 13.69209000 -3.36207800
H -24.45293000 17.94165000 -4.59551800
H -22.70930000 18.34757000 -4.97388100
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '197TYR-201LEU-2')] = qcdb.Molecule("""
0 1
C 19.26600000 20.16900000 9.99800000
C 21.03600000 22.26700000 7.30200000
C 19.78500000 20.62600000 8.61400000
O 19.57500000 19.93600000 7.60900000
N 20.48000000 21.76100000 8.57600000
H 18.96400000 19.12600000 9.91300000
H 20.59100000 22.31400000 9.42400000
H 20.22800000 22.44100000 6.59000000
H 20.06652000 20.24370000 10.74872000
H 18.39923000 20.77160000 10.30715000
H 21.60486000 23.19755000 7.44510600
H 21.69911000 21.51272000 6.85327900
--
0 1
C 19.44300000 18.91200000 3.81900000
C 20.93200000 16.82600000 6.68300000
C 20.31100000 17.16100000 5.33000000
O 20.07600000 16.23800000 4.52300000
N 20.05100000 18.46900000 5.08600000
H 20.17800000 19.14200000 5.83600000
H 18.71800000 18.17400000 3.47600000
H 21.53300000 15.92100000 6.56900000
H 18.95578000 19.89001000 3.94597700
H 20.21336000 18.98406000 3.03711000
H 21.57929000 17.64764000 7.02348000
H 20.16373000 16.64485000 7.44912300
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '199SER-202TYR-1')] = qcdb.Molecule("""
0 1
C 43.86400000 26.88100000 10.53600000
C 43.20800000 24.30000000 7.80900000
C 43.40000000 26.28500000 9.20200000
O 42.94600000 27.00200000 8.31400000
N 43.51200000 24.96600000 9.08500000
H 43.49500000 27.90700000 10.56100000
H 43.90400000 24.43600000 9.85400000
H 43.79600000 24.75100000 7.00300000
H 43.49007000 26.34465000 11.42059000
H 44.96194000 26.92526000 10.58669000
H 43.46626000 23.23348000 7.88538800
H 42.15043000 24.41349000 7.52852400
--
0 1
C 39.44100000 28.17900000 8.36500000
C 39.42900000 24.37700000 8.11400000
C 39.06400000 25.83200000 7.84100000
O 38.24000000 26.11900000 6.98200000
N 39.67800000 26.76100000 8.58100000
H 40.30700000 26.48000000 9.32800000
H 38.37700000 28.38800000 8.48700000
H 39.15600000 23.79600000 7.22900000
H 40.00118000 28.77311000 9.10204200
H 39.72832000 28.49440000 7.35111100
H 40.50351000 24.25223000 8.31362800
H 38.85022000 23.99139000 8.96624700
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '205THR-208GLU-1')] = qcdb.Molecule("""
0 1
C 27.52100000 12.50600000 26.48200000
C 29.67700000 10.31900000 28.70500000
C 28.05900000 11.83100000 27.72700000
O 27.63600000 12.13900000 28.84700000
N 29.02300000 10.93300000 27.56400000
H 26.99500000 13.40000000 26.81100000
H 29.34100000 10.70300000 26.62900000
H 28.94100000 9.74600000 29.27000000
H 28.34116000 12.82075000 25.81997000
H 26.77913000 11.87706000 25.96813000
H 30.48129000 9.63955500 28.38644000
H 30.06111000 11.10674000 29.36977000
--
0 1
C 28.58400000 15.47400000 30.65900000
C 31.59300000 13.31500000 29.94300000
C 30.60700000 14.23600000 30.64100000
O 30.85100000 14.63700000 31.78200000
N 29.50600000 14.58700000 29.99000000
H 29.33700000 14.29100000 29.03500000
H 29.17400000 16.23700000 31.17100000
H 32.12100000 12.77800000 30.73300000
H 28.00426000 16.04422000 29.91822000
H 28.00915000 14.98034000 31.45640000
H 31.10898000 12.57904000 29.28415000
H 32.32319000 13.91761000 29.38292000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '205THR-209LEU-2')] = qcdb.Molecule("""
0 1
C 27.52100000 12.50600000 26.48200000
C 29.67700000 10.31900000 28.70500000
C 28.05900000 11.83100000 27.72700000
O 27.63600000 12.13900000 28.84700000
N 29.02300000 10.93300000 27.56400000
H 26.99500000 13.40000000 26.81100000
H 29.34100000 10.70300000 26.62900000
H 28.94100000 9.74600000 29.27000000
H 28.34116000 12.82075000 25.81997000
H 26.77913000 11.87706000 25.96813000
H 30.48129000 9.63955500 28.38644000
H 30.06111000 11.10674000 29.36977000
--
0 1
C 26.86500000 12.72100000 32.66600000
C 28.58400000 15.47400000 30.65900000
C 27.79100000 14.79300000 31.75900000
O 27.31800000 15.47600000 32.67200000
N 27.66900000 13.46700000 31.70400000
H 28.08700000 12.97100000 30.92600000
H 26.13800000 13.39700000 33.11100000
H 29.17400000 16.23700000 31.17100000
H 26.30931000 11.90977000 32.17293000
H 27.42080000 12.32765000 33.52992000
H 29.28648000 14.79819000 30.14929000
H 28.00426000 16.04422000 29.91822000
units angstrom
""")
GEOS['%s-%s-dimer' % (dbse, '228ARG-232GLU-1')] = qcdb.Molecule("""
0 1
C 48.27300000 30.47900000 27.65500000
C 46.29600000 28.47400000 25.10100000
C 47.53200000 30.13600000 26.37000000
O 47.41000000 30.96700000 25.48200000
N 47.04600000 28.89700000 26.27700000
H 48.24600000 31.56700000 27.76600000
H 47.26200000 28.22400000 27.00400000
H 46.93800000 28.52200000 24.22000000
H 47.79481000 30.00687000 28.52588000
H 49.32228000 30.15311000 27.60200000
H 45.95848000 27.43677000 25.24327000
H 45.42364000 29.12286000 24.93374000
--
0 1
C 46.62400000 32.86600000 22.40200000
C 45.05200000 33.85700000 25.70700000
C 45.40700000 33.90800000 24.22600000
O 44.98000000 34.79300000 23.48100000
N 46.20200000 32.93500000 23.79700000
H 46.52900000 32.22200000 24.44300000
H 47.02700000 33.83900000 22.11300000
H 44.24900000 34.57300000 25.88800000
H 47.42942000 32.12782000 22.27396000
H 45.78943000 32.67093000 21.71248000
H 44.71165000 32.84806000 25.98306000
H 45.91216000 34.12701000 26.33726000
units angstrom
""")
# <<< Derived Geometry Strings >>>
for rxn in HRXN:
GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
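# The unCP monomers keep only the atoms of the named fragment, while the CP
# monomers also carry the partner fragment's atoms as ghost (basis-function-only)
# centers for the counterpoise correction (assuming qcdb's
# Molecule.extract_fragments(real, ghost) convention).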
#########################################################################
| lgpl-3.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/dask/tests/test_utils.py | 1 | 6945 | import os
import pickle
import numpy as np
import pytest
from dask.compatibility import BZ2File, GzipFile, LZMAFile, LZMA_AVAILABLE
from dask.utils import (textblock, filetext, takes_multiple_arguments,
Dispatch, tmpfile, random_state_data, file_size,
infer_storage_options, eq_strict, memory_repr,
methodcaller, M, skip_doctest)
SKIP_XZ = pytest.mark.skipif(not LZMA_AVAILABLE, reason="no lzma library")
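# Old-style pytest: the skip marker is applied directly to a parametrize
# argument. (Modern pytest spells this pytest.param(LZMAFile, 'xz', marks=SKIP_XZ).)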
@pytest.mark.parametrize('myopen,compression',
[(open, None), (GzipFile, 'gzip'), (BZ2File, 'bz2'),
SKIP_XZ((LZMAFile, 'xz'))])
def test_textblock(myopen, compression):
text = b'123 456 789 abc def ghi'.replace(b' ', os.linesep.encode())
with filetext(text, open=myopen, mode='wb') as fn:
text = ''.join(textblock(fn, 1, 11, compression)).encode()
assert text == ('456 789 '.replace(' ', os.linesep)).encode()
assert set(map(len, text.split())) == set([3])
k = 3 + len(os.linesep)
assert ''.join(textblock(fn, 0, k, compression)).encode() == ('123' + os.linesep).encode()
assert ''.join(textblock(fn, k, k, compression)).encode() == b''
@pytest.mark.parametrize('myopen,compression',
[(open, None), (GzipFile, 'gzip'), (BZ2File, 'bz2'),
SKIP_XZ((LZMAFile, 'xz'))])
def test_filesize(myopen, compression):
text = b'123 456 789 abc def ghi'.replace(b' ', os.linesep.encode())
with filetext(text, open=myopen, mode='wb') as fn:
assert file_size(fn, compression) == len(text)
def test_textblock_multibyte_linesep():
text = b'12 34 56 78'.replace(b' ', b'\r\n')
with filetext(text, mode='wb') as fn:
text = [line.encode()
for line in textblock(fn, 5, 13, linesep='\r\n', buffersize=2)]
assert text == [line.encode() for line in ('56\r\n', '78')]
def test_takes_multiple_arguments():
assert takes_multiple_arguments(map)
assert not takes_multiple_arguments(sum)
def multi(a, b, c):
return a, b, c
class Singular(object):
def __init__(self, a):
pass
class Multi(object):
def __init__(self, a, b):
pass
assert takes_multiple_arguments(multi)
assert not takes_multiple_arguments(Singular)
assert takes_multiple_arguments(Multi)
def test_dispatch():
foo = Dispatch()
foo.register(int, lambda a: a + 1)
foo.register(float, lambda a: a - 1)
foo.register(tuple, lambda a: tuple(foo(i) for i in a))
foo.register(object, lambda a: a)
class Bar(object):
pass
b = Bar()
assert foo(1) == 2
assert foo(1.0) == 0.0
assert foo(b) == b
assert foo((1, 2.0, b)) == (2, 1.0, b)
def test_gh606():
encoding = 'utf-16-le'
euro = u'\u20ac'
yen = u'\u00a5'
linesep = os.linesep
bin_euro = u'\u20ac'.encode(encoding)
bin_linesep = linesep.encode(encoding)
data = (euro * 10) + linesep + (yen * 10) + linesep + (euro * 10)
bin_data = data.encode(encoding)
with tmpfile() as fn:
with open(fn, 'wb') as f:
f.write(bin_data)
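# A nonzero start offset makes textblock skip ahead to the next line boundary;
# offset 1 also lands inside the first 2-byte UTF-16 code unit, which is the
# multibyte-encoding case that GH-606 fixed.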
stop = len(bin_euro) * 10 + len(bin_linesep) + 1
res = ''.join(textblock(fn, 1, stop, encoding=encoding)).encode(encoding)
assert res == ((yen * 10) + linesep).encode(encoding)
stop = len(bin_euro) * 10 + len(bin_linesep) + 1
res = ''.join(textblock(fn, 0, stop, encoding=encoding)).encode(encoding)
assert res == ((euro * 10) + linesep + (yen * 10) + linesep).encode(encoding)
def test_random_state_data():
seed = 37
state = np.random.RandomState(seed)
n = 100000
# Use an integer
states = random_state_data(n, seed)
assert len(states) == n
# Use RandomState object
states2 = random_state_data(n, state)
for s1, s2 in zip(states, states2):
assert (s1 == s2).all()
# Consistent ordering
states = random_state_data(10, 1234)
states2 = random_state_data(20, 1234)[:10]
for s1, s2 in zip(states, states2):
assert (s1 == s2).all()
def test_infer_storage_options():
so = infer_storage_options('/mnt/datasets/test.csv')
assert so.pop('protocol') == 'file'
assert so.pop('path') == '/mnt/datasets/test.csv'
assert not so
assert infer_storage_options('./test.csv')['path'] == './test.csv'
assert infer_storage_options('../test.csv')['path'] == '../test.csv'
so = infer_storage_options('C:\\test.csv')
assert so.pop('protocol') == 'file'
assert so.pop('path') == 'C:\\test.csv'
assert not so
assert infer_storage_options('d:\\test.csv')['path'] == 'd:\\test.csv'
assert infer_storage_options('\\test.csv')['path'] == '\\test.csv'
assert infer_storage_options('.\\test.csv')['path'] == '.\\test.csv'
assert infer_storage_options('test.csv')['path'] == 'test.csv'
so = infer_storage_options(
'hdfs://username:pwd@Node:123/mnt/datasets/test.csv?q=1#fragm',
inherit_storage_options={'extra': 'value'})
assert so.pop('protocol') == 'hdfs'
assert so.pop('username') == 'username'
assert so.pop('password') == 'pwd'
assert so.pop('host') == 'Node'
assert so.pop('port') == 123
assert so.pop('path') == '/mnt/datasets/test.csv'
assert so.pop('url_query') == 'q=1'
assert so.pop('url_fragment') == 'fragm'
assert so.pop('extra') == 'value'
assert not so
so = infer_storage_options('hdfs://User-name@Node-name.com/mnt/datasets/test.csv')
assert so.pop('username') == 'User-name'
assert so.pop('host') == 'Node-name.com'
assert infer_storage_options('s3://Bucket-name.com/test.csv')['host'] == 'Bucket-name.com'
assert infer_storage_options('http://127.0.0.1:8080/test.csv')['host'] == '127.0.0.1'
with pytest.raises(KeyError):
infer_storage_options('file:///bucket/file.csv', {'path': 'collide'})
with pytest.raises(KeyError):
infer_storage_options('hdfs:///bucket/file.csv', {'protocol': 'collide'})
def test_infer_storage_options_c():
so = infer_storage_options(r'c:\foo\bar')
assert so['protocol'] == 'file'
def test_eq_strict():
assert eq_strict('a', 'a')
assert not eq_strict(b'a', u'a')
def test_memory_repr():
for power, mem_repr in enumerate(['1.0 bytes', '1.0 KB', '1.0 MB', '1.0 GB']):
assert memory_repr(1024 ** power) == mem_repr
def test_method_caller():
a = [1, 2, 3, 3, 3]
f = methodcaller('count')
assert f(a, 3) == a.count(3)
assert methodcaller('count') is f
assert M.count is f
assert pickle.loads(pickle.dumps(f)) is f
assert 'count' in dir(M)
def test_skip_doctest():
example = """>>> xxx
>>>
>>> # comment
>>> xxx"""
res = skip_doctest(example)
assert res == """>>> xxx # doctest: +SKIP
>>>
>>> # comment
>>> xxx # doctest: +SKIP"""
assert skip_doctest(None) == ''
| mit |
eleonrk/SickRage | lib/pgi/static.py | 18 | 5616 | # Copyright 2012,2014 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
import ctypes
from .gtype import PGType as GType
from .enum import GEnumBase as GEnum, EnumBase as Enum
from .enum import GFlagsBase as GFlags, FlagsBase as Flags
from .gerror import PGError as GError
from .obj import InterfaceBase as GInterface
from .properties import list_properties
from . import version_info as pygobject_version
def _init_glib(glib_module):
global OptionContext, OptionGroup, spawn_async
OptionContext = glib_module.OptionContext
OptionGroup = glib_module.OptionGroup
spawn_async = glib_module.spawn_async
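# Touch the imported names below so static analyzers treat them as intentional
# re-exports rather than unused imports.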
GType, GEnum, GFlags, GError, GInterface, list_properties, pygobject_version
Flags, Enum
GBoxed = None
GObject = None
GObjectWeakRef = None
GParamSpec = None
GPointer = None
Warning = None
TYPE_INVALID = None
OptionContext = None
OptionGroup = None
spawn_async = None
features = {'generic-c-marshaller': True}
def _gvalue_set(self, boxed):
# XXX
return type(self).__mro__[1].set_boxed(self, boxed)
def _gvalue_get(self):
# XXX
return type(self).__mro__[1].get_boxed(self)
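# Both helpers dispatch through type(self).__mro__[1], i.e. the immediate base
# class, so the boxed accessors are resolved on the parent GValue wrapper
# instead of recursing into the subclass's own overrides.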
def type_register(class_):
"""
:param class_: a Python class that is a descendant of :obj:`GObject.Object`
The GObject.type_register() function registers the specified Python class
as a GObject type. `class_` must be a descendant of GObject.Object.
The function generates a name for the new type.
"""
raise NotImplementedError
def new(gtype_or_similar, **kwargs):
"""
:param type: a Python GObject type
:type type: :obj:`GObject.Object`
:param kwargs: set of property-value pairs
:returns: a new object of the specified `type`
The GObject.new() function returns a new object of the specified `type`.
`type` must specify a type that is a descendant of :obj:`GObject.Object`.
GObject properties can be set via keyword arguments.
"""
return GType(gtype_or_similar).pytype(**kwargs)
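# Minimal usage sketch (hypothetical: assumes a GObject-derived type such as
# Gtk.Window is importable through pgi and accepted by GType()):
#
#     win = new(Gtk.Window, title="demo")
#
# which is shorthand for GType(Gtk.Window).pytype(title="demo").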
def _min_value(ctypes_type):
signed = ctypes_type(-1).value == -1
if signed:
return - 2 ** (ctypes.sizeof(ctypes_type) * 8 - 1)
return 0
def _max_value(ctypes_type):
signed = ctypes_type(-1).value == -1
return 2 ** (ctypes.sizeof(ctypes_type) * 8 - signed) - 1
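# For example, on platforms where C int is 32 bits:
#     _min_value(ctypes.c_int)  == -2**31      (c_int(-1).value == -1, so signed)
#     _max_value(ctypes.c_uint) ==  2**32 - 1  (unsigned: `signed` is False == 0)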
G_MINDOUBLE = 2.2250738585072014e-308
G_MAXDOUBLE = 1.7976931348623157e+308
G_MINFLOAT = 1.1754943508222875e-38
G_MAXFLOAT = 3.4028234663852886e+38
G_MINSHORT = _min_value(ctypes.c_short)
G_MAXSHORT = _max_value(ctypes.c_short)
G_MAXUSHORT = _max_value(ctypes.c_ushort)
G_MININT = _min_value(ctypes.c_int)
G_MAXINT = _max_value(ctypes.c_int)
G_MAXUINT = _max_value(ctypes.c_uint)
G_MINLONG = _min_value(ctypes.c_long)
G_MAXLONG = _max_value(ctypes.c_long)
G_MAXULONG = _max_value(ctypes.c_ulong)
G_MAXSIZE = _max_value(ctypes.c_size_t)
G_MINSSIZE = _min_value(ctypes.c_ssize_t)
G_MAXSSIZE = _max_value(ctypes.c_ssize_t)
G_MINOFFSET = _min_value(ctypes.c_int64)
G_MAXOFFSET = _max_value(ctypes.c_int64)
class Pid(object):
def __init__(*args, **kwargs):
raise NotImplementedError
def add_emission_hook(type, name, callback, *user_data):
"""
:param type: a Python GObject instance or type
:type type: :obj:`GObject.Object`
:param name: a signal name
:type name: :obj:`str`
:param callback: a function
:param user_data: zero or more extra arguments that will be passed to callback
The add_emission_hook() function adds an emission hook for the signal
specified by name, which will get called for any emission of that signal,
independent of the instance. This is possible only for signals which don't
have the :obj:`GObject.SignalFlags.NO_HOOKS` flag set.
"""
raise NotImplementedError
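# Hypothetical usage sketch (not implemented in pgi; mirrors the PyGObject API):
#
#     def on_any_clicked(button):
#         print("a button was clicked:", button)
#         return True   # returning True keeps the hook installed
#
#     add_emission_hook(Gtk.Button, "clicked", on_any_clicked)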
def signal_new(signal_name, type, flags, return_type, param_types):
"""
:param signal_name: the name of the signal
:type signal_name: :obj:`str`
:param type: a Python GObject instance or type that the signal is associated with
:type type: :obj:`GObject.Object`
:param flags: the signal flags
:type flags: :obj:`GObject.SignalFlags`
:param return_type: the return type of the signal handler
:type return_type: :obj:`type`
:param param_types: the parameter types passed to the signal handler
:type param_types: [:obj:`type`]
:returns: a unique integer signal ID
:rtype: :obj:`int`
The :obj:`GObject.signal_new`\() function registers a signal with the
specified `signal_name` for the specified object `type`.
`return_type` is the type of the return value from a signal handler and may
be a gobject type, type ID or instance. The `param_types` parameter is a
list of additional types that are passed to the signal handler. Each
parameter type may be specified as a gobject type, type ID or instance.
For example, to add a signal to the :obj:`Gtk.Window` type called "my-signal"
that calls a handler with a :obj:`Gtk.Button` widget and an integer value and a
return value that is a boolean, use::
GObject.signal_new("my_signal", Gtk.Window, GObject.SignalFlags.RUN_LAST, GObject.TYPE_BOOLEAN, (Gtk.Button, GObject.TYPE_INT))
"""
raise NotImplementedError
def source_new(*args, **kwargs):
raise NotImplementedError
def source_set_callback(*args, **kwargs):
raise NotImplementedError
def io_channel_read(*args, **kwargs):
raise NotImplementedError
| gpl-3.0 |
cocoloco69/pynet | week4/w4e6.py | 1 | 1425 | #!/usr/bin/env python
import sys
from netmiko import ConnectHandler
import re
import time
from getpass import getpass
def send_command(rconn, cmd, my_prompt):
rconn.sendline(cmd)
rconn.expect(my_prompt)
return rconn.before
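# NOTE: this helper assumes a pexpect-style connection object
# (sendline/expect); main() below does not use it and relies on
# netmiko's own send_command() instead.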
def main():
password = getpass()
py_router_1 = {
'device_type':'cisco_ios',
'ip':'50.76.53.27',
'username':'pyclass',
'password':password,
}
py_router_2 = {
'device_type':'cisco_ios',
'ip':'50.76.53.27',
'username':'pyclass',
'password':password,
'port':8022,
}
py_srx = {
'device_type':'juniper',
'ip':'50.76.53.27',
'username':'pyclass',
'password':password,
'secret':'',
'port':9822,
}
''' Init connections to all devices '''
rtr1 = ConnectHandler(**py_router_1)
rtr2 = ConnectHandler(**py_router_2)
srx = ConnectHandler(**py_srx)
'''rtr1 show arp'''
output = rtr1.send_command("show arp")
print "RTR1 Output.\n %s" % output
'''rtr2 show arp'''
output = rtr2.send_command("show arp")
print "RTR2 Output.\n %s" % output
'''srx show arp'''
output =srx.send_command("show arp")
print "SRX Output.\n %s" % output
if __name__ == "__main__":
main()
| apache-2.0 |
h4ck3rm1k3/ansible | test/units/TestVaultEditor.py | 118 | 5729 | #!/usr/bin/env python
from unittest import TestCase
import getpass
import os
import shutil
import time
import tempfile
from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest
from ansible import errors
from ansible.utils.vault import VaultLib
from ansible.utils.vault import VaultEditor
# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Util import Counter
HAS_COUNTER = True
except ImportError:
HAS_COUNTER = False
# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
from Crypto.Protocol.KDF import PBKDF2
HAS_PBKDF2 = True
except ImportError:
HAS_PBKDF2 = False
# AES IMPORTS
try:
from Crypto.Cipher import AES as AES
HAS_AES = True
except ImportError:
HAS_AES = False
class TestVaultEditor(TestCase):
def _is_fips(self):
try:
data = open('/proc/sys/crypto/fips_enabled').read().strip()
except:
return False
if data != '1':
return False
return True
def test_methods_exist(self):
v = VaultEditor(None, None, None)
slots = ['create_file',
'decrypt_file',
'edit_file',
'encrypt_file',
'rekey_file',
'read_data',
'write_data',
'shuffle_files']
for slot in slots:
assert hasattr(v, slot), "VaultEditor is missing the %s method" % slot
def test_decrypt_1_0(self):
if self._is_fips():
raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.0 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
def test_decrypt_1_1_newline(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.1-ansible-newline-ansible.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible\nansible\n", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.1 file with newline in password"
#assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_decrypt_1_1(self):
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.1.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.decrypt_file()
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error decrypting 1.1 file"
assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
def test_rekey_migration(self):
if self._is_fips():
raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
raise SkipTest
dirpath = tempfile.mkdtemp()
filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
shutil.rmtree(dirpath)
shutil.copytree("vault_test_data", dirpath)
ve = VaultEditor(None, "ansible", filename)
# make sure the password functions for the cipher
error_hit = False
try:
ve.rekey_file('ansible2')
except errors.AnsibleError, e:
error_hit = True
# verify decrypted content
f = open(filename, "rb")
fdata = f.read()
f.close()
shutil.rmtree(dirpath)
assert error_hit == False, "error rekeying 1.0 file to 1.1"
# ensure filedata can be decrypted, is 1.1 and is AES256
vl = VaultLib("ansible2")
dec_data = None
error_hit = False
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError, e:
error_hit = True
assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
assert error_hit == False, "error decrypting migrated 1.0 file"
assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
| gpl-3.0 |
gnuhub/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/geoapp/test_feeds.py | 78 | 3789 | from xml.dom import minidom
from django.test import TestCase
from models import City
class GeoFeedTest(TestCase):
urls = 'django.contrib.gis.tests.geoapp.urls'
def assertChildNodes(self, elem, expected):
"Taken from regressiontests/syndication/tests.py."
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def test_geofeed_rss(self):
"Tests geographic feeds using GeoRSS over RSSv2."
# Uses `GEOSGeometry` in `item_geometry`
doc1 = minidom.parseString(self.client.get('/feeds/rss1/').content)
# Uses a 2-tuple in `item_geometry`
doc2 = minidom.parseString(self.client.get('/feeds/rss2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2.getElementsByTagName('channel')[0],
['title', 'link', 'description', 'language',
'lastBuildDate', 'item', 'georss:box', 'atom:link']
)
# Incrementing through the feeds.
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <rss> element.
self.assertEqual(feed.getAttribute(u'xmlns:georss'), u'http://www.georss.org/georss')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the georss element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'georss:point'])
def test_geofeed_atom(self):
"Testing geographic feeds using GeoRSS over Atom."
doc1 = minidom.parseString(self.client.get('/feeds/atom1/').content)
doc2 = minidom.parseString(self.client.get('/feeds/atom2/').content)
feed1, feed2 = doc1.firstChild, doc2.firstChild
# Making sure the box got added to the second GeoRSS feed.
self.assertChildNodes(feed2, ['title', 'link', 'id', 'updated', 'entry', 'georss:box'])
for feed in [feed1, feed2]:
# Ensuring the georss namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute(u'xmlns:georss'), u'http://www.georss.org/georss')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), City.objects.count())
# Ensuring the georss element was added to each entry in the feed.
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'georss:point'])
def test_geofeed_w3c(self):
"Testing geographic feeds using W3C Geo."
doc = minidom.parseString(self.client.get('/feeds/w3cgeo1/').content)
feed = doc.firstChild
# Ensuring the geo namespace was added to the <feed> element.
self.assertEqual(feed.getAttribute(u'xmlns:geo'), u'http://www.w3.org/2003/01/geo/wgs84_pos#')
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), City.objects.count())
# Ensuring the geo:lat and geo:lon element was added to each item in the feed.
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'geo:lat', 'geo:lon'])
# Boxes and Polygons aren't allowed in W3C Geo feeds.
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo2/') # Box in <channel>
self.assertRaises(ValueError, self.client.get, '/feeds/w3cgeo3/') # Polygons in <entry>
| apache-2.0 |
zxsted/scipy | scipy/linalg/tests/test_solvers.py | 95 | 9591 | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import inv
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve_sylvester, solve_lyapunov, \
solve_discrete_lyapunov, solve_continuous_are, solve_discrete_are
class TestSolveLyapunov(TestCase):
cases = [
(np.array([[1, 2], [3, 4]]),
np.array([[9, 10], [11, 12]])),
# a, q all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a real; q complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a complex; q real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[2.0, 2.0],[-1.0, 2.0]])),
# An example from Kitagawa, 1977
(np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
[1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
[0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
# Companion matrix example. a complex; q real; a.shape[0] = 11
(np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
0.010+0.j],
[1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
0.000+0.j],
[0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
0.000+0.j]]),
np.eye(11)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T)),
# https://github.com/scipy/scipy/issues/4176
(np.matrix([[0, 1], [-1/2, -1]]),
(np.array(np.matrix([0, 3]).T * np.matrix([0, 3]).T.T))),
]
def check_continuous_case(self, a, q):
x = solve_lyapunov(a, q)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
def check_discrete_case(self, a, q, method=None):
x = solve_discrete_lyapunov(a, q, method=method)
assert_array_almost_equal(np.dot(np.dot(a, x),a.conj().transpose()) - x, -1.0*q)
def test_cases(self):
for case in self.cases:
self.check_continuous_case(case[0], case[1])
self.check_discrete_case(case[0], case[1])
self.check_discrete_case(case[0], case[1], method='direct')
self.check_discrete_case(case[0], case[1], method='bilinear')
class TestSolveContinuousARE(TestCase):
cases = [
# An example from Laub, A. J.
# (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
(np.matrix([[0, 1], [0, 0]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Difficult from a numerical standpoint, again from Laub, A. J.
(np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
np.matrix([[1,], [-1,]]),
np.matrix([[9, 6], [6, 4]]),
np.matrix([[1,],])),
# Complex a; real b, q, r
(np.matrix([[0, 1-2j], [0, -3j]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, q, r; complex b
(np.matrix([[0, 1], [0, -1]]),
np.matrix([[-2j,], [1j,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, b; complex q, r
(np.matrix([[0, 1], [0, -1]]),
np.matrix([[1, 2], [1, 3]]),
np.matrix([[1, -3j], [1-1j, 2]]),
np.matrix([[-2j, 2], [1j, 3]])),
]
def check_case(self, a, b, q, r):
"""Checks if (A'X + XA - XBR^-1B'X+Q=0) is true"""
x = solve_continuous_are(a, b, q, r)
assert_array_almost_equal(
a.getH()*x + x*a - x*b*inv(r)*b.getH()*x + q, 0.0)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2], case[3])
class TestSolveDiscreteARE(TestCase):
cases = [
# Difficult from a numerical standpoint, again from Laub, A. J.
# (http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf)
(np.matrix([[4, 3], [-9.0/2.0, -7.0/2.0]]),
np.matrix([[1,], [-1,]]),
np.matrix([[9, 6], [6, 4]]),
np.matrix([[1,],])),
# Another example from Laub
(np.matrix([[0.9512, 0], [0, 0.9048]]),
np.matrix([[4.877, 4.877], [-1.1895, 3.569]]),
np.matrix([[0.005, 0],[0, 0.02]]),
np.matrix([[1.0/3.0, 0],[0, 3]])),
# Complex a; real b, q, r
(np.matrix([[2, 1-2j], [0, -3j]]),
np.matrix([[0,], [1,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, q, r; complex b
(np.matrix([[2, 1], [0, -1]]),
np.matrix([[-2j,], [1j,]]),
np.matrix([[1, 0], [0, 2]]),
np.matrix([[1,],])),
# Real a, b; complex q, r
(np.matrix([[3, 1], [0, -1]]),
np.matrix([[1, 2], [1, 3]]),
np.matrix([[1, -3j], [1-1j, 2]]),
np.matrix([[-2j, 2], [1j, 3]])),
]
def check_case(self, a, b, q, r):
"""Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
x = solve_discrete_are(a, b, q, r)
assert_array_almost_equal(
a.getH()*x*a-(a.getH()*x*b)*inv(r+b.getH()*x*b)*(b.getH()*x*a)+q-x, 0.0)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2], case[3])
class TestSolveSylvester(TestCase):
cases = [
# a, b, c all real.
(np.array([[1, 2], [0, 4]]),
np.array([[5, 6], [0, 8]]),
np.array([[9, 10], [11, 12]])),
# a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
# quasi-triangular form.
(np.array([[1.0, 0, 0, 0], [0, 1.0, 2.0, 0.0], [0, 0, 3.0, -4], [0, 0, 2, 5]]),
np.array([[2.0, 0, 0,1.0], [0, 1.0, 0.0, 0.0], [0, 0, 1.0, -1], [0, 0, 1, 1]]),
np.array([[1.0, 0, 0, 0], [0, 1.0, 0, 0], [0, 0, 1.0, 0], [0, 0, 0, 1.0]])),
# a, b, c all complex.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 2j], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a and b real; c complex.
(np.array([[1.0, 2.0], [3.0, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a and c complex; b real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0-2j, 2.0+2j],[-1.0-1j, 2.0]])),
# a complex; b and c real.
(np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
np.array([[-1.0, 0], [3.0, 4.0]]),
np.array([[2.0, 2.0],[-1.0, 2.0]])),
# not square matrices, real
(np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5]]),
np.array([[1, 2], [3, 4], [5, 6]])),
# not square matrices, complex
(np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
np.array([[2, 3], [4, 5-1j]]),
np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
]
def check_case(self, a, b, c):
x = solve_sylvester(a, b, c)
assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
def test_cases(self):
for case in self.cases:
self.check_case(case[0], case[1], case[2])
def test_trivial(self):
a = np.array([[1.0, 0.0], [0.0, 1.0]])
b = np.array([[1.0]])
c = np.array([2.0, 2.0]).reshape(-1,1)
x = solve_sylvester(a, b, c)
assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1,1))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
wildermason/external_skia | gm/rebaseline_server/imagepairset_test.py | 65 | 5740 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test imagepairset.py
"""
# System-level imports
import unittest
# Local imports
import column
import imagepair
import imagepairset
BASE_URL_1 = 'http://base/url/1'
BASE_URL_2 = 'http://base/url/2'
DIFF_BASE_URL = 'http://diff/base/url'
IMAGEPAIR_1_AS_DICT = {
imagepair.KEY__IMAGEPAIRS__EXTRACOLUMNS: {
'builder': 'MyBuilder',
'test': 'test1',
},
imagepair.KEY__IMAGEPAIRS__IMAGE_A_URL: 'test1/1111.png',
imagepair.KEY__IMAGEPAIRS__IMAGE_B_URL: 'test1/1111.png',
imagepair.KEY__IMAGEPAIRS__IS_DIFFERENT: False,
}
IMAGEPAIR_2_AS_DICT = {
imagepair.KEY__IMAGEPAIRS__DIFFERENCES: {
'maxDiffPerChannel': [1, 2, 3],
'numDifferingPixels': 111,
'percentDifferingPixels': 22.222,
},
imagepair.KEY__IMAGEPAIRS__EXTRACOLUMNS: {
'builder': 'MyBuilder',
'test': 'test2',
},
imagepair.KEY__IMAGEPAIRS__IMAGE_A_URL: 'test2/2222.png',
imagepair.KEY__IMAGEPAIRS__IMAGE_B_URL: 'test2/22223.png',
imagepair.KEY__IMAGEPAIRS__IS_DIFFERENT: True,
}
IMAGEPAIR_3_AS_DICT = {
imagepair.KEY__IMAGEPAIRS__DIFFERENCES: {
'maxDiffPerChannel': [4, 5, 6],
'numDifferingPixels': 111,
'percentDifferingPixels': 44.444,
},
imagepair.KEY__IMAGEPAIRS__EXPECTATIONS: {
'bugs': [1001, 1002],
'ignoreFailure': True,
},
imagepair.KEY__IMAGEPAIRS__EXTRACOLUMNS: {
'builder': 'MyBuilder',
'test': 'test3',
},
imagepair.KEY__IMAGEPAIRS__IMAGE_A_URL: 'test3/3333.png',
imagepair.KEY__IMAGEPAIRS__IMAGE_B_URL: 'test3/33334.png',
imagepair.KEY__IMAGEPAIRS__IS_DIFFERENT: True,
}
SET_A_DESCRIPTION = 'expectations'
SET_B_DESCRIPTION = 'actuals'
class ImagePairSetTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None # do not truncate diffs when tests fail
def shortDescription(self):
"""Tells unittest framework to not print docstrings for test cases."""
return None
def test_success(self):
"""Assembles some ImagePairs into an ImagePairSet, and validates results.
"""
image_pairs = [
MockImagePair(base_url=BASE_URL_1, dict_to_return=IMAGEPAIR_1_AS_DICT),
MockImagePair(base_url=BASE_URL_1, dict_to_return=IMAGEPAIR_2_AS_DICT),
MockImagePair(base_url=BASE_URL_1, dict_to_return=IMAGEPAIR_3_AS_DICT),
]
expected_imageset_dict = {
'extraColumnHeaders': {
'builder': {
'headerText': 'builder',
'isFilterable': True,
'isSortable': True,
'valuesAndCounts': [('MyBuilder', 3)],
},
'test': {
'headerText': 'which GM test',
'headerUrl': 'http://learn/about/gm/tests',
'isFilterable': True,
'isSortable': False,
},
},
'imagePairs': [
IMAGEPAIR_1_AS_DICT,
IMAGEPAIR_2_AS_DICT,
IMAGEPAIR_3_AS_DICT,
],
'imageSets': {
'imageA': {
'baseUrl': BASE_URL_1,
'description': SET_A_DESCRIPTION,
},
'imageB': {
'baseUrl': BASE_URL_1,
'description': SET_B_DESCRIPTION,
},
'diffs': {
'baseUrl': DIFF_BASE_URL + '/diffs',
'description': 'color difference per channel',
},
'whiteDiffs': {
'baseUrl': DIFF_BASE_URL + '/whitediffs',
'description': 'differing pixels in white',
},
},
}
image_pair_set = imagepairset.ImagePairSet(
descriptions=(SET_A_DESCRIPTION, SET_B_DESCRIPTION),
diff_base_url=DIFF_BASE_URL)
for image_pair in image_pairs:
image_pair_set.add_image_pair(image_pair)
# The 'builder' column header uses the default settings,
# but the 'test' column header has manual adjustments.
image_pair_set.set_column_header_factory(
'test',
column.ColumnHeaderFactory(
header_text='which GM test',
header_url='http://learn/about/gm/tests',
is_filterable=True,
is_sortable=False,
include_values_and_counts=False))
self.assertEqual(image_pair_set.as_dict(), expected_imageset_dict)
def test_mismatched_base_url(self):
"""Confirms that mismatched base_urls will cause an exception."""
image_pair_set = imagepairset.ImagePairSet(
diff_base_url=DIFF_BASE_URL)
image_pair_set.add_image_pair(
MockImagePair(base_url=BASE_URL_1, dict_to_return=IMAGEPAIR_1_AS_DICT))
image_pair_set.add_image_pair(
MockImagePair(base_url=BASE_URL_1, dict_to_return=IMAGEPAIR_2_AS_DICT))
with self.assertRaises(Exception):
image_pair_set.add_image_pair(
MockImagePair(base_url=BASE_URL_2,
dict_to_return=IMAGEPAIR_3_AS_DICT))
class MockImagePair(object):
"""Mock ImagePair object, which will return canned results."""
def __init__(self, base_url, dict_to_return):
"""
Args:
base_url: base_url attribute for this object
dict_to_return: dictionary to return from as_dict()
"""
self.base_url = base_url
self.extra_columns_dict = dict_to_return.get(
imagepair.KEY__IMAGEPAIRS__EXTRACOLUMNS, None)
self._dict_to_return = dict_to_return
def as_dict(self):
return self._dict_to_return
def main():
suite = unittest.TestLoader().loadTestsFromTestCase(ImagePairSetTest)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
main()
| bsd-3-clause |
epiphany27/NewsBlur | apps/rss_feeds/migrations/0029_premium_null.py | 18 | 10205 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Feed.premium_subscribers'
db.alter_column('feeds', 'premium_subscribers', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# Changing field 'Feed.premium_subscribers'
db.alter_column('feeds', 'premium_subscribers', self.gf('django.db.models.fields.IntegerField')())
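# Net effect of this migration: the feeds.premium_subscribers column
# becomes nullable; the frozen ORM definition below keeps its default of -1.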
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'null': 'True'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedfetchhistory': {
'Meta': {'object_name': 'FeedFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedpage': {
'Meta': {'object_name': 'FeedPage'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_data': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.feedxml': {
'Meta': {'object_name': 'FeedXML'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_xml': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.pagefetchhistory': {
'Meta': {'object_name': 'PageFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'page_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.story': {
'Meta': {'ordering': "['-story_date']", 'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.tag': {
'Meta': {'object_name': 'Tag'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rss_feeds']
| mit |
gsmaxwell/phase_offset_rx | gnuradio-core/src/python/gnuradio/blks2impl/wfm_rcv_pll.py | 17 | 9788 | #
# Copyright 2005,2006 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio.blks2impl.fm_emph import fm_deemph
import math
class wfm_rcv_pll(gr.hier_block2):
def __init__ (self, demod_rate, audio_decimation):
"""
Hierarchical block for demodulating a broadcast FM signal.
The input is the downconverted complex baseband signal (gr_complex).
The output is two streams of the demodulated audio (float) 0=Left, 1=Right.
@param demod_rate: input sample rate of complex baseband input.
@type demod_rate: float
@param audio_decimation: how much to decimate demod_rate to get to audio.
@type audio_decimation: integer
"""
gr.hier_block2.__init__(self, "wfm_rcv_pll",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(2, 2, gr.sizeof_float)) # Output signature
bandwidth = 250e3
audio_rate = demod_rate / audio_decimation
# We assign to self so that outsiders can grab the demodulator
# if they need to. E.g., to plot its output.
#
# input: complex; output: float
loop_bw = 2*math.pi/100.0
max_freq = 2.0*math.pi*90e3/demod_rate
self.fm_demod = gr.pll_freqdet_cf (loop_bw, max_freq,-max_freq)
# input: float; output: float
self.deemph_Left = fm_deemph (audio_rate)
self.deemph_Right = fm_deemph (audio_rate)
# compute FIR filter taps for audio filter
width_of_transition_band = audio_rate / 32
audio_coeffs = gr.firdes.low_pass (1.0 , # gain
demod_rate, # sampling rate
15000 ,
width_of_transition_band,
gr.firdes.WIN_HAMMING)
# input: float; output: float
self.audio_filter = gr.fir_filter_fff (audio_decimation, audio_coeffs)
if 1:
# Pick off the stereo carrier/2 with this filter. It is attenuated 10 dB, so apply 10 dB of gain.
# We pick off the negative-frequency half because we want to baseband with it!
## NOTE THIS WAS HACKED TO OFFSET INSERTION LOSS DUE TO DEEMPHASIS
stereo_carrier_filter_coeffs = gr.firdes.complex_band_pass(10.0,
demod_rate,
-19020,
-18980,
width_of_transition_band,
gr.firdes.WIN_HAMMING)
#print "len stereo carrier filter = ",len(stereo_carrier_filter_coeffs)
#print "stereo carrier filter ", stereo_carrier_filter_coeffs
#print "width of transition band = ",width_of_transition_band, " audio rate = ", audio_rate
# Pick off the double-sideband suppressed-carrier (DSBSC) Left-Right audio. It is attenuated 10 dB, so apply 10 dB of gain.
stereo_dsbsc_filter_coeffs = gr.firdes.complex_band_pass(20.0,
demod_rate,
38000-15000/2,
38000+15000/2,
width_of_transition_band,
gr.firdes.WIN_HAMMING)
#print "len stereo dsbsc filter = ",len(stereo_dsbsc_filter_coeffs)
#print "stereo dsbsc filter ", stereo_dsbsc_filter_coeffs
# construct overlap add filter system from coefficients for stereo carrier
self.stereo_carrier_filter = gr.fir_filter_fcc(audio_decimation, stereo_carrier_filter_coeffs)
# The carrier is twice the picked-off carrier, so arrange to do a complex multiply.
self.stereo_carrier_generator = gr.multiply_cc();
# Pick off the rds signal
stereo_rds_filter_coeffs = gr.firdes.complex_band_pass(30.0,
demod_rate,
57000 - 1500,
57000 + 1500,
width_of_transition_band,
gr.firdes.WIN_HAMMING)
#print "len stereo dsbsc filter = ",len(stereo_dsbsc_filter_coeffs)
#print "stereo dsbsc filter ", stereo_dsbsc_filter_coeffs
# construct overlap add filter system from coefficients for stereo carrier
self.rds_signal_filter = gr.fir_filter_fcc(audio_decimation, stereo_rds_filter_coeffs)
self.rds_carrier_generator = gr.multiply_cc();
self.rds_signal_generator = gr.multiply_cc();
self_rds_signal_processor = gr.null_sink(gr.sizeof_gr_complex);
loop_bw = 2*math.pi/100.0
max_freq = -2.0*math.pi*18990/audio_rate;
min_freq = -2.0*math.pi*19010/audio_rate;
self.stereo_carrier_pll_recovery = gr.pll_refout_cc(loop_bw, max_freq, min_freq);
#self.stereo_carrier_pll_recovery.squelch_enable(False) #pll_refout does not have squelch yet, so disabled for now
# set up mixer (multiplier) to get the L-R signal at baseband
self.stereo_basebander = gr.multiply_cc();
# pick off the real component of the basebanded L-R signal. The imaginary SHOULD be zero
self.LmR_real = gr.complex_to_real();
self.Make_Left = gr.add_ff();
self.Make_Right = gr.sub_ff();
self.stereo_dsbsc_filter = gr.fir_filter_fcc(audio_decimation, stereo_dsbsc_filter_coeffs)
if 1:
# send the real signal to complex filter to pick off the carrier and then to one side of a multiplier
self.connect (self, self.fm_demod,self.stereo_carrier_filter,self.stereo_carrier_pll_recovery, (self.stereo_carrier_generator,0))
# send the already filtered carrier to the otherside of the carrier
self.connect (self.stereo_carrier_pll_recovery, (self.stereo_carrier_generator,1))
# the resulting signal from this multiplier is the carrier with correct phase but at -38000 Hz.
# send the new carrier to one side of the mixer (multiplier)
self.connect (self.stereo_carrier_generator, (self.stereo_basebander,0))
# send the demodulated signal to the DSBSC pick-off filter; the complex
# DSBSC signal at +38000 Hz is sent to the other side of the mixer/multiplier
self.connect (self.fm_demod,self.stereo_dsbsc_filter, (self.stereo_basebander,1))
# the result is BASEBANDED DSBSC with phase zero!
# Pick off the real part since the imaginary is theoretically zero and then to one side of a summer
self.connect (self.stereo_basebander, self.LmR_real, (self.Make_Left,0))
#take the same real part of the DSBSC baseband signal and send it to negative side of a subtracter
self.connect (self.LmR_real,(self.Make_Right,1))
# Make rds carrier by taking the squared pilot tone and multiplying by pilot tone
self.connect (self.stereo_basebander,(self.rds_carrier_generator,0))
self.connect (self.stereo_carrier_pll_recovery,(self.rds_carrier_generator,1))
# take signal, filter off rds, send into mixer 0 channel
self.connect (self.fm_demod,self.rds_signal_filter,(self.rds_signal_generator,0))
# take rds_carrier_generator output and send into mixer 1 channel
self.connect (self.rds_carrier_generator,(self.rds_signal_generator,1))
# send the basebanded rds signal into the "processor", which for now is a null sink
self.connect (self.rds_signal_generator,self_rds_signal_processor)
if 1:
# pick off the audio, L+R that is what we used to have and send it to the summer
self.connect(self.fm_demod, self.audio_filter, (self.Make_Left, 1))
# take the picked off L+R audio and send it to the PLUS side of the subtractor
self.connect(self.audio_filter,(self.Make_Right, 0))
# The result of Make_Left gets (L+R) + (L-R) and results in 2*L
# The result of Make_Right gets (L+R) - (L-R) and results in 2*R
self.connect(self.Make_Left , self.deemph_Left, (self, 0))
self.connect(self.Make_Right, self.deemph_Right, (self, 1))
# NOTE: mono support will require variable number of outputs in hier_block2s
# See ticket:174 in Trac database
#else:
# self.connect (self.fm_demod, self.audio_filter, self)
| gpl-3.0 |
Onager/plaso | tests/parsers/winreg_plugins/test_lib.py | 1 | 4466 | # -*- coding: utf-8 -*-
"""Windows Registry plugin related functions and classes for testing."""
from dfwinreg import fake as dfwinreg_fake
from dfwinreg import registry as dfwinreg_registry
from plaso.containers import sessions
from plaso.parsers import winreg_parser
from plaso.storage.fake import writer as fake_writer
from tests.parsers import test_lib
class RegistryPluginTestCase(test_lib.ParserTestCase):
"""The unit test case for a Windows Registry plugin."""
# pylint: disable=protected-access
def _AssertFiltersOnKeyPath(self, plugin, key_path):
"""Asserts if the key path matches one of the plugin filters.
Args:
plugin (WindowsRegistryPlugin): Windows Registry plugin.
key_path (str): Windows Registry key path.
"""
_, _, key_name = key_path.rpartition('\\')
registry_key = dfwinreg_fake.FakeWinRegistryKey(key_name, key_path=key_path)
result = self._CheckFiltersOnKeyPath(plugin, registry_key)
self.assertTrue(result)
def _AssertNotFiltersOnKeyPath(self, plugin, key_path):
"""Asserts if the key path does not match one of the plugin filters.
Args:
plugin (WindowsRegistryPlugin): Windows Registry plugin.
key_path (str): Windows Registry key path.
"""
_, _, key_name = key_path.rpartition('\\')
registry_key = dfwinreg_fake.FakeWinRegistryKey(key_name, key_path=key_path)
result = self._CheckFiltersOnKeyPath(plugin, registry_key)
self.assertFalse(result)
def _CheckFiltersOnKeyPath(self, plugin, registry_key):
"""Checks if the key path matches one of the plugin filters.
Args:
plugin (WindowsRegistryPlugin): Windows Registry plugin.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
bool: True if the key path matches one of the plugin filters,
False otherwise.
"""
result = False
for path_filter in plugin.FILTERS:
if path_filter.Match(registry_key):
result = True
return result
def _GetWinRegistryFromFileEntry(self, file_entry):
"""Retrieves a Windows Registry from a file entry.
Args:
file_entry (dfvfs.FileEntry): file entry that references a test file.
Returns:
dfwinreg.WinRegistry: Windows Registry or None.
"""
file_object = file_entry.GetFileObject()
if not file_object:
return None
win_registry_reader = winreg_parser.FileObjectWinRegistryFileReader()
registry_file = win_registry_reader.Open(file_object)
if not registry_file:
file_object.close()
return None
win_registry = dfwinreg_registry.WinRegistry()
key_path_prefix = win_registry.GetRegistryFileMapping(registry_file)
win_registry.MapFile(key_path_prefix, registry_file)
return win_registry
def _ParseKeyWithPlugin(
self, registry_key, plugin, file_entry=None, knowledge_base_values=None,
parser_chain=None, timezone='UTC'):
"""Parses a key within a Windows Registry file using the plugin.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry Key.
plugin (WindowsRegistryPlugin): Windows Registry plugin.
file_entry (Optional[dfvfs.FileEntry]): file entry.
knowledge_base_values (Optional[dict[str, str]]): knowledge base values.
parser_chain (Optional[str]): parsing chain up to this point.
timezone (Optional[str]): timezone.
Returns:
FakeStorageWriter: storage writer.
"""
self.assertIsNotNone(registry_key)
session = sessions.Session()
storage_writer = fake_writer.FakeStorageWriter(session)
storage_writer.Open()
parser_mediator = self._CreateParserMediator(
storage_writer, file_entry=file_entry,
knowledge_base_values=knowledge_base_values, timezone=timezone)
# Most tests aren't explicitly checking for parser chain values,
# or setting them, so we'll just append the plugin name if no explicit
# parser chain argument is supplied.
if parser_chain is None:
# AppendToParserChain needs to be run after SetFileEntry.
parser_mediator.AppendToParserChain(plugin)
else:
# In the rare case that a test is checking for a particular chain, we
# provide a way set it directly. There's no public API for this,
# as access to the parser chain should be very infrequent.
parser_mediator._parser_chain_components = parser_chain.split('/')
plugin.Process(parser_mediator, registry_key)
return storage_writer
| apache-2.0 |
sgallagher/reviewboard | reviewboard/webapi/resources/review_group_user.py | 2 | 9019 | from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.utils import six
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from reviewboard.reviews.models import Group
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import INVALID_USER
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.user import UserResource
class ReviewGroupUserResource(UserResource):
"""Provides information on users that are members of a review group."""
name = 'review_group_user'
item_result_key = 'user'
list_result_key = 'users'
uri_name = 'users'
# We do not want the watched resource to be available under this resource
# as it will have the wrong URL and does not make sense as a sub-resource;
# we will be serializing a link to the user resource and it can be found
# from there.
item_child_resources = []
allowed_methods = ('GET', 'POST', 'DELETE')
policy_id = 'review_group_user'
def get_queryset(self, request, group_name, local_site_name=None,
*args, **kwargs):
group = Group.objects.get(name=group_name,
local_site__name=local_site_name)
return group.users.all()
def get_href_parent_ids(self, obj, **kwargs):
"""Return the href parent IDs for the object.
Args:
obj (django.contrib.auth.models.User):
The user.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
The parent IDs to be used to determine the href of the resource.
"""
# Since we do not have a direct link to the model parent (the
# Group.users field is a many-to-many field so we cannot use it because
# the reverse relation is not unique), we have to manually generate the
# parent IDs from the parent resource.
parent_id_key = self._parent_resource.uri_object_key
return {
parent_id_key: kwargs[parent_id_key],
}
def get_related_links(self, obj=None, request=None, *args, **kwargs):
"""Return the related links for the resource.
Args:
obj (django.contrib.auth.models.User, optional):
The user for which links are being generated.
request (django.http.HttpRequest):
The current HTTP request.
*args (tuple):
Additional positional arguments.
**kwargs (dict):
Additional keyword arguments.
Returns:
dict:
The related links for the resource.
"""
links = super(ReviewGroupUserResource, self).get_related_links(
obj, request, *args, **kwargs)
# We only want the 'user' link when this is an item resource.
if self.uri_object_key in kwargs:
username = kwargs[self.uri_object_key]
links['user'] = {
'href': resources.user.get_item_url(username=username),
'method': 'GET',
}
return links
def get_serializer_for_object(self, obj):
"""Return the serializer for an object.
If the object is a :py:class:`~django.contrib.auth.models.User`
instance, we will serialize it (instead of the
:py:class:`~reviewboard.webapi.resources.user.UserResource` resource
so that the links will be correct. Otherwise, the POST and DELETE links
will be for the actual user instead of for this resource.
Args:
obj (django.db.models.base.Model):
The model being serialized.
Returns:
djblets.webapi.resources.base.WebAPIResource:
The resource that should be used to serialize the object.
"""
if isinstance(obj, User):
return self
return super(ReviewGroupUserResource, self).get_serializer_for_object(
obj)
def has_access_permissions(self, request, user, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_accessible_by(request.user)
def has_list_access_permissions(self, request, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_accessible_by(request.user)
def has_modify_permissions(self, request, group, username, local_site):
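# Membership may be changed by anyone who can modify the group itself,
# or by users adding/removing themselves, provided the group is
# accessible to them.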
return (
resources.review_group.has_modify_permissions(request, group) or
(request.user.username == username and
group.is_accessible_by(request.user))
)
def has_delete_permissions(self, request, user, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_mutable_by(request.user)
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_USER,
NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(required={
'username': {
'type': six.text_type,
'description': 'The user to add to the group.',
'added_in': '1.6.14',
},
})
def create(self, request, username, *args, **kwargs):
"""Adds a user to a review group."""
group_resource = resources.review_group
try:
group = group_resource.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
local_site = self._get_local_site(kwargs.get('local_site_name', None))
if (not group_resource.has_access_permissions(request, group) or
not self.has_modify_permissions(request, group, username,
local_site)):
return self.get_no_access_error(request)
try:
if local_site:
user = local_site.users.get(username=username)
else:
user = User.objects.get(username=username)
except ObjectDoesNotExist:
return INVALID_USER
group.users.add(user)
return 201, {
self.item_result_key: user,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_USER,
NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
"""Removes a user from a review group."""
group_resource = resources.review_group
try:
group = group_resource.get_object(request, *args, **kwargs)
user = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
local_site = self._get_local_site(kwargs.get('local_site_name', None))
if (not group_resource.has_access_permissions(request, group) or
not self.has_modify_permissions(request, group, user.username,
local_site)):
return self.get_no_access_error(request)
group.users.remove(user)
return 204, {}
@webapi_check_local_site
@webapi_request_fields(optional={
'fullname': {
'type': bool,
'description': 'Whether to also match first and last names against the query.'
},
'q': {
'type': six.text_type,
'description': 'Limit the results to usernames starting with the '
'provided value. This is case-insensitive.',
},
})
@augment_method_from(UserResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of users belonging to a specific review group.
This includes only the users who have active accounts on the site.
Any account that has been disabled (for inactivity, spam reasons,
or anything else) will be excluded from the list.
The list of users can be filtered down using the ``q`` and
``fullname`` parameters.
Setting ``q`` to a value will by default limit the results to
usernames starting with that value. This is a case-insensitive
comparison.
If ``fullname`` is set to ``1``, the first and last names will also be
checked along with the username. ``fullname`` is ignored if ``q``
is not set.
For example, accessing ``/api/users/?q=bo&fullname=1`` will list
any users with a username, first name or last name starting with
``bo``.
"""
pass
review_group_user_resource = ReviewGroupUserResource()
| mit |
KontorConsulting/odoo | addons/account/wizard/account_report_general_ledger.py | 267 | 3191 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
_inherit = "account.common.account.report"
_name = "account.report.general.ledger"
_description = "General Ledger Report"
_columns = {
'landscape': fields.boolean("Landscape Mode"),
'initial_balance': fields.boolean('Include Initial Balances',
help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
_defaults = {
'landscape': True,
'amount_currency': True,
'sortby': 'sort_date',
'initial_balance': False,
}
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
res = {}
if not fiscalyear:
res['value'] = {'initial_balance': False}
return res
def _print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data = self.pre_print_report(cr, uid, ids, data, context=context)
data['form'].update(self.read(cr, uid, ids, ['landscape', 'initial_balance', 'amount_currency', 'sortby'])[0])
if not data['form']['fiscalyear_id']:  # GTK client issue: the onchange result is not applied when the record is saved
data['form'].update({'initial_balance': False})
if data['form']['landscape'] is False:
data['form'].pop('landscape')
else:
context['landscape'] = data['form']['landscape']
return self.pool['report'].get_action(cr, uid, [], 'account.report_generalledger', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
soimort/you-get | src/you_get/extractors/zhihu.py | 2 | 3558 | #!/usr/bin/env python
__all__ = ['zhihu_download', 'zhihu_download_playlist']
from ..common import *
import json
def zhihu_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
paths = url.split("/")
# question or column
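# NOTE: as written, the second comparison below is redundant (the whole
# test reduces to len(paths) < 3); the substring checks that follow
# perform the real URL validation.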
if len(paths) < 3 and len(paths) < 6:
raise TypeError("URL does not conform to specifications, Support column and question only."
"Example URL: https://zhuanlan.zhihu.com/p/51669862 or "
"https://www.zhihu.com/question/267782048/answer/490720324")
if ("question" not in paths or "answer" not in paths) and "zhuanlan.zhihu.com" not in paths:
raise TypeError("URL does not conform to specifications, Support column and question only."
"Example URL: https://zhuanlan.zhihu.com/p/51669862 or "
"https://www.zhihu.com/question/267782048/answer/490720324")
html = get_html(url, faker=True)
title = match1(html, r'data-react-helmet="true">(.*?)</title>')
for index, video_id in enumerate(matchall(html, [r'<a class="video-box" href="\S+video/(\d+)"'])):
try:
video_info = json.loads(
get_content(r"https://lens.zhihu.com/api/videos/{}".format(video_id), headers=fake_headers))
except json.decoder.JSONDecodeError:
log.w("Video id not found:{}".format(video_id))
continue
play_list = video_info["playlist"]
# Prefer the playlist qualities in order: hd (high definition),
# then sd (standard definition), then ld (presumably low definition);
# skip the video if none of them is present.
data = play_list.get("hd", play_list.get("sd", play_list.get("ld", None)))
if not data:
log.w("Video id No play address:{}".format(video_id))
continue
print_info(site_info, title, data["format"], data["size"])
if not info_only:
ext = "_{}.{}".format(index, data["format"])
if kwargs.get("zhihu_offset"):
ext = "_{}".format(kwargs["zhihu_offset"]) + ext
download_urls([data["play_url"]], title, ext, data["size"],
output_dir=output_dir, merge=merge, **kwargs)
def zhihu_download_playlist(url, output_dir='.', merge=True, info_only=False, **kwargs):
if "question" not in url or "answer" in url: # question page
raise TypeError("URL does not conform to specifications, Support question only."
" Example URL: https://www.zhihu.com/question/267782048")
url = url.split("?")[0]
if url[-1] == "/":
question_id = url.split("/")[-2]
else:
question_id = url.split("/")[-1]
videos_url = r"https://www.zhihu.com/api/v4/questions/{}/answers".format(question_id)
try:
questions = json.loads(get_content(videos_url))
except json.decoder.JSONDecodeError:
raise TypeError("Check whether the problem URL exists.Example URL: https://www.zhihu.com/question/267782048")
count = 0
while 1:
for data in questions["data"]:
kwargs["zhihu_offset"] = count
zhihu_download("https://www.zhihu.com/question/{}/answer/{}".format(question_id, data["id"]),
output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
count += 1
if questions["paging"]["is_end"]:
return
questions = json.loads(get_content(questions["paging"]["next"], headers=fake_headers))
site_info = "zhihu.com"
download = zhihu_download
download_playlist = zhihu_download_playlist
| mit |
alexryndin/ambari | ambari-server/src/main/resources/stacks/ADH/1.5/services/TEZ/package/scripts/tez_client.py | 4 | 3757 | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import os
import urlparse
from ambari_commons import OSConst
from ambari_commons.inet_utils import download_file
from ambari_commons.os_family_impl import OsFamilyImpl
from ambari_commons.os_utils import copy_file, extract_path_component
from resource_management.core.exceptions import ClientComponentHasNoStatus
from resource_management.core.source import InlineTemplate
from resource_management.libraries.functions import conf_select
from resource_management.libraries.script.script import Script
from tez import tez
class TezClient(Script):
def configure(self, env):
import params
env.set_params(params)
tez()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class TezClientLinux(TezClient):
def get_stack_to_component(self):
return {"HDP": "hadoop-client"}
def pre_upgrade_restart(self, env, upgrade_type=None):
import params
env.set_params(params)
def install(self, env):
self.install_packages(env)
self.configure(env)
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class TezClientWindows(TezClient):
def install(self, env):
import params
if params.tez_home_dir is None:
self.install_packages(env)
params.refresh_tez_state_dependent_params()
env.set_params(params)
self._install_lzo_support_if_needed(params)
self.configure(env)
def _install_lzo_support_if_needed(self, params):
hadoop_classpath_prefix = self._expand_hadoop_classpath_prefix(params.hadoop_classpath_prefix_template, params.config['configurations']['tez-site'])
hadoop_lzo_dest_path = extract_path_component(hadoop_classpath_prefix, "hadoop-lzo-")
if hadoop_lzo_dest_path:
hadoop_lzo_file = os.path.split(hadoop_lzo_dest_path)[1]
config = Script.get_config()
file_url = urlparse.urljoin(config['hostLevelParams']['jdk_location'], hadoop_lzo_file)
hadoop_lzo_dl_path = os.path.join(config["hostLevelParams"]["agentCacheDir"], hadoop_lzo_file)
download_file(file_url, hadoop_lzo_dl_path)
# This is for protection against configuration changes. It will infect every new destination with the lzo jar,
# but since the classpath points to the jar directly we're getting away with it.
if not os.path.exists(hadoop_lzo_dest_path):
copy_file(hadoop_lzo_dl_path, hadoop_lzo_dest_path)
def _expand_hadoop_classpath_prefix(self, hadoop_classpath_prefix_template, configurations):
import resource_management
hadoop_classpath_prefix_obj = InlineTemplate(hadoop_classpath_prefix_template, configurations_dict=configurations,
extra_imports=[resource_management, resource_management.core,
resource_management.core.source])
hadoop_classpath_prefix = hadoop_classpath_prefix_obj.get_content()
return hadoop_classpath_prefix
if __name__ == "__main__":
TezClient().execute()
| apache-2.0 |
pradeepbp/stocker2 | portfolio.py | 1 | 4715 | #!/usr/bin/python
'''
portfolio.py
Copyright (C) 2011 Pradeep Balan Pillai
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
'''
import gobject
import gtk
import guiutils
class Portfolio:
def __init__(self, profile = None):
self.drawGUI()
# Dialog to receive transaction input
def add_transaction(self):
dialog = gtk.Dialog('Add Transaction', None, gtk.DIALOG_MODAL,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_ADD,
gtk.RESPONSE_ACCEPT))
dialog.set_position(gtk.WIN_POS_CENTER)
table = gtk.Table(6,2)
name_label = gtk.Label('Name')
name_label.set_alignment(0.90,0.5)
code_label = gtk.Label('Stock code')
code_label.set_alignment(0.90,0.5)
action_label = gtk.Label('Action')
action_label.set_alignment(0.90,0.5)
date_label = gtk.Label('Date')
date_label.set_alignment(0.90,0.5)
quantity_label = gtk.Label('Quantity')
quantity_label.set_alignment(0.90,0.5)
price_label = gtk.Label('Price')
price_label.set_alignment(0.90,0.5)
table.attach(name_label, 0,1,0,1)
table.attach(code_label, 0,1, 1, 2,gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(action_label, 0,1, 2, 3,gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(date_label, 0,1, 3, 4,gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(quantity_label, 0,1, 4, 5,gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(price_label, 0,1, 5, 6,gtk.EXPAND|gtk.FILL, gtk.SHRINK)
name_entry = gtk.Entry()
code_entry = gtk.Entry()
date_selector = guiutils.DateEntry()
quantity_entry = gtk.Entry()
price_entry = gtk.Entry()
combolist = gtk.ListStore(gobject.TYPE_STRING)
for item in ['Buy', 'Sell']:
combolist.append([item])
action_combobox = gtk.ComboBoxEntry(combolist)
table.attach(name_entry, 1,2,0,1, gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(code_entry, 1,2,1,2, gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(action_combobox, 1,2,2,3, gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(date_selector, 1,2,3,4, gtk.SHRINK)
table.attach(quantity_entry, 1,2,4,5, gtk.EXPAND|gtk.FILL, gtk.SHRINK)
table.attach(price_entry, 1,2,5,6, gtk.EXPAND|gtk.FILL, gtk.SHRINK)
dialog.vbox.pack_start(table)
dialog.show_all()
resp = dialog.run()
if resp == gtk.RESPONSE_CANCEL:
dialog.destroy()
# Function to generate the GUI
def drawGUI(self):
self.win = gtk.Window()
self.win.set_title('Portfolio')
self.win.set_default_size(400,200)
self.win.set_position(gtk.WIN_POS_CENTER)
self.win.connect('destroy', gtk.main_quit)
# Set toolbar and toolbuttons
toolbar = gtk.Toolbar()
add_toolbutton = gtk.ToolButton(gtk.STOCK_ADD)
add_toolbutton.set_tooltip_text('Add transaction')
add_toolbutton.connect('clicked', lambda a: self.add_transaction())
delete_toolbutton = gtk.ToolButton(gtk.STOCK_DELETE)
delete_toolbutton.set_tooltip_text('Delete transaction')
import_toolbutton = gtk.ToolButton(gtk.STOCK_GO_DOWN)
import_toolbutton.set_label('Import')
import_toolbutton.set_tooltip_text('Import transaction')
refresh_toolbutton = gtk.ToolButton(gtk.STOCK_REFRESH)
refresh_toolbutton.set_tooltip_text('Update portfolio')
toolbar.insert(refresh_toolbutton, 0)
toolbar.insert(import_toolbutton, 0)
toolbar.insert(delete_toolbutton, 0)
toolbar.insert(add_toolbutton, 0)
# Create a Liststore object for portfolio table and attach it to Treeview
self.store = gtk.ListStore(str,str,str,str,str)
treeview = gtk.TreeView(self.store)
layout = gtk.Table(2, 1)
layout.attach(toolbar, 0,1,0,1, gtk.FILL|gtk.EXPAND, gtk.SHRINK)
layout.attach(treeview, 0,1,1,2) # show the portfolio table below the toolbar
self.win.add(layout)
self.win.show_all()
gtk.main()
if __name__ == '__main__':
Portfolio()
| gpl-3.0 |
RookieGameDevs/revived | docs/conf.py | 1 | 4888 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# revived documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 13 23:19:29 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx_autodoc_typehints'
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'revived'
copyright = '2017, Lorenzo Berni'
author = 'Lorenzo Berni'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'reviveddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'revived.tex', 'revived Documentation',
'Lorenzo Berni', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'revived', 'revived Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'revived', 'revived Documentation',
author, 'revived', 'One line description of project.',
'Miscellaneous'),
]
| bsd-3-clause |
andymckay/django | tests/regressiontests/admin_widgets/widgetadmin.py | 149 | 1211 | """
"""
from __future__ import absolute_import
from django.contrib import admin
from . import models
class WidgetAdmin(admin.AdminSite):
pass
class CarAdmin(admin.ModelAdmin):
list_display = ['make', 'model', 'owner']
list_editable = ['owner']
class CarTireAdmin(admin.ModelAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "car":
kwargs["queryset"] = models.Car.objects.filter(owner=request.user)
return db_field.formfield(**kwargs)
return super(CarTireAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class EventAdmin(admin.ModelAdmin):
raw_id_fields = ['band']
class SchoolAdmin(admin.ModelAdmin):
filter_vertical = ('students',)
filter_horizontal = ('alumni',)
site = WidgetAdmin(name='widget-admin')
site.register(models.User)
site.register(models.Car, CarAdmin)
site.register(models.CarTire, CarTireAdmin)
site.register(models.Member)
site.register(models.Band)
site.register(models.Event, EventAdmin)
site.register(models.Album)
site.register(models.Inventory)
site.register(models.Bee)
site.register(models.Advisor)
site.register(models.School, SchoolAdmin) | bsd-3-clause |
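# A hypothetical URLconf sketch showing how a custom AdminSite such as
# `site` above would be mounted; the module path and URL prefix are
# illustrative, using the old-style patterns() of this test-suite era.
from django.conf.urls import include, patterns, url
from regressiontests.admin_widgets.widgetadmin import site

urlpatterns = patterns('',
    url(r'^widget-admin/', include(site.urls)),
)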
mcopik/Elemental | examples/interface/SequentialLeastSquares.py | 2 | 1648 | #
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El, time
n0 = n1 = 20
display = False
def ExtendedLaplacian(xSize,ySize):
A = El.SparseMatrix()
n = xSize*ySize
A.Resize(2*n,n)
A.Reserve(6*n)
hxInvSq = (1.*(xSize+1))**2
hyInvSq = (1.*(ySize+1))**2
for s in xrange(2*n):
if s < xSize*ySize:
x = s % xSize
y = s / xSize
A.QueueUpdate( s, s, 2*(hxInvSq+hyInvSq) )
if x != 0: A.QueueUpdate( s, s-1, -hxInvSq )
if x != xSize-1: A.QueueUpdate( s, s+1, -hxInvSq )
if y != 0: A.QueueUpdate( s, s-xSize, -hyInvSq )
if y != ySize-1: A.QueueUpdate( s, s+xSize, -hyInvSq )
else:
A.QueueUpdate( s, s-xSize*ySize, 2*(hxInvSq+hyInvSq) )
A.ProcessQueues()
return A
A = ExtendedLaplacian(n0,n1)
if display:
El.Display( A, "A" )
El.Display( A.Graph(), "Graph of A" )
y = El.Matrix()
El.Uniform( y, 2*n0*n1, 1 )
if display:
El.Display( y, "y" )
yNrm = El.Nrm2(y)
print "|| y ||_2 =", yNrm
startLS = time.time()
x = El.LeastSquares(A,y)
endLS = time.time()
print "LS time:", endLS-startLS, "seconds"
xNrm = El.Nrm2(x)
if display:
El.Display( x, "x" )
print "|| x ||_2 =", xNrm
El.Multiply(El.NORMAL,-1.,A,x,1.,y)
if display:
El.Display( y, "A x - y" )
eNrm = El.Nrm2(y)
print "|| A x - y ||_2 / || y ||_2 =", eNrm/yNrm
# Require the user to press a button before the figures are closed
El.Finalize()
raw_input('Press Enter to exit')
| bsd-3-clause |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/importlib/test/regrtest.py | 51 | 1224 | """Run Python's standard test suite using importlib.__import__.
Tests known to fail because of assumptions that importlib (properly)
invalidates are automatically skipped if the entire test suite is run.
Otherwise all command-line options valid for test.regrtest are also valid for
this script.
XXX FAILING
* test_import
- test_incorrect_code_name
file name differing between __file__ and co_filename (r68360 on trunk)
- test_import_by_filename
exception for trying to import by file name does not match
"""
import importlib
import sys
from test import regrtest
if __name__ == '__main__':
__builtins__.__import__ = importlib.__import__
exclude = ['--exclude',
'test_frozen', # Does not expect __loader__ attribute
'test_pkg', # Does not expect __loader__ attribute
'test_pydoc', # Does not expect __loader__ attribute
]
# Switching on --exclude implies running all test but the ones listed, so
# only use it when one is not running an explicit test
if len(sys.argv) == 1:
# No programmatic way to specify tests to exclude
sys.argv.extend(exclude)
regrtest.main(quiet=True, verbose2=True)
| apache-2.0 |
rgom/Pydev | plugins/org.python.pydev.jython/jysrc/assist_proposal.py | 7 | 6837 | """Convenience module for scripting PyDev Quick Assist proposals in Jyton.
USAGE
=====
Create a pyedit_*.py file in your jython script dir of choice, import this
module, subclass AssistProposal, instantiate it and register the instance
with Pydev.
Example:
-------------------------------------------------------------
from assist_proposal import AssistProposal, register_proposal
class MyProposal(AssistProposal):
implementation_goes_here
register_proposal(MyProposal())
-------------------------------------------------------------
The cmd variable is provided automatically by pydev and will be a string
such as 'onSave' or 'onCreateActions' etc...
See docs in source for further details.
"""
__author__ = """Joel Hedlund <joel.hedlund at gmail.com>
Some ideas borrowed from Fabio Zadrozny. These cases are explicitly noted
in the relevant code docs.
"""
__version__ = "1.0.0"
__copyright__ = """Available under the same conditions as PyDev.
See PyDev license for details.
http://pydev.sourceforge.net
"""
from org.python.pydev.editor.correctionassist.heuristics import IAssistProps #@UnresolvedImport
class AssistProposal:
"""Convenience class for adding assist proposals to pydev.
This class does nothing useful. Subclasses should assign proper values
to data members and provide sane implementations for methods.
Class data members
==================
description: <str>
The text displayed to the user in the quick assist menu (Ctrl-1).
tag: <str>
Unique descriptive identifier for the assist.
"""
description = "Remember to change this description"
tag = "REMEMBER_TO_CHANGE_THIS_TAG"
def isValid(self, selection, current_line, editor, offset):
"""Return True if the proposal is applicable, False otherwise.
This method should provide the same interface as the method with
the same name in IAssistProps.
If desirable, subclasses may store the isValid args as instance
data members for use with .apply().
IN:
pyselection: <PySelection>
The current selection. Highly useful.
current_line: <str>
The text on the current line.
editor: <PyEdit>
The current editor.
offset: <int>
The current position in the editor.
OUT:
Boolean. Is the proposal applicable in the current situation?
"""
return False
def apply(self, document):
"""Do what the assist is supposed to do when activated.
This method should provide the same interface as the method with
same name in PyCompletionProposal.
See also docs for the .isValid() method. You might like to use data
from there.
IN:
document: <IDocument>
The edited document.
OUT:
None.
"""
def register_proposal(proposal, debug=False):
"""Register the proposal with the quick assistant.
IN:
proposal: <AssistProposal>
The object that holds all relevant information and does all the
necessary work for the proposal.
debug = False: <bool>
If False (default), we will not attempt to re-register the assist
proposal if an assist proposal with the same tag is already
registered. If True, then we will override the registered proposal
with our own. This is mainly useful for debugging.
OUT:
None.
"""
from org.python.pydev.editor.correctionassist import PythonCorrectionProcessor #@UnresolvedImport
bTagInUse = PythonCorrectionProcessor.hasAdditionalAssist(proposal.tag)
if debug or not bTagInUse:
oInterface = AssistantInterface(proposal)
PythonCorrectionProcessor.addAdditionalAssist(proposal.tag, oInterface)
class AssistantInterface(IAssistProps):
"""Assistant interface wrapper for AssistProposal instances.
The Quick Assistant will ask this class if we can apply the proposal,
and if so, which properties does it have?
Adapted from Fabio Zadrozny's AssistAssignParamsToAttributes class in
assign_params_to_attributes_assist.py.
Instance data members
=====================
proposal: <AssistProposal>
The object that holds all relevant information and does all the
necessary work for the proposal.
"""
def __init__(self, proposal, *args):
"""A new Assistant Interface.
IN:
proposal: <AssistProposal>
"""
self.proposal = proposal
def getImage(self, imageCache, c):
if imageCache is not None:
return imageCache.get(c)
return None
def isValid(self, ps, sel, editor, offset):
"""java: boolean isValid(PySelection ps, String sel, PyEdit edit, int offset);
"""
return self.proposal.isValid(ps, sel, editor, offset)
def getProps(self, ps, imageCache, f, nature, editor, offset):
'''java: List<ICompletionProposal> getProps(PySelection ps, ImageCache imageCache, File f,
IPythonNature nature, PyEdit edit, int offset)
'''
from java.util import ArrayList #@UnresolvedImport
IPyCompletionProposal = editor.getIPyCompletionProposalClass() #@UnresolvedImport
PyCompletionProposal = editor.getPyCompletionProposalClass() #@UnresolvedImport
UIConstants = editor.getUIConstantsClass() #@UnresolvedImport
class Prop(PyCompletionProposal):
"""This is the proposal that Ctrl+1 will require.
Adapted from Fabio Zadrozny's Prop class in
assign_params_to_attributes_assist.py.
Instance data members
=====================
proposal: <AssistProposal>
The object that holds all relevant information and does all the
necessary work for the proposal.
"""
def __init__(self, proposal, *args):
PyCompletionProposal.__init__(self, *args)
self.proposal = proposal
def apply(self, document):
"""java: public void apply(IDocument document)
"""
self.proposal.apply(document)
def getSelection(self, document):
return None
oProp = Prop(self.proposal,
'', 0, 0, 0,
self.getImage(imageCache, UIConstants.ASSIST_DOCSTRING),
self.proposal.description,
None, None,
IPyCompletionProposal.PRIORITY_DEFAULT)
l = ArrayList()
l.add(oProp)
return l
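# A hypothetical end-to-end sketch of the pattern described in the module
# docstring above; the description/tag values and the non-empty-line
# heuristic are illustrative only.
class InsertTodoProposal(AssistProposal):
    description = "Insert TODO marker"
    tag = "INSERT_TODO_MARKER"

    def isValid(self, selection, current_line, editor, offset):
        # Store state for apply(), as suggested in the class docs.
        self.offset = offset
        return bool(current_line.strip())

    def apply(self, document):
        # IDocument.replace(offset, length, text) inserts text at the cursor.
        document.replace(self.offset, 0, "# TODO: ")

register_proposal(InsertTodoProposal())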
| epl-1.0 |
RexValkering/socialforcemodel | socialforcemodel/pedestriannumba.py | 1 | 4631 | from numba import jit, float32, bool_, int32
import numpy as np
@jit(float32[:](float32, float32[:], float32[:], float32, float32, float32,
float32[:, :], float32[:, :], float32[:], float32, float32, bool_, bool_,
float32[:], float32[:], int32))
def calculate_pedestrian_repulsive_force(distance_threshold, self_position, self_velocity,
self_radius, self_speed, self_labda_scale, ped_position, ped_velocity,
ped_radius, world_height, world_width, continuous_domain,
ignore_pedestrians_behind, desired_dir, force_args, k):
""" Calculates the repulsive force with all others pedestrians. """
social_force = np.zeros(2)
physical_force = np.zeros(2)
local_density = 0.0
local_velocity_variance = 0.0
sum_repulsive = 0.0
sum_pushing = 0.0
# Loop through all pedestrians.
for i in range(len(ped_position)):
p_position = np.array(ped_position[i])
# Calculate the distance.
position = self_position
difference = p_position - position
# In case of a continuous domain, we should check if the 'wrapped'
# distance is closer.
if continuous_domain:
if difference[0] > 0.5 * world_width:
difference[0] = difference[0] - world_width
p_position[0] -= world_width
elif difference[0] < - 0.5 * world_width:
difference[0] = difference[0] + world_width
p_position[0] += world_width
if difference[1] > 0.5 * world_height:
difference[1] = difference[1] - world_height
p_position[1] -= world_height
elif difference[1] < - 0.5 * world_height:
difference[1] = difference[1] + world_height
p_position[1] += world_height
distance_squared = difference[0]**2 + difference[1]**2
# Skip if the pedestrian is too far away. This saves a significant
# amount of time in large groups.
if distance_squared > distance_threshold:
continue
distance = np.sqrt(distance_squared)
# Agent overlap is positive if two agents 'overlap' in space.
agent_overlap = self_radius + ped_radius[i] - distance
# Unit vector of the difference
difference_direction = difference / distance
# Find normal and tangential of difference
normal = (position - p_position) / distance
tangential = np.array([-normal[1], normal[0]])
max_repulsive_force = force_args[0]
labda = force_args[1]
D_zero = force_args[2]
D_one = force_args[3]
body_force_constant = force_args[4]
friction_force_constant = force_args[5]
smoothing_squared = force_args[6]
smoothing_factor = force_args[7]
factor = max(distance, 0.15)
cos_angle = desired_dir * difference_direction
labda = (1.0 - self_labda_scale * (1.0 - labda))
omega = labda + (1 - labda) * (1 + cos_angle) / 2
social_repulsion_force = max_repulsive_force * omega * np.exp(
- factor / D_zero + (D_one / factor)**k
)
pushing_force = 0
friction_force = np.array([0, 0])
if agent_overlap > 0 and False:  # the "and False" keeps this contact-force branch disabled
# Find delta, which is a factor for friction force.
delta = (ped_velocity[i] - self_velocity) * tangential
pushing_force = body_force_constant * agent_overlap
friction_force = (friction_force_constant * agent_overlap *
delta * tangential)
# Sum the forces and add to total force.
social_pedestrian_force = social_repulsion_force * normal
physical_pedestrian_force = pushing_force * normal + friction_force
social_force += social_pedestrian_force
physical_force += physical_pedestrian_force
pressure = smoothing_factor * np.exp(-distance_squared /
smoothing_squared)
local_density += pressure
local_velocity_variance += self_speed * pressure
sum_repulsive += np.sqrt(social_repulsion_force[0]**2 + social_repulsion_force[1]**2)
sum_pushing += pushing_force
if local_density != 0:
local_velocity_variance /= local_density
# print([local_density, local_velocity_variance, sum_repulsive, sum_pushing])
return np.append(np.append(social_force, physical_force),
[local_density, local_velocity_variance, sum_repulsive, sum_pushing])
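# A hypothetical smoke test for the jitted routine above; every value is
# made up and only illustrates the expected shapes, dtypes and argument
# order (see the @jit signature).
if __name__ == "__main__":
    pos = np.array([1.0, 1.0], dtype=np.float32)
    vel = np.array([0.5, 0.0], dtype=np.float32)
    others_pos = np.array([[1.5, 1.0], [3.0, 2.0]], dtype=np.float32)
    others_vel = np.zeros((2, 2), dtype=np.float32)
    others_radius = np.array([0.25, 0.25], dtype=np.float32)
    desired = np.array([1.0, 0.0], dtype=np.float32)
    # force_args packs, in order: max repulsive force, lambda, D_0, D_1,
    # body force constant, friction constant, smoothing^2, smoothing factor.
    force_args = np.array([2000.0, 0.35, 0.3, 0.1, 1.2e5, 2.4e5, 1.0, 1.0],
                          dtype=np.float32)
    out = calculate_pedestrian_repulsive_force(
        25.0, pos, vel, 0.25, 0.5, 1.0, others_pos, others_vel,
        others_radius, 10.0, 10.0, False, False, desired, force_args, 2)
    # out holds: social fx/fy, physical fx/fy, local density,
    # velocity variance, summed repulsive force, summed pushing force.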
| mit |
hellhound/dentexchange | dentexchange/apps/employee/models.py | 2 | 7481 | # -*- coding:utf-8 -*-
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from validatedfile.fields import ValidatedFileField
from libs import constants as lib_constants
from libs.models.indexable import IndexableModel
from matches.models import Match
from . import strings, constants
class EmployeeQuestionnaire(IndexableModel):
user = models.OneToOneField(User,
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_USER)
### Job Position you're looking for
job_position = models.PositiveSmallIntegerField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_JOB_POSITION,
choices=constants.JOB_POSITION_CHOICES,
blank=True, null=True)
### Type of Practice
solo_practitioner = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SOLO_PRACTITIONER)
multi_practitioner = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_MULTI_PRACTITIONER)
corporate = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_CORPORATE)
### Patients' Method of Payment
fee_for_service = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_FEE_FOR_SERVICE)
insurance = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_INSURANCE)
capitation_medicaid = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_CAPITATION_MEDICAID)
### Location
zip_code = models.DecimalField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_ZIP_CODE,
max_digits=5, decimal_places=0,
blank=True, null=True)
city = models.CharField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_CITY,
max_length=100, blank=True)
state = models.CharField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_STATE,
choices=lib_constants.STATE_CHOICES,
max_length=2, blank=True)
distance = models.PositiveSmallIntegerField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_DISTANCE,
choices=constants.DISTANCE_CHOICES,
blank=True, null=True)
### Type of schedule required
schedule_type = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SCHEDULE_TYPE,
choices=constants.SCHEDULE_TYPE_CHOICES,
default=constants.SCHEDULE_TYPE_CHOICES.PART_TIME)
monday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_MONDAY_DAYTIME,
default=False)
monday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_MONDAY_EVENING,
default=False)
tuesday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_TUESDAY_DAYTIME,
default=False)
tuesday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_TUESDAY_EVENING,
default=False)
wednesday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_WEDNESDAY_DAYTIME,
default=False)
wednesday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_WEDNESDAY_EVENING,
default=False)
thursday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_THURSDAY_DAYTIME,
default=False)
thursday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_THURSDAY_EVENING,
default=False)
friday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_FRIDAY_DAYTIME,
default=False)
friday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_FRIDAY_EVENING,
default=False)
saturday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SATURDAY_DAYTIME,
default=False)
saturday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SATURDAY_EVENING,
default=False)
sunday_daytime = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SUNDAY_DAYTIME,
default=False)
sunday_evening = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SUNDAY_EVENING,
default=False)
### Compensation
compensation_type = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_COMPENSATION_TYPE,
choices=constants.COMPENSATION_TYPE_CHOICES,
default=constants.COMPENSATION_TYPE_CHOICES.HOURLY)
hourly_wage = models.PositiveSmallIntegerField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_HOURLY_WAGE,
choices=constants.HOURLY_WAGE_CHOICES,
blank=True, null=True)
annualy_wage = models.PositiveSmallIntegerField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_ANNUALY_WAGE,
choices=constants.ANNUALY_WAGE_CHOICES,
blank=True, null=True)
production = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_PRODUCTION,
default=False)
collection = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_COLLECTION,
default=False)
### Experience
experience_years = models.PositiveSmallIntegerField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_EXPERIENCE_YEARS,
choices=constants.EXPERIENCE_YEARS_CHOICES,
blank=True, null=True)
### Education
dental_school = models.CharField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_DENTAL_SCHOOL,
max_length=200, blank=True)
graduation_year = models.PositiveSmallIntegerField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_GRADUATION_YEAR,
choices=constants.GRADUATION_YEAR_CHOICES,
blank=True, null=True)
### Visa
visa = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_VISA,
choices=lib_constants.YES_NO_CHOICES,
default=lib_constants.YES_NO_CHOICES.YES)
### Specific strengths
specific_strengths = models.TextField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_SPECIFIC_STRENGTHS,
blank=True)
### Visibility
is_private = models.BooleanField(
verbose_name=strings.EMPLOYEE_QUESTIONNAIRE_IS_PRIVATE,
help_text=strings.EMPLOYEE_QUESTIONNAIRE_IS_PRIVATE_HELP_TEXT,
default=False)
matches = generic.GenericRelation(Match,
content_type_field='match_content_type',
object_id_field='match_object_id')
class Meta(object):
verbose_name = strings.EMPLOYEE_QUESTIONNAIRE_VERBOSE_NAME
verbose_name_plural = strings.EMPLOYEE_QUESTIONNAIRE_VERBOSE_NAME_PLURAL
def __unicode__(self):
return self.user.email
def get_email(self):
return unicode(self)
def get_location(self):
return
class Resume(models.Model):
user = models.OneToOneField(User, verbose_name=strings.RESUME_USER)
cv_file = ValidatedFileField(upload_to='employee/resumes',
verbose_name=strings.RESUME_CV_FILE,
help_text=strings.RESUME_CV_FILE_HELP_TEXT,
max_upload_size=constants.RESUME_CV_FILE_MAX_UPLOAD_SIZE,
content_types=constants.RESUME_CV_FILE_CONTENT_TYPES,
null=True, blank=True)
class Meta(object):
verbose_name = strings.RESUME_VERBOSE_NAME
verbose_name_plural = strings.RESUME_VERBOSE_NAME_PLURAL
def __unicode__(self):
return self.user.email
def get_email(self):
return unicode(self)
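# A hypothetical queryset sketch against the questionnaire model above;
# it reuses only fields and choices referenced in this file.
def publicly_visible_part_timers():
    return EmployeeQuestionnaire.objects.filter(
        schedule_type=constants.SCHEDULE_TYPE_CHOICES.PART_TIME,
        is_private=False)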
| bsd-3-clause |
paulproteus/django | tests/modeltests/select_related/models.py | 133 | 1643 | """
41. Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument control how many "levels"
the select-related behavior will traverse.
"""
from django.db import models
# Who remembers high school biology?
class Domain(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class Kingdom(models.Model):
name = models.CharField(max_length=50)
domain = models.ForeignKey(Domain)
def __unicode__(self):
return self.name
class Phylum(models.Model):
name = models.CharField(max_length=50)
kingdom = models.ForeignKey(Kingdom)
def __unicode__(self):
return self.name
class Klass(models.Model):
name = models.CharField(max_length=50)
phylum = models.ForeignKey(Phylum)
def __unicode__(self):
return self.name
class Order(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass)
def __unicode__(self):
return self.name
class Family(models.Model):
name = models.CharField(max_length=50)
order = models.ForeignKey(Order)
def __unicode__(self):
return self.name
class Genus(models.Model):
name = models.CharField(max_length=50)
family = models.ForeignKey(Family)
def __unicode__(self):
return self.name
class Species(models.Model):
name = models.CharField(max_length=50)
genus = models.ForeignKey(Genus)
def __unicode__(self):
return self.name | bsd-3-clause |
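# A hypothetical usage sketch for the taxonomy chain above; the species
# name is illustrative, and depth= matches the docstring of this test module.
s = Species.objects.select_related(depth=1).get(name='setosa')
s.genus             # pre-cached by select_related -- no extra query
s.genus.family      # depth=1 stopped here, so this hits the database

s = Species.objects.select_related().get(name='setosa')
s.genus.family.order.klass.phylum.kingdom.domain  # all pre-cached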
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/examples/preprocessing/plot_xdawn_denoising.py | 8 | 2719 | """
================
XDAWN Denoising
================
XDAWN filters are trained from epochs; the signal is projected into the
source space and then projected back into the sensor space using only the
first two XDAWN components. The process is similar to an ICA, but is
supervised in order to maximize the signal to signal-plus-noise ratio of the
evoked response.
WARNING: As this denoising method exploits the known events to
maximize the SNR of the contrast between conditions, it can lead to
overfitting. To avoid biasing a statistical analysis you should split the
epochs used in the fit method from the ones used in the apply method.
References
----------
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
"""
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
from mne import (io, compute_raw_covariance, read_events, pick_types,
Epochs)
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.viz import plot_epochs_image
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(vis_r=4)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20, method='iir') # replace baselining with high-pass
events = read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
exclude='bads')
# Epoching
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
add_eeg_ref=False, verbose=False)
# Plot image epoch before xdawn
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)
# Estimates signal covariance
signal_cov = compute_raw_covariance(raw, picks=picks)
# Xdawn instance
xd = Xdawn(n_components=2, signal_cov=signal_cov)
# Fit xdawn
xd.fit(epochs)
# Denoise epochs
epochs_denoised = xd.apply(epochs)
# Plot image epoch after xdawn
plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
| bsd-3-clause |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extract87Percent.py | 1 | 1143 | def extract87Percent(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Return of the former hero' in item['tags']:
return buildReleaseMessageWithType(item, 'Return of the Former Hero', vol, chp, frag=frag, postfix=postfix)
if 'Dragon egg' in item['tags']:
return buildReleaseMessageWithType(item, 'Reincarnated as a dragon’s egg ~Lets aim to be the strongest~', vol, chp, frag=frag, postfix=postfix)
if 'Summoning at random' in item['tags']:
return buildReleaseMessageWithType(item, 'Summoning at Random', vol, chp, frag=frag, postfix=postfix)
if 'Legend' in item['tags']:
return buildReleaseMessageWithType(item, 'レジェンド', vol, chp, frag=frag, postfix=postfix)
if 'Death game' in item['tags']:
return buildReleaseMessageWithType(item, 'The world is fun as it has become a death game', vol, chp, frag=frag, postfix=postfix)
if 'Elf Tensei' in item['tags']:
return buildReleaseMessageWithType(item, 'Elf Tensei Kara no Cheat Kenkoku-ki', vol, chp, frag=frag, postfix=postfix)
return False
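# A hypothetical item dict; the keys mirror exactly what the function
# reads, and the helpers it calls are provided elsewhere in this project.
if __name__ == "__main__":
    item = {
        'title': 'Summoning at Random chapter 12',
        'tags': ['Summoning at random'],
    }
    print(extract87Percent(item))  # a release message, None, or False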
| bsd-3-clause |
kaiseu/pat-data-processing | node.py | 1 | 2180 | #!/usr/bin/python
# encoding: utf-8
"""
@author: xuk1
@license: (C) Copyright 2013-2017
@contact: kai.a.xu@intel.com
@file: node.py
@time: 8/15/2017 10:47
@desc:
"""
import os
from component.factory import AttribFactory
from utils.commonOps import get_file_names
class Node:
"""
Corresponding to a physical machine, each Node may have many attributes, nodes makes up Cluster
"""
def __init__(self, file_path):
self.file_path = file_path
# self.attrib = self.node_exist_attrib()
def node_exist_attrib(self):
"""
Get attributes this node has
:return: exist attributes of this node
"""
file_names = get_file_names(self.file_path)
exist_attrib = []
for names in file_names:
if names in AttribFactory().node_attrib.values():
exist_attrib.append(AttribFactory().get_attrib(names))
else:
print('{0} module not implemented yet, will ignore it...\n'.format(names))
return sorted(exist_attrib)
def get_attrib_data_by_time(self, attrib, start, end):
"""
Get data of a given attribute within a given time period
:param attrib: input attribute
:param start: list of start timestamp
:param end: list of end timestamp, should be the same length of start
:return: dict that contains avg value and all raw data of all the timestamp pair
"""
if attrib.lower() in AttribFactory.node_attrib.keys():
attrib_file = self.file_path + os.sep + AttribFactory.node_attrib[attrib.lower()]
if os.path.isfile(attrib_file):
return AttribFactory.create_attrib(attrib, attrib_file).get_data_by_time(start, end)
else:
print('node does not have attribute {0}'.format(attrib))
exit(-1)
else:
print('Node does not have attrib: {0} defined, defined attributions are: {1}, will exit...' \
.format(attrib, AttribFactory.node_attrib.keys()))
exit(-1)
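# A hypothetical usage sketch; the log-directory layout and the 'cpu'
# attribute name are assumptions that depend on the PAT run being processed.
if __name__ == "__main__":
    node = Node('/path/to/pat/run/node1')
    print(node.get_attrib_data_by_time('cpu', [1502761620], [1502761680]))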
| apache-2.0 |
camilonova/sentry | src/sentry/migrations/0090_auto__add_grouptagkey__add_unique_grouptagkey_project_group_key__add_f.py | 36 | 28230 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GroupTagKey'
db.create_table('sentry_grouptagkey', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True)),
('group', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Group'])),
('key', self.gf('django.db.models.fields.CharField')(max_length=32)),
('values_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
))
db.send_create_signal('sentry', ['GroupTagKey'])
# Adding unique constraint on 'GroupTagKey', fields ['project', 'group', 'key']
db.create_unique('sentry_grouptagkey', ['project_id', 'group_id', 'key'])
# Adding field 'FilterValue.times_seen'
db.add_column('sentry_filtervalue', 'times_seen',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
# Adding field 'FilterValue.last_seen'
db.add_column('sentry_filtervalue', 'last_seen',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, db_index=True),
keep_default=False)
# Adding field 'FilterValue.first_seen'
db.add_column('sentry_filtervalue', 'first_seen',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, db_index=True),
keep_default=False)
# Adding field 'FilterKey.values_seen'
db.add_column('sentry_filterkey', 'values_seen',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'GroupTagKey', fields ['project', 'group', 'key']
db.delete_unique('sentry_grouptagkey', ['project_id', 'group_id', 'key'])
# Deleting model 'GroupTagKey'
db.delete_table('sentry_grouptagkey')
# Deleting field 'FilterValue.times_seen'
db.delete_column('sentry_filtervalue', 'times_seen')
# Deleting field 'FilterValue.last_seen'
db.delete_column('sentry_filtervalue', 'last_seen')
# Deleting field 'FilterValue.first_seen'
db.delete_column('sentry_filtervalue', 'first_seen')
# Deleting field 'FilterKey.values_seen'
db.delete_column('sentry_filterkey', 'values_seen')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.affecteduserbygroup': {
'Meta': {'unique_together': "(('project', 'tuser', 'group'),)", 'object_name': 'AffectedUserByGroup'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'tuser': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.TrackedUser']", 'null': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'users_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'GroupCountByMinute', 'db_table': "'sentry_messagecountbyminute'"},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouptag': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTag', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.trackeduser': {
'Meta': {'unique_together': "(('project', 'ident'),)", 'object_name': 'TrackedUser'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Group']", 'through': "orm['sentry.AffectedUserByGroup']", 'symmetrical': 'False'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'num_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
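For context, a frozen `models` dict like the one above is normally embedded in a South `SchemaMigration` class, where the `forwards`/`backwards` methods apply schema changes and the dict pins the ORM state the migration was generated against. Below is a minimal sketch assuming South's standard API; the `UserOption` table creation is illustrative only and is not taken from this file (real Sentry migrations are generated with the `schemamigration` management command).

import datetime
from south.db import db
from south.v2 import SchemaMigration


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Illustrative example: create the table for one of the frozen models.
        # self.gf() resolves a dotted field path to the field class.
        db.create_table('sentry_useroption', (
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('user', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.User'])),
            ('project', self.gf('sentry.db.models.fields.FlexibleForeignKey')(to=orm['sentry.Project'], null=True)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('value', self.gf('picklefield.fields.PickledObjectField')()),
        ))
        db.send_create_signal('sentry', ['UserOption'])

    def backwards(self, orm):
        # Reverse of forwards(): drop the illustrative table.
        db.delete_table('sentry_useroption')

    models = {
        # ... the frozen model definitions shown above ...
    }

    complete_apps = ['sentry']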