repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
BorgERP/borg-erp-6of3 | server/openerp/tools/func.py | 15 | 2124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Public API: only the decorator is exported.
# NOTE(review): frame_codeinfo (defined below) is not listed in __all__, so
# star-importers will not see it -- confirm that is intended.
__all__ = ['synchronized']

from functools import wraps
from inspect import getsourcefile
def synchronized(lock_attr='_lock'):
    """Decorator factory that serializes calls to a method on a per-instance lock.

    :param lock_attr: name of the attribute on ``self`` holding the lock
        object (anything exposing ``acquire()``/``release()``).
    :return: a decorator wrapping the method so the lock is held for the
        duration of the call.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            lock = getattr(self, lock_attr)
            # Acquire *before* entering the try block: the previous version
            # acquired inside it, so a failed acquire() would run the
            # finally clause and release() a lock that was never held.
            lock.acquire()
            try:
                return func(self, *args, **kwargs)
            finally:
                lock.release()
        return wrapper
    return decorator
def frame_codeinfo(fframe, back=0):
    """Return a (filename, lineno) pair describing a stack frame.

    Walks ``back`` frames up from ``fframe`` and reports where that frame's
    code lives.  Any failure degrades gracefully to ``("<unknown>", '')``
    so this helper is always safe to call from logging code.
    """
    try:
        if not fframe:
            return ("<unknown>", '')
        frame = fframe
        for _step in range(back):
            frame = frame.f_back
        try:
            filename = getsourcefile(frame)
        except TypeError:
            # Built-in frames have no source file.
            filename = '<builtin>'
        return (filename, frame.f_lineno or '')
    except Exception:
        return ("<unknown>", '')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Vagab0nd/SiCKRAGE | sickchill/views/movies.py | 1 | 2964 | import logging
from sickchill import settings
from sickchill.oldbeard import config
from .common import PageTemplate
from .index import WebRoot
logger = logging.getLogger('sickchill.movie')
class MoviesHandler(WebRoot):
    """Tornado request handler for the movie-library pages (list, search,
    add, remove, details)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _genericMessage(self, subject=None, message=None):
        """Render a generic subject/message page under the movies menu."""
        t = PageTemplate(rh=self, filename="genericMessage.mako")
        return t.render(message=message, subject=subject, topmenu="movies", title="")

    def index(self):
        """Render the movie list page."""
        t = PageTemplate(rh=self, filename="movies/index.mako")
        return t.render(title=_("Movies"), header=_("Movie List"), topmenu="movies", movies=settings.movie_list, controller="movies", action="index")

    def search(self):
        """Search TMDB with the posted query/year/language/adult filters and
        render the results page.  No query means no search is performed."""
        query = self.get_body_argument('query', '')
        year = self.get_body_argument('year', '')
        language = self.get_body_argument('language', '')
        adult = config.checkbox_to_value(self.get_body_argument('adult', False))
        search_results = []
        if query:
            search_results = settings.movie_list.search_tmdb(query=query, year=year, language=language, adult=adult)
        t = PageTemplate(rh=self, filename="movies/search.mako")
        return t.render(title=_("Movies"), header=_("Movie Search"), topmenu="movies", controller="movies", action="search",
                        search_results=search_results, movies=settings.movie_list, query=query, year=year, language=language, adult=adult)

    def add(self):
        """Add a movie from a posted imdb or tmdb id; redirect to its details
        page on success, back to search otherwise."""
        movie = None
        imdb_id = self.get_body_argument('imdb', None)
        if imdb_id:
            movie = settings.movie_list.add_from_imdb(imdb_id=imdb_id)
        # A tmdb id, when also supplied, takes precedence for the redirect.
        tmdb_id = self.get_body_argument('tmdb', None)
        if tmdb_id:
            movie = settings.movie_list.add_from_tmdb(tmdb_id=tmdb_id)
        if not movie:
            return self.redirect(self.reverse_url('movies-search', 'search'))
        return self.redirect(self.reverse_url('movies-details', 'details', movie.slug))

    def remove(self):
        """Delete the movie identified by the ``pk`` path argument (if it
        exists) and render the removal page."""
        pk = self.path_kwargs.get('pk')
        if pk is not None:
            if not settings.movie_list.query.get(pk):
                return self._genericMessage(_('Error'), _('Movie not found'))
            settings.movie_list.delete(pk)
        t = PageTemplate(rh=self, filename="movies/remove.mako")
        return t.render(title=_("Movies"), header=_("Movie Remove"), topmenu="movies", movies=settings.movie_list, controller="movies", action="remove")

    def details(self):
        """Render the details page for the movie addressed by its slug."""
        movie = settings.movie_list.by_slug(self.path_kwargs.get('slug'))
        if not movie:
            return self._genericMessage(_('Error'), _('Movie not found'))
        t = PageTemplate(rh=self, filename="movies/details.mako")
        # Fixed copy-paste bug: header previously read "Movie Remove".
        return t.render(title=_("Movies"), header=_("Movie Details"), topmenu="movies", controller="movies", action="details",
                        movie=movie, movie_message=None)
| gpl-3.0 |
sthirugn/robottelo | robottelo/performance/pulp.py | 8 | 4941 | """Test utilities for writing Pulp tests
Part of functionalities of Pulp are defined in this module
and have utilities of single repository synchronization, single
sequential repository sync, sequential repository re-sync.
"""
import logging
from robottelo import ssh
from robottelo.cli.base import CLIReturnCodeError
from robottelo.cli.repository import Repository
LOGGER = logging.getLogger(__name__)
class Pulp(object):
    """Performance Measurement of RH Satellite 6

    Pulp Synchronization functionality
    """

    @classmethod
    def repository_single_sync(cls, repo_id, repo_name, thread_id):
        """Single Synchronization

        :param str repo_id: Repository id to be synchronized
        :param str repo_name: Repository name
        :param thread_id: label used only in log messages to identify the caller
        :return: time measure for a single sync; 0 marks a failed sync
        :rtype: float
        """
        LOGGER.info(
            'Synchronize {0} by thread-{1}:'
            .format(repo_name, thread_id)
        )
        # Raw response is requested so the ``real ...`` timing line emitted
        # on stderr can be parsed by get_elapsed_time below.
        result = Repository.synchronize(
            {'id': repo_id},
            return_raw_response=True
        )
        if result.return_code != 0:
            LOGGER.error(
                'Sync repository {0} by thread-{1} failed!'
                .format(repo_name, thread_id)
            )
            return 0
        LOGGER.info(
            'Sync repository {0} by thread-{1} successful!'
            .format(repo_name, thread_id)
        )
        return cls.get_elapsed_time(result.stderr)

    @staticmethod
    def get_elapsed_time(stderr):
        """Retrieve the elapsed wall-clock time from stderr.

        Expects a ``time``-style ``real <seconds>`` line; returns 0 when
        no such line is present.
        """
        # should return only one time point as a single sync; the loop keeps
        # the *last* matching line if several appear.
        real_time = ''
        for line in stderr.split('\n'):
            if line.startswith('real'):
                real_time = line
        return 0 if real_time == '' else float(real_time.split(' ')[1])

    @staticmethod
    def get_enabled_repos(org_id):
        """Get all enabled repositories ids and names

        :return map_repo_name_id: The dictionary contains all enabled
            repositories in Satellite. Map repo-name as key, repo-id as value
        :raises RuntimeError: if there's no enabled repository in the
            default organization denoted by ``org_id``
        """
        LOGGER.info('Searching for enabled repositories by hammer CLI:')
        try:
            result = Repository.list(
                {'organization-id': org_id},
                per_page=False
            )
        except CLIReturnCodeError:
            raise RuntimeError(
                'No enabled repository found in organization {0}!'
                .format(org_id)
            )
        # map repository name with id
        map_repo_name_id = {}
        for repo in result:
            map_repo_name_id[repo['name']] = repo['id']
        return map_repo_name_id

    @classmethod
    def repositories_sequential_sync(
            cls,
            repo_names_list,
            map_repo_name_id,
            sync_iterations,
            savepoint=None):
        """Sync all repositories linearly, and repeat X times

        :param list repo_names_list: A list of targeting repository names
        :param dict map_repo_name_id: repo-name -> repo-id mapping, as
            produced by :meth:`get_enabled_repos`
        :param int sync_iterations: The number of times to repeat sync
        :param savepoint: database savepoint name restored after each
            iteration; when ``None`` no restore is attempted
        :return time_result_dict_sync
        :rtype: dict
        """
        # Create a dictionary to store all timing results from each sync
        time_result_dict_sync = {}
        # repeat sequential sync X times
        for i in range(sync_iterations):
            # note: name key by thread to adapt to graph module
            key = 'thread-{0}'.format(i)
            time_result_dict_sync[key] = []
            # Sync each repo one-by-one and collect timing data
            for repo_name in repo_names_list:
                repo_id = map_repo_name_id.get(repo_name, None)
                if repo_id is None:
                    LOGGER.warning(
                        'Invalid repository name {}!'.format(repo_name)
                    )
                    continue
                LOGGER.debug(
                    'Sequential Sync {0} attempt {1}:'.format(repo_name, i)
                )
                # sync repository once at a time
                time_result_dict_sync[key].append(
                    cls.repository_single_sync(repo_id, repo_name, 'linear')
                )
            # for resync purpose, no need to restore
            if savepoint is None:
                # NOTE(review): this returns None after the *first* iteration,
                # discarding the collected timings -- confirm that is intended
                # rather than ``return time_result_dict_sync``.
                return
            else:
                # restore database at the end of each iteration
                cls._restore_from_savepoint(savepoint)
        return time_result_dict_sync

    @staticmethod
    def _restore_from_savepoint(savepoint):
        """Restore from savepoint"""
        if savepoint == '':
            LOGGER.warning('No savepoint while continuing test!')
            return
        LOGGER.info('Reset db from /home/backup/{0}'.format(savepoint))
        ssh.command('./reset-db.sh /home/backup/{0}'.format(savepoint))
| gpl-3.0 |
piohhmy/euler | p011.py | 1 | 2514 | import textwrap
from functools import reduce
# Project Euler problem 11: the 20x20 grid of two-digit numbers, kept as a
# text blob and parsed into a list of 20 lists of 20 ints below.
gridstr = textwrap.dedent("""\
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48""")
# grid[row][col] -> int; row 0 is the top line of the blob.
grid = [list(map(int,line.split(" "))) for line in gridstr.split("\n")]
def product_right_4(grid, row, col):
    """Product of the (up to) four values running right from (row, col).

    Near the right edge the slice simply truncates, so fewer than four
    values may be multiplied.
    """
    window = grid[row][col:col + 4]
    return reduce(lambda a, b: a * b, window)
def product_down_4(grid, row, col):
    """Product of the (up to) four values running down from (row, col).

    Near the bottom edge the row slice truncates, so fewer than four
    values may be multiplied.
    """
    column_values = (r[col] for r in grid[row:row + 4])
    return reduce(lambda a, b: a * b, column_values)
def product_diagonal_right_4(grid, row, col):
    """Product of four values on the down-right diagonal from (row, col).

    Returns 0 (rather than the previous implicit ``None``) when the
    four-cell diagonal does not fit inside the grid: ``None`` made
    ``max()`` over all products raise TypeError on Python 3, while 0 can
    never win against the positive products.
    """
    if row + 4 <= len(grid) and col + 4 <= len(grid[row]):
        return reduce(lambda x, y: x * y, [grid[row + i][col + i] for i in range(0, 4)])
    return 0
def product_diagonal_left_4(grid, row, col):
    """Product of four values on the down-left diagonal from (row, col).

    Two fixes versus the original:
    - the bound was ``col - 4 >= 0``, which wrongly excluded diagonals
      starting at col 3 even though indices col..col-3 are all valid
      (the right-diagonal twin allows the symmetric case);
    - out-of-range now returns 0 instead of an implicit ``None``, which
      made ``max()`` over all products raise TypeError on Python 3.
    """
    if row + 4 <= len(grid) and col - 3 >= 0:
        return reduce(lambda x, y: x * y, [grid[row + i][col - i] for i in range(0, 4)])
    return 0
def find_grid_products(grid):
    """Yield the product of every run of four adjacent cells in the grid
    (right, down, and both diagonals from each starting cell).

    ``None`` results from the diagonal helpers (returned when a diagonal
    does not fit) are skipped, so the output can be fed straight into
    ``max()`` under Python 3 without a TypeError.
    """
    for (row, line) in enumerate(grid):
        for (col, num) in enumerate(line):
            for product in (
                product_right_4(grid, row, col),
                product_down_4(grid, row, col),
                product_diagonal_right_4(grid, row, col),
                product_diagonal_left_4(grid, row, col),
            ):
                if product is not None:
                    yield product
def solve_p11():
    """Project Euler 11: greatest product of four adjacent grid numbers."""
    return max(find_grid_products(grid))

if __name__ == '__main__':
    print(solve_p11())
| mit |
vmax-feihu/hue | desktop/core/ext-py/Django-1.6.10/django/conf/locale/nb/formats.py | 118 | 1763 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
# (tuples, not lists, so the settings stay immutable; order matters --
# formats are tried first to last when parsing input)
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y',            # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y',          # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y',            # '25. oktober 2006', '25 oktober 2006'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',     # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',        # '2006-10-25 14:30'
    '%Y-%m-%d',              # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',     # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',        # '25.10.2006 14:30'
    '%d.%m.%Y',              # '25.10.2006'
    '%d.%m.%y %H:%M:%S',     # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',        # '25.10.06 14:30'
    '%d.%m.%y',              # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| apache-2.0 |
emilhetty/home-assistant | homeassistant/components/notify/pushetta.py | 11 | 1938 | """
Pushetta platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.pushetta/
"""
import logging
from homeassistant.components.notify import (
ATTR_TITLE, DOMAIN, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import validate_config
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pushetta==1.0.15']
def get_service(hass, config):
    """Get the Pushetta notification service.

    Validates the required config keys, probes the channel by sending a
    "Home Assistant started" message, and returns the configured service
    (or ``None`` on any validation/credential/channel failure).
    """
    from pushetta import Pushetta, exceptions

    if not validate_config({DOMAIN: config},
                           {DOMAIN: [CONF_API_KEY, 'channel_name']},
                           _LOGGER):
        return None

    try:
        # Sending a startup message doubles as a live check of both the
        # API token and the channel name.
        pushetta = Pushetta(config[CONF_API_KEY])
        pushetta.pushMessage(config['channel_name'], "Home Assistant started")
    except exceptions.TokenValidationError:
        _LOGGER.error("Please check your access token")
        return None
    except exceptions.ChannelNotFoundError:
        _LOGGER.error("Channel '%s' not found", config['channel_name'])
        return None

    return PushettaNotificationService(config[CONF_API_KEY],
                                       config['channel_name'])
# pylint: disable=too-few-public-methods
class PushettaNotificationService(BaseNotificationService):
    """Implement the notification service for Pushetta."""

    def __init__(self, api_key, channel_name):
        """Initialize the service.

        :param api_key: Pushetta API token.
        :param channel_name: channel every message is pushed to.
        """
        # Imported lazily so the dependency is only needed when the
        # platform is actually configured.
        from pushetta import Pushetta

        self._api_key = api_key
        self._channel_name = channel_name
        self.pushetta = Pushetta(self._api_key)

    def send_message(self, message="", **kwargs):
        """Send a message to a user."""
        title = kwargs.get(ATTR_TITLE)
        # NOTE(review): when no title is given this renders as
        # "None <message>" -- confirm that is intended.
        self.pushetta.pushMessage(self._channel_name,
                                  "{} {}".format(title, message))
| mit |
miing/mci_migo | identityprovider/teams.py | 1 | 13038 | # Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Team membership support for Launchpad.
The primary form of communication between the RP and Launchpad is an
OpenID authentication request. Our solution is to piggyback a team
membership test onto this interaction.
As part of an OpenID authentication request, the RP includes the
following fields:
openid.ns.lp:
An OpenID 2.0 namespace URI for the extension. It is not strictly
required for 1.1 requests, but including it is good for forward
compatibility.
It must be set to: http://ns.launchpad.net/2007/openid-teams
openid.lp.query_membership:
A comma separated list of Launchpad team names that the RP is
interested in.
As part of the positive assertion OpenID response, the following field
will be provided:
openid.ns.lp:
(as above)
openid.lp.is_member:
A comma separated list of teams that the user is actually a member
of. The list may be limited to those teams mentioned in the
request.
This field must be included in the response signature in order to
be considered valid (as the response is bounced through the user's
web browser, an unsigned value could be modified).
@since: 2.1.1
"""
from openid.message import (
registerNamespaceAlias,
NamespaceAliasRegistrationError,
)
from openid.extension import Extension
from openid import oidutil
# Compatibility shim: make ``basestring`` available even where it is not
# built in, so isinstance checks below work across interpreter versions.
try:
    basestring  # pylint:disable-msg=W0104
except NameError:
    # For Python 2.2
    basestring = (str, unicode)  # pylint:disable-msg=W0622

__all__ = [
    'TeamsRequest',
    'TeamsResponse',
    'ns_uri',
    'supportsTeams',
    ]

ns_uri = 'http://ns.launchpad.net/2007/openid-teams'

# Register the 'lp' alias for the teams namespace; failure (alias already
# claimed by another extension) is logged but not fatal.
try:
    registerNamespaceAlias(ns_uri, 'lp')
except NamespaceAliasRegistrationError, e:
    oidutil.log('registerNamespaceAlias(%r, %r) failed: %s' % (ns_uri,
                                                               'lp', str(e),))
def supportsTeams(endpoint):
    """Does the given endpoint advertise support for Launchpad Teams?

    @param endpoint: The endpoint object as returned by OpenID discovery
    @type endpoint: openid.consumer.discover.OpenIDEndpoint
    @returns: Whether an lp type was advertised by the endpoint
    @rtype: bool
    """
    # Discovery records the extension type URIs the endpoint advertises;
    # presence of the teams namespace URI is the support signal.
    return endpoint.usesExtension(ns_uri)
# Raised by getTeamsNS below when the 'lp' alias is taken by another extension.
class TeamsNamespaceError(ValueError):
    """The Launchpad teams namespace was not found and could not
    be created using the expected name (there's another extension
    using the name 'lp')

    This is not I{illegal}, for OpenID 2, although it probably
    indicates a problem, since it's not expected that other extensions
    will re-use the alias that is in use for OpenID 1.

    If this is an OpenID 1 request, then there is no recourse. This
    should not happen unless some code has modified the namespaces for
    the message that is being processed.
    """
def getTeamsNS(message):
    """Extract the Launchpad teams namespace URI from the given
    OpenID message.

    @param message: The OpenID message from which to parse Launchpad
        teams. This may be a request or response message.
    @type message: C{L{openid.message.Message}}

    @returns: the lp namespace URI for the supplied message. The
        message may be modified to define a Launchpad teams
        namespace.
    @rtype: C{str}

    @raise ValueError: when using OpenID 1 if the message defines
        the 'lp' alias to be something other than a Launchpad
        teams type.
    """
    # See if there exists an alias for the Launchpad teams type.
    alias = message.namespaces.getAlias(ns_uri)
    if alias is None:
        # There is no alias, so try to add one. (OpenID version 1)
        try:
            message.namespaces.addAlias(ns_uri, 'lp')
        except KeyError, why:
            # An alias for the string 'lp' already exists, but it's
            # defined for something other than Launchpad teams
            raise TeamsNamespaceError(why[0])

    # we know that ns_uri defined, because it's defined in the
    # else clause of the loop as well, so disable the warning
    return ns_uri  # pylint:disable-msg=W0631
class TeamsRequest(Extension):
    """An object to hold the state of a Launchpad teams request.

    @ivar query_membership: A comma separated list of Launchpad team
        names that the RP is interested in.
    @type required: [str]

    @group Consumer: requestField, requestTeams, getExtensionArgs,
        addToOpenIDRequest
    @group Server: fromOpenIDRequest, parseExtensionArgs
    """

    ns_alias = 'lp'

    def __init__(self, query_membership=None, lp_ns_uri=ns_uri):
        """Initialize an empty Launchpad teams request"""
        Extension.__init__(self)
        self.query_membership = []
        self.ns_uri = lp_ns_uri
        if query_membership:
            self.requestTeams(query_membership)

    # Assign getTeamsNS to a static method so that it can be
    # overridden for testing.
    _getTeamsNS = staticmethod(getTeamsNS)

    def fromOpenIDRequest(cls, request):
        """Create a Launchpad teams request that contains the
        fields that were requested in the OpenID request with the
        given arguments

        @param request: The OpenID request
        @type request: openid.server.CheckIDRequest
        @returns: The newly created Launchpad teams request
        @rtype: C{L{TeamsRequest}}
        """
        self = cls()

        # Since we're going to mess with namespace URI mapping, don't
        # mutate the object that was passed in.
        message = request.message.copy()

        self.ns_uri = self._getTeamsNS(message)
        args = message.getArgs(self.ns_uri)
        self.parseExtensionArgs(args)

        return self

    # classmethod applied via assignment rather than decorator syntax
    # (pre-decorator style, consistent with the rest of this module).
    fromOpenIDRequest = classmethod(fromOpenIDRequest)

    def parseExtensionArgs(self, args, strict=False):
        """Parse the unqualified Launchpad teams request
        parameters and add them to this object.

        This method is essentially the inverse of
        C{L{getExtensionArgs}}. This method restores the serialized
        Launchpad teams request fields.

        If you are extracting arguments from a standard OpenID
        checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
        which will extract the lp namespace and arguments from the
        OpenID request. This method is intended for cases where the
        OpenID server needs more control over how the arguments are
        parsed than that method provides.

        args = message.getArgs(ns_uri)
        request.parseExtensionArgs(args)

        @param args: The unqualified Launchpad teams arguments
        @type args: {str:str}

        @param strict: Whether requests with fields that are not
            defined in the Launchpad teams specification should be
            tolerated (and ignored)
        @type strict: bool

        @returns: None; updates this object
        """
        items = args.get('query_membership')
        if items:
            for team_name in items.split(','):
                try:
                    self.requestTeam(team_name, strict)
                except ValueError:
                    # Duplicates are silently ignored unless strict is set.
                    if strict:
                        raise

    def allRequestedTeams(self):
        """A list of all of the Launchpad teams that were
        requested.

        @rtype: [str]
        """
        return self.query_membership

    def wereTeamsRequested(self):
        """Have any Launchpad teams been requested?

        @rtype: bool
        """
        return bool(self.allRequestedTeams())

    def __contains__(self, team_name):
        """Was this team in the request?"""
        return team_name in self.query_membership

    def requestTeam(self, team_name, strict=False):
        """Request the specified team from the OpenID user

        @param team_name: the unqualified Launchpad team name
        @type team_name: str

        @param strict: whether to raise an exception when a team is
            added to a request more than once

        @raise ValueError: when strict is set and the team was
            requested more than once
        """
        if strict:
            if team_name in self.query_membership:
                raise ValueError('That team has already been requested')
        else:
            if team_name in self.query_membership:
                return

        self.query_membership.append(team_name)

    def requestTeams(self, query_membership, strict=False):
        """Add the given list of teams to the request

        @param query_membership: The Launchpad teams request
        @type query_membership: [str]

        @raise ValueError: when a team requested is not a string
            or strict is set and a team was requested more than once
        """
        # Guard against a common mistake: passing a single string, which
        # would otherwise be iterated character by character.
        if isinstance(query_membership, basestring):
            raise TypeError('Teams should be passed as a list of '
                            'strings (not %r)' % (type(query_membership),))

        for team_name in query_membership:
            self.requestTeam(team_name, strict=strict)

    def getExtensionArgs(self):
        """Get a dictionary of unqualified Launchpad teams
        arguments representing this request.

        This method is essentially the inverse of
        C{L{parseExtensionArgs}}. This method serializes the Launchpad
        teams request fields.

        @rtype: {str:str}
        """
        args = {}

        if self.query_membership:
            args['query_membership'] = ','.join(self.query_membership)

        return args
class TeamsResponse(Extension):
    """Represents the data returned in a Launchpad teams response
    inside of an OpenID C{id_res} response. This object will be
    created by the OpenID server, added to the C{id_res} response
    object, and then extracted from the C{id_res} message by the
    Consumer.

    @ivar data: The Launchpad teams data, an array.

    @ivar ns_uri: The URI under which the Launchpad teams data was
        stored in the response message.

    @group Server: extractResponse
    @group Consumer: fromSuccessResponse
    @group Read-only dictionary interface: keys, iterkeys, items, iteritems,
        __iter__, get, __getitem__, keys, has_key
    """

    ns_alias = 'lp'

    def __init__(self, is_member=None, lp_ns_uri=ns_uri):
        Extension.__init__(self)
        if is_member is None:
            self.is_member = []
        else:
            self.is_member = is_member
        self.ns_uri = lp_ns_uri

    def addTeam(self, team_name):
        # Avoid duplicate team entries in the response.
        if team_name not in self.is_member:
            self.is_member.append(team_name)

    def extractResponse(cls, request, is_member_str):
        """Take a C{L{TeamsRequest}} and a list of Launchpad
        team values and create a C{L{TeamsResponse}}
        object containing that data.

        @param request: The Launchpad teams request object
        @type request: TeamsRequest

        @param is_member_str: The Launchpad teams data for this
            response, as a comma-separated string.
        @type is_member_str: str

        @returns: a Launchpad teams response object
        @rtype: TeamsResponse
        """
        self = cls()
        self.ns_uri = request.ns_uri
        self.is_member = is_member_str.split(',')
        return self

    extractResponse = classmethod(extractResponse)

    # Assign getTeamsNS to a static method so that it can be
    # overridden for testing
    _getTeamsNS = staticmethod(getTeamsNS)

    def fromSuccessResponse(cls, success_response, signed_only=True):
        """Create a C{L{TeamsResponse}} object from a successful OpenID
        library response
        (C{L{openid.consumer.consumer.SuccessResponse}}) response
        message

        @param success_response: A SuccessResponse from consumer.complete()
        @type success_response: C{L{openid.consumer.consumer.SuccessResponse}}

        @param signed_only: Whether to process only data that was
            signed in the id_res message from the server.
        @type signed_only: bool

        @rtype: TeamsResponse
        @returns: A Launchpad teams response containing the data
            that was supplied with the C{id_res} response.
        """
        self = cls()
        self.ns_uri = self._getTeamsNS(success_response.message)
        if signed_only:
            # Only trust fields covered by the response signature; the
            # response travels via the user's browser and unsigned values
            # could have been tampered with.
            args = success_response.getSignedNS(self.ns_uri)
        else:
            args = success_response.message.getArgs(self.ns_uri)
        if "is_member" in args:
            is_member_str = args["is_member"]
            self.is_member = is_member_str.split(',')
        return self

    fromSuccessResponse = classmethod(fromSuccessResponse)

    def getExtensionArgs(self):
        """Get the fields to put in the Launchpad teams namespace
        when adding them to an id_res message.

        @see: openid.extension
        """
        ns_args = {'is_member': ','.join(self.is_member)}
        return ns_args
| agpl-3.0 |
kcpawan/django | django/contrib/gis/db/backends/oracle/introspection.py | 539 | 1977 | import sys
import cx_Oracle
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.utils import six
class OracleIntrospection(DatabaseIntrospection):
    # Associating any OBJECTVAR instances with GeometryField. Of course,
    # this won't work right on Oracle objects that aren't MDSYS.SDO_GEOMETRY,
    # but it is the only object type supported within Django anyways.
    data_types_reverse = DatabaseIntrospection.data_types_reverse.copy()
    data_types_reverse[cx_Oracle.OBJECT] = 'GeometryField'

    def get_geometry_type(self, table_name, geo_col):
        """Return ``(field_type, field_params)`` for the geometry column
        ``geo_col`` of ``table_name``, based on USER_SDO_GEOM_METADATA.

        ``field_params`` carries ``srid``/``dim`` only when they differ
        from the Django defaults (4326 and 2).
        """
        cursor = self.connection.cursor()
        try:
            # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information.
            try:
                # Oracle stores table/column names upper-cased in the
                # metadata view, hence the .upper() calls.
                cursor.execute(
                    'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" '
                    'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s',
                    (table_name.upper(), geo_col.upper())
                )
                row = cursor.fetchone()
            except Exception as msg:
                new_msg = (
                    'Could not find entry in USER_SDO_GEOM_METADATA '
                    'corresponding to "%s"."%s"\n'
                    'Error message: %s.') % (table_name, geo_col, msg)
                # Re-raise with the original traceback preserved (py2/py3).
                six.reraise(Exception, Exception(new_msg), sys.exc_info()[2])

            # TODO: Research way to find a more specific geometry field type for
            # the column's contents.
            field_type = 'GeometryField'

            # Getting the field parameters.
            field_params = {}
            dim, srid = row
            if srid != 4326:
                field_params['srid'] = srid
            # Length of object array ( SDO_DIM_ARRAY ) is number of dimensions.
            dim = len(dim)
            if dim != 2:
                field_params['dim'] = dim
        finally:
            cursor.close()

        return field_type, field_params
| bsd-3-clause |
clouddocx/boto | boto/dynamodb/exceptions.py | 185 | 1687 | """
Exceptions that are specific to the dynamodb module.
"""
from boto.exception import BotoServerError, BotoClientError
from boto.exception import DynamoDBResponseError
class DynamoDBExpiredTokenError(BotoServerError):
    """
    Raised when a DynamoDB security token has expired — generally boto's
    (or the user's) cue to request fresh security tokens.
    """
class DynamoDBKeyNotFoundError(BotoClientError):
    """
    Raised when trying to retrieve or interact with an item whose key
    cannot be found.
    """
class DynamoDBItemError(BotoClientError):
    """
    Raised when invalid parameters are supplied while constructing a new
    Item in DynamoDB.
    """
class DynamoDBNumberError(BotoClientError):
    """
    Raised when a numeric value cannot be cast to a compatible numeric type.
    """
class DynamoDBConditionalCheckFailedError(DynamoDBResponseError):
    """
    Raised when a ConditionalCheckFailedException response is received,
    i.e. when a conditional check expressed via the ``expected_value``
    parameter fails.
    """
class DynamoDBValidationError(DynamoDBResponseError):
    """
    Raised when a ValidationException response is received: one or more
    required parameter values are missing, or the item exceeds the 64Kb
    size limit.
    """
class DynamoDBThroughputExceededError(DynamoDBResponseError):
    """
    Raised when the provisioned throughput has been exceeded and the
    automatic retries have been exhausted (an exceeded-throughput
    operation is normally retried before this is raised).
    """
| mit |
Appono/hey-eurydices | vendor/pip-1.3.1/pip/log.py | 143 | 6371 | """Logging
"""
import sys
import logging
from pip import backwardcompat
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
VERBOSE_DEBUG = logging.DEBUG - 1
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO + logging.WARN) / 2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [VERBOSE_DEBUG, DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self):
self.consumers = []
self.indent = 0
self.explicit_levels = False
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' ' * self.indent + rendered
if self.explicit_levels:
## FIXME: should this be a name, not a level number?
rendered = '%02i %s' % (level, rendered)
if hasattr(consumer, 'write'):
rendered += '\n'
backwardcompat.fwrite(consumer, rendered)
else:
consumer(rendered)
def _show_progress(self):
"""Should we display download progress?"""
return (self.stdout_level_matches(self.NOTIFY) and sys.stdout.isatty())
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self._show_progress():
sys.stdout.write(' ' * self.indent + msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
self.last_message = None
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self._show_progress():
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
# These erase any messages shown with show_progress (besides .'s)
logger.show_progress('')
logger.show_progress('')
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self, message=None):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
if message is None:
sys.stdout.write('.')
sys.stdout.flush()
else:
if self.last_message:
padding = ' ' * max(0, len(self.last_message) - len(message))
else:
padding = ''
sys.stdout.write('\r%s%s%s%s' %
(' ' * self.indent, self.in_progress, message, padding))
sys.stdout.flush()
self.last_message = message
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
    """Return True if a message at *level* should reach a consumer
    registered at *consumer_level*.

    *level* may be an int (matches when >= consumer_level) or a slice
    expressing an inclusive-start, exclusive-stop band of levels.

    >>> l = Logger()
    >>> l.level_matches(3, 4)
    False
    >>> l.level_matches(3, 2)
    True
    >>> l.level_matches(slice(None, 3), 3)
    False
    >>> l.level_matches(slice(None, 3), 2)
    True
    >>> l.level_matches(slice(1, 3), 1)
    True
    >>> l.level_matches(slice(2, 3), 1)
    False
    """
    if isinstance(level, slice):
        start, stop = level.start, level.stop
        if start is not None and start > consumer_level:
            return False
        # BUG FIX: this was "stop is not None or stop <= consumer_level",
        # which rejects every bounded slice (contradicting the doctests
        # above) and compares None <= int when stop is None.
        if stop is not None and stop <= consumer_level:
            return False
        return True
    else:
        return level >= consumer_level
@classmethod
def level_for_integer(cls, level):
    """Map an integer index onto ``cls.LEVELS``, clamping out-of-range
    values to the first/last defined level."""
    levels = cls.LEVELS
    clamped = min(max(level, 0), len(levels) - 1)
    return levels[clamped]
def move_stdout_to_stderr(self):
    """Rebind every stdout consumer to stderr at the same level.

    Used when stdout must stay machine-parsable (e.g. piped output).
    """
    moved = [(lvl, out) for lvl, out in self.consumers if out == sys.stdout]
    for entry in moved:
        self.consumers.remove(entry)
    self.consumers.extend((lvl, sys.stderr) for lvl, _out in moved)
logger = Logger()
| mit |
atvcaptain/enigma2 | lib/python/Screens/ScreenSaver.py | 1 | 1677 | from Screens.Screen import Screen
from Components.MovieList import AUDIO_EXTENSIONS
from Components.ServiceEventTracker import ServiceEventTracker
from Components.Pixmap import Pixmap
from enigma import ePoint, eTimer, iPlayableService
import os, random
class Screensaver(Screen):
    """Full-screen saver that bounces a picture around the screen.

    Shown after inactivity; hides itself again as soon as a non-audio
    service starts playing.
    """

    def __init__(self, session):
        Screen.__init__(self, session)
        # Timer that periodically jumps the picture to a new position.
        self.moveLogoTimer = eTimer()
        self.moveLogoTimer.callback.append(self.doMovePicture)
        self.onShow.append(self.__onShow)
        self.onHide.append(self.__onHide)
        self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
            {
                iPlayableService.evStart: self.serviceStarted
            })
        self["picture"] = Pixmap()
        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        """Compute the movement bounds once the skin layout is known."""
        picturesize = self["picture"].getSize()
        self.maxx = self.instance.size().width() - picturesize[0]
        if self.maxx < 1:
            # BUG FIX: the fallback width was computed but never assigned,
            # leaving self.maxx <= 0 so doMovePicture always fell into its
            # exception fallback and parked the picture at (0, 0).
            self.maxx = self.instance.size().width()
        self.maxy = self.instance.size().height() - picturesize[1]
        if self.maxy < 1:
            self.maxy = self.instance.size().height()
        self.doMovePicture()

    def __onHide(self):
        self.moveLogoTimer.stop()

    def __onShow(self):
        # First jump after 5 seconds; subsequent jumps every 9 seconds.
        self.moveLogoTimer.startLongTimer(5)

    def serviceStarted(self):
        """Hide the saver when a new service starts, unless it is audio.

        Field 10 of the ':'-separated service reference string is the
        file path; audio extensions keep the saver on screen.
        """
        if self.shown:
            ref = self.session.nav.getCurrentlyPlayingServiceOrGroup()
            if ref:
                ref = ref.toString().split(":")
                if not os.path.splitext(ref[10])[1].lower() in AUDIO_EXTENSIONS:
                    self.hide()

    def doMovePicture(self):
        """Move the picture to a random position and re-arm the timer."""
        try:
            self.posx = random.randint(1, self.maxx)
            self.posy = random.randint(1, self.maxy)
        except Exception:
            # randint raises ValueError when maxx/maxy < 1 (picture
            # larger than the screen); park the picture top-left.
            self.posx = 0
            self.posy = 0
        self["picture"].instance.move(ePoint(self.posx, self.posy))
        self.moveLogoTimer.startLongTimer(9)
| gpl-2.0 |
mverwe/JetRecoValidation | PuThresholdTuning/python/crabConfigDijet40Run2.py | 1 | 1469 | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# CRAB3 job configuration: PbPb dijet (pt-hat > 40 GeV) HiForest
# production with Run 2 conditions.  The imported `config` is a factory;
# rebinding the name to the instance is the conventional CRAB idiom.
config = config()
#from WMCore.Configuration import Configuration
#config = Configuration()
#config.section_("General")
# --- General: request bookkeeping and what to copy back ---
config.General.requestName = 'HiForestDijet40Run2Fullv2'
config.General.workArea = 'crab_projects'
config.General.transferOutputs = True
config.General.transferLogs = False
#config.section_("JobType")
# --- JobType: run the forest-producing cmsRun configuration ---
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'runForest_PbPb_MIX_75X_PUThresholdVarV2.py'
#config.section_("Data")
# --- Data: user-published (phys03 DBS) RECODEBUG input, one file per job ---
config.Data.inputDataset = '/Pyquen_DiJet_pt40_5020GeV_GEN_SIM_PU_20150813/twang-Pyquen_DiJet_pt40_5020GeV_step3_RECODEBUG_20150813-3179e0200600a67eea51209589c07fdd/USER'
#config.Data.inputDataset = '/Pyquen_DiJet_Pt120_TuneZ2_Unquenched_Hydjet1p8_2760GeV/HiFall13DR53X-NoPileUp_STARTHI53_LV1-v3/GEN-SIM-RECO'
config.Data.inputDBS = 'phys03' #'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 1
#config.Data.totalUnits = 100 #process small amount first
#config.Data.lumiMask = 'lumi_JSON.txt'
#config.Data.runRange = '247324'
#config.Data.outLFNDirBase = '/store/user/mverweij/pp2015/MC/Monash13_0T'
# Output is staged to the CMS group EOS area; nothing is published to DBS.
config.Data.outLFNDirBase = '/store/group/cmst3/user/mverweij/jetsPbPb/Run2Prep/Dijet40CMSSW753p1/v3'
config.Data.publication = False #True
config.Data.publishDataName = ''
#config.section_('Site')
# --- Site: stage out to CERN T2 ---
config.Site.storageSite = 'T2_CH_CERN'
#config.Site.whitelist = ['T2_CH_CERN']
| cc0-1.0 |
kalev/anaconda | pyanaconda/iw/GroupSelector.py | 2 | 22606 | # Copyright 2005-2007 Red Hat, Inc.
#
# Jeremy Katz <katzj@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys
import logging
import gettext
import gtk
import gtk.glade
import gtk.gdk as gdk
import gobject
import yum
import yum.Errors
try:
import repomd.mdErrors as mdErrors
except ImportError: # yum 2.9.x
mdErrors = yum.Errors
from yum.constants import *
from pyanaconda.compssort import *
I18N_DOMAIN="anaconda"
import rpm
def sanitizeString(s, translate = True):
    """Translate *s* via the distribution i18n domains (when *translate*)
    and escape it for use in pango/gtk markup.

    Double newlines are preserved as paragraph breaks; single newlines
    are collapsed to spaces.  Returns a unicode object, or "" when the
    input cannot be decoded as UTF-8.
    """
    if len(s) == 0:
        return s
    if not translate:
        i18ndomains = []
    elif hasattr(rpm, "expandMacro"):
        i18ndomains = rpm.expandMacro("%_i18ndomains").split(":")
    else:
        i18ndomains = ["redhat-dist"]
    # iterate over i18ndomains to find the translation
    for d in i18ndomains:
        r = gettext.dgettext(d, s)
        if r != s:
            s = r
            break
    # Protect paragraph breaks with a NUL sentinel, flow the rest.
    s = s.replace("\n\n", "\x00")
    s = s.replace("\n", " ")
    s = s.replace("\x00", "\n\n")
    # BUG FIX: escape markup metacharacters.  The previous identity
    # replacements ('"&" -> "&"' etc.) did nothing, so names containing
    # &, < or > broke the gtk set_markup()/markup-column consumers of
    # this string.  '&' must be escaped first.
    s = s.replace("&", "&amp;")
    s = s.replace("<", "&lt;")
    s = s.replace(">", "&gt;")
    if type(s) != unicode:
        try:
            s = unicode(s, "utf-8")
        except UnicodeDecodeError as e:
            sys.stderr.write("Unable to convert %s to a unicode object: %s\n" % (s, e))
            return ""
    return s
def listEntryString(po):
    """Build the pango-markup label shown for package object *po*:
    bold name-version-release.arch followed by the escaped summary."""
    summary = po.returnSimple('summary') or ''
    nevra = "%s-%s-%s.%s" % (po.name, po.version, po.release, po.arch)
    return "<b>%s</b> - %s" % (nevra, sanitizeString(summary))
GLADE_FILE = "GroupSelector.glade"
def _getgladefile(fn):
if os.path.exists(fn):
return fn
elif os.path.exists("data/%s" %(fn,)):
return "data/%s" %(fn,)
else:
return "/usr/share/pirut/ui/%s" %(fn,)
# Load anaconda's message catalogue; fallback=True yields a
# NullTranslations so lookups still work without installed locales.
# `_` is the usual gettext alias (lgettext: locale-encoded, Python 2 API).
t = gettext.translation(I18N_DOMAIN, "/usr/share/locale", fallback = True)
_ = t.lgettext
def _deselectPackage(ayum, group, pkg):
grpid = group.groupid
try:
pkgs = ayum.pkgSack.returnNewestByName(pkg)
except mdErrors.PackageSackError:
log = logging.getLogger("yum.verbose")
log.debug("no such package %s from group %s" % (pkg, grpid))
if pkgs:
pkgs = ayum.bestPackagesFromList(pkgs)
for po in pkgs:
txmbrs = ayum.tsInfo.getMembers(pkgtup = po.pkgtup)
for txmbr in txmbrs:
try:
txmbr.groups.remove(grpid)
except ValueError:
log = logging.getLogger("yum.verbose")
log.debug("package %s was not marked in group %s" %(po, grpid))
if len(txmbr.groups) == 0:
ayum.tsInfo.remove(po.pkgtup)
def _selectPackage(ayum, group, pkg):
grpid = group.groupid
try:
txmbrs = ayum.install(name = pkg)
except yum.Errors.InstallError as e:
log = logging.getLogger("yum.verbose")
log.info("No package named %s available to be installed: %s" %(pkg, e))
else:
map(lambda x: x.groups.append(grpid), txmbrs)
def _catHasGroupWithPackages(cat, ayum):
grps = map(lambda x: ayum.comps.return_group(x),
filter(lambda x: ayum.comps.has_group(x), cat.groups))
for g in grps:
if ayum._groupHasPackages(g):
return True
return False
class OptionalPackageSelector:
    """Modal dialog listing the default/optional packages of one group
    with per-package selection checkboxes."""

    def __init__(self, yumobj, group, parent = None, getgladefunc = None):
        """Build the dialog for *group*, using *yumobj* for all package
        queries and transaction changes; *parent* makes it transient."""
        self.ayum = yumobj
        self.group = group
        if getgladefunc:
            xmlfn = getgladefunc(GLADE_FILE)
        else:
            xmlfn = _getgladefile(GLADE_FILE)
        self.xml = gtk.glade.XML(xmlfn, "groupDetailsDialog",
                                 domain=I18N_DOMAIN)
        self.window = self.xml.get_widget("groupDetailsDialog")
        if parent:
            self.window.set_transient_for(parent)
        self.window.set_title(_("Packages in %s") %
                              xmltrans(group.name, group.translated_name))
        self.window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
        self.window.set_size_request(600, 400)
        self._createStore()
        self._populate()

    def __search_pkgs(self, model, col, key, i):
        # gtk interactive-search hook: returning False means "matches".
        val = model.get_value(i, 2).returnSimple('name')
        if val.lower().startswith(key.lower()):
            return False
        return True

    def _createStore(self):
        """Create the list store (selected?, markup label, package object)
        and wire up the tree view columns."""
        self.pkgstore = gtk.ListStore(gobject.TYPE_BOOLEAN,
                                      gobject.TYPE_STRING,
                                      gobject.TYPE_PYOBJECT)
        tree = self.xml.get_widget("packageList")
        tree.set_model(self.pkgstore)
        column = gtk.TreeViewColumn(None, None)
        cbr = gtk.CellRendererToggle()
        cbr.connect ("toggled", self._pkgToggled)
        column.pack_start(cbr, False)
        column.add_attribute(cbr, 'active', 0)
        tree.append_column(column)
        column = gtk.TreeViewColumn(None, None)
        renderer = gtk.CellRendererText()
        column.pack_start(renderer, True)
        column.add_attribute(renderer, 'markup', 1)
        tree.append_column(column)
        tree.set_search_equal_func(self.__search_pkgs)
        tree.connect("row-activated", self._rowToggle)
        self.pkgstore.set_sort_column_id(1, gtk.SORT_ASCENDING)

    def _rowToggle(self, tree, path, col):
        self._pkgToggled(None, path)

    def _pkgToggled(self, widget, path):
        """Flip a package's selection and update the yum transaction."""
        # BUG FIX: was `type(path) == type(str)`, which compares against
        # the metaclass `type` and is False for the string paths the
        # "toggled" signal emits; isinstance routes string paths to
        # get_iter_from_string as intended.
        if isinstance(path, str):
            i = self.pkgstore.get_iter_from_string(path)
        else:
            i = self.pkgstore.get_iter(path)
        sel = self.pkgstore.get_value(i, 0)
        pkg = self.pkgstore.get_value(i, 2).returnSimple('name')
        if sel and not self.ayum.simpleDBInstalled(name = pkg):
            # Was newly selected: drop it from the transaction.
            _deselectPackage(self.ayum, self.group, pkg)
        elif sel:
            # Installed on disk: schedule removal.
            self.ayum.remove(name = pkg)
        elif self.ayum.simpleDBInstalled(name = pkg):
            # Installed but pending erase: cancel the erase.
            txmbrs = self.ayum.tsInfo.matchNaevr(name = pkg)
            for tx in txmbrs:
                if tx.output_state == TS_ERASE:
                    self.ayum.tsInfo.remove(tx.pkgtup)
        else:
            _selectPackage(self.ayum, self.group, pkg)
        self.pkgstore.set_value(i, 0, not sel)

    def __getPackageObject(self, pkgname):
        """Return the best available package object for *pkgname*, or
        None when it cannot be resolved."""
        try:
            pkgs = self.ayum.pkgSack.returnNewestByName(pkgname)
        except yum.Errors.YumBaseError:
            return None
        if not pkgs:
            return None
        pkgs = self.ayum.bestPackagesFromList(pkgs)
        if not pkgs:
            return None
        return pkgs[0]

    def _populate(self):
        """Fill the store with the group's default + optional packages."""
        pkgs = self.group.default_packages.keys() + \
            self.group.optional_packages.keys()
        for pkg in pkgs:
            po = self.__getPackageObject(pkg)
            if not po:
                continue
            # Don't display obsolete packages in the UI
            if po.pkgtup in self.ayum.up.checkForObsolete([po.pkgtup]):
                continue
            self.pkgstore.append([self.ayum.isPackageInstalled(pkg), listEntryString(po), po])

    def run(self):
        self.window.show_all()
        return self.window.run()

    def destroy(self):
        return self.window.destroy()
# the GroupSelector requires a YumBase object which also implements the
# following additional methods:
# * isPackageInstalled(p): is there a package named p installed or selected
# * isGroupInstalled(grp): is there a group grp installed or selected
class GroupSelector:
    """Two-pane (category / group) package-group selector widget.

    Requires a YumBase-like object that additionally implements:
      * isPackageInstalled(p): is package p installed or selected?
      * isGroupInstalled(grp): is group grp installed or selected?
    """

    def __init__(self, yumobj, getgladefunc = None, framefunc = None):
        self.ayum = yumobj
        self.getgladefunc = getgladefunc
        self.framefunc = framefunc
        if getgladefunc:
            xmlfn = getgladefunc(GLADE_FILE)
        else:
            xmlfn = _getgladefile(GLADE_FILE)
        self.xml = gtk.glade.XML(xmlfn, "groupSelectionBox",
                                 domain=I18N_DOMAIN)
        self.vbox = self.xml.get_widget("groupSelectionBox")
        self.xml.get_widget("detailsButton").set_sensitive(False)
        self.menuxml = gtk.glade.XML(xmlfn, "groupPopupMenu",
                                     domain=I18N_DOMAIN)
        self.groupMenu = self.menuxml.get_widget("groupPopupMenu")
        self._connectSignals()
        self._createStores()
        self.vbox.show()

    def _connectSignals(self):
        """Attach glade signal handlers for the main box and popup menu."""
        sigs = { "on_detailsButton_clicked": self._optionalPackagesDialog,
                 "on_groupList_button_press": self._groupListButtonPress,
                 "on_groupList_popup_menu": self._groupListPopup, }
        self.xml.signal_autoconnect(sigs)
        menusigs = { "on_select_activate": self._selectAllPackages,
                     "on_selectgrp_activate": self._groupSelect,
                     "on_deselectgrp_activate": self._groupDeselect,
                     "on_deselect_activate": self._deselectAllPackages }
        self.menuxml.signal_autoconnect(menusigs)

    def _createStores(self):
        self._createCategoryStore()
        self._createGroupStore()
        b = gtk.TextBuffer()
        self.xml.get_widget("groupDescriptionTextView").set_buffer(b)

    def _createCategoryStore(self):
        # display string, category object
        self.catstore = gtk.TreeStore(gobject.TYPE_STRING,
                                      gobject.TYPE_PYOBJECT)
        tree = self.xml.get_widget("categoryList")
        tree.set_model(self.catstore)
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('Text', renderer, markup=0)
        column.set_clickable(False)
        tree.append_column(column)
        tree.columns_autosize()
        tree.set_enable_search(False)
        selection = tree.get_selection()
        selection.connect("changed", self._categorySelected)

    def _createGroupStore(self):
        # checkbox, display string, group object, icon pixbuf
        self.groupstore = gtk.TreeStore(gobject.TYPE_BOOLEAN,
                                        gobject.TYPE_STRING,
                                        gobject.TYPE_PYOBJECT,
                                        gobject.TYPE_OBJECT)
        tree = self.xml.get_widget("groupList")
        tree.set_model(self.groupstore)
        column = gtk.TreeViewColumn(None, None)
        column.set_clickable(True)
        pixr = gtk.CellRendererPixbuf()
        pixr.set_property('stock-size', 1)
        column.pack_start(pixr, False)
        column.add_attribute(pixr, 'pixbuf', 3)
        cbr = gtk.CellRendererToggle()
        column.pack_start(cbr, False)
        column.add_attribute(cbr, 'active', 0)
        cbr.connect ("toggled", self._groupToggled)
        tree.append_column(column)
        renderer = gtk.CellRendererText()
        column = gtk.TreeViewColumn('Text', renderer, markup=1)
        column.set_clickable(False)
        tree.append_column(column)
        tree.columns_autosize()
        tree.set_enable_search(False)
        tree.grab_focus()
        selection = tree.get_selection()
        selection.connect("changed", self._groupSelected)
        selection.set_mode(gtk.SELECTION_MULTIPLE)

    def _get_pix(self, fn):
        """Load *fn* and scale it to the 24x24 icon size if needed."""
        imgsize = 24
        pix = gtk.gdk.pixbuf_new_from_file(fn)
        if pix.get_height() != imgsize or pix.get_width() != imgsize:
            pix = pix.scale_simple(imgsize, imgsize,
                                   gtk.gdk.INTERP_BILINEAR)
        return pix

    def _categorySelected(self, selection):
        """Repopulate the group pane for the newly selected category."""
        self.groupstore.clear()
        (model, i) = selection.get_selected()
        if not i:
            return
        cat = model.get_value(i, 1)
        # fall back to the category pixbuf for groups without their own
        fbpix = None
        fn = "/usr/share/pixmaps/comps/%s.png" %(cat.categoryid,)
        if os.access(fn, os.R_OK):
            fbpix = self._get_pix(fn)
        self._populateGroups(cat.groups, fbpix)

    def _populateGroups(self, groups, defaultpix = None):
        """Fill the group store with the non-empty groups in *groups*."""
        grps = map(lambda x: self.ayum.comps.return_group(x),
                   filter(lambda x: self.ayum.comps.has_group(x), groups))
        grps.sort(ui_comps_sort)
        for grp in grps:
            if not self.ayum._groupHasPackages(grp):
                continue
            s = "<span size=\"large\" weight=\"bold\">%s</span>" % xmltrans(grp.name, grp.translated_name)
            fn = "/usr/share/pixmaps/comps/%s.png" % grp.groupid
            if os.access(fn, os.R_OK):
                pix = self._get_pix(fn)
            elif defaultpix:
                pix = defaultpix
            else:
                pix = None
            self.groupstore.append(None,
                                   [self.ayum.isGroupInstalled(grp),s,grp,pix])
        tree = self.xml.get_widget("groupList")
        # Scroll back to the top once the widget is realized.
        gobject.idle_add(lambda x: x.flags() & gtk.REALIZED and x.scroll_to_point(0, 0), tree)
        self.xml.get_widget("optionalLabel").set_text("")
        self.xml.get_widget("detailsButton").set_sensitive(False)
        # select the first group
        i = self.groupstore.get_iter_first()
        if i is not None:
            sel = self.xml.get_widget("groupList").get_selection()
            sel.select_iter(i)

    def _groupSelected(self, selection):
        if selection.count_selected_rows() != 1:
            # if we have more groups (or no group) selected, then
            # we can't show a description or allow selecting optional
            self.__setGroupDescription(None)
            return
        (model, paths) = selection.get_selected_rows()
        grp = model.get_value(model.get_iter(paths[0]), 2)
        self.__setGroupDescription(grp)

    def __setGroupDescription(self, grp):
        """Refresh the description pane and optional-package counter."""
        b = self.xml.get_widget("groupDescriptionTextView").get_buffer()
        b.set_text("")
        if grp is None:
            return
        if grp.description:
            txt = xmltrans(grp.description, grp.translated_description)
        else:
            txt = xmltrans(grp.name, grp.translated_name)
        inst = 0
        cnt = 0
        pkgs = grp.default_packages.keys() + grp.optional_packages.keys()
        for p in pkgs:
            if self.ayum.isPackageInstalled(p):
                cnt += 1
                inst += 1
            elif self.ayum.pkgSack.searchNevra(name=p):
                cnt += 1
            else:
                log = logging.getLogger("yum.verbose")
                log.debug("no such package %s for %s" %(p, grp.groupid))
        b.set_text(txt)
        if cnt == 0 or not self.ayum.isGroupInstalled(grp):
            self.xml.get_widget("detailsButton").set_sensitive(False)
            self.xml.get_widget("optionalLabel").set_text("")
        else:
            self.xml.get_widget("detailsButton").set_sensitive(True)
            txt = _("Optional packages selected: %(inst)d of %(cnt)d") \
                % {'inst': inst, 'cnt': cnt}
            self.xml.get_widget("optionalLabel").set_markup(_("<i>%s</i>") %(txt,))

    def _groupToggled(self, widget, path, sel = None, updateText = True):
        """Toggle (or force, via *sel*) a group's selection state."""
        # BUG FIX: was `type(path) == type(str)`, which compares against
        # the metaclass `type` and never matched the string paths the
        # "toggled" signal emits; isinstance routes them correctly.
        if isinstance(path, str):
            i = self.groupstore.get_iter_from_string(path)
        else:
            i = self.groupstore.get_iter(path)
        if sel is None:
            sel = not self.groupstore.get_value(i, 0)
        self.groupstore.set_value(i, 0, sel)
        grp = self.groupstore.get_value(i, 2)
        self.vbox.window.set_cursor(gdk.Cursor(gdk.WATCH))
        if sel:
            self.ayum.selectGroup(grp.groupid)
        else:
            self.ayum.deselectGroup(grp.groupid)
            # FIXME: this doesn't mark installed packages for removal.
            # we probably want that behavior with s-c-p, but not anaconda
        if updateText:
            self.__setGroupDescription(grp)
        self.vbox.window.set_cursor(None)

    def populateCategories(self):
        """Fill the category pane with categories that have real groups."""
        self.catstore.clear()
        cats = self.ayum.comps.categories
        cats.sort(ui_comps_sort)
        for cat in cats:
            if not _catHasGroupWithPackages(cat, self.ayum):
                continue
            s = "<span size=\"large\" weight=\"bold\">%s</span>" % xmltrans(cat.name, cat.translated_name)
            self.catstore.append(None, [s, cat])
        # select the first category
        i = self.catstore.get_iter_first()
        if i is not None:
            sel = self.xml.get_widget("categoryList").get_selection()
            sel.select_iter(i)

    def _setupCatchallCategory(self):
        # FIXME: this is a bad hack, but catch groups which aren't in
        # a category yet are supposed to be user-visible somehow.
        # conceivably should be handled by yum
        grps = {}
        for g in self.ayum.comps.groups:
            if g.user_visible and self.ayum._groupHasPackages(g):
                grps[g.groupid] = g
        for cat in self.ayum.comps.categories:
            for g in cat.groups:
                if g in grps:
                    del grps[g]
        if len(grps.keys()) == 0:
            return
        c = yum.comps.Category()
        c.name = _("Uncategorized")
        c._groups = grps
        c.categoryid = "uncategorized"
        self.ayum.comps._categories[c.categoryid] = c

    def doRefresh(self):
        """(Re)build both panes; hide the category pane when there are
        no categories and show a flat group list instead."""
        if len(self.ayum.comps.categories) == 0:
            self.xml.get_widget("categorySW").hide()
            self._populateGroups(map(lambda x: x.groupid,
                                     self.ayum.comps.groups))
        else:
            self._setupCatchallCategory()
            self.populateCategories()

    def _getSelectedGroup(self):
        """Return the selected group.
        NOTE: this only ever returns one group."""
        selection = self.xml.get_widget("groupList").get_selection()
        (model, paths) = selection.get_selected_rows()
        for p in paths:
            return model.get_value(model.get_iter(p), 2)
        return None

    def _optionalPackagesDialog(self, *args):
        """Open the per-group optional-package dialog for the selection."""
        group = self._getSelectedGroup()
        if group is None:
            return
        pwin = self.vbox.get_parent() # hack to find the parent window...
        while not isinstance(pwin, gtk.Window):
            pwin = pwin.get_parent()
        d = OptionalPackageSelector(self.ayum, group, pwin, self.getgladefunc)
        if self.framefunc:
            self.framefunc(d.window)
        rc = d.run()
        d.destroy()
        self.__setGroupDescription(group)

    def _groupSelect(self, *args):
        selection = self.xml.get_widget("groupList").get_selection()
        if selection.count_selected_rows() == 0:
            return
        (model, paths) = selection.get_selected_rows()
        for p in paths:
            self._groupToggled(model, p, True, updateText=(len(paths) == 1))

    def _groupDeselect(self, *args):
        selection = self.xml.get_widget("groupList").get_selection()
        if selection.count_selected_rows() == 0:
            return
        (model, paths) = selection.get_selected_rows()
        for p in paths:
            self._groupToggled(model, p, False, updateText=(len(paths) == 1))

    def _selectAllPackages(self, *args):
        """Select every group in the selection plus all of its
        default/optional packages."""
        selection = self.xml.get_widget("groupList").get_selection()
        if selection.count_selected_rows() == 0:
            return
        (model, paths) = selection.get_selected_rows()
        self.vbox.window.set_cursor(gdk.Cursor(gdk.WATCH))
        for p in paths:
            i = model.get_iter(p)
            grp = model.get_value(i, 2)
            # ensure the group is selected
            self.ayum.selectGroup(grp.groupid)
            model.set_value(i, 0, True)
            for pkg in grp.default_packages.keys() + \
                    grp.optional_packages.keys():
                if self.ayum.isPackageInstalled(pkg):
                    continue
                elif self.ayum.simpleDBInstalled(name = pkg):
                    # Pending erase: cancel it instead of re-installing.
                    txmbrs = self.ayum.tsInfo.matchNaevr(name = pkg)
                    for tx in txmbrs:
                        if tx.output_state == TS_ERASE:
                            self.ayum.tsInfo.remove(tx.pkgtup)
                else:
                    _selectPackage(self.ayum, grp, pkg)
        if len(paths) == 1:
            self.__setGroupDescription(grp)
        self.vbox.window.set_cursor(None)

    def _deselectAllPackages(self, *args):
        """Deselect all default/optional packages of the selected groups."""
        selection = self.xml.get_widget("groupList").get_selection()
        if selection.count_selected_rows() == 0:
            return
        (model, paths) = selection.get_selected_rows()
        for p in paths:
            i = model.get_iter(p)
            grp = model.get_value(i, 2)
            for pkg in grp.default_packages.keys() + \
                    grp.optional_packages.keys():
                if not self.ayum.isPackageInstalled(pkg):
                    continue
                elif self.ayum.simpleDBInstalled(name=pkg):
                    self.ayum.remove(name=pkg)
                else:
                    _deselectPackage(self.ayum, grp, pkg)
        if len(paths) == 1:
            self.__setGroupDescription(grp)

    def __doGroupPopup(self, button, time):
        menu = self.groupMenu
        menu.popup(None, None, None, button, time)
        menu.show_all()

    def _groupListButtonPress(self, widget, event):
        """Right-click handler: show the group popup for the row hit."""
        if event.button == 3:
            x = int(event.x)
            y = int(event.y)
            pthinfo = widget.get_path_at_pos(x, y)
            if pthinfo is not None:
                sel = widget.get_selection()
                if sel.count_selected_rows() == 1:
                    path, col, cellx, celly = pthinfo
                    widget.grab_focus()
                    widget.set_cursor(path, col, 0)
                self.__doGroupPopup(event.button, event.time)
                return 1

    def _groupListPopup(self, widget):
        # Keyboard "menu" key handler.
        sel = widget.get_selection()
        if sel.count_selected_rows() > 0:
            self.__doGroupPopup(0, 0)
| gpl-2.0 |
Dunkas12/BeepBoopBot | lib/pip/_vendor/lockfile/linklockfile.py | 536 | 2652 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
    """Lock access to a file using atomic property of link(2).

    >>> lock = LinkLockFile('somefile')
    >>> lock = LinkLockFile('somefile', threaded=False)
    """

    def acquire(self, timeout=None):
        # Create (or truncate) the per-owner unique file that will be
        # hard-linked to the shared lock file.
        try:
            open(self.unique_name, "wb").close()
        except IOError:
            raise LockFailed("failed to create %s" % self.unique_name)
        # None -> use the instance default; <= 0 means fail immediately.
        timeout = timeout if timeout is not None else self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout
        while True:
            # Try and create a hard link to it.
            try:
                os.link(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                nlinks = os.stat(self.unique_name).st_nlink
                if nlinks == 2:
                    # The original link plus the one I created == 2.  We're
                    # good to go.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        # Give up: clean up our unique file first.
                        os.unlink(self.unique_name)
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    # Poll at timeout/10 for a positive timeout, otherwise
                    # every 0.1s (old-style `and/or` conditional).
                    time.sleep(timeout is not None and timeout / 10 or 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        # Only the holder (owner of unique_name) may release the lock.
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not os.path.exists(self.unique_name):
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.unique_name)
        os.unlink(self.lock_file)

    def is_locked(self):
        # The shared lock file exists iff some process holds the lock.
        return os.path.exists(self.lock_file)

    def i_am_locking(self):
        # We hold the lock when our unique file is one of the two links.
        return (self.is_locked() and
                os.path.exists(self.unique_name) and
                os.stat(self.unique_name).st_nlink == 2)

    def break_lock(self):
        # Forcibly remove the shared link; the holder's unique file stays.
        if os.path.exists(self.lock_file):
            os.unlink(self.lock_file)
| gpl-3.0 |
alexforencich/verilog-wishbone | tb/test_wb_ram.py | 2 | 4270 | #!/usr/bin/env python
"""
Copyright (c) 2015-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import wb
# DUT/testbench naming: compile ../rtl/wb_ram.v together with the
# Verilog wrapper test_wb_ram.v into an Icarus Verilog vvp image.
module = 'wb_ram'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """MyHDL/Icarus cosimulation bench for the wb_ram Wishbone RAM.

    Builds the Verilog DUT with iverilog, drives it through a
    wb.WBMaster bus-functional model and checks single and multi-byte
    reads and writes at various lengths and byte offsets.
    """
    # Parameters — must match the Verilog module's parameters.
    DATA_WIDTH = 32
    ADDR_WIDTH = 16
    SELECT_WIDTH = 4
    # Inputs driven into the DUT
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])  # test number, visible in waveforms
    adr_i = Signal(intbv(0)[ADDR_WIDTH:])
    dat_i = Signal(intbv(0)[DATA_WIDTH:])
    we_i = Signal(bool(0))
    sel_i = Signal(intbv(0)[SELECT_WIDTH:])
    stb_i = Signal(bool(0))
    cyc_i = Signal(bool(0))
    # Outputs observed from the DUT
    dat_o = Signal(intbv(0)[DATA_WIDTH:])
    ack_o = Signal(bool(0))
    # WB master: bus-functional model issuing the read/write cycles.
    wbm_inst = wb.WBMaster()
    wbm_logic = wbm_inst.create_logic(
        clk,
        adr_o=adr_i,
        dat_i=dat_o,
        dat_o=dat_i,
        we_o=we_i,
        sel_o=sel_i,
        stb_o=stb_i,
        ack_i=ack_o,
        cyc_o=cyc_i,
        name='master'
    )
    # DUT: compile the Verilog sources, then attach via the myhdl VPI.
    if os.system(build_cmd):
        raise Exception("Error running build command")
    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        adr_i=adr_i,
        dat_i=dat_i,
        dat_o=dat_o,
        we_i=we_i,
        sel_i=sel_i,
        stb_i=stb_i,
        ack_o=ack_o,
        cyc_i=cyc_i
    )

    @always(delay(4))
    def clkgen():
        # Free-running clock, 8 time-unit period.
        clk.next = not clk

    @instance
    def check():
        # Reset pulse, then settle before starting the tests.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge
        yield clk.posedge
        print("test 1: read and write")
        current_test.next = 1
        # Word write then read-back at address 4.
        wbm_inst.init_write(4, b'\x11\x22\x33\x44')
        yield wbm_inst.wait()
        yield clk.posedge
        wbm_inst.init_read(4, 4)
        yield wbm_inst.wait()
        yield clk.posedge
        data = wbm_inst.get_read_data()
        assert data[0] == 4
        assert data[1] == b'\x11\x22\x33\x44'
        yield delay(100)
        yield clk.posedge
        print("test 2: various reads and writes")
        current_test.next = 2
        # Sweep transfer lengths 1..7 at each byte offset 0..3; the
        # address encodes (length, offset) so every transfer lands in a
        # distinct, non-overlapping location.
        for length in range(1,8):
            for offset in range(4):
                wbm_inst.init_write(256*(16*offset+length)+offset, b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length])
                yield wbm_inst.wait()
                yield clk.posedge
        for length in range(1,8):
            for offset in range(4):
                wbm_inst.init_read(256*(16*offset+length)+offset, length)
                yield wbm_inst.wait()
                yield clk.posedge
                data = wbm_inst.get_read_data()
                assert data[0] == 256*(16*offset+length)+offset
                assert data[1] == b'\x11\x22\x33\x44\x55\x66\x77\x88'[0:length]
        yield delay(100)
        raise StopSimulation

    # Hand every local generator/instance to the simulator.
    return instances()
def test_bench():
    """Entry point for test runners: run the cosimulation to completion."""
    Simulation(bench()).run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| mit |
b0204888100/chen3 | lib/werkzeug/test.py | 308 | 33874 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_native, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
                            boundary=None, charset='utf-8'):
    """Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects.) into a multipart encoded string stored
    in a file descriptor.

    Returns ``(stream, length, boundary)`` with the stream rewound to 0.
    When *use_tempfile* is true the in-memory buffer spills to a real
    temporary file once *threshold* bytes have been written.
    """
    if boundary is None:
        # Boundary unlikely to occur in the payload itself.
        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
    # Mutable closure state: [stream, total_length, on_disk].  A list is
    # used because Python 2 has no `nonlocal`.
    _closure = [BytesIO(), 0, False]
    if use_tempfile:
        def write_binary(string):
            stream, total_length, on_disk = _closure
            if on_disk:
                stream.write(string)
            else:
                length = len(string)
                if length + _closure[1] <= threshold:
                    stream.write(string)
                else:
                    # Spill: copy everything buffered so far to disk.
                    new_stream = TemporaryFile('wb+')
                    new_stream.write(stream.getvalue())
                    new_stream.write(string)
                    _closure[0] = new_stream
                    _closure[2] = True
                _closure[1] = total_length + length
    else:
        write_binary = _closure[0].write

    def write(string):
        # Text helper: encode with the form charset before writing.
        write_binary(string.encode(charset))

    if not isinstance(values, MultiDict):
        values = MultiDict(values)
    # NOTE: `values` is deliberately rebound to each per-key value list.
    for key, values in iterlists(values):
        for value in values:
            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
                  (boundary, key))
            reader = getattr(value, 'read', None)
            if reader is not None:
                # File-like value: emit filename/content-type headers and
                # stream the body in 16KiB chunks.
                filename = getattr(value, 'filename',
                                   getattr(value, 'name', None))
                content_type = getattr(value, 'content_type', None)
                if content_type is None:
                    content_type = filename and \
                        mimetypes.guess_type(filename)[0] or \
                        'application/octet-stream'
                if filename is not None:
                    write('; filename="%s"\r\n' % filename)
                else:
                    write('\r\n')
                write('Content-Type: %s\r\n\r\n' % content_type)
                while 1:
                    chunk = reader(16384)
                    if not chunk:
                        break
                    write_binary(chunk)
            else:
                # Plain form field: coerce to a native string first.
                if isinstance(value, string_types):
                    value = to_native(value, charset)
                else:
                    value = str(value)
                write('\r\n\r\n' + value)
            write('\r\n')
    # Closing boundary, then rewind for the caller.
    write('--%s--\r\n' % boundary)
    length = int(_closure[0].tell())
    _closure[0].seek(0)
    return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
    """Like `stream_encode_multipart` but fully materialized: returns a
    ``(boundary, data)`` tuple where ``data`` is a bytestring."""
    stream, _length, used_boundary = stream_encode_multipart(
        values, use_tempfile=False, boundary=boundary, charset=charset)
    return used_boundary, stream.read()
def File(fd, filename=None, mimetype=None):
    """Deprecated backwards-compat helper: wrap *fd* in a
    :class:`FileStorage` (use EnvironBuilder or FileStorage directly)."""
    import warnings
    warnings.warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
                                     'EnvironBuilder or FileStorage instead'))
    return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
    """Minimal stand-in for ``httplib.HTTPResponse`` so cookielib can
    read Set-Cookie headers from our test responses."""

    def __init__(self, headers):
        self.headers = _TestCookieHeaders(headers)

    def info(self):
        """cookielib entry point: expose the adapted headers object."""
        return self.headers
class _TestCookieJar(CookieJar):
    """A cookielib.CookieJar that can move cookies between WSGI
    environments (outgoing client headers) and WSGI application
    responses (incoming ``Set-Cookie`` headers).
    """

    def inject_wsgi(self, environ):
        """Inject the stored cookies as an ``HTTP_COOKIE`` client header
        into the given WSGI environment.
        """
        header = '; '.join(
            '%s=%s' % (cookie.name, cookie.value) for cookie in self)
        # Only set the key when there is at least one cookie, matching
        # the behaviour of a real client.
        if header:
            environ['HTTP_COOKIE'] = header

    def extract_wsgi(self, environ, headers):
        """Extract the server's set-cookie headers as cookies into the
        cookie jar.
        """
        request = U2Request(get_current_url(environ))
        self.extract_cookies(_TestCookieResponse(headers), request)
def _iter_data(data):
    """Iterate over a dict or multidict yielding ``(key, value)`` pairs.

    Used to walk the ``data`` argument of the :class:`EnvironBuilder`.
    Multi-valued entries (MultiDict values, or plain lists in a regular
    dict) are flattened into one pair per value.
    """
    if isinstance(data, MultiDict):
        for key, group in iterlists(data):
            for item in group:
                yield key, item
    else:
        for key, group in iteritems(data):
            if isinstance(group, list):
                for item in group:
                    yield key, item
            else:
                yield key, group
class EnvironBuilder(object):
    """This class can be used to conveniently create a WSGI environment
    for testing purposes. It can be used to quickly create WSGI environments
    or request objects from arbitrary data.
    The signature of this class is also used in some other places as of
    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
    :meth:`Client.open`). Because of this most of the functionality is
    available through the constructor alone.
    Files and regular form data can be manipulated independently of each
    other with the :attr:`form` and :attr:`files` attributes, but are
    passed with the same argument to the constructor: `data`.
    `data` can be any of these values:
    - a `str`: If it's a string it is converted into a :attr:`input_stream`,
      the :attr:`content_length` is set and you have to provide a
      :attr:`content_type`.
    - a `dict`: If it's a dict the keys have to be strings and the values
      any of the following objects:
      - a :class:`file`-like object. These are converted into
        :class:`FileStorage` objects automatically.
      - a tuple. The :meth:`~FileMultiDict.add_file` method is called
        with the tuple items as positional arguments.
    .. versionadded:: 0.6
       `path` and `base_url` can now be unicode strings that are encoded using
       the :func:`iri_to_uri` function.
    :param path: the path of the request. In the WSGI environment this will
                 end up as `PATH_INFO`. If the `query_string` is not defined
                 and there is a question mark in the `path` everything after
                 it is used as query string.
    :param base_url: the base URL is a URL that is used to extract the WSGI
                     URL scheme, host (server name + server port) and the
                     script root (`SCRIPT_NAME`).
    :param query_string: an optional string or dict with URL parameters.
    :param method: the HTTP method to use, defaults to `GET`.
    :param input_stream: an optional input stream. Do not specify this and
                         `data`. As soon as an input stream is set you can't
                         modify :attr:`args` and :attr:`files` unless you
                         set the :attr:`input_stream` to `None` again.
    :param content_type: The content type for the request. As of 0.5 you
                         don't have to provide this when specifying files
                         and form data via `data`.
    :param content_length: The content length for the request. You don't
                           have to specify this when providing data via
                           `data`.
    :param errors_stream: an optional error stream that is used for
                          `wsgi.errors`. Defaults to :data:`stderr`.
    :param multithread: controls `wsgi.multithread`. Defaults to `False`.
    :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
    :param run_once: controls `wsgi.run_once`. Defaults to `False`.
    :param headers: an optional list or :class:`Headers` object of headers.
    :param data: a string or dict of form data. See explanation above.
    :param environ_base: an optional dict of environment defaults.
    :param environ_overrides: an optional dict of environment overrides.
    :param charset: the charset used to encode unicode data.
    """
    #: the server protocol to use. defaults to HTTP/1.1
    server_protocol = 'HTTP/1.1'
    #: the wsgi version to use. defaults to (1, 0)
    wsgi_version = (1, 0)
    #: the default request class for :meth:`get_request`
    request_class = BaseRequest
    def __init__(self, path='/', base_url=None, query_string=None,
                 method='GET', input_stream=None, content_type=None,
                 content_length=None, errors_stream=None, multithread=False,
                 multiprocess=False, run_once=False, headers=None, data=None,
                 environ_base=None, environ_overrides=None, charset='utf-8'):
        # ``make_literal_wrapper`` yields str-or-bytes literals matching
        # the type of ``path`` so the '?' split works for both.
        path_s = make_literal_wrapper(path)
        if query_string is None and path_s('?') in path:
            # Split an inline query string off the path.
            path, query_string = path.split(path_s('?'), 1)
        self.charset = charset
        self.path = iri_to_uri(path)
        if base_url is not None:
            base_url = url_fix(iri_to_uri(base_url, charset), charset)
        self.base_url = base_url
        # A string query string and a parsed MultiDict of args are
        # mutually exclusive; the setters below clear each other.
        if isinstance(query_string, (bytes, text_type)):
            self.query_string = query_string
        else:
            if query_string is None:
                query_string = MultiDict()
            elif not isinstance(query_string, MultiDict):
                query_string = MultiDict(query_string)
            self.args = query_string
        self.method = method
        if headers is None:
            headers = Headers()
        elif not isinstance(headers, Headers):
            headers = Headers(headers)
        self.headers = headers
        if content_type is not None:
            self.content_type = content_type
        if errors_stream is None:
            errors_stream = sys.stderr
        self.errors_stream = errors_stream
        self.multithread = multithread
        self.multiprocess = multiprocess
        self.run_once = run_once
        self.environ_base = environ_base
        self.environ_overrides = environ_overrides
        self.input_stream = input_stream
        self.content_length = content_length
        self.closed = False
        if data:
            # ``data`` and an explicit input stream cannot be combined.
            if input_stream is not None:
                raise TypeError('can\'t provide input stream and data')
            if isinstance(data, text_type):
                data = data.encode(self.charset)
            if isinstance(data, bytes):
                self.input_stream = BytesIO(data)
                if self.content_length is None:
                    self.content_length = len(data)
            else:
                # Dict-like data: route file-ish values to ``files`` and
                # everything else to ``form``.
                for key, value in _iter_data(data):
                    if isinstance(value, (tuple, dict)) or \
                       hasattr(value, 'read'):
                        self._add_file_from_data(key, value)
                    else:
                        self.form.setlistdefault(key).append(value)
    def _add_file_from_data(self, key, value):
        """Called in the EnvironBuilder to add files from the data dict."""
        if isinstance(value, tuple):
            self.files.add_file(key, *value)
        elif isinstance(value, dict):
            # Deprecated dict form; kept for backwards compatibility.
            from warnings import warn
            warn(DeprecationWarning('it\'s no longer possible to pass dicts '
                                    'as `data`. Use tuples or FileStorage '
                                    'objects instead'), stacklevel=2)
            value = dict(value)
            mimetype = value.pop('mimetype', None)
            if mimetype is not None:
                value['content_type'] = mimetype
            self.files.add_file(key, **value)
        else:
            self.files.add_file(key, value)
    def _get_base_url(self):
        # Reassemble scheme/host/script-root into a normalized base URL.
        return url_unparse((self.url_scheme, self.host,
                            self.script_root, '', '')).rstrip('/') + '/'
    def _set_base_url(self, value):
        if value is None:
            scheme = 'http'
            netloc = 'localhost'
            script_root = ''
        else:
            scheme, netloc, script_root, qs, anchor = url_parse(value)
            if qs or anchor:
                raise ValueError('base url must not contain a query string '
                                 'or fragment')
        self.script_root = script_root.rstrip('/')
        self.host = netloc
        self.url_scheme = scheme
    base_url = property(_get_base_url, _set_base_url, doc='''
        The base URL is a URL that is used to extract the WSGI
        URL scheme, host (server name + server port) and the
        script root (`SCRIPT_NAME`).''')
    del _get_base_url, _set_base_url
    def _get_content_type(self):
        ct = self.headers.get('Content-Type')
        # Without an explicit header or input stream, derive a sensible
        # default content type for body-carrying methods.
        if ct is None and not self._input_stream:
            if self.method in ('POST', 'PUT', 'PATCH'):
                if self._files:
                    return 'multipart/form-data'
                return 'application/x-www-form-urlencoded'
            return None
        return ct
    def _set_content_type(self, value):
        if value is None:
            self.headers.pop('Content-Type', None)
        else:
            self.headers['Content-Type'] = value
    content_type = property(_get_content_type, _set_content_type, doc='''
        The content type for the request. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_type, _set_content_type
    def _get_content_length(self):
        return self.headers.get('Content-Length', type=int)
    def _set_content_length(self, value):
        if value is None:
            self.headers.pop('Content-Length', None)
        else:
            self.headers['Content-Length'] = str(value)
    content_length = property(_get_content_length, _set_content_length, doc='''
        The content length as integer. Reflected from and to the
        :attr:`headers`. Do not set if you set :attr:`files` or
        :attr:`form` for auto detection.''')
    del _get_content_length, _set_content_length
    def form_property(name, storage, doc):
        # Class-scope factory (deleted below) producing lazy MultiDict
        # properties that are mutually exclusive with ``input_stream``.
        key = '_' + name
        def getter(self):
            if self._input_stream is not None:
                raise AttributeError('an input stream is defined')
            rv = getattr(self, key)
            if rv is None:
                rv = storage()
                setattr(self, key, rv)
            return rv
        def setter(self, value):
            self._input_stream = None
            setattr(self, key, value)
        return property(getter, setter, doc)
    form = form_property('form', MultiDict, doc='''
        A :class:`MultiDict` of form values.''')
    files = form_property('files', FileMultiDict, doc='''
        A :class:`FileMultiDict` of uploaded files. You can use the
        :meth:`~FileMultiDict.add_file` method to add new files to the
        dict.''')
    del form_property
    def _get_input_stream(self):
        return self._input_stream
    def _set_input_stream(self, value):
        # Setting a raw stream invalidates any parsed form/files data.
        self._input_stream = value
        self._form = self._files = None
    input_stream = property(_get_input_stream, _set_input_stream, doc='''
        An optional input stream. If you set this it will clear
        :attr:`form` and :attr:`files`.''')
    del _get_input_stream, _set_input_stream
    def _get_query_string(self):
        if self._query_string is None:
            if self._args is not None:
                # Serialize the args MultiDict on demand.
                return url_encode(self._args, charset=self.charset)
            return ''
        return self._query_string
    def _set_query_string(self, value):
        self._query_string = value
        self._args = None
    query_string = property(_get_query_string, _set_query_string, doc='''
        The query string. If you set this to a string :attr:`args` will
        no longer be available.''')
    del _get_query_string, _set_query_string
    def _get_args(self):
        if self._query_string is not None:
            raise AttributeError('a query string is defined')
        if self._args is None:
            self._args = MultiDict()
        return self._args
    def _set_args(self, value):
        self._query_string = None
        self._args = value
    args = property(_get_args, _set_args, doc='''
        The URL arguments as :class:`MultiDict`.''')
    del _get_args, _set_args
    @property
    def server_name(self):
        """The server name (read-only, use :attr:`host` to set)"""
        return self.host.split(':', 1)[0]
    @property
    def server_port(self):
        """The server port as integer (read-only, use :attr:`host` to set)"""
        pieces = self.host.split(':', 1)
        if len(pieces) == 2 and pieces[1].isdigit():
            return int(pieces[1])
        elif self.url_scheme == 'https':
            return 443
        return 80
    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self.close()
        except Exception:
            pass
    def close(self):
        """Closes all files. If you put real :class:`file` objects into the
        :attr:`files` dict you can call this method to automatically close
        them all in one go.
        """
        if self.closed:
            return
        try:
            files = itervalues(self.files)
        except AttributeError:
            # ``files`` raises when an input stream is set; nothing to close.
            files = ()
        for f in files:
            try:
                f.close()
            except Exception:
                pass
        self.closed = True
    def get_environ(self):
        """Return the built environ."""
        input_stream = self.input_stream
        content_length = self.content_length
        content_type = self.content_type
        if input_stream is not None:
            # Derive the content length from the stream without
            # disturbing its current position.
            start_pos = input_stream.tell()
            input_stream.seek(0, 2)
            end_pos = input_stream.tell()
            input_stream.seek(start_pos)
            content_length = end_pos - start_pos
        elif content_type == 'multipart/form-data':
            values = CombinedMultiDict([self.form, self.files])
            input_stream, content_length, boundary = \
                stream_encode_multipart(values, charset=self.charset)
            content_type += '; boundary="%s"' % boundary
        elif content_type == 'application/x-www-form-urlencoded':
            #py2v3 review
            values = url_encode(self.form, charset=self.charset)
            values = values.encode('ascii')
            content_length = len(values)
            input_stream = BytesIO(values)
        else:
            input_stream = _empty_stream
        result = {}
        if self.environ_base:
            result.update(self.environ_base)
        def _path_encode(x):
            # Unquote then re-encode per the WSGI native-string rules.
            return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
        qs = wsgi_encoding_dance(self.query_string)
        result.update({
            'REQUEST_METHOD':       self.method,
            'SCRIPT_NAME':          _path_encode(self.script_root),
            'PATH_INFO':            _path_encode(self.path),
            'QUERY_STRING':         qs,
            'SERVER_NAME':          self.server_name,
            'SERVER_PORT':          str(self.server_port),
            'HTTP_HOST':            self.host,
            'SERVER_PROTOCOL':      self.server_protocol,
            'CONTENT_TYPE':         content_type or '',
            'CONTENT_LENGTH':       str(content_length or '0'),
            'wsgi.version':         self.wsgi_version,
            'wsgi.url_scheme':      self.url_scheme,
            'wsgi.input':           input_stream,
            'wsgi.errors':          self.errors_stream,
            'wsgi.multithread':     self.multithread,
            'wsgi.multiprocess':    self.multiprocess,
            'wsgi.run_once':        self.run_once
        })
        for key, value in self.headers.to_wsgi_list():
            result['HTTP_%s' % key.upper().replace('-', '_')] = value
        if self.environ_overrides:
            result.update(self.environ_overrides)
        return result
    def get_request(self, cls=None):
        """Returns a request with the data. If the request class is not
        specified :attr:`request_class` is used.
        :param cls: The request wrapper to use.
        """
        if cls is None:
            cls = self.request_class
        return cls(self.get_environ())
class ClientRedirectError(Exception):
    """Raised by the test :class:`Client` when ``follow_redirects=True``
    and a redirect loop is detected while following the response chain.
    """
class Client(object):
    """This class allows to send requests to a wrapped application.
    The response wrapper can be a class or factory function that takes
    three arguments: app_iter, status and headers. The default response
    wrapper just returns a tuple.
    Example::
        class ClientResponse(BaseResponse):
            ...
        client = Client(MyApplication(), response_wrapper=ClientResponse)
    The use_cookies parameter indicates whether cookies should be stored and
    sent for subsequent requests. This is True by default, but passing False
    will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True` as if not no external redirects
    are allowed.
    .. versionadded:: 0.5
       `use_cookies` is new in this version. Older versions did not provide
       builtin cookie support.
    """
    def __init__(self, application, response_wrapper=None, use_cookies=True,
                 allow_subdomain_redirects=False):
        self.application = application
        self.response_wrapper = response_wrapper
        # The jar persists cookies across requests made by this client.
        if use_cookies:
            self.cookie_jar = _TestCookieJar()
        else:
            self.cookie_jar = None
        self.allow_subdomain_redirects = allow_subdomain_redirects
    def set_cookie(self, server_name, key, value='', max_age=None,
                   expires=None, path='/', domain=None, secure=None,
                   httponly=False, charset='utf-8'):
        """Sets a cookie in the client's cookie jar. The server name
        is required and has to match the one that is also passed to
        the open call.
        """
        assert self.cookie_jar is not None, 'cookies disabled'
        header = dump_cookie(key, value, max_age, expires, path, domain,
                             secure, httponly, charset)
        # Simulate a server response carrying the Set-Cookie header so
        # the jar stores it against the right host.
        environ = create_environ(path, base_url='http://' + server_name)
        headers = [('Set-Cookie', header)]
        self.cookie_jar.extract_wsgi(environ, headers)
    def delete_cookie(self, server_name, key, path='/', domain=None):
        """Deletes a cookie in the test client."""
        # Deleting is setting an already-expired cookie.
        self.set_cookie(server_name, key, expires=0, max_age=0,
                        path=path, domain=domain)
    def run_wsgi_app(self, environ, buffered=False):
        """Runs the wrapped WSGI app with the given environment."""
        if self.cookie_jar is not None:
            self.cookie_jar.inject_wsgi(environ)
        rv = run_wsgi_app(self.application, environ, buffered=buffered)
        if self.cookie_jar is not None:
            # rv[2] is the response headers; harvest Set-Cookie from them.
            self.cookie_jar.extract_wsgi(environ, rv[2])
        return rv
    def resolve_redirect(self, response, new_location, environ, buffered=False):
        """Resolves a single redirect and triggers the request again
        directly on this redirect client.
        """
        scheme, netloc, script_root, qs, anchor = url_parse(new_location)
        base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
        cur_server_name = netloc.split(':', 1)[0].split('.')
        real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
        # Only allow redirects that stay on the same host (or a
        # subdomain of it when explicitly enabled).
        if self.allow_subdomain_redirects:
            allowed = cur_server_name[-len(real_server_name):] == real_server_name
        else:
            allowed = cur_server_name == real_server_name
        if not allowed:
            raise RuntimeError('%r does not support redirect to '
                               'external targets' % self.__class__)
        # For redirect handling we temporarily disable the response
        # wrapper. This is not threadsafe but not a real concern
        # since the test client must not be shared anyways.
        old_response_wrapper = self.response_wrapper
        self.response_wrapper = None
        try:
            return self.open(path=script_root, base_url=base_url,
                             query_string=qs, as_tuple=True,
                             buffered=buffered)
        finally:
            self.response_wrapper = old_response_wrapper
    def open(self, *args, **kwargs):
        """Takes the same arguments as the :class:`EnvironBuilder` class with
        some additions: You can provide a :class:`EnvironBuilder` or a WSGI
        environment as only argument instead of the :class:`EnvironBuilder`
        arguments and two optional keyword arguments (`as_tuple`, `buffered`)
        that change the type of the return value or the way the application is
        executed.
        .. versionchanged:: 0.5
           If a dict is provided as file in the dict for the `data` parameter
           the content type has to be called `content_type` now instead of
           `mimetype`. This change was made for consistency with
           :class:`werkzeug.FileWrapper`.
           The `follow_redirects` parameter was added to :func:`open`.
        Additional parameters:
        :param as_tuple: Returns a tuple in the form ``(environ, result)``
        :param buffered: Set this to True to buffer the application run.
                         This will automatically close the application for
                         you as well.
        :param follow_redirects: Set this to True if the `Client` should
                                 follow HTTP redirects.
        """
        as_tuple = kwargs.pop('as_tuple', False)
        buffered = kwargs.pop('buffered', False)
        follow_redirects = kwargs.pop('follow_redirects', False)
        environ = None
        # Accept a prebuilt EnvironBuilder or raw environ dict as the
        # sole positional argument.
        if not kwargs and len(args) == 1:
            if isinstance(args[0], EnvironBuilder):
                environ = args[0].get_environ()
            elif isinstance(args[0], dict):
                environ = args[0]
        if environ is None:
            builder = EnvironBuilder(*args, **kwargs)
            try:
                environ = builder.get_environ()
            finally:
                builder.close()
        response = self.run_wsgi_app(environ, buffered=buffered)
        # handle redirects
        redirect_chain = []
        while 1:
            # response[1] is the status line, e.g. '302 FOUND'.
            status_code = int(response[1].split(None, 1)[0])
            if status_code not in (301, 302, 303, 305, 307) \
               or not follow_redirects:
                break
            new_location = response[2]['location']
            new_redirect_entry = (new_location, status_code)
            # Seeing the same (location, status) twice means a loop.
            if new_redirect_entry in redirect_chain:
                raise ClientRedirectError('loop detected')
            redirect_chain.append(new_redirect_entry)
            environ, response = self.resolve_redirect(response, new_location,
                                                      environ, buffered=buffered)
        if self.response_wrapper is not None:
            response = self.response_wrapper(*response)
        if as_tuple:
            return environ, response
        return response
    def get(self, *args, **kw):
        """Like open but method is enforced to GET."""
        kw['method'] = 'GET'
        return self.open(*args, **kw)
    def patch(self, *args, **kw):
        """Like open but method is enforced to PATCH."""
        kw['method'] = 'PATCH'
        return self.open(*args, **kw)
    def post(self, *args, **kw):
        """Like open but method is enforced to POST."""
        kw['method'] = 'POST'
        return self.open(*args, **kw)
    def head(self, *args, **kw):
        """Like open but method is enforced to HEAD."""
        kw['method'] = 'HEAD'
        return self.open(*args, **kw)
    def put(self, *args, **kw):
        """Like open but method is enforced to PUT."""
        kw['method'] = 'PUT'
        return self.open(*args, **kw)
    def delete(self, *args, **kw):
        """Like open but method is enforced to DELETE."""
        kw['method'] = 'DELETE'
        return self.open(*args, **kw)
    def options(self, *args, **kw):
        """Like open but method is enforced to OPTIONS."""
        kw['method'] = 'OPTIONS'
        return self.open(*args, **kw)
    def trace(self, *args, **kw):
        """Like open but method is enforced to TRACE."""
        kw['method'] = 'TRACE'
        return self.open(*args, **kw)
    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self.application
        )
def create_environ(*args, **kwargs):
    """Create a new WSGI environ dict based on the values passed.

    The first parameter should be the path of the request, which
    defaults to '/'. The second one can either be an absolute path (in
    that case the host is localhost:80) or a full path to the request
    with scheme, netloc port and the path to the script.

    This is a thin wrapper over :class:`EnvironBuilder` and accepts the
    same arguments as its constructor.

    .. versionchanged:: 0.5
       This function is now a thin wrapper over :class:`EnvironBuilder` which
       was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
       and `charset` parameters were added.
    """
    builder = EnvironBuilder(*args, **kwargs)
    try:
        environ = builder.get_environ()
    finally:
        # Always release any file handles the builder opened.
        builder.close()
    return environ
def run_wsgi_app(app, environ, buffered=False):
    """Return a tuple in the form (app_iter, status, headers) of the
    application output. This works best if you pass it an application that
    returns an iterator all the time.
    Sometimes applications may use the `write()` callable returned
    by the `start_response` function. This tries to resolve such edge
    cases automatically. But if you don't get the expected output you
    should set `buffered` to `True` which enforces buffering.
    If passed an invalid WSGI application the behavior of this function is
    undefined. Never pass non-conforming WSGI applications to this function.
    :param app: the application to execute.
    :param buffered: set to `True` to enforce buffering.
    :return: tuple in the form ``(app_iter, status, headers)``
    """
    environ = _get_environ(environ)
    # ``response`` captures (status, headers); ``buffer`` collects data
    # written through the legacy write() callable.
    response = []
    buffer = []
    def start_response(status, headers, exc_info=None):
        if exc_info is not None:
            reraise(*exc_info)
        response[:] = [status, headers]
        # The returned write() callable simply appends to the buffer.
        return buffer.append
    app_iter = app(environ, start_response)
    # when buffering we emit the close call early and convert the
    # application iterator into a regular list
    if buffered:
        close_func = getattr(app_iter, 'close', None)
        try:
            app_iter = list(app_iter)
        finally:
            if close_func is not None:
                close_func()
    # otherwise we iterate the application iter until we have
    # a response, chain the already received data with the already
    # collected data and wrap it in a new `ClosingIterator` if
    # we have a close callable.
    else:
        while not response:
            buffer.append(next(app_iter))
        if buffer:
            close_func = getattr(app_iter, 'close', None)
            app_iter = chain(buffer, app_iter)
            if close_func is not None:
                app_iter = ClosingIterator(app_iter, close_func)
    return app_iter, response[0], Headers(response[1])
| apache-2.0 |
ryuunosukeyoshi/PartnerPoi-Bot | lib/youtube_dl/extractor/hypem.py | 72 | 1887 | from __future__ import unicode_literals
import json
import time
from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
)
class HypemIE(InfoExtractor):
    """Extractor for hypem.com track pages."""
    _VALID_URL = r'https?://(?:www\.)?hypem\.com/track/(?P<id>[^/]+)/'
    _TEST = {
        'url': 'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
        'md5': 'b9cc91b5af8995e9f0c1cee04c575828',
        'info_dict': {
            'id': '1v6ga',
            'ext': 'mp3',
            'title': 'Tame',
            'uploader': 'BODYWORK',
        }
    }

    def _real_extract(self, url):
        track_id = self._match_id(url)

        # Append a timestamp query parameter when fetching the page.
        query = compat_urllib_parse_urlencode({'ax': 1, 'ts': time.time()})
        page_request = sanitized_Request(url + '?' + query)
        response, urlh = self._download_webpage_handle(
            page_request, track_id, 'Downloading webpage with the url')

        # The track list is embedded as a JSON <script> block.
        html_tracks = self._html_search_regex(
            r'(?ms)<script type="application/json" id="displayList-data">(.+?)</script>',
            response, 'tracks')
        try:
            track = json.loads(html_tracks)['tracks'][0]
        except ValueError:
            raise ExtractorError('Hypemachine contained invalid JSON.')

        key = track['key']
        track_id = track['id']
        title = track['song']

        # The serve endpoint resolves the track key to a media URL.
        serve_request = sanitized_Request(
            'http://hypem.com/serve/source/%s/%s' % (track_id, key),
            '', {'Content-Type': 'application/json'})
        song_data = self._download_json(serve_request, track_id, 'Downloading metadata')

        return {
            'id': track_id,
            'url': song_data['url'],
            'ext': 'mp3',
            'title': title,
            'uploader': track.get('artist'),
        }
| gpl-3.0 |
shiminasai/ciat_plataforma | analisis/analisis/admin.py | 3 | 12085 | # -*- coding: utf-8 -*-
from django.contrib import admin
from .models import *
from django.forms import CheckboxSelectMultiple
from .forms import *
from comunicacion.lugar.models import *
from nested_inline.admin import NestedStackedInline, NestedModelAdmin, NestedTabularInline
from django.utils.translation import ugettext_lazy as _
# Register your models here.
class Pregunta_1_Inline(NestedTabularInline):
model = Pregunta_1
can_delete = False
extra = 1
can_delete = True
formfield_overrides = {
models.ManyToManyField: {'widget': CheckboxSelectMultiple},
}
def formfield_for_manytomany(self, db_field, request, **kwargs):
urlactual=request.get_full_path()
urlactual=urlactual.split('/')
if urlactual[4]!='add':
_identrevista=int(urlactual[4])
try:
a = Entrevista.objects.get(id=_identrevista)
if db_field.name == 'ubicacion':
kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
except Exception, e:
pass
else:
kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
return super(Pregunta_1_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_2_Inline(NestedTabularInline):
    # Inline admin for Pregunta_2: up to 4 deletable rows per Entrevista.
    model = Pregunta_2
    extra = 1
    max_num = 4
    can_delete = True
class Pregunta_3_Inline(NestedTabularInline):
    # Inline admin for Pregunta_3: exactly one non-deletable row; M2M
    # fields rendered as checkbox lists.
    model = Pregunta_3
    max_num = 1
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
class Pregunta_4_Inline(NestedTabularInline):
    # Inline admin for Pregunta_4: deletable rows, checkbox M2M widgets.
    model = Pregunta_4
    extra = 1
    can_delete = True
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
class Pregunta_5a_Inline(NestedTabularInline):
    # Inline admin for Pregunta_5a; limits ``ubicacion`` choices to the
    # municipios of the parent Entrevista's departamentos.
    model = Pregunta_5a
    form = Pregunta_5aForm
    extra = 1
    can_delete = True
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        # NOTE(review): assumes path segment 4 is the object pk or 'add'.
        urlactual=request.get_full_path()
        urlactual=urlactual.split('/')
        if urlactual[4]!='add':
            _identrevista=int(urlactual[4])
            try:
                a = Entrevista.objects.get(id=_identrevista)
                if db_field.name == 'ubicacion':
                    kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
            except Exception, e:
                # Best effort: keep the default queryset on failure.
                pass
        else:
            kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
        return super(Pregunta_5a_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_5c_nestedInline(NestedTabularInline):
    # Nested inline under Pregunta_5c (up to 5 rows).
    model = Pregunta_5c_nested
    extra = 1
    max_num = 5
    fk_name = 'pregunta_5c'
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
class Pregunta_5c_Inline(NestedTabularInline):
    # Inline admin for Pregunta_5c with nested rows; restricts the
    # ``innovacion`` FK choices to prioritized Pregunta_5a entries of the
    # current Entrevista.
    model = Pregunta_5c
    inlines = [Pregunta_5c_nestedInline]
    # form = Pregunta_5cForm
    max_num = 2
    can_delete = False
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'innovacion':
            # NOTE(review): path segment 4 is assumed to be the pk or 'add'.
            urlactual=request.get_full_path()
            urlactual=urlactual.split('/')
            if urlactual[4]!='add':
                _identrevista=int(urlactual[4])
                kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
            else:
                # On the add form no Entrevista pk is available.
                kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='2')
        return super(Pregunta_5c_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_5d_Inline(NestedTabularInline):
    # Inline admin for Pregunta_5d; same ``innovacion`` FK filtering as
    # Pregunta_5c_Inline.
    model = Pregunta_5d
    # form = Pregunta_5dForm
    max_num = 2
    extra = 2
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'innovacion':
            # NOTE(review): path segment 4 is assumed to be the pk or 'add'.
            urlactual=request.get_full_path()
            urlactual=urlactual.split('/')
            if urlactual[4]!='add':
                _identrevista=int(urlactual[4])
                kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
            else:
                kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='2')
        return super(Pregunta_5d_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_5e_Inline(NestedTabularInline):
    # Inline admin for Pregunta_5e; same ``innovacion`` FK filtering as
    # Pregunta_5c_Inline.
    model = Pregunta_5e
    # form = Pregunta_5eForm
    max_num = 2
    extra = 2
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'innovacion':
            # NOTE(review): path segment 4 is assumed to be the pk or 'add'.
            urlactual=request.get_full_path()
            urlactual=urlactual.split('/')
            if urlactual[4]!='add':
                _identrevista=int(urlactual[4])
                kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
            else:
                kwargs["queryset"] = Pregunta_5a.objects.filter(prioritizado='2')
        return super(Pregunta_5e_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_6a_Inline(NestedTabularInline):
    # Inline admin for Pregunta_6a; mirrors Pregunta_5a_Inline's
    # ``ubicacion`` filtering by the parent Entrevista's departamentos.
    model = Pregunta_6a
    form = Pregunta_6aForm
    extra = 1
    can_delete = True
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        # NOTE(review): assumes path segment 4 is the object pk or 'add'.
        urlactual=request.get_full_path()
        urlactual=urlactual.split('/')
        if urlactual[4]!='add':
            _identrevista=int(urlactual[4])
            try:
                a = Entrevista.objects.get(id=_identrevista)
                if db_field.name == 'ubicacion':
                    kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
            except Exception, e:
                # Best effort: keep the default queryset on failure.
                pass
        else:
            kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
        return super(Pregunta_6a_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_6c_nestedInline(NestedTabularInline):
    # Nested inline under Pregunta_6c (up to 5 rows).
    model = Pregunta_6c_nested
    extra = 1
    max_num = 5
    fk_name = 'pregunta_6c'
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
class Pregunta_6c_Inline(NestedTabularInline):
    # Inline admin for Pregunta_6c with nested rows; restricts the
    # ``innovacion`` FK choices to prioritized Pregunta_6a entries of the
    # current Entrevista.
    model = Pregunta_6c
    inlines = [Pregunta_6c_nestedInline]
    max_num = 2
    extra = 2
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'innovacion':
            # NOTE(review): path segment 4 is assumed to be the pk or 'add'.
            urlactual=request.get_full_path()
            urlactual=urlactual.split('/')
            if urlactual[4]!='add':
                _identrevista=int(urlactual[4])
                kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
            else:
                kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='2')
        return super(Pregunta_6c_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_6d_Inline(NestedTabularInline):
    # Inline admin for Pregunta_6d; same ``innovacion`` FK filtering as
    # Pregunta_6c_Inline.
    model = Pregunta_6d
    max_num = 2
    extra = 2
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'innovacion':
            # NOTE(review): path segment 4 is assumed to be the pk or 'add'.
            urlactual=request.get_full_path()
            urlactual=urlactual.split('/')
            if urlactual[4]!='add':
                _identrevista=int(urlactual[4])
                kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
            else:
                kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='2')
        return super(Pregunta_6d_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_6e_Inline(NestedTabularInline):
    # Inline admin for Pregunta_6e; same ``innovacion`` FK filtering as
    # Pregunta_6c_Inline.
    model = Pregunta_6e
    max_num = 2
    extra = 2
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        if db_field.name == 'innovacion':
            # NOTE(review): path segment 4 is assumed to be the pk or 'add'.
            urlactual=request.get_full_path()
            urlactual=urlactual.split('/')
            if urlactual[4]!='add':
                _identrevista=int(urlactual[4])
                kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='1',entrevistado__pk=_identrevista)
            else:
                kwargs["queryset"] = Pregunta_6a.objects.filter(prioritizado='2')
        return super(Pregunta_6e_Inline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class Pregunta_7a_Inline(NestedTabularInline):
    # Inline admin for Pregunta_7a; mirrors Pregunta_5a_Inline's
    # ``ubicacion`` filtering by the parent Entrevista's departamentos.
    model = Pregunta_7a
    extra = 1
    can_delete = True
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    def formfield_for_manytomany(self, db_field, request, **kwargs):
        # NOTE(review): assumes path segment 4 is the object pk or 'add'.
        urlactual=request.get_full_path()
        urlactual=urlactual.split('/')
        if urlactual[4]!='add':
            _identrevista=int(urlactual[4])
            try:
                a = Entrevista.objects.get(id=_identrevista)
                if db_field.name == 'ubicacion':
                    kwargs["queryset"] = Municipio.objects.filter(departamento__id__in=[x.id for x in a.departamento.all()])
            except Exception, e:
                # Best effort: keep the default queryset on failure.
                pass
        else:
            kwargs["queryset"] = Municipio.objects.filter(departamento__id='0')
        return super(Pregunta_7a_Inline, self).formfield_for_manytomany(db_field, request, **kwargs)
class Pregunta_7b_Inline(NestedTabularInline):
    # A single, non-deletable Pregunta_7b row per interview; M2M fields
    # render as checkbox lists.
    model = Pregunta_7b
    max_num = 1
    can_delete = False
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
class Pregunta_8_Inline(NestedTabularInline):
    # Repeatable, deletable Pregunta_8 rows with a custom field layout.
    model = Pregunta_8
    extra = 1
    can_delete = True
    # NOTE(review): ('tema') is NOT a 1-tuple -- it is just the string
    # 'tema'.  Django accepts a bare name here, but ('tema',) would state
    # the intent unambiguously.
    fields = (('organizacion','territorio1'),('periodo1','profundidad1'),('tema'))
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
class Pregunta_9_Inline(NestedTabularInline):
    # Exactly seven Pregunta_9 rows, always shown, none deletable.
    model = Pregunta_9
    extra = 7
    max_num = 7
    can_delete = False
    # fieldsets = [
    # (None, {'fields' : ('tema','prioridad','papel')}),
    # ('Auto-evaluación de la capacidad de la organización', {'fields' : ('conocimiento','experiencia')}),
    # ]
class Pregunta_11_Inline(NestedTabularInline):
    # Exactly seven Pregunta_11 rows, always shown, none deletable.
    model = Pregunta_11
    extra = 7
    max_num = 7
    can_delete = False
class EntrevistaAdmin(NestedModelAdmin):
    """Admin for Entrevista.

    Non-superusers only see and edit interviews they created themselves;
    the owning user is stamped automatically on save and hidden from the
    form.
    """

    def queryset(self, request):
        # Superusers see everything; everyone else only their own records.
        if request.user.is_superuser:
            return Entrevista.objects.all()
        return Entrevista.objects.filter(usuario=request.user)

    def save_model(self, request, obj, form, change):
        # 'usuario' is excluded from the form, so the owner can only be
        # set here, from the logged-in user.
        obj.usuario = request.user
        obj.save()

    exclude = ('usuario',)
    formfield_overrides = {
        models.ManyToManyField: {'widget': CheckboxSelectMultiple},
    }
    fieldsets = [
        (_('Informacion de la persona entrevistada'),
         {'fields': (('nombre','posicion','email','organizacion','pais','departamento','telefono'),
                     ('fecha1','alcance1','tipo_estudio',))}),
    ]
    list_display = ('organizacion','nombre','posicion','fecha1','tipo_estudio')
    list_display_links = ('organizacion','nombre')
    list_filter = ('pais',)
    inlines = [Pregunta_1_Inline, Pregunta_2_Inline, Pregunta_3_Inline, Pregunta_4_Inline,
               Pregunta_5a_Inline, Pregunta_5c_Inline, Pregunta_5d_Inline, Pregunta_5e_Inline,
               Pregunta_6a_Inline, Pregunta_6c_Inline, Pregunta_6d_Inline, Pregunta_6e_Inline,
               Pregunta_7a_Inline, Pregunta_7b_Inline, Pregunta_8_Inline, Pregunta_9_Inline,
               Pregunta_11_Inline]

    class Media:
        js = ('analisis/js/custom.js',)
        css = {
            'all': ('analisis/css/admin.css',)
        }

    def formfield_for_manytomany(self, db_field, request, **kwargs):
        """Limit the 'departamento' choices to departamentos of the pais
        already stored on the interview being edited (pk from the URL)."""
        url_parts = request.get_full_path().split('/')
        if url_parts[4] != 'add':
            entrevista_pk = int(url_parts[4])
            try:
                entrevista = Entrevista.objects.get(id=entrevista_pk)
                if db_field.name == 'departamento':
                    kwargs["queryset"] = Departamento.objects.filter(pais=entrevista.pais)
            except Exception:
                # Best effort: keep the default queryset when the interview
                # cannot be loaded.  (Was Python-2-only
                # "except Exception, e" with the binding never used.)
                pass
        return super(EntrevistaAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
# Expose the interview model in the admin via its restricted ModelAdmin.
admin.site.register(Entrevista,EntrevistaAdmin)
| mit |
thawatchai/mrkimontour | appengine-django/lib/django/core/management/templates.py | 274 | 13263 | import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
from os import path
import django
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive, six
from django.utils.six.moves.urllib.request import urlretrieve
from django.utils.version import get_docs_version
# Matches a drive letter prefix ("c:") at the start of a filesystem path.
_drive_re = re.compile('^([a-z]):', re.I)
# Matches a drive letter at the start of a URL path, where the colon may
# also appear as "|" (e.g. "file:///c|/...").
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
    """
    Copies either a Django application layout template or a Django project
    layout template into the specified directory.

    :param style: A color style object (see django.core.management.color).
    :param app_or_project: The string 'app' or 'project'.
    :param name: The name of the application or project.
    :param directory: The directory to which the template should be copied.
    :param options: The additional variables passed to project or app templates
    """
    requires_system_checks = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = False
    # The supported URL schemes
    url_schemes = ['http', 'https', 'ftp']
    # Can't perform any active locale changes during this command, because
    # setting might not be available at all.
    leave_locale_alone = True

    def add_arguments(self, parser):
        # Positional name/directory plus the --template/--extension/--name
        # rendering options shared by startapp and startproject.
        parser.add_argument('name', help='Name of the application or project.')
        parser.add_argument('directory', nargs='?', help='Optional destination directory')
        parser.add_argument('--template',
                            help='The path or URL to load the template from.')
        parser.add_argument('--extension', '-e', dest='extensions',
                            action='append', default=['py'],
                            help='The file extension(s) to render (default: "py"). '
                                 'Separate multiple extensions with commas, or use '
                                 '-e multiple times.')
        parser.add_argument('--name', '-n', dest='files',
                            action='append', default=[],
                            help='The file name(s) to render. '
                                 'Separate multiple extensions with commas, or use '
                                 '-n multiple times.')

    def handle(self, app_or_project, name, target=None, **options):
        """Locate the template directory and copy it into the target
        directory, rendering matching files through the template engine
        and cleaning up any downloaded/extracted temporaries afterwards."""
        self.app_or_project = app_or_project
        self.paths_to_remove = []
        self.verbosity = options['verbosity']

        self.validate_name(name, app_or_project)

        # if some directory is given, make sure it's nicely expanded
        if target is None:
            top_dir = path.join(os.getcwd(), name)
            try:
                os.makedirs(top_dir)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    message = "'%s' already exists" % top_dir
                else:
                    message = e
                raise CommandError(message)
        else:
            top_dir = os.path.abspath(path.expanduser(target))
            if not os.path.exists(top_dir):
                raise CommandError("Destination directory '%s' does not "
                                   "exist, please create it first." % top_dir)

        extensions = tuple(handle_extensions(options['extensions']))
        extra_files = []
        for file in options['files']:
            extra_files.extend(map(lambda x: x.strip(), file.split(',')))
        if self.verbosity >= 2:
            self.stdout.write("Rendering %s template files with "
                              "extensions: %s\n" %
                              (app_or_project, ', '.join(extensions)))
            self.stdout.write("Rendering %s template files with "
                              "filenames: %s\n" %
                              (app_or_project, ', '.join(extra_files)))

        # Context variable names depend on whether we're creating an app
        # or a project (e.g. app_name / project_name).
        base_name = '%s_name' % app_or_project
        base_subdir = '%s_template' % app_or_project
        base_directory = '%s_directory' % app_or_project
        camel_case_name = 'camel_case_%s_name' % app_or_project
        camel_case_value = ''.join(x for x in name.title() if x != '_')

        context = Context(dict(options, **{
            base_name: name,
            base_directory: top_dir,
            camel_case_name: camel_case_value,
            'docs_version': get_docs_version(),
            'django_version': django.__version__,
            'unicode_literals': '' if six.PY3 else 'from __future__ import unicode_literals\n\n',
        }), autoescape=False)

        # Setup a stub settings environment for template rendering
        from django.conf import settings
        if not settings.configured:
            settings.configure()

        template_dir = self.handle_template(options['template'],
                                            base_subdir)
        prefix_length = len(template_dir) + 1

        for root, dirs, files in os.walk(template_dir):

            path_rest = root[prefix_length:]
            relative_dir = path_rest.replace(base_name, name)
            if relative_dir:
                target_dir = path.join(top_dir, relative_dir)
                if not path.exists(target_dir):
                    os.mkdir(target_dir)

            # Prune hidden directories and __pycache__ in place so os.walk
            # does not descend into them.
            for dirname in dirs[:]:
                if dirname.startswith('.') or dirname == '__pycache__':
                    dirs.remove(dirname)

            for filename in files:
                if filename.endswith(('.pyo', '.pyc', '.py.class')):
                    # Ignore some files as they cause various breakages.
                    continue
                old_path = path.join(root, filename)
                new_path = path.join(top_dir, relative_dir,
                                     filename.replace(base_name, name))
                if path.exists(new_path):
                    raise CommandError("%s already exists, overlaying a "
                                       "project or app into an existing "
                                       "directory won't replace conflicting "
                                       "files" % new_path)

                # Only render the Python files, as we don't want to
                # accidentally render Django templates files
                with open(old_path, 'rb') as template_file:
                    content = template_file.read()
                if filename.endswith(extensions) or filename in extra_files:
                    content = content.decode('utf-8')
                    template = Engine().from_string(content)
                    content = template.render(context)
                    content = content.encode('utf-8')
                with open(new_path, 'wb') as new_file:
                    new_file.write(content)

                if self.verbosity >= 2:
                    self.stdout.write("Creating %s\n" % new_path)
                try:
                    shutil.copymode(old_path, new_path)
                    self.make_writeable(new_path)
                except OSError:
                    self.stderr.write(
                        "Notice: Couldn't set permission bits on %s. You're "
                        "probably using an uncommon filesystem setup. No "
                        "problem." % new_path, self.style.NOTICE)

        # Remove any temporary download/extraction directories created by
        # handle_template().
        if self.paths_to_remove:
            if self.verbosity >= 2:
                self.stdout.write("Cleaning up temporary files.\n")
            for path_to_remove in self.paths_to_remove:
                if path.isfile(path_to_remove):
                    os.remove(path_to_remove)
                else:
                    shutil.rmtree(path_to_remove)

    def handle_template(self, template, subdir):
        """
        Determines where the app or project templates are.
        Use django.__path__[0] as the default because we don't
        know into which directory Django has been installed.
        """
        if template is None:
            return path.join(django.__path__[0], 'conf', subdir)
        else:
            if template.startswith('file://'):
                template = template[7:]
            expanded_template = path.expanduser(template)
            expanded_template = path.normpath(expanded_template)
            if path.isdir(expanded_template):
                return expanded_template
            if self.is_url(template):
                # downloads the file and returns the path
                absolute_path = self.download(template)
            else:
                absolute_path = path.abspath(expanded_template)
            if path.exists(absolute_path):
                return self.extract(absolute_path)

        raise CommandError("couldn't handle %s template %s." %
                           (self.app_or_project, template))

    def validate_name(self, name, app_or_project):
        # Reject names that are not valid Python identifiers / directory
        # names, with a message tailored to what went wrong.
        if name is None:
            raise CommandError("you must provide %s %s name" % (
                "an" if app_or_project == "app" else "a", app_or_project))
        # If it's not a valid directory name.
        if not re.search(r'^[_a-zA-Z]\w*$', name):
            # Provide a smart error message, depending on the error.
            if not re.search(r'^[_a-zA-Z]', name):
                message = 'make sure the name begins with a letter or underscore'
            else:
                message = 'use only numbers, letters and underscores'
            raise CommandError("%r is not a valid %s name. Please %s." %
                               (name, app_or_project, message))

    def download(self, url):
        """
        Downloads the given URL and returns the file name.
        """
        def cleanup_url(url):
            # Split the URL into the last path component (tentative file
            # name) and a display form that keeps a trailing slash.
            tmp = url.rstrip('/')
            filename = tmp.split('/')[-1]
            if url.endswith('/'):
                display_url = tmp + '/'
            else:
                display_url = url
            return filename, display_url

        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
        self.paths_to_remove.append(tempdir)
        filename, display_url = cleanup_url(url)

        if self.verbosity >= 2:
            self.stdout.write("Downloading %s\n" % display_url)
        try:
            the_path, info = urlretrieve(url, path.join(tempdir, filename))
        except IOError as e:
            raise CommandError("couldn't download URL %s to %s: %s" %
                               (url, filename, e))

        used_name = the_path.split('/')[-1]

        # Trying to get better name from response headers
        content_disposition = info.get('content-disposition')
        if content_disposition:
            _, params = cgi.parse_header(content_disposition)
            guessed_filename = params.get('filename') or used_name
        else:
            guessed_filename = used_name

        # Falling back to content type guessing
        ext = self.splitext(guessed_filename)[1]
        content_type = info.get('content-type')
        if not ext and content_type:
            ext = mimetypes.guess_extension(content_type)
            if ext:
                guessed_filename += ext

        # Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
        if used_name != guessed_filename:
            guessed_path = path.join(tempdir, guessed_filename)
            shutil.move(the_path, guessed_path)
            return guessed_path

        # Giving up
        return the_path

    def splitext(self, the_path):
        """
        Like os.path.splitext, but takes off .tar, too
        """
        base, ext = posixpath.splitext(the_path)
        if base.lower().endswith('.tar'):
            ext = base[-4:] + ext
            base = base[:-4]
        return base, ext

    def extract(self, filename):
        """
        Extracts the given file to a temporarily and returns
        the path of the directory with the extracted content.
        """
        prefix = 'django_%s_template_' % self.app_or_project
        tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
        self.paths_to_remove.append(tempdir)
        if self.verbosity >= 2:
            self.stdout.write("Extracting %s\n" % filename)
        try:
            archive.extract(filename, tempdir)
            return tempdir
        except (archive.ArchiveException, IOError) as e:
            raise CommandError("couldn't extract file %s to %s: %s" %
                               (filename, tempdir, e))

    def is_url(self, template):
        """
        Returns True if the name looks like a URL
        """
        if ':' not in template:
            return False
        scheme = template.split(':', 1)[0].lower()
        return scheme in self.url_schemes

    def make_writeable(self, filename):
        """
        Make sure that the file is writeable.
        Useful if our source is read-only.
        """
        if sys.platform.startswith('java'):
            # On Jython there is no os.access()
            return
        if not os.access(filename, os.W_OK):
            st = os.stat(filename)
            new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
            os.chmod(filename, new_permissions)
| gpl-2.0 |
bcgrendel/Speechcoder | speechcoder_ext/mod_env_var.py | 1 | 1259 | # Original source code found here: http://code.activestate.com/recipes/416087/history/4/
# Modified to change/add a specific env var instead of being specified via a command line argument.
# Import the Win32 registry and GUI APIs; this script is Windows-only.
try:
    from _winreg import *
    import os, sys, win32gui, win32con
except:
    # NOTE(review): a bare except also hides unrelated failures; the intent
    # is to catch ImportError when pywin32 is not installed.
    print "[ERROR] You need to install python win32 libraries first!\n\nTry looking here:\nhttps://pypi.python.org/pypi/pywin32";
    exit();
def queryValue(key, name):
    """Return the data of registry value *name* under *key*, discarding
    the registry type code that QueryValueEx also reports."""
    return QueryValueEx(key, name)[0]
def show(key):
    # Dump up to 1024 "name=value" pairs stored under the given registry
    # key.  EnumValue raises EnvironmentError past the last index, which
    # terminates the loop.
    for i in range(1024):
        try:
            n,v,t = EnumValue(key, i)
            print '%s=%s' % (n, v)
        except EnvironmentError:
            break
def main():
    """Store the NPP_DRAGON system environment variable (set to the
    current working directory) under HKLM and broadcast the change.

    Requires privileges to write HKEY_LOCAL_MACHINE.  If the variable
    name were PATH, the new value would be appended to the existing one
    instead of replacing it.
    """
    reg = None
    key = None
    try:
        path = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'
        reg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        key = OpenKey(reg, path, 0, KEY_ALL_ACCESS)

        name = "NPP_DRAGON"
        value = os.getcwd()
        if name.upper() == 'PATH':
            # PATH is cumulative: append rather than overwrite.
            value = queryValue(key, name) + ';' + value
        if value:
            SetValueEx(key, name, 0, REG_EXPAND_SZ, value)
        else:
            DeleteValue(key, name)
        # Tell running applications (e.g. Explorer) to re-read the
        # environment block so the change takes effect without a reboot.
        win32gui.SendMessage(win32con.HWND_BROADCAST,
                             win32con.WM_SETTINGCHANGE, 0, 'Environment')
    except Exception as e:
        print(e)
    finally:
        # Close handles even when ConnectRegistry/OpenKey failed part-way.
        # The original closed them unconditionally (and used Python-2-only
        # "except Exception, e"), so an early failure raised NameError.
        if key is not None:
            CloseKey(key)
        if reg is not None:
            CloseKey(reg)
# Apply the registry change when executed as a script.
if __name__=='__main__':
    main()
| mit |
pilou-/ansible | lib/ansible/modules/network/fortios/fortios_authentication_scheme.py | 24 | 11368 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_authentication_scheme
short_description: Configure Authentication Schemes in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure authentication feature and scheme category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
authentication_scheme:
description:
- Configure Authentication Schemes.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
domain-controller:
description:
- Domain controller setting. Source user.domain-controller.name.
fsso-agent-for-ntlm:
description:
- FSSO agent to use for NTLM authentication. Source user.fsso.name.
fsso-guest:
description:
- Enable/disable user fsso-guest authentication (default = disable).
choices:
- enable
- disable
kerberos-keytab:
description:
- Kerberos keytab setting. Source user.krb-keytab.name.
method:
description:
- Authentication methods (default = basic).
choices:
- ntlm
- basic
- digest
- form
- negotiate
- fsso
- rsso
- ssh-publickey
name:
description:
- Authentication scheme name.
required: true
negotiate-ntlm:
description:
- Enable/disable negotiate authentication for NTLM (default = disable).
choices:
- enable
- disable
require-tfa:
description:
- Enable/disable two-factor authentication (default = disable).
choices:
- enable
- disable
ssh-ca:
description:
- SSH CA name. Source firewall.ssh.local-ca.name.
user-database:
description:
- Authentication server to contain user information; "local" (default) or "123" (for LDAP).
suboptions:
name:
description:
- Authentication server name. Source system.datasource.name user.radius.name user.tacacs+.name user.ldap.name user.group.name.
required: true
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure Authentication Schemes.
fortios_authentication_scheme:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
authentication_scheme:
state: "present"
domain-controller: "<your_own_value> (source user.domain-controller.name)"
fsso-agent-for-ntlm: "<your_own_value> (source user.fsso.name)"
fsso-guest: "enable"
kerberos-keytab: "<your_own_value> (source user.krb-keytab.name)"
method: "ntlm"
name: "default_name_8"
negotiate-ntlm: "enable"
require-tfa: "enable"
ssh-ca: "<your_own_value> (source firewall.ssh.local-ca.name)"
user-database:
-
name: "default_name_13 (source system.datasource.name user.radius.name user.tacacs+.name user.ldap.name user.group.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session on the FortiGate described by the module parameters.

    Turns on API debugging and uses HTTPS unless the caller explicitly
    disabled it via the ``https`` option.
    """
    fos.debug('on')

    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')

    fos.login(data['host'], data['username'], data['password'])
def filter_authentication_scheme_data(json):
    """Return a copy of *json* restricted to the keys understood by the
    FortiOS 'authentication scheme' endpoint, dropping unset (None)
    values and any unknown keys."""
    option_list = ['domain-controller', 'fsso-agent-for-ntlm', 'fsso-guest',
                   'kerberos-keytab', 'method', 'name',
                   'negotiate-ntlm', 'require-tfa', 'ssh-ca',
                   'user-database']

    return dict((option, json[option])
                for option in option_list
                if option in json and json[option] is not None)
def authentication_scheme(data, fos):
    """Create/update (state == 'present') or delete (state == 'absent')
    an authentication scheme on the FortiGate, returning the raw API
    response dict."""
    vdom = data['vdom']
    scheme = data['authentication_scheme']
    payload = filter_authentication_scheme_data(scheme)

    state = scheme['state']
    if state == "present":
        return fos.set('authentication',
                       'scheme',
                       data=payload,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('authentication',
                          'scheme',
                          mkey=payload['name'],
                          vdom=vdom)
def fortios_authentication(data, fos):
    """Log in, dispatch to the first configured authentication sub-task,
    log out, and return (is_error, has_changed, response).

    Assumes at least one entry of ``methodlist`` is present in *data*
    (guaranteed by main()'s argument_spec); otherwise ``resp`` would be
    unbound, exactly as in the original implementation.
    """
    login(data)

    # Explicit name->callable dispatch instead of eval() on a string --
    # same behaviour without evaluating arbitrary expressions.
    methodlist = {'authentication_scheme': authentication_scheme}
    for method_name, method in methodlist.items():
        if data[method_name]:
            resp = method(data, fos)
            break

    fos.logout()
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Ansible module entry point: declare the argument spec, build the
    FortiOS API client and run the authentication-scheme task, exiting
    through AnsibleModule's JSON interface."""
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        # NOTE(review): the default is the *string* "False" rather than the
        # boolean False; Ansible's bool coercion appears to be relied on here.
        "https": {"required": False, "type": "bool", "default": "False"},
        "authentication_scheme": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "domain-controller": {"required": False, "type": "str"},
                "fsso-agent-for-ntlm": {"required": False, "type": "str"},
                "fsso-guest": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
                "kerberos-keytab": {"required": False, "type": "str"},
                "method": {"required": False, "type": "str",
                           "choices": ["ntlm", "basic", "digest",
                                       "form", "negotiate", "fsso",
                                       "rsso", "ssh-publickey"]},
                "name": {"required": True, "type": "str"},
                "negotiate-ntlm": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "require-tfa": {"required": False, "type": "str",
                                "choices": ["enable", "disable"]},
                "ssh-ca": {"required": False, "type": "str"},
                "user-database": {"required": False, "type": "list",
                                  "options": {
                                      "name": {"required": True, "type": "str"}
                                  }}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency is reported
    # through Ansible's fail_json instead of a raw ImportError traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_authentication(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
afterlastangel/simple-logistics | src/processor.py | 1 | 5614 | __author__ = 'afterlastangel@gmail.com'
from math import hypot
import googlemaps
import os
from math import radians, cos, sin, asin, sqrt
class Journey:
    """A sequence of delivery-request indices plus its total travel time.

    The original also declared ``length = 0`` and ``data = []`` at class
    level; the mutable class-level list is a classic shared-state
    foot-gun and both were always shadowed by __init__, so they have
    been removed.
    """

    def __init__(self):
        # Total duration (seconds) of the journey so far.
        self.length = 0
        # Ordered list of destination indices served by this journey.
        self.data = []
class Point:
    """A geographic coordinate in decimal degrees."""

    def __init__(self, latitude, longitude):
        self.latitude, self.longitude = latitude, longitude
class DeliveryRequest:
    """One delivery: a pickup point (``start``) and a drop-off point
    (``end``)."""

    def __init__(self, start, end):
        self.start, self.end = start, end
def find_shortest_journey(journey_array):
    """Return (data, length) of the shortest journey with positive length
    in journey_array[1:], or (None, inf) when none qualifies.

    Index 0 is skipped: it corresponds to the HQ / starting column.
    The returned list is a copy of the journey's data.
    """
    best_data = None
    best_length = float("inf")
    for journey in journey_array[1:]:
        if 0 < journey.length < best_length:
            best_length = journey.length
            best_data = journey.data[:]
    return best_data, best_length
def process_data(data, max_length):
    """Find the best single-worker journey by dynamic programming.

    ``data[i][j]`` is the travel time from (the end of) destination i to
    and through destination j; row/column 0 is the HQ and 0 means "no
    edge".  ``journey_array[i][j]`` holds the shortest journey that
    visits i+1 destinations and ends at j, with total length capped at
    ``max_length``.

    Returns find_shortest_journey() over the last generation that could
    still be extended: (list of destination indices, total length), or
    (None, inf) if not even a first trip fits.
    """
    size = len(data)
    # init journey_array
    journey_array = [[Journey() for _ in range(size)] for _ in range(size)]
    # init data for first trip: single destinations reached directly
    # from the HQ (row 0), if within the travel budget.
    for j in range(1, size):
        i = 0
        if 0 < data[i][j] <= max_length:
            journey_array[i][j].data.append(j)
            journey_array[i][j].length = data[i][j]
    # loop for next trips
    for i in range(1, size-1):
        found = False
        # loop for number of destinations exclude starter (HQ)
        for j in range(1, size):
            # check for previous step, find the shortest extension ending at j
            shortest_length = float("inf")
            shortest_journey_data = None
            for k in range(1, size):
                # Extend a previous journey ending at k, provided j was not
                # already visited and an edge k->j exists.
                if journey_array[i-1][k].data and j not in journey_array[i-1][k].data and data[k][j] > 0:
                    new_length = journey_array[i-1][k].length + data[k][j]
                    if new_length <= max_length and new_length < shortest_length:
                        shortest_journey_data = journey_array[i-1][k].data[:]
                        shortest_length = new_length
            if shortest_length != float("inf"):
                journey_array[i][j].data = shortest_journey_data[:]
                journey_array[i][j].data.append(j)
                journey_array[i][j].length = shortest_length
                found = True
        if not found:
            # Nothing could be extended: report the best journey of the
            # previous (longest feasible) generation.
            return find_shortest_journey(journey_array[i-1])
    return find_shortest_journey(journey_array[size-2])
def remove_journey(data, journey):
    """Zero out row k and column k of the square duration matrix *data*
    for every destination k served by *journey*, in place.

    This makes those destinations unreachable/unleavable for subsequent
    process_data() calls.  Replaces the original triple loop (which
    re-scanned the whole matrix per destination, O(k*n^2)) with direct
    row/column zeroing, O(k*n); assumes *data* is square, as built by
    calculate_request().
    """
    for k in journey:
        # Zero the whole row with an in-place slice assignment so the
        # caller's list object is preserved.
        data[k][:] = [0] * len(data[k])
        # Zero column k of every row.
        for row in data:
            row[k] = 0
def haversine(lon1, lat1, lon2, lat2):
    """
    Calculate the great circle distance between two points
    on the earth (specified in decimal degrees), in kilometres,
    using the haversine formula with an earth radius of 6367 km.
    """
    # convert decimal degrees to radians
    lon1, lat1, lon2, lat2 = (radians(v) for v in (lon1, lat1, lon2, lat2))

    # haversine formula
    dlon, dlat = lon2 - lon1, lat2 - lat1
    h = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    return 6367 * 2 * asin(sqrt(h))
def duration(point1, point2):
    """
    Find duration https://developers.google.com/maps/documentation/distance-matrix/intro

    Returns the travel time from point1 to point2 in seconds as reported
    by the Google Distance Matrix API, falling back to a straight-line
    estimate when the API call does not succeed.
    """
    # NOTE(review): builds a fresh Client per call and assumes the
    # SIMPLE_LOGISTICS_KEY environment variable holds a valid API key.
    gmaps = googlemaps.Client(key=os.environ.get('SIMPLE_LOGISTICS_KEY'))
    result = gmaps.distance_matrix([(point1.latitude,point1.longitude)], [(point2.latitude,point2.longitude)])
    if result['status'] == u'OK':
        if result['rows'][0]['elements'][0]['status'] == u'OK':
            return result['rows'][0]['elements'][0]['duration']['value']
    # in case of google maps fail, use estimation and speed 20km/h
    # (haversine yields km; 180 s/km corresponds to 20 km/h).
    return haversine(point1.longitude, point1.latitude, point2.longitude, point2.latitude) * 180
def calculate_request(hq, requests):
    """Build the (len(requests)+1)^2 duration matrix consumed by
    process_data().

    Index 0 represents the HQ.  ``matrix[i][j]`` (i != j, j >= 1) is the
    travel time from the end of request i (or from the HQ when i == 0)
    to the start of request j, plus the time needed to carry out request
    j itself.  Column 0 and the diagonal stay 0.
    """
    n = len(requests)
    matrix = [[0 for _ in range(n + 1)] for _ in range(n + 1)]

    # Each request's own pickup->dropoff time appears in every row of its
    # column; compute it once instead of issuing the identical Distance
    # Matrix API query up to n times per request, as the original did.
    service_time = [duration(r.start, r.end) for r in requests]

    for j in range(1, n + 1):
        matrix[0][j] = duration(hq, requests[j - 1].start) + service_time[j - 1]
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            if i != j:
                matrix[i][j] = duration(requests[i - 1].end, requests[j - 1].start) \
                    + service_time[j - 1]
    return matrix
def read_data():
    """Parse dataset.txt and return (worker, max_travel, hq, requests).

    Expected format (space-separated):
      line 1: "<number_of_workers> <max_travel>"
      line 2: "<hq_latitude> <hq_longitude>"
      others: "<start_lat> <start_lon> <end_lat> <end_lon>" per request
    """
    requests = []
    # 'with' guarantees the file handle is closed; the original opened the
    # file and never closed it.
    with open('dataset.txt', 'r') as f:
        for lineno, line in enumerate(f):
            parts = line.split(" ")
            if lineno == 0:
                worker = int(parts[0])
                max_travel = float(parts[1])
            elif lineno == 1:
                hq = Point(float(parts[0]), float(parts[1]))
            else:
                start = Point(float(parts[0]), float(parts[1]))
                end = Point(float(parts[2]), float(parts[3]))
                requests.append(DeliveryRequest(start, end))
    return worker, max_travel, hq, requests
def main():
    """Entry point: read dataset.txt, build the duration matrix, then
    greedily assign each worker the shortest feasible journey, printing
    it and removing the served destinations before the next worker.

    Test data
    data = [[0, 1, 4, 1],
    [0, 0, 1, 3],
    [0, 1, 0, 1],
    [0, 1, 1, 0]]
    """
    worker, max_travel, hq, requests = read_data()
    data = calculate_request(hq, requests)
    """
    data = [[0, 5, 7, 12, 6],
    [0, 0, 5, 7, 3],
    [0, 7, 0, 5, 4],
    [0, 4, 5, 0, 4],
    [0, 8, 5, 8, 4]]
    max_travel = 100
    """
    # One journey per worker; stop early once no feasible journey remains.
    for i in range(worker):
        journey, length = process_data(data, max_travel)
        if journey:
            print journey, length
            remove_journey(data, journey)
        else:
            break
# Run the solver when executed as a script.
if __name__ == "__main__":
    main()
| gpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/google/appengine/ext/db/__init__.py | 3 | 124053 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple, schema-based database abstraction layer for the datastore.
Modeled after Django's abstraction layer on top of SQL databases,
http://www.djangoproject.com/documentation/mode_api/. Ours is a little simpler
and a lot less code because the datastore is so much simpler than SQL
databases.
The programming model is to declare Python subclasses of the Model class,
declaring datastore properties as class members of that class. So if you want to
publish a story with title, body, and created date, you would do it like this:
class Story(db.Model):
title = db.StringProperty()
body = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
You can create a new Story in the datastore with this usage pattern:
story = Story(title='My title')
story.body = 'My body'
story.put()
You query for Story entities using built in query interfaces that map directly
to the syntax and semantics of the datastore:
stories = Story.all().filter('date >=', yesterday).order('-date')
for story in stories:
print story.title
The Property declarations enforce types by performing validation on assignment.
For example, the DateTimeProperty enforces that you assign valid datetime
objects, and if you supply the "required" option for a property, you will not
be able to assign None to that property.
We also support references between models, so if a story has comments, you
would represent it like this:
class Comment(db.Model):
story = db.ReferenceProperty(Story)
body = db.TextProperty()
When you get a story out of the datastore, the story reference is resolved
automatically the first time it is referenced, which makes it easy to use
model instances without performing additional queries by hand:
comment = Comment.get(key)
print comment.story.title
Likewise, you can access the set of comments that refer to each story through
this property through a reverse reference called comment_set, which is a Query
preconfigured to return all matching comments:
story = Story.get(key)
for comment in story.comment_set:
print comment.body
"""
import copy
import datetime
import logging
import re
import time
import urlparse
import warnings
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.datastore import datastore_rpc
from google.appengine.datastore import datastore_query
Error = datastore_errors.Error
BadValueError = datastore_errors.BadValueError
BadPropertyError = datastore_errors.BadPropertyError
BadRequestError = datastore_errors.BadRequestError
EntityNotFoundError = datastore_errors.EntityNotFoundError
BadArgumentError = datastore_errors.BadArgumentError
QueryNotFoundError = datastore_errors.QueryNotFoundError
TransactionNotFoundError = datastore_errors.TransactionNotFoundError
Rollback = datastore_errors.Rollback
TransactionFailedError = datastore_errors.TransactionFailedError
BadFilterError = datastore_errors.BadFilterError
BadQueryError = datastore_errors.BadQueryError
BadKeyError = datastore_errors.BadKeyError
InternalError = datastore_errors.InternalError
NeedIndexError = datastore_errors.NeedIndexError
ReferencePropertyResolveError = datastore_errors.ReferencePropertyResolveError
Timeout = datastore_errors.Timeout
CommittedButStillApplying = datastore_errors.CommittedButStillApplying
ValidationError = BadValueError
Key = datastore_types.Key
Category = datastore_types.Category
Link = datastore_types.Link
Email = datastore_types.Email
GeoPt = datastore_types.GeoPt
IM = datastore_types.IM
PhoneNumber = datastore_types.PhoneNumber
PostalAddress = datastore_types.PostalAddress
Rating = datastore_types.Rating
Text = datastore_types.Text
Blob = datastore_types.Blob
ByteString = datastore_types.ByteString
BlobKey = datastore_types.BlobKey
READ_CAPABILITY = datastore.READ_CAPABILITY
WRITE_CAPABILITY = datastore.WRITE_CAPABILITY
STRONG_CONSISTENCY = datastore.STRONG_CONSISTENCY
EVENTUAL_CONSISTENCY = datastore.EVENTUAL_CONSISTENCY
NESTED = datastore_rpc.TransactionOptions.NESTED
MANDATORY = datastore_rpc.TransactionOptions.MANDATORY
ALLOWED = datastore_rpc.TransactionOptions.ALLOWED
INDEPENDENT = datastore_rpc.TransactionOptions.INDEPENDENT
KEY_RANGE_EMPTY = "Empty"
"""Indicates the given key range is empty and the datastore's
automatic ID allocator will not assign keys in this range to new
entities.
"""
KEY_RANGE_CONTENTION = "Contention"
"""Indicates the given key range is empty but the datastore's
automatic ID allocator may assign new entities keys in this range.
However it is safe to manually assign keys in this range
if either of the following is true:
- No other request will insert entities with the same kind and parent
as the given key range until all entities with manually assigned
keys from this range have been written.
- Overwriting entities written by other requests with the same kind
and parent as the given key range is acceptable.
The datastore's automatic ID allocator will not assign a key to a new
entity that will overwrite an existing entity, so once the range is
populated there will no longer be any contention.
"""
KEY_RANGE_COLLISION = "Collision"
"""Indicates that entities with keys inside the given key range
already exist and writing to this range will overwrite those entities.
Additionally the implications of KEY_RANGE_COLLISION apply. If
overwriting entities that exist in this range is acceptable it is safe
to use the given range.
The datastore's automatic ID allocator will never assign a key to
a new entity that will overwrite an existing entity so entities
written by the user to this range will never be overwritten by
an entity with an automatically assigned key.
"""
_kind_map = {}
_SELF_REFERENCE = object()
_RESERVED_WORDS = set(['key_name'])
# --- db-level exception types ----------------------------------------------
# All of these derive, directly or via BadValueError, from the datastore
# Error base aliased above, so callers can catch the whole family at once.


class NotSavedError(Error):
  """Raised when a saved-object action is performed on a non-saved object."""


class KindError(BadValueError):
  """Raised when an entity is used with incorrect Model."""


class PropertyError(Error):
  """Raised when non-existent property is referenced."""


class DuplicatePropertyError(Error):
  """Raised when a property is duplicated in a model definition."""


class ConfigurationError(Error):
  """Raised when a property or model is improperly configured."""


class ReservedWordError(Error):
  """Raised when a property is defined for a reserved word."""


class DerivedPropertyError(Error):
  """Raised when attempting to assign a value to a derived property."""
# Python value types that may be stored in a (fixed) model property.
_ALLOWED_PROPERTY_TYPES = set([
    basestring,
    str,
    unicode,
    bool,
    int,
    long,
    float,
    Key,
    datetime.datetime,
    datetime.date,
    datetime.time,
    Blob,
    datastore_types.EmbeddedEntity,
    ByteString,
    Text,
    users.User,
    Category,
    Link,
    Email,
    GeoPt,
    IM,
    PhoneNumber,
    PostalAddress,
    Rating,
    BlobKey,
    ])

# Dynamic (Expando) properties additionally accept lists, tuples and None.
_ALLOWED_EXPANDO_PROPERTY_TYPES = set(_ALLOWED_PROPERTY_TYPES)
_ALLOWED_EXPANDO_PROPERTY_TYPES.update((list, tuple, type(None)))
_OPERATORS = ['<', '<=', '>', '>=', '=', '==', '!=', 'in']
_FILTER_REGEX = re.compile(
'^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(_OPERATORS),
re.IGNORECASE | re.UNICODE)
def class_for_kind(kind):
  """Look up the Model subclass responsible for implementing a kind.

  Args:
    kind: Entity kind string.

  Returns:
    The Model subclass registered for the kind.

  Raises:
    KindError when no implementation has been registered for kind.
  """
  if kind in _kind_map:
    return _kind_map[kind]
  raise KindError('No implementation for kind \'%s\'' % kind)
def check_reserved_word(attr_name):
  """Reject attribute names that may not be used as property names.

  Args:
    attr_name: Candidate property name.

  Raises:
    ReservedWordError when attr_name is a '__...__' style name, appears in
    _RESERVED_WORDS, or collides with an attribute of Model.
  """
  if datastore_types.RESERVED_PROPERTY_NAME.match(attr_name):
    raise ReservedWordError(
        "Cannot define property. All names both beginning and "
        "ending with '__' are reserved.")

  name_is_reserved = attr_name in _RESERVED_WORDS or attr_name in dir(Model)
  if name_is_reserved:
    raise ReservedWordError(
        "Cannot define property using reserved word '%(attr_name)s'. "
        "If you would like to use this name in the datastore consider "
        "using a different name like %(attr_name)s_ and adding "
        "name='%(attr_name)s' to the parameter list of the property "
        "definition." % {'attr_name': attr_name})
def query_descendants(model_instance):
  """Build a query over all descendants of a model instance.

  Args:
    model_instance: Model instance whose descendants are wanted.

  Returns:
    Query retrieving every entity that has the given model instance as an
    ancestor. Unlike normal ancestor queries, the ancestor itself is
    excluded from the results.
  """
  descendants_query = Query().ancestor(model_instance)
  key_property = datastore_types.KEY_SPECIAL_PROPERTY
  # Strict key inequality keeps the ancestor itself out of the results.
  descendants_query.filter(key_property + ' >', model_instance.key())
  return descendants_query
def model_to_protobuf(model_instance, _entity_class=datastore.Entity):
  """Serialize a model instance to its protocol buffer form.

  Args:
    model_instance: Model instance to encode.

  Returns:
    entity_pb.EntityProto representation of the model instance.
  """
  entity = model_instance._populate_entity(_entity_class)
  return entity.ToPb()
def model_from_protobuf(pb, _entity_class=datastore.Entity):
  """Reconstruct a model instance from a protocol buffer.

  Args:
    pb: The protocol buffer representation of the model instance. Can be an
      entity_pb.EntityProto or str encoding of an entity_bp.EntityProto

  Returns:
    Model instance resulting from decoding the protocol buffer.
  """
  entity = _entity_class.FromPb(pb, default_kind=Expando.kind())
  model_class = class_for_kind(entity.kind())
  return model_class.from_entity(entity)
def model_is_projection(model_instance):
  """Report whether a model instance holds only a projection of an entity.

  Args:
    model_instance: Model instance to inspect.

  Returns:
    True if the instance is backed by an entity that is a projection of the
    full entity, otherwise False. (The previous implementation leaked the
    backing entity or None instead of an actual boolean, despite being
    documented as returning true/false; truthiness is unchanged.)
  """
  return bool(model_instance._entity and model_instance._entity.is_projection())
def _initialize_properties(model_class, name, bases, dct):
  """Initialize Property attributes for Model-class.

  Collects Property descriptors inherited from the base classes and those
  defined directly in dct, rejecting duplicates, and records the result on
  model_class._properties (plus cached name sets).

  Args:
    model_class: Model class to initialize properties for.
    name: Name of the class being created.
    bases: Tuple of base classes of the new class.
    dct: Dictionary of attributes defined on the new class.

  Raises:
    DuplicatePropertyError when the same property name is defined twice or
      inherited from two distinct base classes.
    ReservedWordError when a property name is reserved (via
      check_reserved_word).
  """
  model_class._properties = {}
  property_source = {}

  def get_attr_source(name, cls):
    # Walk the MRO to find which class actually defines the attribute.
    # NOTE(review): implicitly returns None when no class in the MRO
    # defines it -- presumably unreachable for names found in _properties.
    for src_cls in cls.mro():
      if name in src_cls.__dict__:
        return src_cls

  defined = set()
  for base in bases:
    if hasattr(base, '_properties'):
      property_keys = set(base._properties.keys())
      duplicate_property_keys = defined & property_keys
      for dupe_prop_name in duplicate_property_keys:
        # The same name was inherited from two bases; this is only an
        # error when it originates from two distinct defining classes.
        old_source = property_source[dupe_prop_name] = get_attr_source(
            dupe_prop_name, property_source[dupe_prop_name])
        new_source = get_attr_source(dupe_prop_name, base)
        if old_source != new_source:
          raise DuplicatePropertyError(
              'Duplicate property, %s, is inherited from both %s and %s.' %
              (dupe_prop_name, old_source.__name__, new_source.__name__))

      property_keys -= duplicate_property_keys
      if property_keys:
        defined |= property_keys
        property_source.update(dict.fromkeys(property_keys, base))
        model_class._properties.update(base._properties)

  for attr_name in dct.keys():
    attr = dct[attr_name]
    if isinstance(attr, Property):
      check_reserved_word(attr_name)
      if attr_name in defined:
        raise DuplicatePropertyError('Duplicate property: %s' % attr_name)
      defined.add(attr_name)
      model_class._properties[attr_name] = attr
      # Tell the descriptor which class owns it and under what name.
      attr.__property_config__(model_class, attr_name)

  # Cache frozen views of all property (storage) names, and of those that
  # are not indexed by the datastore.
  model_class._all_properties = frozenset(
      prop.name for name, prop in model_class._properties.items())

  model_class._unindexed_properties = frozenset(
      prop.name for name, prop in model_class._properties.items()
      if not prop.indexed)
def _coerce_to_key(value):
  """Coerce a model, key, or encoded key string to a Key.

  Args:
    value: a Model or Key instance or string encoded key or None

  Returns:
    The corresponding key, or None if value is None.

  Raises:
    BadArgumentError when more than one model or key is supplied.
  """
  if value is None:
    return None

  normalized, _ = datastore.NormalizeAndTypeCheck(
      value, (Model, Key, basestring))
  if len(normalized) > 1:
    raise datastore_errors.BadArgumentError('Expected only one model or key')

  candidate = normalized[0]
  if isinstance(candidate, Model):
    return candidate.key()
  if isinstance(candidate, basestring):
    return Key(candidate)
  return candidate
class PropertiedClass(type):
  """Meta-class for initializing Model classes properties.

  Used for initializing Properties defined in the context of a model.
  By using a meta-class much of the configuration of a Property
  descriptor becomes implicit. By using this meta-class, descriptors
  that are of class Model are notified about which class they
  belong to and what attribute they are associated with and can
  do appropriate initialization via __property_config__.

  Duplicate properties are not permitted.
  """

  def __init__(cls, name, bases, dct, map_kind=True):
    """Initializes a class that might have property definitions.

    This method is called when a class is created with the PropertiedClass
    meta-class.

    Loads all properties for this model and its base classes in to a dictionary
    for easy reflection via the 'properties' method.

    Configures each property defined in the new class.

    Duplicate properties, either defined in the new class or defined separately
    in two base classes are not permitted.

    Properties may not assigned to names which are in the list of
    _RESERVED_WORDS. It is still possible to store a property using a reserved
    word in the datastore by using the 'name' keyword argument to the Property
    constructor.

    Args:
      cls: Class being initialized.
      name: Name of new class.
      bases: Base classes of new class.
      dct: Dictionary of new definitions for class.
      map_kind: Whether to register the class in the global kind map.

    Raises:
      DuplicatePropertyError when a property is duplicated either in the new
        class or separately in two base classes.
      ReservedWordError when a property is given a name that is in the list of
        reserved words, attributes of Model and names of the form '__.*__'.
    """
    super(PropertiedClass, cls).__init__(name, bases, dct)

    _initialize_properties(cls, name, bases, dct)

    # Register the new model class so entities of this kind can be mapped
    # back to it later (see class_for_kind / _kind_map).
    if map_kind:
      _kind_map[cls.kind()] = cls
# Sentinel returned by Property.get_updated_value_for_datastore() when a
# property does not auto-update its value on put().
AUTO_UPDATE_UNCHANGED = object()
class Property(object):
  """A Property is an attribute of a Model.

  It defines the type of the attribute, which determines how it is stored
  in the datastore and how the property values are validated. Different property
  types support different options, which change validation rules, default
  values, etc. The simplest example of a property is a StringProperty:

     class Story(db.Model):
       title = db.StringProperty()
  """

  # Class-level counter; each instance snapshots it so that properties can
  # later be ordered by declaration order.
  creation_counter = 0

  def __init__(self,
               verbose_name=None,
               name=None,
               default=None,
               required=False,
               validator=None,
               choices=None,
               indexed=True):
    """Initializes this Property with the given options.

    Args:
      verbose_name: User friendly name of property.
      name: Storage name for property. By default, uses attribute name
        as it is assigned in the Model sub-class.
      default: Default value for property if none is assigned.
      required: Whether property is required.
      validator: User provided method used for validation.
      choices: User provided set of valid property values.
      indexed: Whether property is indexed.
    """
    self.verbose_name = verbose_name
    self.name = name
    self.default = default
    self.required = required
    self.validator = validator
    self.choices = choices
    self.indexed = indexed
    # Snapshot and bump the class-level counter to record declaration order.
    self.creation_counter = Property.creation_counter
    Property.creation_counter += 1

  def __property_config__(self, model_class, property_name):
    """Configure property, connecting it to its model.

    Configure the property so that it knows its property name and what class
    it belongs to.

    Args:
      model_class: Model class which Property will belong to.
      property_name: Name of property within Model instance to store property
        values in. By default this will be the property name preceded by
        an underscore, but may change for different subclasses.
    """
    self.model_class = model_class
    # Unless the caller chose an explicit storage name, use the attribute
    # name under which the property was declared.
    if self.name is None:
      self.name = property_name

  def __get__(self, model_instance, model_class):
    """Returns the value for this property on the given model instance.

    See http://docs.python.org/ref/descriptors.html for a description of
    the arguments to this class and what they mean."""
    # Accessed on the class itself: return the descriptor, per convention.
    if model_instance is None:
      return self

    try:
      return getattr(model_instance, self._attr_name())
    except AttributeError:
      # Value never assigned on this instance.
      return None

  def __set__(self, model_instance, value):
    """Sets the value for this property on the given model instance.

    See http://docs.python.org/ref/descriptors.html for a description of
    the arguments to this class and what they mean.
    """
    # Validation happens on every assignment, not just at save time.
    value = self.validate(value)
    setattr(model_instance, self._attr_name(), value)

  def default_value(self):
    """Default value for unassigned values.

    Returns:
      Default value as provided by __init__(default).
    """
    return self.default

  def validate(self, value):
    """Assert that provided value is compatible with this property.

    Args:
      value: Value to validate against this Property.

    Returns:
      A valid value, either the input unchanged or adapted to the
      required type.

    Raises:
      BadValueError if the value is not appropriate for this
      property in any way.
    """
    if self.empty(value):
      if self.required:
        raise BadValueError('Property %s is required' % self.name)
    else:
      if self.choices:
        if value not in self.choices:
          raise BadValueError('Property %s is %r; must be one of %r' %
                              (self.name, value, self.choices))
    # The user-supplied validator runs for empty and non-empty values alike.
    if self.validator is not None:
      self.validator(value)
    return value

  def empty(self, value):
    """Determine if value is empty in the context of this property.

    For most kinds, this is equivalent to "not value", but for kinds like
    bool, the test is more subtle, so subclasses can override this method
    if necessary.

    Args:
      value: Value to validate against this Property.

    Returns:
      True if this value is considered empty in the context of this Property
      type, otherwise False.
    """
    return not value

  def get_value_for_datastore(self, model_instance):
    """Datastore representation of this property.

    Looks for this property in the given model instance, and returns the proper
    datastore representation of the value that can be stored in a datastore
    entity. Most critically, it will fetch the datastore key value for
    reference properties.

    Some properties (e.g. DateTimeProperty, UserProperty) optionally update
    their value on every put(). This call must return the current value for
    such properties (get_updated_value_for_datastore returns the new value).

    Args:
      model_instance: Instance to fetch datastore value from.

    Returns:
      Datastore representation of the model value in a form that is
      appropriate for storing in the datastore.
    """
    return self.__get__(model_instance, model_instance.__class__)

  def get_updated_value_for_datastore(self, model_instance):
    """Determine new value for auto-updated property.

    Some properties (e.g. DateTimeProperty, UserProperty) optionally update
    their value on every put(). This call must return the new desired value
    for such properties. For all other properties, this call must return
    AUTO_UPDATE_UNCHANGED.

    Args:
      model_instance: Instance to get new value for.

    Returns:
      Datastore representation of the new model value in a form that is
      appropriate for storing in the datastore, or AUTO_UPDATE_UNCHANGED.
    """
    # Base implementation: no auto-update; subclasses override.
    return AUTO_UPDATE_UNCHANGED

  def make_value_from_datastore_index_value(self, index_value):
    # Convert a value taken from a datastore index (projection queries)
    # back to this property's native representation.
    value = datastore_types.RestoreFromIndexValue(index_value, self.data_type)
    return self.make_value_from_datastore(value)

  def make_value_from_datastore(self, value):
    """Native representation of this property.

    Given a value retrieved from a datastore entity, return a value,
    possibly converted, to be stored on the model instance. Usually
    this returns the value unchanged, but a property class may
    override this when it uses a different datatype on the model
    instance than on the entity.

    This API is not quite symmetric with get_value_for_datastore(),
    because the model instance on which to store the converted value
    may not exist yet -- we may be collecting values to be passed to a
    model constructor.

    Args:
      value: value retrieved from the datastore entity.

    Returns:
      The value converted for use as a model instance attribute.
    """
    return value

  def _require_parameter(self, kwds, parameter, value):
    """Sets kwds[parameter] to value.

    If kwds[parameter] exists and is not value, raises ConfigurationError.

    Args:
      kwds: The parameter dict, which maps parameter names (strings) to values.
      parameter: The name of the parameter to set.
      value: The value to set it to.
    """
    if parameter in kwds and kwds[parameter] != value:
      raise ConfigurationError('%s must be %s.' % (parameter, value))

    kwds[parameter] = value

  def _attr_name(self):
    """Attribute name we use for this property in model instances.

    DO NOT USE THIS METHOD.
    """
    # Values are stored on the instance under the storage name prefixed
    # with an underscore, leaving the bare name to the descriptor itself.
    return '_' + self.name

  # Default Python type handled by this property; subclasses override.
  data_type = str

  def datastore_type(self):
    """Deprecated backwards-compatible accessor method for self.data_type."""
    return self.data_type
class Index(datastore._BaseIndex):
  """A datastore index."""

  # Expose the protected _BaseIndex accessors under public names.
  id = datastore._BaseIndex._Id
  kind = datastore._BaseIndex._Kind
  has_ancestor = datastore._BaseIndex._HasAncestor
  properties = datastore._BaseIndex._Properties
class Model(object):
"""Model is the superclass of all object entities in the datastore.
The programming model is to declare Python subclasses of the Model class,
declaring datastore properties as class members of that class. So if you want
to publish a story with title, body, and created date, you would do it like
this:
class Story(db.Model):
title = db.StringProperty()
body = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
A model instance can have a single parent. Model instances without any
parent are root entities. It is possible to efficiently query for
instances by their shared parent. All descendants of a single root
instance also behave as a transaction group. This means that when you
work with one member of the group within a transaction all descendants of
that root join the transaction. All operations within a transaction on this
group are ACID.
"""
__metaclass__ = PropertiedClass
def __new__(*args, **unused_kwds):
  """Allow subclasses to call __new__() with arguments.

  Do NOT list 'cls' as the first argument, or in the case when
  the 'unused_kwds' dictionary contains the key 'cls', the function
  will complain about multiple argument values for 'cls'.

  Raises:
    TypeError if there are no positional arguments.
  """
  # 'cls' is deliberately taken from *args instead of being a named
  # parameter -- see the docstring above.
  if args:
    cls = args[0]
  else:
    raise TypeError('object.__new__(): not enough arguments')

  # Keyword arguments are intentionally not forwarded; object.__new__
  # would reject them.
  return super(Model, cls).__new__(cls)
def __init__(self,
             parent=None,
             key_name=None,
             _app=None,
             _from_entity=False,
             **kwds):
  """Creates a new instance of this model.

  To create a new entity, you instantiate a model and then call put(),
  which saves the entity to the datastore:

     person = Person()
     person.name = 'Bret'
     person.put()

  You can initialize properties in the model in the constructor with keyword
  arguments:

     person = Person(name='Bret')

  We initialize all other properties to the default value (as defined by the
  properties in the model definition) if they are not provided in the
  constructor.

  Args:
    parent: Parent instance for this instance or None, indicating a top-
      level instance.
    key_name: Name for new model instance.
    _from_entity: Intentionally undocumented.
    kwds: Keyword arguments mapping to properties of model. Also:
      key: Key instance for this instance, if provided makes parent and
           key_name redundant (they do not need to be set but if they are
           they must match the key).
  """
  namespace = None
  # Legacy calling convention: _app may carry an (app, namespace) pair.
  if isinstance(_app, tuple):
    if len(_app) != 2:
      raise BadArgumentError('_app must have 2 values if type is tuple.')
    _app, namespace = _app

  key = kwds.get('key', None)
  if key is not None:
    # Normalize path tuples/lists and encoded key strings to a Key.
    if isinstance(key, (tuple, list)):
      key = Key.from_path(*key)
    if isinstance(key, basestring):
      key = Key(encoded=key)
    if not isinstance(key, Key):
      raise TypeError('Expected Key type; received %s (is %s)' %
                      (key, key.__class__.__name__))

    # A complete, matching key is required when 'key' is supplied.
    if not key.has_id_or_name():
      raise BadKeyError('Key must have an id or name')
    if key.kind() != self.kind():
      raise BadKeyError('Expected Key kind to be %s; received %s' %
                        (self.kind(), key.kind()))
    if _app is not None and key.app() != _app:
      raise BadKeyError('Expected Key app to be %s; received %s' %
                        (_app, key.app()))
    if namespace is not None and key.namespace() != namespace:
      raise BadKeyError('Expected Key namespace to be %s; received %s' %
                        (namespace, key.namespace()))

    # key_name/parent may only be passed redundantly alongside 'key'.
    if key_name and key_name != key.name():
      raise BadArgumentError('Cannot use key and key_name at the same time'
                             ' with different values')
    if parent and parent != key.parent():
      raise BadArgumentError('Cannot use key and parent at the same time'
                             ' with different values')
    namespace = key.namespace()
    self._key = key
    self._key_name = None
    self._parent = None
    self._parent_key = None
  else:
    # No explicit key: identity comes from key_name and/or parent.
    if key_name == '':
      raise BadKeyError('Name cannot be empty.')
    elif key_name is not None and not isinstance(key_name, basestring):
      raise BadKeyError('Name must be string type, not %s' %
                        key_name.__class__.__name__)

    if parent is not None:
      if not isinstance(parent, (Model, Key)):
        raise TypeError('Expected Model type; received %s (is %s)' %
                        (parent, parent.__class__.__name__))
      # A parent instance must already have a full key of its own.
      if isinstance(parent, Model) and not parent.has_key():
        raise BadValueError(
            "%s instance must have a complete key before it can be used as a "
            "parent." % parent.kind())
      if isinstance(parent, Key):
        self._parent_key = parent
        self._parent = None
      else:
        self._parent_key = parent.key()
        self._parent = parent
    else:
      self._parent_key = None
      self._parent = None
    self._key_name = key_name
    self._key = None

  # The instance namespace must agree with the parent's namespace.
  if self._parent_key is not None:
    if namespace is not None and self._parent_key.namespace() != namespace:
      raise BadArgumentError(
          'Expected parent namespace to be %r; received %r' %
          (namespace, self._parent_key.namespace()))
    namespace = self._parent_key.namespace()

  self._entity = None
  if _app is not None and isinstance(_app, Key):
    raise BadArgumentError('_app should be a string; received Key(\'%s\'):\n'
                           ' This may be the result of passing \'key\' as '
                           'a positional parameter in SDK 1.2.6. Please '
                           'only pass \'key\' as a keyword parameter.' % _app)

  # Fall back to the ambient namespace when none was derived above.
  if namespace is None:
    namespace = namespace_manager.get_namespace()

  self._app = _app
  self.__namespace = namespace

  is_projection = False
  # When constructed from a saved entity, adopt it directly and drop the
  # cached key attributes (the entity owns the key from here on).
  if isinstance(_from_entity, datastore.Entity) and _from_entity.is_saved():
    self._entity = _from_entity
    is_projection = _from_entity.is_projection()
    del self._key_name
    del self._key

  for prop in self.properties().values():
    if prop.name in kwds:
      value = kwds[prop.name]
    elif is_projection:
      # Projections carry only a subset of properties; leave others unset.
      continue
    else:
      value = prop.default_value()
    try:
      prop.__set__(self, value)
    except DerivedPropertyError:
      # Derived properties compute their own values; only surface the
      # error when the caller explicitly tried to assign one.
      if prop.name in kwds and not _from_entity:
        raise
def key(self):
  """Unique key for this entity.

  This property is only available if this entity is already stored in the
  datastore or if it has a full key, so it is available if this entity was
  fetched returned from a query, or after put() is called the first time
  for new entities, or if a complete key was given when constructed.

  Returns:
    Datastore key of persisted entity.

  Raises:
    NotSavedError when entity is not persistent.
  """
  if self.is_saved():
    # Saved instances delegate to the backing entity's key.
    return self._entity.key()
  elif self._key:
    return self._key
  elif self._key_name:
    # Lazily build and cache the full key from kind/key_name/parent.
    parent = self._parent_key or (self._parent and self._parent.key())
    self._key = Key.from_path(self.kind(), self._key_name, parent=parent,
                              _app=self._app, namespace=self.__namespace)
    return self._key
  else:
    raise NotSavedError()
def __set_property(self, entity, name, datastore_value):
  """Store datastore_value on entity under name, dropping empty lists.

  The datastore cannot represent an empty list, so such a value removes
  the property from the entity instead.
  """
  if datastore_value != []:
    entity[name] = datastore_value
  else:
    entity.pop(name, None)
def _to_entity(self, entity):
  """Copies information from this model to provided entity.

  Args:
    entity: Entity to save information on.
  """
  for prop in self.properties().values():
    value = prop.get_value_for_datastore(self)
    self.__set_property(entity, prop.name, value)

  # Entities that support it are told which properties to leave unindexed.
  setter = getattr(entity, 'set_unindexed_properties', None)
  if setter is not None:
    setter(self._unindexed_properties)
def _populate_internal_entity(self, _entity_class=datastore.Entity):
  """Populates self._entity, saving its state to the datastore.

  After this method is called, calling is_saved() will return True.

  Returns:
    Populated self._entity
  """
  self._entity = self._populate_entity(_entity_class=_entity_class)
  # Overlay values from properties that auto-update on put()
  # (see Property.get_updated_value_for_datastore).
  for prop in self.properties().values():
    new_value = prop.get_updated_value_for_datastore(self)
    if new_value is not AUTO_UPDATE_UNCHANGED:
      self.__set_property(self._entity, prop.name, new_value)

  # The entity now owns the key, so drop any cached key attributes.
  for attr in ('_key_name', '_key'):
    try:
      delattr(self, attr)
    except AttributeError:
      pass
  return self._entity
def put(self, **kwargs):
  """Writes this model instance to the datastore.

  If this instance is new, we add an entity to the datastore.
  Otherwise, we update this instance, and the key will remain the
  same.

  Args:
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    The key of the instance (either the existing key or a new key).

  Raises:
    TransactionFailedError if the data could not be committed.
  """
  self._populate_internal_entity()
  return datastore.Put(self._entity, **kwargs)

# Alias for put().
save = put
def _populate_entity(self, _entity_class=datastore.Entity):
  """Internal helper -- Populate self._entity or create a new one
  if that one does not exist. Does not change any state of the instance
  other than the internal state of the entity.

  This method is separate from _populate_internal_entity so that it is
  possible to call to_xml without changing the state of an unsaved entity
  to saved.

  Returns:
    self._entity or a new Entity which is not stored on the instance.
  """
  if self.is_saved():
    entity = self._entity
  else:
    # Build the keyword arguments that describe the new entity's identity.
    kwds = {'_app': self._app, 'namespace': self.__namespace,
            'unindexed_properties': self._unindexed_properties}
    if self._key is not None:
      # A full key was supplied at construction: reuse its id/name/parent.
      if self._key.id():
        kwds['id'] = self._key.id()
      else:
        kwds['name'] = self._key.name()
      if self._key.parent():
        kwds['parent'] = self._key.parent()
    else:
      # Otherwise identity comes from key_name and/or parent.
      if self._key_name is not None:
        kwds['name'] = self._key_name
      if self._parent_key is not None:
        kwds['parent'] = self._parent_key
      elif self._parent is not None:
        kwds['parent'] = self._parent._entity
    entity = _entity_class(self.kind(), **kwds)

  self._to_entity(entity)
  return entity
def delete(self, **kwargs):
  """Deletes this entity from the datastore.

  Args:
    config: datastore_rpc.Configuration to use for this request.

  Raises:
    TransactionFailedError if the data could not be committed.
  """
  datastore.Delete(self.key(), **kwargs)
  # Cache the full key first (key() may still read self._entity), then
  # clear the entity so the instance reads as unsaved but keeps its key.
  self._key = self.key()
  self._key_name = None
  self._parent_key = None
  self._entity = None
def is_saved(self):
  """Determine if entity is persisted in the datastore.

  New instances of Model do not start out saved in the data. Objects which
  are saved to or loaded from the Datastore will have a True saved state.

  Returns:
    True if object has been persisted to the datastore, otherwise False.
  """
  # A backing entity is only attached once the instance has been stored
  # in, or loaded from, the datastore.
  return self._entity is not None
def has_key(self):
  """Determine if this model instance has a complete key.

  When not using a fully self-assigned Key, ids are not assigned until the
  data is saved to the Datastore, but instances with a key name always have
  a full key.

  Returns:
    True if the object has been persisted to the datastore or has a key
    or has a key_name, otherwise False. (The previous implementation could
    leak the Key or key_name string instead of a real boolean, despite
    being documented as returning true/false; truthiness is unchanged.)
  """
  return bool(self.is_saved() or self._key or self._key_name)
def dynamic_properties(self):
  """Returns a list of all dynamic properties defined for instance."""
  # Plain Model instances never carry dynamic properties; presumably
  # Expando overrides this -- its definition is elsewhere in this module.
  return []
def instance_properties(self):
  """Alias for dynamic_properties."""
  return self.dynamic_properties()
def parent(self):
  """Get the parent of the model instance.

  Returns:
    Parent of contained entity or parent provided in constructor, None if
    instance has no parent.
  """
  if self._parent is None:
    parent_key = self.parent_key()
    if parent_key is not None:
      # Fetch the parent from the datastore on first access and cache it.
      self._parent = get(parent_key)
  return self._parent
def parent_key(self):
  """Get the parent's key.

  This method is useful for avoiding a potential fetch from the datastore
  but still get information about the instances parent.

  Returns:
    Parent key of entity, None if there is no parent.
  """
  # Check the possible sources of the parent key in priority order.
  if self._parent_key is not None:
    return self._parent_key
  if self._parent is not None:
    return self._parent.key()
  if self._entity is not None:
    return self._entity.parent()
  if self._key is not None:
    return self._key.parent()
  return None
def to_xml(self, _entity_class=datastore.Entity):
  """Generate an XML representation of this model instance.

  atom and gd:namespace properties are converted to XML according to their
  respective schemas. For more information, see:

    http://www.atomenabled.org/developers/syndication/
    http://code.google.com/apis/gdata/common-elements.html
  """
  # Populating via _populate_entity leaves the saved state untouched.
  return self._populate_entity(_entity_class).ToXml()
@classmethod
def get(cls, keys, **kwargs):
  """Fetch instance from the datastore of a specific Model type using key.

  We support Key objects and string keys (we convert them to Key objects
  automatically).

  Useful for ensuring that specific instance types are retrieved from the
  datastore. It also helps that the source code clearly indicates what
  kind of object is being retrieved. Example:

    story = Story.get(story_key)

  Args:
    keys: Key within datastore entity collection to find; or string key;
      or list of Keys or string keys.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    If a single key was given: a Model instance associated with key
    for the provided class if it exists in the datastore, otherwise
    None. If a list of keys was given: a list where list[i] is the
    Model instance for keys[i], or None if no instance exists.

  Raises:
    KindError if any of the retrieved objects are not instances of the
    type associated with call to 'get'.
  """
  fetched = get(keys, **kwargs)
  if fetched is None:
    return None

  # Normalize the single-result case so both shapes can be verified below.
  instances = [fetched] if isinstance(fetched, Model) else fetched
  for instance in instances:
    if instance is not None and not isinstance(instance, cls):
      raise KindError('Kind %r is not a subclass of kind %r' %
                      (instance.kind(), cls.kind()))
  return fetched
@classmethod
def get_by_key_name(cls, key_names, parent=None, **kwargs):
  """Get instance of Model class by its key's name.

  Args:
    key_names: A single key-name or a list of key-names.
    parent: Parent of instances to get. Can be a model or key.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    A single instance (or None) for a single key name; a list of instances
    for a list of key names.

  Raises:
    BadArgumentError when parent cannot be coerced to a valid key.
  """
  try:
    parent = _coerce_to_key(parent)
  except BadKeyError as e:
    # 'except ... as' (PEP 3110) replaces the legacy 'except E, e' form,
    # which is a SyntaxError on Python 3; behavior is identical on 2.6+.
    raise BadArgumentError(str(e))
  key_names, multiple = datastore.NormalizeAndTypeCheck(key_names, basestring)
  keys = [datastore.Key.from_path(cls.kind(), name, parent=parent)
          for name in key_names]
  if multiple:
    return get(keys, **kwargs)
  else:
    return get(keys[0], **kwargs)
@classmethod
def get_by_id(cls, ids, parent=None, **kwargs):
  """Get instance of Model class by id.

  Args:
    ids: A single id or a list of ids.
    parent: Parent of instances to get. Can be a model or key.
    config: datastore_rpc.Configuration to use for this request.
  """
  if isinstance(parent, Model):
    parent = parent.key()
  ids, multiple = datastore.NormalizeAndTypeCheck(ids, (int, long))
  keys = [datastore.Key.from_path(cls.kind(), entity_id, parent=parent)
          for entity_id in ids]
  if not multiple:
    return get(keys[0], **kwargs)
  return get(keys, **kwargs)
@classmethod
def get_or_insert(cls, key_name, **kwds):
  """Transactionally retrieve or create an instance of this model.

  Works much like dict.setdefault(): look up the entity with the given
  key name (and the optional 'parent' keyword argument); if it does not
  exist, construct it from the remaining keyword arguments and store it
  under that key name.  Subsequent calls with the same key_name and
  parent always yield the same entity (though not the same object
  instance), regardless of the other keyword arguments — unless the
  entity has been deleted in the meantime, in which case a new one is
  created.  Lookup and insert happen inside a single transaction, which
  is what guarantees uniqueness.

  Example:
    class WikiTopic(db.Model):
      creation_date = db.DatetimeProperty(auto_now_add=True)
      body = db.TextProperty(required=True)
    topic = WikiTopic.get_or_insert('CommonIdioms',
                                    body='This topic is totally new!')
    # A second call returns the same entity and ignores the new body.
    again = WikiTopic.get_or_insert('CommonIdioms',
                                    body='A totally different message!')

  Args:
    key_name: Key name to retrieve or create.
    **kwds: Keyword arguments passed to the model constructor if no
      instance with the given key name exists yet; discarded otherwise.
      A 'parent' entry, if supplied, must be a Model instance and is
      used for both the lookup and a newly created instance.

  Returns:
    The existing or freshly created instance of this model class.

  Raises:
    TransactionFailedError if the instance could not be retrieved or
    created transactionally (due to high contention, etc).
  """
  def txn():
    # Lookup and conditional insert share one transaction so that two
    # concurrent callers cannot both create the entity.
    entity = cls.get_by_key_name(key_name, parent=kwds.get('parent'))
    if entity is not None:
      return entity
    entity = cls(key_name=key_name, **kwds)
    entity.put()
    return entity
  return run_in_transaction(txn)
@classmethod
def all(cls, **kwds):
  """Return a query over all instances of this model in the datastore.

  Returns:
    Query that will retrieve all instances from the entity collection.
  """
  query = Query(cls, **kwds)
  return query
@classmethod
def gql(cls, query_string, *args, **kwds):
  """Build a GqlQuery over this model's kind from a GQL string.

  See appengine/ext/gql for more information about GQL.

  Args:
    query_string: Properly formatted GQL query string with the
      'SELECT * FROM <entity>' part omitted.
    *args: Positional arguments used to bind numeric references in the
      query.
    **kwds: Dictionary-based arguments (for named parameters).
  """
  full_query = 'SELECT * FROM %s %s' % (cls.kind(), query_string)
  return GqlQuery(full_query, *args, **kwds)
@classmethod
def _load_entity_values(cls, entity):
  """Extract this model's declared property values from an entity.

  Builds a dict of {property name: converted value} for every property
  declared on cls that is present in the entity, running each raw
  datastore value through the property's conversion hook.

  Args:
    entity: Entity which contains values to search dynamic properties for.

  Returns:
    Dict mapping property names to converted values.
  """
  entity_values = {}
  for prop in cls.properties().values():
    if prop.name in entity:
      try:
        value = entity[prop.name]
      except KeyError:
        # Membership test passed but item lookup still raised; fall back
        # to an empty list for this property.
        entity_values[prop.name] = []
      else:
        if entity.is_projection():
          # Projection queries return index values, which need a
          # different conversion than full entity values.
          value = prop.make_value_from_datastore_index_value(value)
        else:
          value = prop.make_value_from_datastore(value)
        entity_values[prop.name] = value
  return entity_values
@classmethod
def from_entity(cls, entity):
  """Convert a datastore.Entity into an instance of cls.

  Args:
    entity: Entity loaded directly from the datastore.

  Raises:
    KindError when cls is the wrong model class for entity.
  """
  if entity.kind() != cls.kind():
    raise KindError('Class %s cannot handle kind \'%s\'' %
                    (repr(cls), entity.kind()))
  values = cls._load_entity_values(entity)
  key = entity.key()
  if key.has_id_or_name():
    values['key'] = key
  return cls(None, _from_entity=entity, **values)
@classmethod
def kind(cls):
  """Returns the datastore kind we use for this model.

  We just use the name of the model for now, ignoring potential
  collisions between identically named classes in different modules.
  """
  return cls.__name__
@classmethod
def entity_type(cls):
  """Soon to be removed alias for kind(); prefer kind() in new code."""
  return cls.kind()
@classmethod
def properties(cls):
  """Returns a dictionary of all the properties defined for this model.

  The returned dict is a copy, so callers may mutate it freely.
  """
  return dict(cls._properties)
@classmethod
def fields(cls):
  """Soon to be removed alias for properties(); prefer properties()."""
  return cls.properties()
def create_rpc(deadline=None, callback=None, read_policy=STRONG_CONSISTENCY):
  """Create an rpc for use in configuring datastore calls.

  NOTE: This function exists for backwards compatibility.  Please use
  create_config() instead.  NOTE: the latter uses 'on_completion',
  which is a function taking an argument, whereas create_rpc uses
  'callback' which is a function without arguments.

  Args:
    deadline: float, deadline for calls in seconds.
    callback: callable, a callback triggered when this rpc completes,
      accepts one argument: the returned rpc.
    read_policy: flag, set to EVENTUAL_CONSISTENCY to enable eventually
      consistent reads.

  Returns:
    A datastore.DatastoreRPC instance.
  """
  return datastore.CreateRPC(
      deadline=deadline, callback=callback, read_policy=read_policy)
def get_async(keys, **kwargs):
  """Asynchronously fetch the specified Model instance(s) from the datastore.

  Identical to db.get() except that it returns an asynchronous object.
  Call get_result() on the return value to block on the call and obtain
  the results.
  """
  keys, multiple = datastore.NormalizeAndTypeCheckKeys(keys)

  def extra_hook(entities):
    # Convert raw entities into model instances, preserving order and
    # keeping None placeholders for missing entities.
    if not multiple and not entities:
      return None
    models = []
    for entity in entities:
      if entity is None:
        models.append(None)
      else:
        model_class = class_for_kind(entity.kind())
        models.append(model_class.from_entity(entity))
    if multiple:
      return models
    assert len(models) == 1
    return models[0]

  return datastore.GetAsync(keys, extra_hook=extra_hook, **kwargs)
def get(keys, **kwargs):
  """Fetch the Model instance(s) with the given key(s) from the datastore.

  Both Key objects and string keys are supported (string keys are
  converted to Key objects automatically).

  Args:
    keys: Key within datastore entity collection to find; or string key;
      or list of Keys or string keys.
    config: datastore_rpc.Configuration to use for this request, must be
      specified as a keyword argument.

  Returns:
    For a single key: the matching Model instance if it exists in the
    datastore, otherwise None.  For a list of keys: a list where element
    i is the Model instance for keys[i], or None if no instance exists.
  """
  rpc = get_async(keys, **kwargs)
  return rpc.get_result()
def put_async(models, **kwargs):
  """Asynchronously store one or more Model instances.

  Identical to db.put() except that it returns an asynchronous object.
  Call get_result() on the return value to block on the call and obtain
  the results.
  """
  models, multiple = datastore.NormalizeAndTypeCheck(models, Model)
  entities = [model._populate_internal_entity() for model in models]

  def extra_hook(keys):
    # Mirror the input shape: a list of models yields a list of keys,
    # a single model yields a single key.
    if multiple:
      return keys
    assert len(keys) == 1
    return keys[0]

  return datastore.PutAsync(entities, extra_hook=extra_hook, **kwargs)
def put(models, **kwargs):
  """Store one or more Model instances.

  Args:
    models: Model instance or list of Model instances.
    config: datastore_rpc.Configuration to use for this request, must be
      specified as a keyword argument.

  Returns:
    A Key if models is a single instance, or a list of Keys in the same
    order as models if models is a list.

  Raises:
    TransactionFailedError if the data could not be committed.
  """
  rpc = put_async(models, **kwargs)
  return rpc.get_result()


# Historical alias kept for backwards compatibility.
save = put
def delete_async(models, **kwargs):
  """Asynchronous version of deleting one or more Model instances.

  Identical to db.delete() except that it returns an asynchronous object.
  Call get_result() on the return value to block on the call.
  """
  if isinstance(models, (basestring, Model, Key)):
    items = [models]
  else:
    try:
      items = iter(models)
    except TypeError:
      # Not iterable: treat it as a single value and let _coerce_to_key
      # report any type problem.
      items = [models]
  keys = [_coerce_to_key(item) for item in items]
  return datastore.DeleteAsync(keys, **kwargs)
def delete(models, **kwargs):
  """Delete one or more Model instances.

  Args:
    models: Model instance, key, key string or iterable thereof.
    config: datastore_rpc.Configuration to use for this request, must be
      specified as a keyword argument.

  Raises:
    TransactionFailedError if the data could not be committed.
  """
  rpc = delete_async(models, **kwargs)
  rpc.get_result()
def allocate_ids_async(model, size, **kwargs):
  """Asynchronously allocate a range of IDs.

  Identical to allocate_ids() except that it returns an asynchronous
  object.  Call get_result() on the return value to block on the call
  and obtain the result.
  """
  key = _coerce_to_key(model)
  return datastore.AllocateIdsAsync(key, size=size, **kwargs)
def allocate_ids(model, size, **kwargs):
  """Allocate a range of IDs of the given size for the given model key.

  Reserves a range of IDs in the datastore so that they will not be
  automatically assigned to new entities.  You can only allocate IDs for
  model keys from your app.  On error, a subclass of
  datastore_errors.Error is raised.

  Args:
    model: Model instance, Key or string to serve as a template
      specifying the ID sequence in which to allocate IDs.  Returned ids
      should only be used in entities with the same parent (if any) and
      kind as this key.
    size: Number of IDs to allocate.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    (start, end) of the allocated range, inclusive.
  """
  rpc = allocate_ids_async(model, size, **kwargs)
  return rpc.get_result()
def allocate_id_range(model, start, end, **kwargs):
  """Allocates a range of IDs with specific endpoints.

  Once these IDs have been allocated they may be provided manually to
  newly created entities.

  Since the datastore's automatic ID allocator will never assign
  a key to a new entity that will cause an existing entity to be
  overwritten, entities written to the given key range will never be
  overwritten.  However, writing entities with manually assigned keys in
  this range may overwrite existing entities (or new entities written by
  a separate request) depending on the key range state returned.

  This method should only be used if you have an existing numeric id
  range that you want to reserve, e.g. bulk loading entities that
  already have IDs.  If you don't care about which IDs you receive, use
  allocate_ids instead.

  Args:
    model: Model instance, Key or string to serve as a template
      specifying the ID sequence in which to allocate IDs.  Allocated
      ids should only be used in entities with the same parent (if any)
      and kind as this key.
    start: first id of the range to allocate, inclusive.
    end: last id of the range to allocate, inclusive.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    One of (KEY_RANGE_EMPTY, KEY_RANGE_CONTENTION, KEY_RANGE_COLLISION).
    If not KEY_RANGE_EMPTY, this represents a potential issue with using
    the allocated key range.

  Raises:
    BadArgumentError if start/end are not positive or end < start.
  """
  key = _coerce_to_key(model)
  # Validation only; the normalized result is intentionally discarded.
  datastore.NormalizeAndTypeCheck((start, end), (int, long))
  if start < 1 or end < 1:
    raise BadArgumentError('Start %d and end %d must both be > 0.' %
                           (start, end))
  if start > end:
    raise BadArgumentError('Range end %d cannot be less than start %d.' %
                           (end, start))
  # Reserve everything up to 'end' first; if the allocator's safe start
  # is already past our requested start, another request raced us.
  safe_start, _ = datastore.AllocateIds(key, max=end, **kwargs)
  race_condition = safe_start > start
  start_key = Key.from_path(key.kind(), start, parent=key.parent(),
                            _app=key.app(), namespace=key.namespace())
  end_key = Key.from_path(key.kind(), end, parent=key.parent(),
                          _app=key.app(), namespace=key.namespace())
  # Probe for any existing entity inside the requested key range; a
  # single hit is enough to report a collision.
  collision = (Query(keys_only=True, namespace=key.namespace(), _app=key.app())
               .filter('__key__ >=', start_key)
               .filter('__key__ <=', end_key).fetch(1))
  # Collision outranks contention: existing entities are the more
  # serious problem for callers writing into this range.
  if collision:
    return KEY_RANGE_COLLISION
  elif race_condition:
    return KEY_RANGE_CONTENTION
  else:
    return KEY_RANGE_EMPTY
def _index_converter(index):
  """Convert a raw datastore index into an Index value."""
  return Index(
      index.Id(), index.Kind(), index.HasAncestor(), index.Properties())
def get_indexes_async(**kwargs):
  """Asynchronously retrieve the application indexes and their states.

  Identical to get_indexes() except that it returns an asynchronous
  object.  Call get_result() on the return value to block on the call
  and obtain the results.
  """
  def extra_hook(indexes):
    # Wrap each raw index while passing its state through untouched.
    return [(_index_converter(index), state) for index, state in indexes]
  return datastore.GetIndexesAsync(extra_hook=extra_hook, **kwargs)
def get_indexes(**kwargs):
  """Retrieves the application indexes and their states.

  Args:
    config: datastore_rpc.Configuration to use for this request, must be
      specified as a keyword argument.

  Returns:
    A list of (Index, Index.[BUILDING|SERVING|DELETING|ERROR]) tuples.
    An index can be in the following states:
      Index.BUILDING: Index is being built and therefore can not serve
        queries
      Index.SERVING: Index is ready to service queries
      Index.DELETING: Index is being deleted
      Index.ERROR: Index encountered an error in the BUILDING state
  """
  return get_indexes_async(**kwargs).get_result()
class Expando(Model):
  """Dynamically expandable model.

  An Expando does not require (but can still benefit from) the definition
  of any properties before it can be used to store information in the
  datastore.  Properties can be added to an expando object by simply
  performing an assignment.  The assignment of properties is done on
  an instance by instance basis, so it is possible for one object of an
  expando type to have different properties from another or even the same
  properties with different types.  It is still possible to define
  properties on an expando, allowing those properties to behave the same
  as on any other model.

  Example:
    import datetime

    class Song(db.Expando):
      title = db.StringProperty()

    crazy = Song(title='Crazy like a diamond',
                 author='Lucy Sky',
                 publish_date='yesterday',
                 rating=5.0)

    hoboken = Song(title='The man from Hoboken',
                   author=['Anthony', 'Lou'],
                   publish_date=datetime.datetime(1977, 5, 3))

    crazy.last_minute_note=db.Text('Get a train to the station.')

  Possible Uses:
    One use of an expando is to create an object without any specific
    structure and later, when your application matures and is in the
    right state, change it to a normal model object and define explicit
    properties.

  Additional exceptions for expando:
    Protected attributes (ones whose names begin with '_') cannot be used
    as dynamic properties.  These are names that are reserved for
    protected transient (non-persisted) attributes.

  Order of lookup:
    When trying to set or access an attribute value, any other defined
    properties, such as methods and other values in __dict__ take
    precedence over values in the datastore.

    1 - Because it is not possible for the datastore to know what kind of
        property to store on an undefined expando value, setting a
        property to None is the same as deleting it from the expando.

    2 - Persistent variables on Expando must not begin with '_'.  These
        variables are considered to be 'protected' in Python, and are
        used internally.

    3 - Expando's dynamic properties are not able to store empty lists.
        Attempting to assign an empty list to a dynamic property will
        raise ValueError.  Static properties on Expando can still support
        empty lists but like normal Model properties are restricted from
        using None.
  """

  # Map of {name: value} for this instance's dynamic properties; the
  # class-level None marks "not yet initialized" (see __setattr__).
  _dynamic_properties = None

  def __init__(self, parent=None, key_name=None, _app=None, **kwds):
    """Creates a new instance of this expando model.

    Args:
      parent: Parent instance for this instance or None, indicating a
        top-level instance.
      key_name: Name for new model instance.
      _app: Intentionally undocumented.
      kwds: Keyword arguments mapping to properties of model.
    """
    super(Expando, self).__init__(parent, key_name, _app, **kwds)
    self._dynamic_properties = {}
    for prop, value in kwds.iteritems():
      if prop not in self._all_properties and prop != 'key':
        # Only treat the argument as a dynamic property when no data
        # descriptor (e.g. a property object) of that name exists on the
        # class; otherwise just validate the name.
        if not (hasattr(getattr(type(self), prop, None), '__set__')):
          setattr(self, prop, value)
        else:
          check_reserved_word(prop)

  def __setattr__(self, key, value):
    """Dynamically set field values that are not defined.

    Tries to set the value on the object normally, but failing that
    sets the value on the contained entity.

    Args:
      key: Name of attribute.
      value: Value to set for attribute.  Must be compatible with
        datastore.

    Raises:
      ValueError on attempt to assign empty list.
      TypeError when the value's type is not allowed for dynamic
        properties.
    """
    check_reserved_word(key)
    # Names starting with '_' and names shadowed by a class-level data
    # descriptor go through normal attribute assignment instead of the
    # dynamic-property store.
    if (key[:1] != '_' and
        not hasattr(getattr(type(self), key, None), '__set__')):
      if value == []:
        raise ValueError('Cannot store empty list to dynamic property %s' %
                         key)
      if type(value) not in _ALLOWED_EXPANDO_PROPERTY_TYPES:
        raise TypeError("Expando cannot accept values of type '%s'." %
                        type(value).__name__)
      # _dynamic_properties may still be the class-level None if this
      # runs before __init__ has assigned the instance dict.
      if self._dynamic_properties is None:
        self._dynamic_properties = {}
      self._dynamic_properties[key] = value
    else:
      super(Expando, self).__setattr__(key, value)

  def __getattribute__(self, key):
    """Get attribute from expando.

    Must be overridden to allow dynamic properties to obscure class
    attributes.  Since all attributes are stored in
    self._dynamic_properties, the normal __getattribute__ does not
    attempt to access it until __setattr__ is called.  By then, the
    static attribute being overwritten has already been located and
    returned from the call.

    This method short circuits the usual __getattribute__ call when
    finding a dynamic property and returns it to the user via
    __getattr__.  __getattr__ is called to preserve backward
    compatibility with older Expando models that may have overridden the
    original __getattr__.

    NOTE: Access to properties defined by Python descriptors are not
    obscured because setting those attributes are done through the
    descriptor and does not place those attributes in
    self._dynamic_properties.
    """
    if not key.startswith('_'):
      dynamic_properties = self._dynamic_properties
      if dynamic_properties is not None and key in dynamic_properties:
        return self.__getattr__(key)
    return super(Expando, self).__getattribute__(key)

  def __getattr__(self, key):
    """If no explicit attribute defined, retrieve value from entity.

    Tries to get the value on the object normally, but failing that
    retrieves value from contained entity.

    Args:
      key: Name of attribute.

    Raises:
      AttributeError when there is no attribute for key on object or
      contained entity.
    """
    _dynamic_properties = self._dynamic_properties
    if _dynamic_properties is not None and key in _dynamic_properties:
      return _dynamic_properties[key]
    else:
      return getattr(super(Expando, self), key)

  def __delattr__(self, key):
    """Remove attribute from expando.

    Expando is not like normal entities in that undefined fields
    can be removed.

    Args:
      key: Dynamic property to be deleted.
    """
    if self._dynamic_properties and key in self._dynamic_properties:
      del self._dynamic_properties[key]
    else:
      object.__delattr__(self, key)

  def dynamic_properties(self):
    """Determine which properties are particular to instance of entity.

    Returns:
      List of names which correspond only to the dynamic properties.
    """
    if self._dynamic_properties is None:
      return []
    return self._dynamic_properties.keys()

  def _to_entity(self, entity):
    """Store to entity, deleting dynamic properties that no longer exist.

    When the expando is saved, it is possible that a given property no
    longer exists.  In this case, the property will be removed from the
    saved instance.

    Args:
      entity: Entity which will receive dynamic properties.
    """
    super(Expando, self)._to_entity(entity)
    if self._dynamic_properties is None:
      self._dynamic_properties = {}
    for key, value in self._dynamic_properties.iteritems():
      entity[key] = value
    # Drop any entity value that is neither a dynamic nor a declared
    # property — it was removed from this instance since the last save.
    all_properties = set(self._dynamic_properties.iterkeys())
    all_properties.update(self._all_properties)
    for key in entity.keys():
      if key not in all_properties:
        del entity[key]

  @classmethod
  def _load_entity_values(cls, entity):
    """Load dynamic properties from entity.

    Expando needs to do a second pass to add the entity values which were
    ignored by Model because they didn't have a corresponding predefined
    property on the model.

    Args:
      entity: Entity which contains values to search dynamic properties
        for.
    """
    entity_values = super(Expando, cls)._load_entity_values(entity)
    for key, value in entity.iteritems():
      if key not in entity_values:
        entity_values[str(key)] = value
    return entity_values
class _BaseQuery(object):
  """Base class for both Query and GqlQuery."""

  # Most recently executed datastore query, kept so that cursor() and
  # index_list() can be answered after a run.
  _last_raw_query = None
  _last_index_list = None
  # Decoded start/end cursors to apply to the next execution.
  _cursor = None
  _end_cursor = None

  def __init__(self, model_class=None):
    """Constructor.

    Args:
      model_class: Model class from which entities are constructed, or
        None for a kind-less query.
    """
    self._model_class = model_class

  def is_keys_only(self):
    """Returns whether this query is keys only.

    Returns:
      True if this query returns keys, False if it returns entities.
    """
    raise NotImplementedError

  def projection(self):
    """Returns the tuple of properties in the projection or None.

    Projected results differ from normal results in multiple ways:
    - they only contain a portion of the original entity and cannot be
      put;
    - properties defined on the model, but not included in the
      projections will have a value of None, even if the property is
      required or has a default value;
    - multi-valued properties (such as a ListProperty) will only contain
      a single value.
    - dynamic properties not included in the projection will not appear
      on the model instance.
    - dynamic properties included in the projection are deserialized into
      their indexed type.  Specifically one of str, bool, long, float,
      GeoPt, Key or User.  If the original type is known, it can be
      restored using datastore_types.RestoreFromIndexValue.

    However, projection queries are significantly faster than normal
    queries.

    Projection queries on entities with multi-valued properties will
    return the same entity multiple times, once for each unique
    combination of values for properties included in the order, an
    inequality property, or the projected properties.

    Returns:
      The list of properties in the projection, or None if no projection
      is set on this query.
    """
    raise NotImplementedError

  def is_distinct(self):
    """Returns true if the projection query should be distinct.

    This is equivalent to the SQL syntax: SELECT DISTINCT.  It is only
    available for projection queries, it is not valid to specify distinct
    without also specifying projection properties.

    Distinct projection queries on entities with multi-valued properties
    will return the same entity multiple times, once for each unique
    combination of properties included in the projection.

    Returns:
      True if this projection query is distinct.
    """
    raise NotImplementedError

  def _get_query(self):
    """Subclass must override (and not call their super method).

    Returns:
      A datastore.Query instance representing the query.
    """
    raise NotImplementedError

  def run(self, **kwargs):
    """Iterator for this query.

    If you know the number of results you need, use run(limit=...)
    instead, or use a GQL query with a LIMIT clause.  It's more
    efficient.  If you want all results use run(batch_size=<large
    number>).

    Args:
      kwargs: Any keyword arguments accepted by
        datastore_query.QueryOptions().

    Returns:
      Iterator for this query.
    """
    raw_query = self._get_query()
    iterator = raw_query.Run(**kwargs)
    # Remember the executed query so cursor()/index_list() can use it.
    self._last_raw_query = raw_query
    # A keys_only keyword argument overrides the query's own setting.
    keys_only = kwargs.get('keys_only')
    if keys_only is None:
      keys_only = self.is_keys_only()
    if keys_only:
      return iterator
    else:
      # Wrap the raw entity iterator so callers get model instances.
      return _QueryIterator(self._model_class, iter(iterator))

  def __iter__(self):
    """Iterator for this query.

    If you know the number of results you need, consider fetch() instead,
    or use a GQL query with a LIMIT clause.  It's more efficient.
    """
    return self.run()

  def __getstate__(self):
    # Drop the raw query object when pickling; it is re-established on
    # the next run().
    state = self.__dict__.copy()
    state['_last_raw_query'] = None
    return state

  def get(self, **kwargs):
    """Get first result from this query.

    Beware: get() ignores the LIMIT clause on GQL queries.

    Args:
      kwargs: Any keyword arguments accepted by
        datastore_query.QueryOptions().

    Returns:
      First result from running the query if there are any, else None.
    """
    results = self.run(limit=1, **kwargs)
    try:
      return results.next()
    except StopIteration:
      return None

  def count(self, limit=1000, **kwargs):
    """Number of entities this query fetches.

    Beware: count() ignores the LIMIT clause on GQL queries.

    Args:
      limit: A number.  If there are more results than this, stop short
        and just return this number.  Providing this argument makes the
        count operation more efficient.
      kwargs: Any keyword arguments accepted by
        datastore_query.QueryOptions().

    Returns:
      Number of entities this query fetches.
    """
    raw_query = self._get_query()
    result = raw_query.Count(limit=limit, **kwargs)
    self._last_raw_query = raw_query
    return result

  def fetch(self, limit, offset=0, **kwargs):
    """Return a list of items selected using SQL-like limit and offset.

    Always use run(limit=...) instead of fetch() when iterating over a
    query.

    Beware: offset must read and discard all skipped entities.  Use
    cursor()/with_cursor() instead.

    Args:
      limit: Maximum number of results to return, or None to fetch all
        results.
      offset: Optional number of results to skip first; default zero.
      kwargs: Any keyword arguments accepted by
        datastore_query.QueryOptions().

    Returns:
      A list of db.Model instances.  There may be fewer than 'limit'
      results if there aren't enough results to satisfy the request.
    """
    if limit is None:
      # No limit: use the largest possible batch size to fetch everything.
      kwargs.setdefault('batch_size', datastore._MAX_INT_32)
    return list(self.run(limit=limit, offset=offset, **kwargs))

  def index_list(self):
    """Get the index list for an already executed query.

    Returns:
      A list of indexes used by the query.

    Raises:
      AssertionError: If the query has not been executed.
    """
    if self._last_raw_query is None:
      raise AssertionError('No index list because query has not been run.')
    if self._last_index_list is None:
      # Convert and cache lazily; the raw list is converted only once.
      raw_index_list = self._last_raw_query.GetIndexList()
      self._last_index_list = [_index_converter(raw_index)
                               for raw_index in raw_index_list]
    return self._last_index_list

  def cursor(self):
    """Get a serialized cursor for an already executed query.

    The returned cursor effectively lets a future invocation of a similar
    query to begin fetching results immediately after the last returned
    result from this query invocation.

    Returns:
      A base64-encoded serialized cursor.

    Raises:
      AssertionError: If the query has not been executed.
    """
    if self._last_raw_query is None:
      raise AssertionError('No cursor available.')
    cursor = self._last_raw_query.GetCursor()
    return websafe_encode_cursor(cursor)

  def with_cursor(self, start_cursor=None, end_cursor=None):
    """Set the start and end of this query using serialized cursors.

    Conceptually cursors point to the position between the last result
    returned and the next result so running a query with each of the
    following cursors combinations will return all results in four chunks
    with no duplicate results:

      query.with_cursor(end_cursor=cursor1)
      query.with_cursors(cursor1, cursor2)
      query.with_cursors(cursor2, cursor3)
      query.with_cursors(start_cursor=cursor3)

    For example if the cursors pointed to:
      cursor:    1   2   3
      result: a b c d e f g h

    The results returned by these queries would be [a, b], [c, d], [e, f],
    [g, h] respectively.

    Cursors are pinned to the position just after the previous result
    (last result, exclusive), so if results are inserted or deleted
    between the time the cursor was made and these queries are executed,
    the cursors stay pinned to these positions.  For example:

      delete(b, f, g, h)
      put(a1, b1, c1, d1)
      cursor:     1(b)     2(d)   3(f)
      result: a a1 b1 c c1 d d1 e

    The results returned by these queries would now be: [a, a1], [b1, c,
    c1, d], [d1, e], [] respectively.

    Args:
      start_cursor: The cursor position at which to start or None
      end_cursor: The cursor position at which to end or None

    Returns:
      This Query instance, for chaining.

    Raises:
      BadValueError when cursor is not valid.
    """
    if start_cursor is None:
      self._cursor = None
    else:
      self._cursor = websafe_decode_cursor(start_cursor)
    if end_cursor is None:
      self._end_cursor = None
    else:
      self._end_cursor = websafe_decode_cursor(end_cursor)
    return self

  def __getitem__(self, arg):
    """Support for query[index] and query[start:stop].

    Beware: this ignores the LIMIT clause on GQL queries.

    Args:
      arg: Either a single integer, corresponding to the query[index]
        syntax, or a Python slice object, corresponding to the
        query[start:stop] or query[start:stop:step] syntax.

    Returns:
      A single Model instance when the argument is a single integer.
      A list of Model instances when the argument is a slice.

    Raises:
      ValueError for open-ended or unsupported slices, IndexError when
      the index is beyond the result count, TypeError for other argument
      types.
    """
    if isinstance(arg, slice):
      start, stop, step = arg.start, arg.stop, arg.step
      if start is None:
        start = 0
      if stop is None:
        raise ValueError('Open-ended slices are not supported')
      if step is None:
        step = 1
      if start < 0 or stop < 0 or step != 1:
        raise ValueError(
            'Only slices with start>=0, stop>=0, step==1 are supported')
      limit = stop - start
      if limit < 0:
        return []
      return self.fetch(limit, start)
    elif isinstance(arg, (int, long)):
      if arg < 0:
        raise ValueError('Only indices >= 0 are supported')
      # Fetch exactly one result at the requested offset.
      results = self.fetch(1, arg)
      if results:
        return results[0]
      else:
        raise IndexError('The query returned fewer than %d results' % (arg+1))
    else:
      raise TypeError('Only integer indices and slices are supported')
class _QueryIterator(object):
  """Wraps the datastore iterator to return Model instances.

  The datastore yields raw entities; this wrapper converts each one to
  the appropriate Model instance as it is consumed.
  """

  def __init__(self, model_class, datastore_iterator):
    """Iterator constructor.

    Args:
      model_class: Model class from which instances are constructed, or
        None to resolve the class from each entity's kind.
      datastore_iterator: Underlying datastore iterator.
    """
    self.__model_class = model_class
    self.__iterator = datastore_iterator

  def __iter__(self):
    """Return self, as required by the iterator protocol."""
    return self

  def next(self):
    """Return the next Model instance in the query results.

    Raises:
      StopIteration when there are no more results in the query.
    """
    model_class = self.__model_class
    if model_class is not None:
      return model_class.from_entity(self.__iterator.next())
    while True:
      entity = self.__iterator.next()
      try:
        resolved_class = class_for_kind(entity.kind())
      except KindError:
        # Silently skip kinds whose names are reserved by the datastore;
        # re-raise for any other unknown kind.
        if datastore_types.RESERVED_PROPERTY_NAME.match(entity.kind()):
          continue
        raise
      return resolved_class.from_entity(entity)
def _normalize_query_parameter(value):
  """Coerce a query parameter into a datastore-queryable value.

  Conversions performed:
  - Model instances become their Key, so that querying reference
    properties will work.
  - datetime.date values become datetime.datetime (see _date_to_datetime
    for details), so that querying date properties with date objects will
    work.
  - datetime.time values become datetime.datetime (see _time_to_datetime
    for details), so that querying time properties with time objects will
    work.

  Args:
    value: The query parameter value.

  Returns:
    The input value, or the converted value when one of the conversions
    above applies.
  """
  if isinstance(value, Model):
    value = value.key()
  if isinstance(value, datetime.datetime):
    # Already a full datetime: no conversion needed.
    return value
  if isinstance(value, datetime.date):
    return _date_to_datetime(value)
  if isinstance(value, datetime.time):
    return _time_to_datetime(value)
  return value
class Query(_BaseQuery):
    """A Query instance queries over instances of Models.

    You construct a query with a model class, like this:

        class Story(db.Model):
            title = db.StringProperty()
            date = db.DateTimeProperty()

        query = Query(Story)

    You modify a query with filters and orders like this:

        query.filter('title =', 'Foo')
        query.order('-date')
        query.ancestor(key_or_model_instance)

    Every query can return an iterator, so you access the results of a query
    by iterating over it:

        for story in query:
            print story.title

    For convenience, all of the filtering and ordering methods return "self",
    so the easiest way to use the query interface is to cascade all filters and
    orders in the iterator line like this:

        for story in Query(story).filter('title =', 'Foo').order('-date'):
            print story.title
    """

    # Class-level defaults: an instance only acquires its own attribute when
    # the corresponding option is explicitly set in __init__.
    _keys_only = False
    _distinct = False
    _projection = None
    _namespace = None
    _app = None
    __ancestor = None

    def __init__(self, model_class=None, keys_only=False, cursor=None,
                 namespace=None, _app=None, distinct=False, projection=None):
        """Constructs a query over instances of the given Model.

        Args:
          model_class: Model class to build query for.
          keys_only: Whether the query should return full entities or only keys.
          projection: A tuple of strings representing the property names to
            include in the projection this query should produce or None.
            Setting a projection is similar to specifying
            'SELECT prop1, prop2, ...' in SQL.
            See _BaseQuery.projection for details on projection queries.
          distinct: A boolean, true if the projection should be distinct.
            See _BaseQuery.is_distinct for details on distinct queries.
          cursor: A compiled query from which to resume.
          namespace: The namespace to use for this query.
        """
        super(Query, self).__init__(model_class)
        if keys_only:
            self._keys_only = True
        if projection:
            self._projection = projection
        if namespace is not None:
            self._namespace = namespace
        if _app is not None:
            self._app = _app
        if distinct:
            self._distinct = True
        # One filter-dict per disjunctive subquery. Disjunctions ('IN', '!=')
        # multiply this list; see __filter_disjunction.
        self.__query_sets = [{}]
        self.__orderings = []
        self.with_cursor(cursor)

    def is_keys_only(self):
        # True when only Keys (not full entities) are returned.
        return self._keys_only

    def projection(self):
        # Tuple of projected property names, or None for full-entity queries.
        return self._projection

    def is_distinct(self):
        # True when the projection should drop duplicate result rows.
        return self._distinct

    def _get_query(self,
                   _query_class=datastore.Query,
                   _multi_query_class=datastore.MultiQuery):
        # Build one low-level datastore.Query per filter set; a disjunction
        # therefore becomes several subqueries merged by _multi_query_class.
        queries = []
        for query_set in self.__query_sets:
            if self._model_class is not None:
                kind = self._model_class.kind()
            else:
                kind = None
            query = _query_class(kind,
                                 query_set,
                                 keys_only=self._keys_only,
                                 projection=self._projection,
                                 distinct=self._distinct,
                                 compile=True,
                                 cursor=self._cursor,
                                 end_cursor=self._end_cursor,
                                 namespace=self._namespace,
                                 _app=self._app)
            query.Order(*self.__orderings)
            if self.__ancestor is not None:
                query.Ancestor(self.__ancestor)
            queries.append(query)
        # A custom _query_class without a matching custom multi-query class
        # only works while a single subquery suffices.
        if (_query_class != datastore.Query and
                _multi_query_class == datastore.MultiQuery):
            warnings.warn(
                'Custom _query_class specified without corresponding custom'
                ' _query_multi_class. Things will break if you use queries with'
                ' the "IN" or "!=" operators.', RuntimeWarning)
            if len(queries) > 1:
                raise datastore_errors.BadArgumentError(
                    'Query requires multiple subqueries to satisfy. If _query_class'
                    ' is overridden, _multi_query_class must also be overridden.')
        elif (_query_class == datastore.Query and
              _multi_query_class != datastore.MultiQuery):
            raise BadArgumentError('_query_class must also be overridden if'
                                   ' _multi_query_class is overridden.')
        if len(queries) == 1:
            return queries[0]
        else:
            return _multi_query_class(queries, self.__orderings)

    def __filter_disjunction(self, operations, values):
        """Add a disjunction of several filters and several values to the query.

        This is implemented by duplicating queries and combining the
        results later.

        Args:
          operations: a string or list of strings. Each string contains a
            property name and an operator to filter by. The operators
            themselves must not require multiple queries to evaluate
            (currently, this means that 'in' and '!=' are invalid).
          values: a value or list of filter values, normalized by
            _normalize_query_parameter.
        """
        if not isinstance(operations, (list, tuple)):
            operations = [operations]
        if not isinstance(values, (list, tuple)):
            values = [values]
        new_query_sets = []
        for operation in operations:
            # Nested disjunctions would need a cross-product of subqueries;
            # they are rejected outright.
            if operation.lower().endswith('in') or operation.endswith('!='):
                raise BadQueryError('Cannot use "in" or "!=" in a disjunction.')
            for query_set in self.__query_sets:
                for value in values:
                    # Deep-copy so each branch of the disjunction evolves
                    # independently of the others.
                    new_query_set = copy.deepcopy(query_set)
                    datastore._AddOrAppend(new_query_set, operation, value)
                    new_query_sets.append(new_query_set)
        self.__query_sets = new_query_sets

    def filter(self, property_operator, value):
        """Add filter to query.

        Args:
          property_operator: string with the property and operator to filter by.
          value: the filter value.

        Returns:
          Self to support method chaining.

        Raises:
          PropertyError if invalid property is provided.
        """
        # NOTE(review): assumes property_operator matches _FILTER_REGEX; a
        # non-matching string would make match None and raise AttributeError.
        match = _FILTER_REGEX.match(property_operator)
        prop = match.group(1)
        if match.group(3) is not None:
            operator = match.group(3)
        else:
            operator = '=='
        if self._model_class is None:
            # Kindless queries may only filter on the special key property.
            if prop != datastore_types.KEY_SPECIAL_PROPERTY:
                raise BadQueryError(
                    'Only %s filters are allowed on kindless queries.' %
                    datastore_types.KEY_SPECIAL_PROPERTY)
        elif prop in self._model_class._unindexed_properties:
            raise PropertyError('Property \'%s\' is not indexed' % prop)
        if operator.lower() == 'in':
            if self._keys_only:
                raise BadQueryError('Keys only queries do not support IN filters.')
            elif not isinstance(value, (list, tuple)):
                raise BadValueError('Argument to the "in" operator must be a list')
            values = [_normalize_query_parameter(v) for v in value]
            # 'IN' becomes a disjunction of equality filters, one per value.
            self.__filter_disjunction(prop + ' =', values)
        else:
            if isinstance(value, (list, tuple)):
                raise BadValueError('Filtering on lists is not supported')
            if operator == '!=':
                if self._keys_only:
                    raise BadQueryError('Keys only queries do not support != filters.')
                # '!=' is decomposed into the disjunction '<' OR '>'.
                self.__filter_disjunction([prop + ' <', prop + ' >'],
                                          _normalize_query_parameter(value))
            else:
                value = _normalize_query_parameter(value)
                for query_set in self.__query_sets:
                    datastore._AddOrAppend(query_set, property_operator, value)
        return self

    def order(self, property):
        """Set order of query result.

        To use descending order, prepend '-' (minus) to the property
        name, e.g., '-date' rather than 'date'.

        Args:
          property: Property to sort on.

        Returns:
          Self to support method chaining.

        Raises:
          PropertyError if invalid property is provided.
        """
        if property.startswith('-'):
            property = property[1:]
            order = datastore.Query.DESCENDING
        else:
            order = datastore.Query.ASCENDING
        if self._model_class is None:
            # Kindless queries only support ascending order on the key.
            if (property != datastore_types.KEY_SPECIAL_PROPERTY or
                    order != datastore.Query.ASCENDING):
                raise BadQueryError(
                    'Only %s ascending orders are supported on kindless queries' %
                    datastore_types.KEY_SPECIAL_PROPERTY)
        else:
            # Expando models accept arbitrary (dynamic) property names, so the
            # existence check is skipped for them.
            if not issubclass(self._model_class, Expando):
                if (property not in self._model_class._all_properties and
                        property not in datastore_types._SPECIAL_PROPERTIES):
                    raise PropertyError('Invalid property name \'%s\'' % property)
            if property in self._model_class._unindexed_properties:
                raise PropertyError('Property \'%s\' is not indexed' % property)
        self.__orderings.append((property, order))
        return self

    def ancestor(self, ancestor):
        """Sets an ancestor for this query.

        This restricts the query to only return results that descend from
        a given model instance. In other words, all of the results will
        have the ancestor as their parent, or parent's parent, etc. The
        ancestor itself is also a possible result!

        Args:
          ancestor: Model or Key (that has already been saved)

        Returns:
          Self to support method chaining.

        Raises:
          TypeError if the argument isn't a Key or Model; NotSavedError
          if it is, but isn't saved yet.
        """
        if isinstance(ancestor, datastore.Key):
            if ancestor.has_id_or_name():
                self.__ancestor = ancestor
            else:
                raise NotSavedError()
        elif isinstance(ancestor, Model):
            if ancestor.has_key():
                self.__ancestor = ancestor.key()
            else:
                raise NotSavedError()
        else:
            raise TypeError('ancestor should be Key or Model')
        return self
class GqlQuery(_BaseQuery):
    """A Query class that uses GQL query syntax instead of .filter() etc."""

    def __init__(self, query_string, *args, **kwds):
        """Constructor.

        Args:
          query_string: Properly formatted GQL query string.
          *args: Positional arguments used to bind numeric references in the
            query.
          **kwds: Dictionary-based arguments for named references.

        Raises:
          PropertyError if the query filters or sorts on a property that's not
          indexed.
        """
        # Deferred import avoids a circular dependency between db and gql.
        from google.appengine.ext import gql
        app = kwds.pop('_app', None)
        namespace = None
        # _app may be passed as an (app, namespace) pair.
        if isinstance(app, tuple):
            if len(app) != 2:
                raise BadArgumentError('_app must have 2 values if type is tuple.')
            app, namespace = app
        self._proto_query = gql.GQL(query_string, _app=app, namespace=namespace)
        if self._proto_query._kind is not None:
            model_class = class_for_kind(self._proto_query._kind)
        else:
            model_class = None
        super(GqlQuery, self).__init__(model_class)
        if model_class is not None:
            # Both filters().keys() and orderings() yield 2-tuples whose first
            # element is the property name; reject unindexed properties early.
            for property, unused in (self._proto_query.filters().keys() +
                                     self._proto_query.orderings()):
                if property in model_class._unindexed_properties:
                    raise PropertyError('Property \'%s\' is not indexed' % property)
        self.bind(*args, **kwds)

    def is_keys_only(self):
        # Delegates to the parsed GQL query (SELECT __key__ ...).
        return self._proto_query._keys_only

    def projection(self):
        # Tuple of projected property names from the parsed query, or None.
        return self._proto_query.projection()

    def is_distinct(self):
        # True for 'SELECT DISTINCT ...' queries.
        return self._proto_query.is_distinct()

    def bind(self, *args, **kwds):
        """Bind arguments (positional or keyword) to the query.

        Note that you can also pass arguments directly to the query
        constructor. Each time you call bind() the previous set of
        arguments is replaced with the new set. This is useful because
        the hard work is in parsing the query; so if you expect to be
        using the same query with different sets of arguments, you should
        hold on to the GqlQuery() object and call bind() on it each time.

        Args:
          *args: Positional arguments used to bind numeric references in the
            query.
          **kwds: Dictionary-based arguments for named references.
        """
        self._args = []
        for arg in args:
            self._args.append(_normalize_query_parameter(arg))
        self._kwds = {}
        for name, arg in kwds.iteritems():
            self._kwds[name] = _normalize_query_parameter(arg)

    def run(self, **kwargs):
        """Iterator for this query that handles the LIMIT clause property.

        If the GQL query string contains a LIMIT clause, this function fetches
        all results before returning an iterator. Otherwise results are
        retrieved in batches by the iterator.

        Args:
          kwargs: Any keyword arguments accepted by
            datastore_query.QueryOptions().

        Returns:
          Iterator for this query.
        """
        if self._proto_query.limit() > 0:
            kwargs.setdefault('limit', self._proto_query.limit())
        kwargs.setdefault('offset', self._proto_query.offset())
        return _BaseQuery.run(self, **kwargs)

    def _get_query(self):
        # Bind current arguments and cursors into a runnable low-level query.
        return self._proto_query.Bind(self._args, self._kwds,
                                      self._cursor, self._end_cursor)
class UnindexedProperty(Property):
    """A property that isn't indexed by either built-in or composite indices.

    TextProperty and BlobProperty derive from this class.
    """

    def __init__(self, *args, **kwds):
        """Construct property. See the Property class for details.

        Raises:
          ConfigurationError if indexed=True.
        """
        # Callers must not pass indexed=True (enforced here); internally the
        # flag is then forced to True — a historical quirk of the base
        # Property plumbing, not a bug.
        self._require_parameter(kwds, 'indexed', False)
        kwds['indexed'] = True
        super(UnindexedProperty, self).__init__(*args, **kwds)

    def validate(self, value):
        """Validate property.

        Non-None values that are not already of self.data_type are first
        coerced by calling the data_type constructor.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not an instance of data_type.
        """
        if value is not None and not isinstance(value, self.data_type):
            try:
                value = self.data_type(value)
            except TypeError, err:
                raise BadValueError('Property %s must be convertible '
                                    'to a %s instance (%s)' %
                                    (self.name, self.data_type.__name__, err))
        value = super(UnindexedProperty, self).validate(value)
        # Re-check: the base validate (choices/validator) may substitute a
        # value of the wrong type.
        if value is not None and not isinstance(value, self.data_type):
            raise BadValueError('Property %s must be a %s instance' %
                                (self.name, self.data_type.__name__))
        return value
class TextProperty(UnindexedProperty):
    """A string that can be longer than 500 bytes."""
    # UnindexedProperty.validate coerces non-None values to this type.
    data_type = Text
class StringProperty(Property):
    """A textual property, which can be multi- or single-line."""

    MAX_LENGTH = 500
    data_type = basestring

    def __init__(self, verbose_name=None, multiline=False, **kwds):
        """Construct string property.

        Args:
          verbose_name: Verbose name is always first parameter.
          multiline: Whether carriage returns are permitted in the value.
        """
        super(StringProperty, self).__init__(verbose_name, **kwds)
        self.multiline = multiline

    def validate(self, value):
        """Validate string property.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not multi-line but value is.
        """
        value = super(StringProperty, self).validate(value)
        if value is None:
            return value
        # Checks run in a fixed order: type, then newline policy, then length.
        if not isinstance(value, basestring):
            raise BadValueError(
                'Property %s must be a str or unicode instance, not a %s'
                % (self.name, type(value).__name__))
        if not self.multiline and value and '\n' in value:
            raise BadValueError('Property %s is not multi-line' % self.name)
        if len(value) > self.MAX_LENGTH:
            raise BadValueError(
                'Property %s is %d characters long; it must be %d or less.'
                % (self.name, len(value), self.MAX_LENGTH))
        return value
class _CoercingProperty(Property):
    """A Property subclass that extends validate() to coerce to self.data_type."""

    def validate(self, value):
        """Coerce values (except None) to self.data_type.

        Args:
          value: The value to be validated and coerced.

        Returns:
          The coerced and validated value. It is guaranteed that this is
          either None or an instance of self.data_type; otherwise an exception
          is raised.

        Raises:
          BadValueError if the value could not be validated or coerced.
        """
        checked = super(_CoercingProperty, self).validate(value)
        if checked is None or isinstance(checked, self.data_type):
            return checked
        return self.data_type(checked)
class CategoryProperty(_CoercingProperty):
    """A property whose values are Category instances."""
    # _CoercingProperty.validate coerces non-None values to this type.
    data_type = Category
class LinkProperty(_CoercingProperty):
    """A property whose values are Link instances."""

    data_type = Link

    def validate(self, value):
        """Coerce to Link, then require a scheme and a network location."""
        value = super(LinkProperty, self).validate(value)
        if value is None:
            return value
        parts = urlparse.urlsplit(value)
        if not (parts.scheme and parts.netloc):
            raise BadValueError('Property %s must be a full URL (\'%s\')' %
                                (self.name, value))
        return value


# Historical alias kept for backward compatibility.
URLProperty = LinkProperty
class EmailProperty(_CoercingProperty):
    """A property whose values are Email instances."""
    # _CoercingProperty.validate coerces non-None values to this type.
    data_type = Email
class GeoPtProperty(_CoercingProperty):
    """A property whose values are GeoPt instances."""
    # _CoercingProperty.validate coerces non-None values to this type.
    data_type = GeoPt
class IMProperty(_CoercingProperty):
    """A property whose values are IM instances."""
    # _CoercingProperty.validate coerces non-None values to this type.
    data_type = IM
class PhoneNumberProperty(_CoercingProperty):
    """A property whose values are PhoneNumber instances."""
    # _CoercingProperty.validate coerces non-None values to this type.
    data_type = PhoneNumber
class PostalAddressProperty(_CoercingProperty):
    """A property whose values are PostalAddress instances."""
    # _CoercingProperty.validate coerces non-None values to this type.
    data_type = PostalAddress
class BlobProperty(UnindexedProperty):
    """A byte string that can be longer than 500 bytes."""
    # UnindexedProperty.validate coerces non-None values to this type.
    data_type = Blob
class ByteStringProperty(Property):
    """A short (<=500 bytes) byte string.

    This type should be used for short binary values that need to be indexed. If
    you do not require indexing (regardless of length), use BlobProperty
    instead.
    """

    def validate(self, value):
        """Validate ByteString property.

        Non-None values that are not already ByteString are first coerced by
        the ByteString constructor.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'ByteString'.
        """
        if value is not None and not isinstance(value, ByteString):
            try:
                value = ByteString(value)
            except TypeError, err:
                raise BadValueError('Property %s must be convertible '
                                    'to a ByteString instance (%s)' % (self.name, err))
        value = super(ByteStringProperty, self).validate(value)
        # Re-check: the base validate (choices/validator) may substitute a
        # value of the wrong type.
        if value is not None and not isinstance(value, ByteString):
            raise BadValueError('Property %s must be a ByteString instance'
                                % self.name)
        if value is not None and len(value) > self.MAX_LENGTH:
            raise BadValueError(
                'Property %s is %d bytes long; it must be %d or less.'
                % (self.name, len(value), self.MAX_LENGTH))
        return value

    # Indexed values are capped at 500 bytes.
    MAX_LENGTH = 500
    data_type = ByteString
class DateTimeProperty(Property):
    """The base class of all of our date/time properties.

    We handle common operations, like converting between time tuples and
    datetime instances.
    """

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False,
                 **kwds):
        """Construct a DateTimeProperty

        Args:
          verbose_name: Verbose name is always first parameter.
          auto_now: Date/time property is updated with the current time every
            time it is saved to the datastore. Useful for properties that want
            to track the modification time of an instance.
          auto_now_add: Date/time is set to the when its instance is created.
            Useful for properties that record the creation time of an entity.
        """
        super(DateTimeProperty, self).__init__(verbose_name, **kwds)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def validate(self, value):
        """Validate datetime.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'datetime'.
        """
        value = super(DateTimeProperty, self).validate(value)
        # NOTE(review): this guard uses truthiness, not 'is not None', so falsy
        # non-datetime values (e.g. 0, '') skip the type check — confirm
        # whether that leniency is intentional before tightening.
        if value and not isinstance(value, self.data_type):
            raise BadValueError('Property %s must be a %s, but was %r' %
                                (self.name, self.data_type.__name__, value))
        return value

    def default_value(self):
        """Default value for datetime.

        Returns:
          value of now() as appropriate to the date-time instance if auto_now
          or auto_now_add is set, else user configured default value
          implementation.
        """
        if self.auto_now or self.auto_now_add:
            return self.now()
        return Property.default_value(self)

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
          now() as appropriate to the date-time instance in the odd case where
          auto_now is set to True, else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_now:
            return self.now()
        return AUTO_UPDATE_UNCHANGED

    data_type = datetime.datetime

    @staticmethod
    def now():
        """Get now as a full datetime value.

        Returns:
          'now' as a whole timestamp, including both time and date.
        """
        # UTC is used throughout; subclasses narrow this to date or time.
        return datetime.datetime.utcnow()
def _date_to_datetime(value):
"""Convert a date to a datetime for datastore storage.
Args:
value: A datetime.date object.
Returns:
A datetime object with time set to 0:00.
"""
assert isinstance(value, datetime.date)
return datetime.datetime(value.year, value.month, value.day)
def _time_to_datetime(value):
"""Convert a time to a datetime for datastore storage.
Args:
value: A datetime.time object.
Returns:
A datetime object with date set to 1970-01-01.
"""
assert isinstance(value, datetime.time)
return datetime.datetime(1970, 1, 1,
value.hour, value.minute, value.second,
value.microsecond)
class DateProperty(DateTimeProperty):
    """A date property, which stores a date without a time."""

    @staticmethod
    def now():
        """Get now as a date datetime value.

        Returns:
          'date' part of 'now' only.
        """
        return datetime.datetime.utcnow().date()

    def validate(self, value):
        """Validate date.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'date',
          or if it is an instance of 'datetime' (which is a subclass
          of 'date', but for all practical purposes a different type).
        """
        value = super(DateProperty, self).validate(value)
        # datetime subclasses date, so the base check alone would accept it;
        # reject it explicitly here.
        if isinstance(value, datetime.datetime):
            raise BadValueError('Property %s must be a %s, not a datetime' %
                                (self.name, self.data_type.__name__))
        return value

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
          now() as appropriate to the date instance in the odd case where
          auto_now is set to True, else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_now:
            return _date_to_datetime(self.now())
        return AUTO_UPDATE_UNCHANGED

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        We retrieve a datetime.date from the model instance and return a
        datetime.datetime instance with the time set to zero.

        See base class method documentation for details.
        """
        value = super(DateProperty, self).get_value_for_datastore(model_instance)
        if value is not None:
            assert isinstance(value, datetime.date)
            value = _date_to_datetime(value)
        return value

    def make_value_from_datastore(self, value):
        """Native representation of this property.

        We receive a datetime.datetime retrieved from the entity and return
        a datetime.date instance representing its date portion.

        See base class method documentation for details.
        """
        if value is not None:
            assert isinstance(value, datetime.datetime)
            value = value.date()
        return value

    data_type = datetime.date
class TimeProperty(DateTimeProperty):
    """A time property, which stores a time without a date."""

    @staticmethod
    def now():
        """Get now as a time datetime value.

        Returns:
          'time' part of 'now' only.
        """
        return datetime.datetime.utcnow().time()

    def empty(self, value):
        """Is time property empty.

        "0:0" (midnight) is not an empty value.

        Returns:
          True if value is None, else False.
        """
        # midnight (datetime.time(0, 0)) is falsy in Python 2, so the default
        # truthiness-based empty() would misclassify it; compare to None.
        return value is None

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
          now() as appropriate to the time instance in the odd case where
          auto_now is set to True, else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_now:
            return _time_to_datetime(self.now())
        return AUTO_UPDATE_UNCHANGED

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        We retrieve a datetime.time from the model instance and return a
        datetime.datetime instance with the date set to 1/1/1970.

        See base class method documentation for details.
        """
        value = super(TimeProperty, self).get_value_for_datastore(model_instance)
        if value is not None:
            assert isinstance(value, datetime.time), repr(value)
            value = _time_to_datetime(value)
        return value

    def make_value_from_datastore(self, value):
        """Native representation of this property.

        We receive a datetime.datetime retrieved from the entity and return
        a datetime.time instance representing its time portion.

        See base class method documentation for details.
        """
        if value is not None:
            assert isinstance(value, datetime.datetime)
            value = value.time()
        return value

    data_type = datetime.time
class IntegerProperty(Property):
    """An integer property."""

    data_type = int

    def validate(self, value):
        """Validate integer property.

        Returns:
          A valid value.

        Raises:
          BadValueError if value is not an integer or long instance.
        """
        value = super(IntegerProperty, self).validate(value)
        if value is None:
            return value
        # bool subclasses int, so reject it explicitly.
        if isinstance(value, bool) or not isinstance(value, (int, long)):
            raise BadValueError('Property %s must be an int or long, not a %s'
                                % (self.name, type(value).__name__))
        # The datastore stores integers as signed 64-bit values.
        if not (-0x8000000000000000 <= value <= 0x7fffffffffffffff):
            raise BadValueError('Property %s must fit in 64 bits' % self.name)
        return value

    def empty(self, value):
        """Is integer property empty.

        0 is a legitimate value, so only None counts as empty.

        Returns:
          True if value is None, else False.
        """
        return value is None
class RatingProperty(_CoercingProperty, IntegerProperty):
    """A property whose values are Rating instances."""
    # Inherits coercion from _CoercingProperty and the range/type checks
    # from IntegerProperty (MRO order matters here).
    data_type = Rating
class FloatProperty(Property):
    """A float property."""

    data_type = float

    def validate(self, value):
        """Validate float.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'float'.
        """
        value = super(FloatProperty, self).validate(value)
        if value is None or isinstance(value, float):
            return value
        raise BadValueError('Property %s must be a float' % self.name)

    def empty(self, value):
        """Is float property empty.

        0.0 is a legitimate value, so only None counts as empty.

        Returns:
          True if value is None, else False.
        """
        return value is None
class BooleanProperty(Property):
    """A boolean property."""

    data_type = bool

    def validate(self, value):
        """Validate boolean.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'bool'.
        """
        value = super(BooleanProperty, self).validate(value)
        if value is None or isinstance(value, bool):
            return value
        raise BadValueError('Property %s must be a bool' % self.name)

    def empty(self, value):
        """Is boolean property empty.

        False is a legitimate value, so only None counts as empty.

        Returns:
          True if value is None, else False.
        """
        return value is None
class UserProperty(Property):
    """A user property."""

    def __init__(self,
                 verbose_name=None,
                 name=None,
                 required=False,
                 validator=None,
                 choices=None,
                 auto_current_user=False,
                 auto_current_user_add=False,
                 indexed=True):
        """Initializes this Property with the given options.

        Note: this does *not* support the 'default' keyword argument.
        Use auto_current_user_add=True instead.

        Args:
          verbose_name: User friendly name of property.
          name: Storage name for property. By default, uses attribute name
            as it is assigned in the Model sub-class.
          required: Whether property is required.
          validator: User provided method used for validation.
          choices: User provided set of valid property values.
          auto_current_user: If true, the value is set to the current user
            each time the entity is written to the datastore.
          auto_current_user_add: If true, the value is set to the current user
            the first time the entity is written to the datastore.
          indexed: Whether property is indexed.
        """
        # 'default' is deliberately not forwarded; the signature takes only
        # the options this property supports.
        super(UserProperty, self).__init__(verbose_name, name,
                                           required=required,
                                           validator=validator,
                                           choices=choices,
                                           indexed=indexed)
        self.auto_current_user = auto_current_user
        self.auto_current_user_add = auto_current_user_add

    def validate(self, value):
        """Validate user.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'User'.
        """
        value = super(UserProperty, self).validate(value)
        if value is not None and not isinstance(value, users.User):
            raise BadValueError('Property %s must be a User' % self.name)
        return value

    def default_value(self):
        """Default value for user.

        Returns:
          Value of users.get_current_user() if auto_current_user or
          auto_current_user_add is set; else None. (But *not* the default
          implementation, since we don't support the 'default' keyword
          argument.)
        """
        if self.auto_current_user or self.auto_current_user_add:
            return users.get_current_user()
        return None

    def get_updated_value_for_datastore(self, model_instance):
        """Get new value for property to send to datastore.

        Returns:
          Value of users.get_current_user() if auto_current_user is set;
          else AUTO_UPDATE_UNCHANGED.
        """
        if self.auto_current_user:
            return users.get_current_user()
        return AUTO_UPDATE_UNCHANGED

    data_type = users.User
class ListProperty(Property):
    """A property that stores a list of things.

    This is a parameterized property; the parameter must be a valid
    non-list data type, and all items must conform to this type.
    """

    def __init__(self, item_type, verbose_name=None, default=None, **kwds):
        """Construct ListProperty.

        Args:
          item_type: Type for the list items; must be one of the allowed
            property types.
          verbose_name: Optional verbose name.
          default: Optional default value; if omitted, an empty list is used.
          **kwds: Optional additional keyword arguments, passed to base class.

        Note that the only permissible value for 'required' is True.
        """
        # str items are widened to basestring so both str and unicode pass.
        if item_type is str:
            item_type = basestring
        if not isinstance(item_type, type):
            raise TypeError('Item type should be a type object')
        if item_type not in _ALLOWED_PROPERTY_TYPES:
            raise ValueError('Item type %s is not acceptable' % item_type.__name__)
        if issubclass(item_type, (Blob, Text)):
            # Blob/Text items cannot be indexed; same indexed=False-then-True
            # quirk as UnindexedProperty.__init__.
            self._require_parameter(kwds, 'indexed', False)
            kwds['indexed'] = True
        # Lists are always 'required': None is represented by [] instead.
        self._require_parameter(kwds, 'required', True)
        if default is None:
            default = []
        self.item_type = item_type
        super(ListProperty, self).__init__(verbose_name,
                                           default=default,
                                           **kwds)

    def validate(self, value):
        """Validate list.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not a list whose items are instances of
          the item_type given to the constructor.
        """
        value = super(ListProperty, self).validate(value)
        if value is not None:
            if not isinstance(value, list):
                raise BadValueError('Property %s must be a list' % self.name)
            value = self.validate_list_contents(value)
        return value

    def _load(self, model_instance, value):
        # The datastore returns a bare scalar for single-item lists; wrap it.
        if not isinstance(value, list):
            value = [value]
        return super(ListProperty, self)._load(model_instance, value)

    def validate_list_contents(self, value):
        """Validates that all items in the list are of the correct type.

        Returns:
          The validated list.

        Raises:
          BadValueError if the list has items are not instances of the
          item_type given to the constructor.
        """
        # int and long are interchangeable item types.
        if self.item_type in (int, long):
            item_type = (int, long)
        else:
            item_type = self.item_type
        for item in value:
            if not isinstance(item, item_type):
                if item_type == (int, long):
                    raise BadValueError('Items in the %s list must all be integers.' %
                                        self.name)
                else:
                    raise BadValueError(
                        'Items in the %s list must all be %s instances' %
                        (self.name, self.item_type.__name__))
        return value

    def empty(self, value):
        """Is list property empty.

        [] is not an empty value.

        Returns:
          True if value is None, else false.
        """
        return value is None

    data_type = list

    def default_value(self):
        """Default value for list.

        Because the property supplied to 'default' is a static value,
        that value must be shallow copied to prevent all fields with
        default values from sharing the same instance.

        Returns:
          Copy of the default value.
        """
        return list(super(ListProperty, self).default_value())

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        Returns:
          validated list appropriate to save in the datastore.
        """
        value = super(ListProperty, self).get_value_for_datastore(model_instance)
        if not value:
            return value
        value = self.validate_list_contents(value)
        if self.validator:
            self.validator(value)
        # Dates and times are stored as datetimes; convert each item.
        if self.item_type == datetime.date:
            value = map(_date_to_datetime, value)
        elif self.item_type == datetime.time:
            value = map(_time_to_datetime, value)
        return value

    def make_value_from_datastore(self, value):
        """Native representation of this property.

        If this list is a list of datetime.date or datetime.time, we convert
        the list of datetime.datetime retrieved from the entity into
        datetime.date or datetime.time.

        See base class method documentation for details.
        """
        if self.item_type == datetime.date:
            for v in value:
                assert isinstance(v, datetime.datetime)
            value = map(lambda x: x.date(), value)
        elif self.item_type == datetime.time:
            for v in value:
                assert isinstance(v, datetime.datetime)
            value = map(lambda x: x.time(), value)
        return value

    def make_value_from_datastore_index_value(self, index_value):
        # Index values arrive one at a time; wrap in a single-item list so the
        # normal datastore conversion applies.
        value = [datastore_types.RestoreFromIndexValue(index_value, self.item_type)]
        return self.make_value_from_datastore(value)
class StringListProperty(ListProperty):
    """A property that stores a list of strings.

    A shorthand for the most common type of ListProperty.
    """

    def __init__(self, verbose_name=None, default=None, **kwds):
        """Construct StringListProperty.

        Args:
          verbose_name: Optional verbose name.
          default: Optional default value; if omitted, an empty list is used.
          **kwds: Optional additional keyword arguments, passed to
            ListProperty().
        """
        # Simply a ListProperty fixed to basestring items.
        super(StringListProperty, self).__init__(
            basestring, verbose_name=verbose_name, default=default, **kwds)
class ReferenceProperty(Property):
  """A property that represents a many-to-one reference to another model.
  For example, a reference property in model A that refers to model B forms
  a many-to-one relationship from A to B: every instance of A refers to a
  single B instance, and every B instance can have many A instances refer
  to it.
  """
  def __init__(self,
               reference_class=None,
               verbose_name=None,
               collection_name=None,
               **attrs):
    """Construct ReferenceProperty.
    Args:
      reference_class: Which model class this property references.
      verbose_name: User friendly name of property.
      collection_name: If provided, alternate name of collection on
        reference_class to store back references.  Use this to allow
        a Model to have multiple fields which refer to the same class.
    """
    super(ReferenceProperty, self).__init__(verbose_name, **attrs)
    self.collection_name = collection_name
    # Defaulting to Model allows a reference to an entity of any kind.
    if reference_class is None:
      reference_class = Model
    if not ((isinstance(reference_class, type) and
             issubclass(reference_class, Model)) or
            reference_class is _SELF_REFERENCE):
      raise KindError('reference_class must be Model or _SELF_REFERENCE')
    self.reference_class = self.data_type = reference_class
  def make_value_from_datastore_index_value(self, index_value):
    # Index values hold the raw key; restore to a Key before converting.
    value = datastore_types.RestoreFromIndexValue(index_value, Key)
    return self.make_value_from_datastore(value)
  def __property_config__(self, model_class, property_name):
    """Loads all of the references that point to this model.
    We need to do this to create the ReverseReferenceProperty properties for
    this model and create the <reference>_set attributes on the referenced
    model, e.g.:
       class Story(db.Model):
         title = db.StringProperty()
       class Comment(db.Model):
         story = db.ReferenceProperty(Story)
       story = Story.get(id)
       print [c for c in story.comment_set]
    In this example, the comment_set property was created based on the reference
    from Comment to Story (which is inherently one to many).
    Args:
      model_class: Model class which will have its reference properties
        initialized.
      property_name: Name of property being configured.
    Raises:
      DuplicatePropertyError if referenced class already has the provided
      collection name as a property.
    """
    super(ReferenceProperty, self).__property_config__(model_class,
                                                       property_name)
    # Resolve the self-reference sentinel now that the owning class is known.
    if self.reference_class is _SELF_REFERENCE:
      self.reference_class = self.data_type = model_class
    if self.collection_name is None:
      self.collection_name = '%s_set' % (model_class.__name__.lower())
    existing_prop = getattr(self.reference_class, self.collection_name, None)
    if existing_prop is not None:
      # Allow re-declaring the same reverse reference (same property name and
      # same model identity); any other existing attribute is a collision.
      if not (isinstance(existing_prop, _ReverseReferenceProperty) and
              existing_prop._prop_name == property_name and
              existing_prop._model.__name__ == model_class.__name__ and
              existing_prop._model.__module__ == model_class.__module__):
        raise DuplicatePropertyError('Class %s already has property %s '
                                     % (self.reference_class.__name__,
                                        self.collection_name))
    setattr(self.reference_class,
            self.collection_name,
            _ReverseReferenceProperty(model_class, property_name))
  def __get__(self, model_instance, model_class):
    """Get reference object.
    This method will fetch unresolved entities from the datastore if
    they are not already loaded.
    Returns:
      ReferenceProperty to Model object if property is set, else None.
    Raises:
      ReferencePropertyResolveError: if the referenced model does not exist.
    """
    # Accessed on the class itself: return the descriptor.
    if model_instance is None:
      return self
    if hasattr(model_instance, self.__id_attr_name()):
      reference_id = getattr(model_instance, self.__id_attr_name())
    else:
      reference_id = None
    if reference_id is not None:
      # Use the cached resolved entity when available; otherwise fetch it
      # from the datastore and cache it on the instance.
      resolved = getattr(model_instance, self.__resolved_attr_name())
      if resolved is not None:
        return resolved
      else:
        instance = get(reference_id)
        if instance is None:
          raise ReferencePropertyResolveError(
              'ReferenceProperty failed to be resolved: %s' %
              reference_id.to_path())
        setattr(model_instance, self.__resolved_attr_name(), instance)
        return instance
    else:
      return None
  def __set__(self, model_instance, value):
    """Set reference."""
    value = self.validate(value)
    if value is not None:
      if isinstance(value, datastore.Key):
        # A bare key: store it and clear the resolved-instance cache so the
        # entity is fetched lazily on next access.
        setattr(model_instance, self.__id_attr_name(), value)
        setattr(model_instance, self.__resolved_attr_name(), None)
      else:
        # A model instance: store both its key and the instance itself.
        setattr(model_instance, self.__id_attr_name(), value.key())
        setattr(model_instance, self.__resolved_attr_name(), value)
    else:
      setattr(model_instance, self.__id_attr_name(), None)
      setattr(model_instance, self.__resolved_attr_name(), None)
  def get_value_for_datastore(self, model_instance):
    """Get key of reference rather than reference itself."""
    return getattr(model_instance, self.__id_attr_name())
  def validate(self, value):
    """Validate reference.
    Returns:
      A valid value.
    Raises:
      BadValueError for the following reasons:
        - Value is not saved.
        - Object not of correct model type for reference.
    """
    if isinstance(value, datastore.Key):
      return value
    # An unsaved instance has no complete key and cannot be referenced.
    if value is not None and not value.has_key():
      raise BadValueError(
          '%s instance must have a complete key before it can be stored as a '
          'reference' % self.reference_class.kind())
    value = super(ReferenceProperty, self).validate(value)
    if value is not None and not isinstance(value, self.reference_class):
      raise KindError('Property %s must be an instance of %s' %
                      (self.name, self.reference_class.kind()))
    return value
  def __id_attr_name(self):
    """Get attribute of referenced id.
    Returns:
      Attribute where to store id of referenced entity.
    """
    return self._attr_name()
  def __resolved_attr_name(self):
    """Get attribute of resolved attribute.
    The resolved attribute is where the actual loaded reference instance is
    stored on the referring model instance.
    Returns:
      Attribute name of where to store resolved reference model instance.
    """
    return '_RESOLVED' + self._attr_name()
# Short public alias for ReferenceProperty.
Reference = ReferenceProperty
def SelfReferenceProperty(verbose_name=None, collection_name=None, **attrs):
  """Declare a reference property that points back at its own model class.
  Example:
    class HtmlNode(db.Model):
      parent = db.SelfReferenceProperty('Parent', 'children')
  Args:
    verbose_name: User friendly name of property.
    collection_name: Name of collection on model.
  Returns:
    A ReferenceProperty configured with the self-reference sentinel.
  Raises:
    ConfigurationError if reference_class provided as parameter.
  """
  # The reference class is implicitly the declaring model itself; callers
  # must not try to override it.
  if 'reference_class' in attrs:
    raise ConfigurationError(
        'Do not provide reference_class to self-reference.')
  return ReferenceProperty(
      _SELF_REFERENCE, verbose_name, collection_name, **attrs)
# Short public alias for SelfReferenceProperty.
SelfReference = SelfReferenceProperty
class _ReverseReferenceProperty(Property):
  """The inverse of the Reference property above.
  We construct reverse references automatically for the model to which
  the Reference property is pointing to create the one-to-many property for
  that model.  For example, if you put a Reference property in model A that
  refers to model B, we automatically create a _ReverseReference property in
  B called a_set that can fetch all of the model A instances that refer to
  that instance of model B.
  """
  def __init__(self, model, prop):
    """Constructor for reverse reference.
    Constructor does not take standard values of other property types.
    Args:
      model: Model class that this property is a collection of.
      property: Name of foreign property on referred model that points back
        to this properties entity.
    """
    # Double-underscore (name-mangled) attributes keep these internals from
    # colliding with attributes of Property subclasses.
    self.__model = model
    self.__property = prop
  @property
  def _model(self):
    """Internal helper to access the model class, read-only."""
    return self.__model
  @property
  def _prop_name(self):
    """Internal helper to access the property name, read-only."""
    return self.__property
  def __get__(self, model_instance, model_class):
    """Fetches collection of model instances of this collection property."""
    if model_instance is not None:
      # Query the referring model for entities whose foreign key property
      # equals this instance's key (the one-to-many "back reference").
      query = Query(self.__model)
      return query.filter(self.__property + ' =', model_instance.key())
    else:
      # Accessed on the class itself: return the descriptor.
      return self
  def __set__(self, model_instance, value):
    """Not possible to set a new collection."""
    raise BadValueError('Virtual property is read-only')
class ComputedProperty(Property):
  """Property used for creating properties derived from other values.
  Certain attributes should never be set by users but automatically
  calculated at run-time from other values of the same entity.  These
  values are implemented as persistent properties because they provide
  useful search keys.
  A computed property behaves the same as normal properties except that
  you may not set values on them.  Attempting to do so raises
  db.DerivedPropertyError which db.Model knows to ignore during entity
  loading time.  Whenever getattr is used for the property
  the value is recalculated.  This happens when the model calls
  get_value_for_datastore on the property.
  Example:
    import string
    class Person(Model):
      name = StringProperty(required=True)
      @db.ComputedProperty
      def lower_case_name(self):
        return self.name.lower()
    # Find all people regardless of case used in name.
    Person.gql('WHERE lower_case_name=:1' % name_to_search_for.lower())
  """
  def __init__(self, value_function, indexed=True):
    """Constructor.
    Args:
      value_function: Callable f(model_instance) -> value used to derive
        persistent property value for storage in datastore.
      indexed: Whether or not the attribute should be indexed.
    """
    super(ComputedProperty, self).__init__(indexed=indexed)
    self.__value_function = value_function
  def __set__(self, *args):
    """Disallow setting this value.
    Raises:
      DerivedPropertyError when developer attempts to set attribute manually.
      Model knows to ignore this exception when getting from datastore.
    """
    raise DerivedPropertyError(
        'Computed property %s cannot be set.' % self.name)
  def __get__(self, model_instance, model_class):
    """Derive property value.
    Args:
      model_instance: Instance to derive property for in bound method case,
        else None.
      model_class: Model class associated with this property descriptor.
    Returns:
      Result of calling self.__value_function as provided by property
      constructor.
    """
    # Accessed on the class (no instance): return the descriptor itself.
    if model_instance is None:
      return self
    return self.__value_function(model_instance)
def to_dict(model_instance, dictionary=None):
  """Convert model to dictionary.
  Args:
    model_instance: Model instance for which to make dictionary.
    dictionary: dict instance or compatible to receive model values.
      The dictionary is not cleared of original values.  Similar to using
      dictionary.update.  If dictionary is None, a new dictionary instance is
      created and returned.
  Returns:
    The supplied dictionary (or a freshly created one) populated with the
    model instance's values.
  """
  # Reuse the caller's mapping when given; otherwise create a fresh dict.
  target = dictionary if dictionary is not None else {}
  model_instance._to_entity(target)
  return target
# Re-export transaction helpers from the low-level datastore module under
# db-level names.
run_in_transaction = datastore.RunInTransaction
run_in_transaction_custom_retries = datastore.RunInTransactionCustomRetries
run_in_transaction_options = datastore.RunInTransactionOptions
# CamelCase aliases for the same helpers (legacy naming kept for callers).
RunInTransaction = run_in_transaction
RunInTransactionCustomRetries = run_in_transaction_custom_retries
# Cursor (de)serialization helpers for passing query cursors as web-safe
# strings.
websafe_encode_cursor = datastore_query.Cursor.to_websafe_string
websafe_decode_cursor = datastore_query.Cursor.from_websafe_string
is_in_transaction = datastore.IsInTransaction
transactional = datastore.Transactional
non_transactional = datastore.NonTransactional
create_config = datastore.CreateConfig
create_transaction_options = datastore.CreateTransactionOptions
| bsd-3-clause |
visionegg/visionegg | demo/demo_keyboardtrigger.py | 1 | 1764 | #!/usr/bin/env python
"""Use the keyboard to trigger the Vision Egg."""
# Author(s): Hubertus Becker <hubertus.becker@uni-tuebingen.de>
# Copyright: (C) 2005 by Hertie Institute for Clinical Brain Research,
# Department of Cognitive Neurology, University of Tuebingen
# URL: http://www.hubertus-becker.de/resources/visionegg/
# $Revision$ $Date$
import VisionEgg
VisionEgg.start_default_logging(); VisionEgg.watch_exceptions()
from VisionEgg.Core import *
from VisionEgg.FlowControl import Presentation, ConstantController
from VisionEgg.Gratings import *
import VisionEgg.Daq
from VisionEgg.DaqKeyboard import *
import pygame
# Normal stuff (from grating demo):
screen = get_default_screen()
# A drifting sine grating, centered on screen, initially switched off.
stimulus = SinGrating2D(
    on = 0, # turn grating is off when not in go loop
    position = ( screen.size[0]/2.0, screen.size[1]/2.0 ),
    anchor = 'center',
    size = ( 300.0 , 300.0 ),
    spatial_freq = 10.0 / screen.size[0],
    temporal_freq_hz = 5.0,
    orientation = 45.0
)
viewport = Viewport( screen=screen, stimuli=[stimulus] )
p = Presentation(
    go_duration=(5.0,'seconds'),
    trigger_go_if_armed=0, # wait for trigger
    viewports=[viewport]
)
# Stimulus on controller: grating visible only while the go loop runs.
stimulus_on_controller = ConstantController(during_go_value=1,between_go_value=0)
# Create a trigger input controller
trigger_in_controller = KeyboardTriggerInController() # Default is key '1'
#trigger_in_controller = KeyboardTriggerInController(pygame.locals.K_2) # Use '2'
# Add the trigger output controller to the presentation's list of controllers
p.add_controller(stimulus,'on',stimulus_on_controller)
p.add_controller(p,'trigger_go_if_armed',trigger_in_controller)
# Go! Three go loops in a row; each one waits for the keyboard trigger
# before running its 5-second presentation.
p.go()
p.go()
p.go()
| lgpl-2.1 |
keceli/RMG-Java | source/cclib/method/cspa.py | 11 | 3680 | """
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 733 $"
import random # For sometimes running the progress updater
import numpy
from population import Population
class CSPA(Population):
    """The C-squared population analysis."""

    def __init__(self, *args):
        """Initialize CSPA, forwarding arguments to Population."""
        # Call the __init__ method of the superclass.
        super(CSPA, self).__init__(logname="CSPA", *args)

    def __str__(self):
        """Return a string representation of the object."""
        # Bug fix: the original was "CSPA of" % (self.data) -- a format
        # string with no %s specifier, which raises TypeError at call time.
        return "CSPA of %s" % (self.data)

    def __repr__(self):
        """Return a representation of the object."""
        return 'CSPA("%s")' % (self.data)

    def calculate(self, indices=None, fupdate=0.05):
        """Perform the C squared population analysis.

        Inputs:
          indices - list of lists containing atomic orbital indices of fragments

        Returns:
          True on success; False if required parser attributes are missing
          or partitioning fails.
        """
        # Do we have the needed info in the parser?
        if not hasattr(self.data, "mocoeffs"):
            self.logger.error("Missing mocoeffs")
            return False
        if not hasattr(self.data, "nbasis"):
            self.logger.error("Missing nbasis")
            return False
        if not hasattr(self.data, "homos"):
            self.logger.error("Missing homos")
            return False

        self.logger.info("Creating attribute aoresults: array[3]")

        # Determine number of steps, and whether process involves beta orbitals.
        unrestricted = (len(self.data.mocoeffs) == 2)
        nbasis = self.data.nbasis

        self.aoresults = []
        alpha = len(self.data.mocoeffs[0])
        self.aoresults.append(numpy.zeros([alpha, nbasis], "d"))
        nstep = alpha
        if unrestricted:
            beta = len(self.data.mocoeffs[1])
            self.aoresults.append(numpy.zeros([beta, nbasis], "d"))
            nstep += beta

        # Initialize progress if available.
        if self.progress:
            self.progress.initialize(nstep)

        step = 0
        for spin in range(len(self.data.mocoeffs)):
            for i in range(len(self.data.mocoeffs[spin])):
                if self.progress and random.random() < fupdate:
                    self.progress.update(step, "C^2 Population Analysis")
                submocoeffs = self.data.mocoeffs[spin][i]
                # Each AO's weight is c^2 normalized by the MO's total c^2.
                # (The original also computed an unused intermediate
                # "tempvec"; it has been removed.)
                scale = numpy.inner(submocoeffs, submocoeffs)
                tempcoeffs = numpy.multiply(submocoeffs, submocoeffs)
                self.aoresults[spin][i] = numpy.divide(tempcoeffs, scale).astype("d")
                step += 1
        if self.progress:
            self.progress.update(nstep, "Done")

        retval = super(CSPA, self).partition(indices)
        if not retval:
            self.logger.error("Error in partitioning results")
            return False

        self.logger.info("Creating fragcharges: array[1]")
        size = len(self.fragresults[0][0])
        self.fragcharges = numpy.zeros([size], "d")

        for spin in range(len(self.fragresults)):
            # Sum contributions of all occupied MOs (up to the HOMO).
            for i in range(self.data.homos[spin] + 1):
                temp = numpy.reshape(self.fragresults[spin][i], (size,))
                self.fragcharges = numpy.add(self.fragcharges, temp)

        # Restricted calculations have doubly-occupied orbitals.
        if not unrestricted:
            self.fragcharges = numpy.multiply(self.fragcharges, 2)
        return True
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest, cspa
    doctest.testmod(cspa, verbose=False)
| mit |
bodi000/odoo | addons/knowledge/__init__.py | 436 | 1064 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
orbitfp7/nova | nova/tests/unit/virt/hyperv/test_hostutils.py | 13 | 5657 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.hyperv import constants
from nova.virt.hyperv import hostutils
class FakeCPUSpec(object):
    """Fake CPU Spec for unit tests."""
    # Attribute names presumably mirror the WMI processor object queried by
    # hostutils (TODO confirm against hostutils.get_cpus_info).  Sentinels
    # let tests assert pass-through without supplying real values.
    Architecture = mock.sentinel.cpu_arch
    Name = mock.sentinel.cpu_name
    Manufacturer = mock.sentinel.cpu_man
    NumberOfCores = mock.sentinel.cpu_cores
    NumberOfLogicalProcessors = mock.sentinel.cpu_procs
class HostUtilsTestCase(test.NoDBTestCase):
    """Unit tests for the Hyper-V hostutils class."""

    # Fake values returned by the mocked WMI layer below.
    # (The long literals show this module targets Python 2.)
    _FAKE_MEMORY_TOTAL = 1024L
    _FAKE_MEMORY_FREE = 512L
    _FAKE_DISK_SIZE = 1024L
    _FAKE_DISK_FREE = 512L
    # Versions on either side of the Windows 6.2 check exercised below.
    _FAKE_VERSION_GOOD = '6.2.0'
    _FAKE_VERSION_BAD = '6.1.9'

    def setUp(self):
        self._hostutils = hostutils.HostUtils()
        # Stub the CIMv2 WMI connection so no real WMI queries are issued.
        self._hostutils._conn_cimv2 = mock.MagicMock()
        super(HostUtilsTestCase, self).setUp()

    @mock.patch('nova.virt.hyperv.hostutils.ctypes')
    def test_get_host_tick_count64(self, mock_ctypes):
        # GetTickCount64 result should be returned unchanged.
        tick_count64 = "100"
        mock_ctypes.windll.kernel32.GetTickCount64.return_value = tick_count64
        response = self._hostutils.get_host_tick_count64()
        self.assertEqual(tick_count64, response)

    def test_get_cpus_info(self):
        cpu = mock.MagicMock(spec=FakeCPUSpec)
        self._hostutils._conn_cimv2.query.return_value = [cpu]
        cpu_list = self._hostutils.get_cpus_info()
        # _mock_children holds the mock's attribute map, which presumably
        # matches the dict hostutils builds per CPU -- TODO confirm.
        self.assertEqual([cpu._mock_children], cpu_list)

    def test_get_memory_info(self):
        memory = mock.MagicMock()
        type(memory).TotalVisibleMemorySize = mock.PropertyMock(
            return_value=self._FAKE_MEMORY_TOTAL)
        type(memory).FreePhysicalMemory = mock.PropertyMock(
            return_value=self._FAKE_MEMORY_FREE)
        self._hostutils._conn_cimv2.query.return_value = [memory]
        total_memory, free_memory = self._hostutils.get_memory_info()
        self.assertEqual(self._FAKE_MEMORY_TOTAL, total_memory)
        self.assertEqual(self._FAKE_MEMORY_FREE, free_memory)

    def test_get_volume_info(self):
        disk = mock.MagicMock()
        type(disk).Size = mock.PropertyMock(return_value=self._FAKE_DISK_SIZE)
        type(disk).FreeSpace = mock.PropertyMock(
            return_value=self._FAKE_DISK_FREE)
        self._hostutils._conn_cimv2.query.return_value = [disk]
        (total_memory, free_memory) = self._hostutils.get_volume_info(
            mock.sentinel.FAKE_DRIVE)
        self.assertEqual(self._FAKE_DISK_SIZE, total_memory)
        self.assertEqual(self._FAKE_DISK_FREE, free_memory)

    def test_check_min_windows_version_true(self):
        self._test_check_min_windows_version(self._FAKE_VERSION_GOOD, True)

    def test_check_min_windows_version_false(self):
        self._test_check_min_windows_version(self._FAKE_VERSION_BAD, False)

    def _test_check_min_windows_version(self, version, expected):
        # Shared helper: fake the reported OS version and compare against
        # the required (major=6, minor=2) threshold.
        os = mock.MagicMock()
        os.Version = version
        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [os]
        self.assertEqual(expected,
                         self._hostutils.check_min_windows_version(6, 2))

    def _test_host_power_action(self, action):
        # Shared helper: each known action maps to a Win32Shutdown flag;
        # unknown actions must raise NotImplementedError.
        fake_win32 = mock.MagicMock()
        fake_win32.Win32Shutdown = mock.MagicMock()
        self._hostutils._conn_cimv2.Win32_OperatingSystem.return_value = [
            fake_win32]
        if action == constants.HOST_POWER_ACTION_SHUTDOWN:
            self._hostutils.host_power_action(action)
            fake_win32.Win32Shutdown.assert_called_with(
                self._hostutils._HOST_FORCED_SHUTDOWN)
        elif action == constants.HOST_POWER_ACTION_REBOOT:
            self._hostutils.host_power_action(action)
            fake_win32.Win32Shutdown.assert_called_with(
                self._hostutils._HOST_FORCED_REBOOT)
        else:
            self.assertRaises(NotImplementedError,
                              self._hostutils.host_power_action, action)

    def test_host_shutdown(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_SHUTDOWN)

    def test_host_reboot(self):
        self._test_host_power_action(constants.HOST_POWER_ACTION_REBOOT)

    def test_host_startup(self):
        # Startup is not implementable via Win32Shutdown, so it must raise.
        self._test_host_power_action(constants.HOST_POWER_ACTION_STARTUP)

    def test_get_supported_vm_types_2012_r2(self):
        with mock.patch.object(self._hostutils,
                               'check_min_windows_version') as mock_check_win:
            mock_check_win.return_value = True
            result = self._hostutils.get_supported_vm_types()
            self.assertEqual([constants.IMAGE_PROP_VM_GEN_1,
                              constants.IMAGE_PROP_VM_GEN_2], result)

    def test_get_supported_vm_types(self):
        with mock.patch.object(self._hostutils,
                               'check_min_windows_version') as mock_check_win:
            mock_check_win.return_value = False
            result = self._hostutils.get_supported_vm_types()
            self.assertEqual([constants.IMAGE_PROP_VM_GEN_1], result)
| apache-2.0 |
rspavel/spack | lib/spack/spack/util/naming.py | 5 | 8560 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# Need this because of spack.util.string
from __future__ import absolute_import
import string
import itertools
import re
from six import StringIO
import spack.error
__all__ = [
'mod_to_class',
'spack_module_to_python_module',
'valid_module_name',
'valid_fully_qualified_module_name',
'validate_fully_qualified_module_name',
'validate_module_name',
'possible_spack_module_names',
'simplify_name',
'NamespaceTrie']
# Valid module names can contain '-' but can't start with it.
_valid_module_re = r'^\w[\w-]*$'
# Valid module names can contain '-' but can't start with it.
_valid_fully_qualified_module_re = r'^(\w[\w-]*)(\.\w[\w-]*)*$'
def mod_to_class(mod_name):
    """Convert a name from module style to class name style.  Spack mostly
    follows `PEP-8 <http://legacy.python.org/dev/peps/pep-0008/>`_:

       * Module and package names use lowercase_with_underscores.
       * Class names use the CapWords convention.

    Spack is a bit more liberal with its Package names and Compiler names:

       * They can contain '-' as well as '_', but cannot start with '-'.
       * They can start with numbers, e.g. "3proxy".

    This function maps the module convention onto the class convention by
    removing '_' and '-' and capitalizing the surrounding text.  If the
    result would start with a digit, a leading '_' is added so the name is
    a valid Python identifier.
    """
    validate_module_name(mod_name)

    # Collapse every run of separators to a single dash, capitalize each
    # dash-separated chunk, then drop the dashes entirely.
    dashed = re.sub(r'[-_]+', '-', mod_name)
    class_name = string.capwords(dashed, '-').replace('-', '')

    # Python identifiers cannot begin with a digit.
    if re.match(r'^[0-9]', class_name):
        class_name = "_%s" % class_name

    return class_name
def spack_module_to_python_module(mod_name):
    """Given a Spack module name, returns the name by which it can be
    imported in Python.

    Names starting with a digit get a 'num' prefix (Python identifiers
    cannot start with a digit) and dashes become underscores.
    """
    prefix = 'num' if re.match(r'[0-9]', mod_name) else ''
    return (prefix + mod_name).replace('-', '_')
def possible_spack_module_names(python_mod_name):
    """Given a Python module name, return a list of all possible spack module
    names that could correspond to it."""
    # Undo the 'num' prefix added for names starting with a digit.
    stripped = re.sub(r'^num(\d)', r'\1', python_mod_name)

    # Split while keeping the underscores; the odd positions of `pieces`
    # are the separators, which may each be '_' or '-' in Spack.
    pieces = re.split(r'(_)', stripped)
    separator_choices = [['_', '-']] * stripped.count('_')

    def _with_separators(combo):
        candidate = list(pieces)
        candidate[1::2] = combo
        return ''.join(candidate)

    return [_with_separators(combo)
            for combo in itertools.product(*separator_choices)]
def simplify_name(name):
    """Simplify package name to only lowercase, digits, and dashes.

    Simplifies a name which may include uppercase letters, periods,
    underscores, and pluses.  In general, we want our package names to
    only contain lowercase letters, digits, and dashes.

    Args:
        name (str): The original name of the package

    Returns:
        str: The new name of the package
    """
    # Rename Intel downloads: l_daal, l_ipp, l_mkl -> daal, ipp, mkl
    if name.startswith('l_'):
        name = name[2:]

    # Lowercase everything (e.g. SAMRAI -> samrai).
    name = name.lower()

    # Order matters below: '++' must be handled before the lone '+'.
    #   '_' and '.'  -> '-'    (backports.ssl_match_hostname -> backports-ssl-match-hostname)
    #   '++'         -> 'pp'   (voro++ -> voropp)
    #   '+'          -> '-plus' (gtk+ -> gtk-plus)
    for old, new in (('_', '-'), ('.', '-'), ('++', 'pp'), ('+', '-plus')):
        name = name.replace(old, new)

    # Avoid "lua"/"bpp" fused with the rest of the name: insert a dash
    # after those prefixes if one is not already present.
    name = re.sub('^(lua)([^-])', r'\1-\2', name)
    name = re.sub('^(bpp)([^-])', r'\1-\2', name)

    return name
def valid_module_name(mod_name):
    """Return whether mod_name is valid for use in Spack."""
    # Per _valid_module_re: word chars and '-' allowed, but no leading '-'.
    return bool(re.match(_valid_module_re, mod_name))
def valid_fully_qualified_module_name(mod_name):
    """Return whether mod_name is a valid namespaced module name."""
    # Same rule as valid_module_name, applied to each dot-separated part.
    return bool(re.match(_valid_fully_qualified_module_re, mod_name))
def validate_module_name(mod_name):
    """Raise an exception if mod_name is not valid.

    Raises:
        InvalidModuleNameError: if mod_name fails valid_module_name().
    """
    if not valid_module_name(mod_name):
        raise InvalidModuleNameError(mod_name)
def validate_fully_qualified_module_name(mod_name):
    """Raise an exception if mod_name is not a valid namespaced module name.

    Raises:
        InvalidFullyQualifiedModuleNameError: if the name fails validation.
    """
    if not valid_fully_qualified_module_name(mod_name):
        raise InvalidFullyQualifiedModuleNameError(mod_name)
class InvalidModuleNameError(spack.error.SpackError):
    """Raised when we encounter a bad module name."""

    def __init__(self, name):
        super(InvalidModuleNameError, self).__init__(
            "Invalid module name: " + name)
        # Keep the offending name for programmatic access by handlers.
        self.name = name
class InvalidFullyQualifiedModuleNameError(spack.error.SpackError):
    """Raised when we encounter a bad full package name."""

    def __init__(self, name):
        super(InvalidFullyQualifiedModuleNameError, self).__init__(
            "Invalid fully qualified package name: " + name)
        # Keep the offending name for programmatic access by handlers.
        self.name = name
class NamespaceTrie(object):
    """Trie mapping dotted namespace strings (e.g. ``a.b.c``) to values.

    Each trie node corresponds to one namespace component; a node stores a
    value only if a value was assigned to that exact namespace.
    """

    class Element(object):
        """Wrapper marking that a value was explicitly set at a node."""

        def __init__(self, value):
            self.value = value

    def __init__(self, separator='.'):
        # Child tries keyed by the first namespace component.
        self._subspaces = {}
        # Element wrapper if a value was set at this node, else None.
        self._value = None
        self._sep = separator

    def __setitem__(self, namespace, value):
        """Assign ``value`` to ``namespace``, creating nodes as needed."""
        first, sep, rest = namespace.partition(self._sep)
        if not first:
            self._value = NamespaceTrie.Element(value)
            return
        if first not in self._subspaces:
            # Fix: propagate the separator; the original created children
            # with the default '.' even for tries using a custom separator.
            self._subspaces[first] = NamespaceTrie(self._sep)
        self._subspaces[first][rest] = value

    def _get_helper(self, namespace, full_name):
        # Recursive lookup; full_name is kept only for error messages.
        first, sep, rest = namespace.partition(self._sep)
        if not first:
            if not self._value:
                raise KeyError("Can't find namespace '%s' in trie" % full_name)
            return self._value.value
        elif first not in self._subspaces:
            raise KeyError("Can't find namespace '%s' in trie" % full_name)
        else:
            return self._subspaces[first]._get_helper(rest, full_name)

    def __getitem__(self, namespace):
        """Return the value stored at exactly ``namespace`` (KeyError if none)."""
        return self._get_helper(namespace, namespace)

    def is_prefix(self, namespace):
        """True if the namespace has a value, or if it's the prefix of one that
           does."""
        first, sep, rest = namespace.partition(self._sep)
        if not first:
            return True
        elif first not in self._subspaces:
            return False
        else:
            return self._subspaces[first].is_prefix(rest)

    def is_leaf(self, namespace):
        """True if this namespace has no children in the trie."""
        # NOTE(review): the code actually returns True when the node *has*
        # children (bool(self._subspaces)), contradicting the docstring.
        # Behavior is preserved here; confirm intent before changing.
        first, sep, rest = namespace.partition(self._sep)
        if not first:
            return bool(self._subspaces)
        elif first not in self._subspaces:
            return False
        else:
            return self._subspaces[first].is_leaf(rest)

    def has_value(self, namespace):
        """True if there is a value set for the given namespace."""
        first, sep, rest = namespace.partition(self._sep)
        if not first:
            return self._value is not None
        elif first not in self._subspaces:
            return False
        else:
            return self._subspaces[first].has_value(rest)

    def __contains__(self, namespace):
        """Returns whether a value has been set for the namespace."""
        return self.has_value(namespace)

    def _str_helper(self, stream, level=0):
        # Write an indented tree representation of this subtrie to stream.
        indent = (level * '    ')
        for name in sorted(self._subspaces):
            stream.write(indent + name + '\n')
            if self._value:
                stream.write(indent + '  ' + repr(self._value.value))
            # Fix: recurse for the side effect only.  The original passed
            # the recursive call's None return value to stream.write(),
            # which raised TypeError for any trie with children.
            self._subspaces[name]._str_helper(stream, level + 1)

    def __str__(self):
        stream = StringIO()
        self._str_helper(stream)
        return stream.getvalue()
| lgpl-2.1 |
raspibo/Livello1 | var/www/cgi-bin/viewpagegrapharray.py | 1 | 2724 | #!/usr/bin/env python3
# Questo file visualizza un grafico da file "csv"
# Serve per la parte di gestione html in python
import cgi
import cgitb
# Abilita gli errori al server web/http
cgitb.enable()
""" Stavolta e` tutta un'altra cosa
Modificato perche` accetti un file in input
Esempio: viewpagegraph.py?file=graph.csv
Ha il percorso sulla "root": ../filename
Dovrebbe funzionare anche per gli archivi
"""
# Parametri generali // Non li uso tutti, li lascio per "abitudine"
TestoPagina="Visualizza grafico da file: "
DirBase="/var/www"
ConfigFile=DirBase+"/conf/config.json"
ExecFile="none"
# Redis "key"
RedisKey = "none"
# Form name/s
FormName = "file"
FileName = ""
form=cgi.FieldStorage()
if FormName not in form:
pass
else:
FileName = cgi.escape(form[FormName].value)
TestoPagina = TestoPagina + FileName
print("""<!DOCTYPE html>
<html>
<head>
<title>Livello 1</title>
<meta name="GENERATOR" content="Midnight Commander (mcedit)">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta name="Keywords" content="centralina, livello1, grafico, python">
<meta name="Author" content="Davide">
<!-- <meta http-equiv="refresh" content="300">
L`ho chiamato da un GET, quindi il timer funziona, ma non serve per come ho pensato di fare/usare .. !!!
-->
<meta http-equiv="refresh" content="300">
<script type="text/javascript"
src="../dygraph.min.js"></script>
</head>
<body>
<title>Graphic</title>
""")
# Scrivo il Titolo/Testo della pagina
print ("<h1>","<center>",TestoPagina,"</center>","</h1>")
#print ("<hr/>","<br/>")
# Eventuale help/annotazione
#print ("Non ho rinominato i campi e non sono stato a riordinare le voci.<br/>")
print("""
<p>
Questa "chart" e` interattiva.
Muovi il mouse per evidenziare i singoli valori.
Clicca e trascina per selezionare ed effettuare uno zoom sull'area selezionata.
Doppio click del mouse per ritornare alla visualizzazione globale.
Con il tasto "Shift" premuto, usa il click del mouse per trascinare l'area di visualizzazione.
</p>
<div id="graphdiv" style="position:absolute; left:20px; right:20px; top:200px; bottom:20px;"></div>
<script type="text/javascript">
g = new Dygraph(
// containing div
document.getElementById("graphdiv"),
// CSV or path to a CSV file.
""")
#print(" \""+FileName+"\",")
print("""
[
[1,10,100],
[2,20,80],
[3,50,60],
[4,70,80],
],
""")
print("""
{
labels: [ "val1", "val2", "val3" ],
showRoller: false,
connectSeparatedPoints: true,
//title: 'Grafico',
ylabel: 'Value',
//xlabel: 'Time',
//legend: 'always',
labelDivStyles: {'textalign':'right'}
}
);
</script>
</body>
</html>""")
| mit |
leilihh/novaha | nova/tests/api/openstack/compute/plugins/v3/test_flavor_rxtx.py | 19 | 3365 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.api.openstack.compute.plugins.v3 import flavor_rxtx
from nova.compute import flavors
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
# Two fake flavor records keyed by "flavor <id>"; flavor 2 deliberately has
# no rxtx_factor so tests can cover the unset case.
FAKE_FLAVORS = {
    'flavor 1': {
        "flavorid": '1',
        "name": 'flavor 1',
        "memory_mb": '256',
        "root_gb": '10',
        "rxtx_factor": '1.0',
        "swap": 0,
        "ephemeral_gb": 0,
        "vcpus": 1,
        "disabled": False,
    },
    'flavor 2': {
        "flavorid": '2',
        "name": 'flavor 2',
        "memory_mb": '512',
        "root_gb": '10',
        "rxtx_factor": None,
        "swap": 0,
        "vcpus": 1,
        "ephemeral_gb": 0,
        "disabled": False,
    },
}


def fake_flavor_get_by_flavor_id(flavorid, ctxt=None):
    """Return the fake flavor dict for the given flavorid."""
    return FAKE_FLAVORS['flavor %s' % flavorid]


def fake_get_all_flavors_sorted_list(context=None, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Return all fake flavors, ignoring sorting/paging arguments."""
    return [fake_flavor_get_by_flavor_id(fid) for fid in (1, 2)]
class FlavorRxtxTest(test.NoDBTestCase):
    """Tests for the os-flavor-rxtx v3 API extension."""

    content_type = 'application/json'
    # Prefix the extension adds to flavor keys, e.g. "<alias>:rxtx_factor".
    prefix = '%s:' % flavor_rxtx.ALIAS

    def setUp(self):
        super(FlavorRxtxTest, self).setUp()
        fakes.stub_out_nw_api(self.stubs)
        # Route flavor lookups to the in-memory FAKE_FLAVORS data above.
        self.stubs.Set(flavors, "get_all_flavors_sorted_list",
                       fake_get_all_flavors_sorted_list)
        self.stubs.Set(flavors,
                       "get_flavor_by_flavor_id",
                       fake_flavor_get_by_flavor_id)

    def _make_request(self, url):
        # Issue a GET against a minimal v3 app with only the needed
        # extensions loaded.
        req = webob.Request.blank(url)
        req.headers['Accept'] = self.content_type
        app = fakes.wsgi_app_v3(init_only=('servers', 'flavors',
                                           'os-flavor-rxtx'))
        res = req.get_response(app)
        return res

    def _get_flavor(self, body):
        return jsonutils.loads(body).get('flavor')

    def _get_flavors(self, body):
        return jsonutils.loads(body).get('flavors')

    def assertFlavorRxtx(self, flavor, rxtx):
        self.assertEqual(
            flavor.get('%srxtx_factor' % self.prefix), rxtx)

    def test_show(self):
        url = '/v3/flavors/1'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        self.assertFlavorRxtx(self._get_flavor(res.body), '1.0')

    def test_detail(self):
        url = '/v3/flavors/detail'
        res = self._make_request(url)
        self.assertEqual(res.status_int, 200)
        flavors = self._get_flavors(res.body)
        self.assertFlavorRxtx(flavors[0], '1.0')
        # Flavor 2 has rxtx_factor None, serialized as '' by the extension.
        self.assertFlavorRxtx(flavors[1], '')
| apache-2.0 |
PourroyJean/performance_modelisation | script/data visualisation/venv/lib/python3.6/site-packages/pip/utils/appdirs.py | 340 | 8811 | """
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip.compat import WINDOWS, expanduser
from pip._vendor.six import PY2, text_type
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.

        "appname" is the name of application.

    Typical user cache directories are:
        macOS:      ~/Library/Caches/<AppName>
        Unix:       ~/.cache/<AppName> (XDG default)
        Windows:    C:\Users\<username>\AppData\Local\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory, identical to the non-roaming
    app data dir returned by `user_data_dir`; apps conventionally put cache
    data *under* it, hence the appended "Cache" component.
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        # On Python 2 return bytes on Windows, matching the behaviour on
        # other operating systems (see _win_path_to_bytes).
        if PY2 and isinstance(base, text_type):
            base = _win_path_to_bytes(base)
        return os.path.join(base, appname, "Cache")
    if sys.platform == "darwin":
        return os.path.join(expanduser("~/Library/Caches"), appname)
    # XDG-style default for everything else.
    base = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
    return os.path.join(base, appname)
def user_data_dir(appname, roaming=False):
    """
    Return full path to the user-specific data dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "roaming" (boolean, default False) can be set True to use the
            Windows roaming appdata directory, so the data is sync'd on
            login for users on a roaming-profile network. See
            <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
            for a discussion of issues.

    Typical user data directories are:
        macOS:                  ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or in
                                $XDG_DATA_HOME, if defined
        Win XP (not roaming):   C:\Documents and Settings\<username>\ ...
                                ...Application Data\<AppName>
        Win XP (roaming):       C:\Documents and Settings\<username>\Local ...
                                ...Settings\Application Data\<AppName>
        Win 7  (not roaming):   C:\\Users\<username>\AppData\Local\<AppName>
        Win 7  (roaming):       C:\\Users\<username>\AppData\Roaming\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
    That means, by default "~/.local/share/<AppName>".
    """
    if WINDOWS:
        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        return os.path.join(os.path.normpath(_get_win_folder(const)), appname)
    if sys.platform == "darwin":
        return os.path.join(
            expanduser('~/Library/Application Support/'),
            appname,
        )
    return os.path.join(
        os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")),
        appname,
    )
def user_config_dir(appname, roaming=True):
    """Return full path to the user-specific config dir for this application.

        "appname" is the name of application.
            If None, just the system directory is returned.
        "roaming" (boolean, default True) can be set False to not use the
            Windows roaming appdata directory (see user_data_dir for the
            trade-offs involved).

    Typical user config directories are:
        macOS:                  same as user_data_dir
        Unix:                   ~/.config/<AppName>
        Win *:                  same as user_data_dir

    For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
    """
    if WINDOWS:
        return user_data_dir(appname, roaming=roaming)
    if sys.platform == "darwin":
        return user_data_dir(appname)
    base = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config"))
    return os.path.join(base, appname)
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
    """Return a list of potential user-shared config dirs for this application.

        "appname" is the name of application.

    Typical user config directories are:
        macOS:      /Library/Application Support/<AppName>/
        Unix:       /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
                    $XDG_CONFIG_DIRS
        Win XP:     C:\Documents and Settings\All Users\Application ...
                    ...Data\<AppName>\
        Vista:      (Fail! "C:\ProgramData" is a hidden *system* directory
                    on Vista.)
        Win 7:      Hidden, but writeable on Win 7:
                    C:\ProgramData\<AppName>\
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        return [os.path.join(base, appname)]
    if sys.platform == 'darwin':
        return [os.path.join('/Library/Application Support', appname)]
    # Everything else: honour $XDG_CONFIG_DIRS ...
    xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    if xdg_config_dirs:
        dirs = [
            os.path.join(expanduser(entry), appname)
            for entry in xdg_config_dirs.split(os.pathsep)
        ]
    else:
        dirs = []
    # ... and always look in /etc directly as well.
    dirs.append('/etc')
    return dirs
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
    """Resolve a CSIDL_* name via the Explorer "Shell Folders" registry key.

    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import _winreg

    # Look the name up first so an unknown CSIDL raises KeyError before we
    # touch the registry at all.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* name through the Win32 SHGetFolderPathW API."""
    folder_id = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, folder_id, None, 0, buf)
    # Downgrade to the short (8.3) path name if the result contains high-bit
    # characters. See <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in buf):
        short_buf = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, short_buf, 1024):
            buf = short_buf
    return buf.value
# Pick the folder-resolution backend once at import time: prefer ctypes
# (normally available on CPython for Windows) and fall back to the registry
# technique when the ctypes import fails.
if WINDOWS:
    try:
        import ctypes
        _get_win_folder = _get_win_folder_with_ctypes
    except ImportError:
        _get_win_folder = _get_win_folder_from_registry
def _win_path_to_bytes(path):
"""Encode Windows paths to bytes. Only used on Python 2.
Motivation is to be consistent with other operating systems where paths
are also returned as bytes. This avoids problems mixing bytes and Unicode
elsewhere in the codebase. For more details and discussion see
<https://github.com/pypa/pip/issues/3463>.
If encoding using ASCII and MBCS fails, return the original Unicode path.
"""
for encoding in ('ASCII', 'MBCS'):
try:
return path.encode(encoding)
except (UnicodeEncodeError, LookupError):
pass
return path
| gpl-3.0 |
salguarnieri/intellij-community | python/helpers/pydev/third_party/wrapped_for_pydev/ctypes/macholib/dylib.py | 320 | 1828 | """
Generic dylib path manipulation
"""
import re
__all__ = ['dylib_info']
# Verbose-mode pattern decomposing a dylib path into location, full name,
# short name, and the optional version / suffix components.
DYLIB_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
    (?P<shortname>\w+?)
    (?:\.(?P<version>[^._]+))?
    (?:_(?P<suffix>[^._]+))?
    \.dylib$
)
""")


def dylib_info(filename):
    """Parse a dylib path into its components.

    A dylib name can take one of the following four forms:
        Location/Name.SomeVersion_Suffix.dylib
        Location/Name.SomeVersion.dylib
        Location/Name_Suffix.dylib
        Location/Name.dylib

    Returns None if *filename* does not look like a dylib, otherwise a
    mapping with keys 'location', 'name', 'shortname', 'version' and
    'suffix' (the last two may be None when absent).
    """
    match = DYLIB_RE.match(filename)
    return match.groupdict() if match else None
def test_dylib_info():
    """Smoke-test dylib_info against all four documented name forms."""
    def expect(location=None, name=None, shortname=None, version=None, suffix=None):
        # Build the mapping dylib_info is expected to return.
        return {
            'location': location,
            'name': name,
            'shortname': shortname,
            'version': version,
            'suffix': suffix,
        }
    assert dylib_info('completely/invalid') is None
    assert dylib_info('completely/invalide_debug') is None
    assert dylib_info('P/Foo.dylib') == expect('P', 'Foo.dylib', 'Foo')
    assert dylib_info('P/Foo_debug.dylib') == expect('P', 'Foo_debug.dylib', 'Foo', suffix='debug')
    assert dylib_info('P/Foo.A.dylib') == expect('P', 'Foo.A.dylib', 'Foo', 'A')
    assert dylib_info('P/Foo_debug.A.dylib') == expect('P', 'Foo_debug.A.dylib', 'Foo_debug', 'A')
    assert dylib_info('P/Foo.A_debug.dylib') == expect('P', 'Foo.A_debug.dylib', 'Foo', 'A', 'debug')


if __name__ == '__main__':
    test_dylib_info()
| apache-2.0 |
hreeder/ignition | manager/core/views.py | 1 | 7096 | from flask import render_template, redirect, url_for, flash, request, current_app
from flask.ext.login import login_required, login_user, logout_user, current_user
from manager import app, db
from manager.utils import send_email, flash_errors
from manager.core import core
from manager.core.forms import LoginForm, RegistrationForm, ForgotPasswordForm, NewPasswordForm
from manager.core.models import User
@core.route("/")
@login_required
def home():
    """Render the landing page for authenticated users."""
    return render_template("core/home.html")
@core.route("/login", methods=["GET", "POST"])
def login():
    """Log a user in.

    GET renders the login form; POST validates the credentials. Only
    accounts with active=True can authenticate.
    """
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(
            username=form.username.data,
            active=True
        ).first()
        if not user:
            flash("User account not found!", "danger")
            return redirect(url_for("core.login"))
        if user.validate_password(form.password.data):
            login_user(user)
            # Honour ?next=... so protected pages can bounce through login.
            # NOTE(review): "next" is not validated against the local host,
            # which is a potential open-redirect — confirm and restrict.
            return redirect(request.args.get("next") or url_for("core.home"))
        else:
            flash("Your password was incorrect!", "danger")
    else:
        flash_errors(form)
    return render_template("core/login.html", form=form)
@core.route("/logout")
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    return redirect(url_for("core.home"))
@core.route("/register", methods=["GET", "POST"])
def register():
    """Create a new user account.

    Enforces unique username and email, then either emails an activation
    link (when FORCE_EMAIL_ACTIVATION is configured) or activates the
    account immediately.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        # Some extra validation - we should check to see if there's already a user registered with either that email
        # or that username
        # NOTE(review): this check-then-insert races under concurrent
        # registrations; presumably a DB unique constraint backs it — confirm.
        existing_username = User.query.filter_by(username=form.username.data).first()
        if existing_username:
            flash("That username is already in use!", "danger")
            return redirect(url_for("core.register"))
        existing_email = User.query.filter_by(email=form.email.data).first()
        if existing_email:
            flash("That email address is already in use!", "danger")
            return redirect(url_for("core.register"))
        # Create user model
        new_user = User(
            username=form.username.data,
            email=form.email.data,
            password=form.password.data
        )
        if app.config['FORCE_EMAIL_ACTIVATION']:
            # Set activation key
            new_user.generate_activation_key()
            # Send the new user their activation code
            send_email(
                [new_user.email],
                "[Ignition] Welcome to Ignition, Please Activate Your Account",
                render_template(
                    'core/email_registration.txt',
                    username=new_user.username,
                    siteurl=url_for("core.home", _external=True),
                    activationurl=url_for("core.validate_registration", username=new_user.username, key=new_user.activation_key, _external=True)
                ),
                render_template(
                    'core/email_registration.html',
                    username=new_user.username,
                    siteurl=url_for("core.home", _external=True),
                    activationurl=url_for("core.validate_registration", username=new_user.username, key=new_user.activation_key, _external=True)
                )
            )
            post = url_for('core.post_register')
        else:
            # No email activation required: the account is usable at once.
            new_user.activate()
            flash('Account created, you may now log in', 'success')
            post = url_for('core.home')
        # Save user
        db.session.add(new_user)
        db.session.commit()
        return redirect(post)
    else:
        flash_errors(form)
    return render_template("core/register.html", form=form)
@core.route("/register/validating")
def post_register():
    """Interstitial page shown after registration while activation is pending."""
    return render_template("core/post_register.html")
@core.route("/register/validate/<username>/<key>")
def validate_registration(username, key):
    """Activate an account from the emailed activation link.

    404s unless the username/key pair matches a still-inactive account.
    """
    user = User.query.filter_by(username=username, activation_key=key, active=False).first_or_404()
    user.activate()
    db.session.add(user)
    db.session.commit()
    flash("User '%s' has now been activated and you may log in." % (username,), "success")
    return redirect(url_for('core.login'))
@core.route("/login/forgot_password", methods=["GET", "POST"])
def forgotten_password():
    """Start a password reset: generate a key and email a reset link."""
    form = ForgotPasswordForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data, email=form.email.data).first()
        if not user:
            flash("Account not found!", "danger")
            return redirect(url_for('core.forgotten_password'))
        # The activation key doubles as the one-time reset token.
        user.generate_activation_key()
        # NOTE(review): this reuses the registration email templates
        # (core/email_registration.*) for the reset mail — confirm a
        # dedicated reset template was not intended here.
        send_email(
            [user.email],
            "[Ignition] Password Reset Link",
            render_template(
                'core/email_registration.txt',
                username=user.username,
                siteurl=url_for("core.home", _external=True),
                activationurl=url_for("core.reset_password", username=user.username, key=user.activation_key, _external=True)
            ),
            render_template(
                'core/email_registration.html',
                username=user.username,
                siteurl=url_for("core.home", _external=True),
                activationurl=url_for("core.reset_password", username=user.username, key=user.activation_key, _external=True)
            )
        )
        # Persist the freshly generated key.
        db.session.add(user)
        db.session.commit()
        flash("An email has been dispatched and you will have a password reset link shortly", "success")
        return redirect(url_for("core.login"))
    else:
        flash_errors(form)
    return render_template('core/forgot_password.html', form=form)
@core.route("/login/reset/<username>/<key>", methods=['GET', 'POST'])
def reset_password(username, key):
    """Complete a password reset from the emailed link.

    404s unless the username/key pair matches an active account.
    """
    user = User.query.filter_by(username=username, activation_key=key, active=True).first_or_404()
    form = NewPasswordForm()
    if form.validate_on_submit():
        # NOTE(review): activate() is presumably reused here to clear the
        # one-time activation/reset key — verify User.activate semantics.
        user.activate()
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Password changed successfully!', 'success')
        return redirect(url_for('core.home'))
    else:
        flash_errors(form)
    return render_template('core/reset_password.html', form=form)
@core.route("/profile")
@login_required
def profile():
    """Render the current user's profile page."""
    return render_template('core/profile.html')
@core.route("/profile/change_password", methods=["GET", "POST"])
@login_required
def change_password():
    """Let the logged-in user set a new password."""
    form = NewPasswordForm()
    if form.validate_on_submit():
        current_user.set_password(form.password.data)
        db.session.add(current_user)
        db.session.commit()
        flash('Password changed successfully!', 'success')
        return redirect(url_for('core.profile'))
    else:
        flash_errors(form)
    return render_template('core/change_password.html', form=form)
| mit |
EndyKaufman/django-postgres-angularjs-blog | app/manager/migrations/0009_htmlcache.py | 1 | 1154 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-16 09:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.6 (see file header) — do not hand-edit
    # beyond comments. Creates the HtmlCache model: a per-URL store of
    # rendered page content with created/updated timestamps and an optional
    # reference to the creating user.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('manager', '0008_properties_fill_from_mock'),
    ]
    operations = [
        migrations.CreateModel(
            name='HtmlCache',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('url', models.TextField(unique=True)),
                ('content', models.TextField(blank=True, null=True)),
                ('created', models.DateTimeField(auto_now_add=True, null=True, verbose_name='date created')),
                ('updated', models.DateTimeField(auto_now=True, null=True, verbose_name='date updated')),
                # SET_NULL keeps cache rows alive when the creating user is deleted.
                ('created_user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| mit |
BillBillBillBill/Tickeys-linux | tickeys/kivy_32/kivy/core/text/text_sdl2.py | 37 | 1288 | '''
SDL2 text provider
==================
Based on SDL2 + SDL2_ttf
'''
__all__ = ('LabelSDL2', )
from kivy.compat import PY2
from kivy.core.text import LabelBase
from kivy.core.text._text_sdl2 import (_SurfaceContainer, _get_extents,
_get_fontdescent, _get_fontascent)
class LabelSDL2(LabelBase):
    """Text rendering backend built on SDL2 + SDL2_ttf.

    Delegates the heavy lifting to the cython helpers in _text_sdl2.
    """
    def _get_font_id(self):
        # Cache key identifying the underlying TTF font handle. On Python 2
        # prefer a unicode join; fall back to str() when an option value
        # holds bytes that cannot be decoded.
        if PY2:
            try:
                return '|'.join([unicode(self.options[x]) for x
                    in ('font_size', 'font_name_r', 'bold', 'italic')])
            except UnicodeDecodeError:
                pass
        return '|'.join([str(self.options[x]) for x
            in ('font_size', 'font_name_r', 'bold', 'italic')])
    def get_extents(self, text):
        """Return the (width, height) the given text would occupy."""
        try:
            # Best-effort: SDL2_ttf wants UTF-8 bytes on Python 2.
            if PY2:
                text = text.encode('UTF-8')
        except:
            pass
        return _get_extents(self, text)
    def get_descent(self):
        """Return the font descent (below the baseline)."""
        return _get_fontdescent(self)
    def get_ascent(self):
        """Return the font ascent (above the baseline)."""
        return _get_fontascent(self)
    def _render_begin(self):
        # Allocate the SDL surface the glyphs will be blitted onto.
        self._surface = _SurfaceContainer(self._size[0], self._size[1])
    def _render_text(self, text, x, y):
        self._surface.render(self, text, x, y)
    def _render_end(self):
        # Hand the rendered pixel data back to the Label machinery.
        return self._surface.get_data()
| mit |
anhiga/poliastro | src/poliastro/tests/tests_twobody/test_states.py | 1 | 4110 | from numpy.testing import assert_allclose
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from poliastro.bodies import Sun, Earth
from poliastro.twobody.rv import RVState
from poliastro.twobody.classical import ClassicalState
def test_state_has_attractor_given_in_constructor():
    """ClassicalState must store the attractor passed to its constructor."""
    _d = 1.0 * u.AU  # Unused distance
    _ = 0.5 * u.one  # Unused dimensionless value
    _a = 1.0 * u.deg  # Unused angle
    ss = ClassicalState(Sun, _d, _, _a, _a, _a, _a)
    assert ss.attractor == Sun
def test_state_has_elements_given_in_constructor():
    """coe() must round-trip the classical elements given at construction."""
    # Mars data from HORIZONS at J2000
    a = 1.523679 * u.AU
    ecc = 0.093315 * u.one
    inc = 1.85 * u.deg
    raan = 49.562 * u.deg
    argp = 286.537 * u.deg
    nu = 23.33 * u.deg
    ss = ClassicalState(Sun, a, ecc, inc, raan, argp, nu)
    assert ss.coe() == (a, ecc, inc, raan, argp, nu)
def test_state_has_individual_elements():
    """Each classical element must also be exposed as its own attribute."""
    a = 1.523679 * u.AU
    ecc = 0.093315 * u.one
    inc = 1.85 * u.deg
    raan = 49.562 * u.deg
    argp = 286.537 * u.deg
    nu = 23.33 * u.deg
    ss = ClassicalState(Sun, a, ecc, inc, raan, argp, nu)
    assert ss.a == a
    assert ss.ecc == ecc
    assert ss.inc == inc
    assert ss.raan == raan
    assert ss.argp == argp
    assert ss.nu == nu
def test_state_has_rv_given_in_constructor():
    """rv() must round-trip the position/velocity given at construction."""
    r = [1.0, 0.0, 0.0] * u.AU
    v = [0.0, 1.0e-6, 0.0] * u.AU / u.s
    ss = RVState(Sun, r, v)
    assert ss.rv() == (r, v)
def test_perigee_and_apogee():
    """r_a and r_p must match the radii implied by (a, ecc)."""
    expected_r_a = 500 * u.km
    expected_r_p = 300 * u.km
    # Recover (a, ecc) from the desired apsis radii:
    # a = (r_a + r_p) / 2 and ecc = r_a / a - 1.
    a = (expected_r_a + expected_r_p) / 2
    ecc = expected_r_a / a - 1
    _a = 1.0 * u.deg  # Unused angle
    ss = ClassicalState(Earth, a, ecc, _a, _a, _a, _a)
    assert_allclose(ss.r_a.to(u.km).value,
                    expected_r_a.to(u.km).value)
    assert_allclose(ss.r_p.to(u.km).value,
                    expected_r_p.to(u.km).value)
def test_convert_from_rv_to_coe():
    """Check conversion between element sets against Vallado, example 2.6.

    NOTE(review): despite the name, this builds classical elements and
    checks the resulting r, v vectors (coe -> rv); the companion test
    checks the inverse direction. Confirm intent before renaming.
    """
    # Data from Vallado, example 2.6
    attractor = Earth
    p = 11067.790 * u.km
    ecc = 0.83285 * u.one
    a = p / (1 - ecc ** 2)
    inc = 87.87 * u.deg
    raan = 227.89 * u.deg
    argp = 53.38 * u.deg
    nu = 92.335 * u.deg
    expected_r = [6525.344, 6861.535, 6449.125] * u.km
    expected_v = [4.902276, 5.533124, -1.975709] * u.km / u.s
    # Fix: use the `attractor` local (previously assigned but unused while
    # Earth was hard-coded in the constructor call below).
    r, v = ClassicalState(attractor, a, ecc, inc, raan, argp, nu).rv()
    assert_quantity_allclose(r, expected_r, rtol=1e-5)
    assert_quantity_allclose(v, expected_v, rtol=1e-5)
def test_convert_from_coe_to_rv():
    """Check r, v -> classical elements against Vallado, example 2.5.

    NOTE(review): despite the name this exercises the rv -> coe direction;
    confirm intent before renaming.
    """
    # Data from Vallado, example 2.5
    attractor = Earth
    r = [6524.384, 6862.875, 6448.296] * u.km
    v = [4.901327, 5.533756, -1.976341] * u.km / u.s
    expected_p = 11067.79 * u.km
    expected_ecc = 0.832853 * u.one
    expected_inc = 87.870 * u.deg
    expected_raan = 227.89 * u.deg
    expected_argp = 53.38 * u.deg
    expected_nu = 92.335 * u.deg
    ss = RVState(attractor, r, v)
    _, ecc, inc, raan, argp, nu = ss.coe()
    p = ss.p
    assert_quantity_allclose(p, expected_p, rtol=1e-4)
    assert_quantity_allclose(ecc, expected_ecc, rtol=1e-4)
    assert_quantity_allclose(inc, expected_inc, rtol=1e-4)
    assert_quantity_allclose(raan, expected_raan, rtol=1e-4)
    assert_quantity_allclose(argp, expected_argp, rtol=1e-4)
    assert_quantity_allclose(nu, expected_nu, rtol=1e-4)
def test_perifocal_points_to_perigee():
    """The perifocal p vector must align with the eccentricity unit vector."""
    _d = 1.0 * u.AU  # Unused distance
    _ = 0.5 * u.one  # Unused dimensionless value
    _a = 1.0 * u.deg  # Unused angle
    ss = ClassicalState(Sun, _d, _, _a, _a, _a, _a)
    p, _, _ = ss.pqw()
    assert_allclose(p, ss.e_vec / ss.ecc)
def test_arglat_within_range():
    """The argument of latitude must be reported in [0, 360] degrees."""
    r = [3539.08827417, 5310.19903462, 3066.31301457] * u.km
    v = [-6.49780849, 3.24910291, 1.87521413] * u.km / u.s
    ss = RVState(Earth, r, v)
    assert 0 * u.deg <= ss.arglat <= 360 * u.deg
def test_pqw_returns_dimensionless():
    """The perifocal frame vectors must be dimensionless unit vectors."""
    r_0 = ([1, 0, 0] * u.au).to(u.km)
    v_0 = ([0, 6, 0] * u.au / u.year).to(u.km / u.day)
    ss = RVState(Sun, r_0, v_0)
    p, q, w = ss.pqw()
    assert p.unit == u.one
    assert q.unit == u.one
    assert w.unit == u.one
| mit |
malramsay64/MD-Molecules-Hoomd | statdyn/analysis/relaxation.py | 1 | 4191 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""These are a series of summary values of the dynamics quantities.
This provides methods of easily comparing values across variables.
"""
import logging
from typing import Tuple
import numpy as np
from scipy.optimize import curve_fit, newton
logger = logging.getLogger(__name__)
def _msd_function(x: np.ndarray, m: float, b: float) -> np.ndarray:
return m*x + b
def _exponential_decay(x: np.ndarray, a: float, b: float, c: float=0) -> np.ndarray:
return a * np.exp(-b * x) + c
def _ddx_exponential_decay(x: np.ndarray, a: float, b: float, c: float=0) -> np.ndarray:
return -b * a * np.exp(-b * x)
def _d2dx2_exponential_decay(x: np.ndarray, a: float, b: float, c: float=0) -> np.ndarray:
return b * b * a * np.exp(-b * x)
def diffusion_constant(time: np.ndarray,
                       msd: np.ndarray,
                       sigma: np.ndarray=None,
                       ) -> Tuple[float, float]:
    """Compute the diffusion constant from the mean squared displacement.

    A straight line is fitted to the region where the MSD lies between 2
    and 100, taken to be the diffusive (linear) regime.

    Args:
        time (class:`np.ndarray`): The timesteps corresponding to each msd value.
        msd (class:`np.ndarray`): Values of the mean squared displacement.
        sigma (class:`np.ndarray`): Optional per-point uncertainty of the
            msd values, used to weight the fit.

    Returns:
        (float, float): The diffusion constant (slope of the fit) and twice
        the standard error of the slope. ``(0, 0)`` is returned when the
        linear region holds too few points for a fit.
    """
    linear_region = np.logical_and(2 < msd, msd < 100)
    if sigma is not None:
        # Bug fix: sigma was previously accepted but never forwarded to
        # curve_fit; restrict it to the fitted region before use.
        sigma = sigma[linear_region]
    try:
        popt, pcov = curve_fit(_msd_function,
                               time[linear_region],
                               msd[linear_region],
                               sigma=sigma)
    except TypeError:
        # curve_fit raises TypeError when the region has fewer points than
        # fit parameters; report a null result in that case.
        return 0, 0
    perr = 2*np.sqrt(np.diag(pcov))
    return popt[0], perr[0]
def threshold_relaxation(time: np.ndarray,
                         value: np.ndarray,
                         threshold: float=1/np.exp(1),
                         greater: bool=True,
                         ) -> Tuple[float, float]:
    """Compute the relaxation as the time the quantity first crosses a threshold.

    Args:
        time (class:`np.ndarray`): The timesteps corresponding to each value.
        value (class:`np.ndarray`): Values of the relaxation parameter.
        threshold: Value at which the quantity is considered relaxed
            (default 1/e).
        greater: When True, relax upwards (first value above threshold);
            when False, relax downwards (first value below threshold).

    Returns:
        (float, float): The time of the first crossing, and the width of the
        preceding timestep as a crude error estimate.
    """
    if greater:
        index = np.argmax(value > threshold)
    else:
        # Bug fix: np.argmin(value < threshold) returned the index of the
        # first *False* element — almost always 0. np.argmax correctly
        # finds the first True, i.e. the first value below the threshold.
        index = np.argmax(value < threshold)
    return time[index], time[index]-time[index-1]
def exponential_relaxation(time: np.ndarray,
                           value: np.ndarray,
                           sigma: np.ndarray=None,
                           value_width: float=0.3) -> Tuple[float, float, float]:
    """Fit a region of the exponential relaxation with an exponential.

    This fits an exponential to the small region around the value 1/e,
    then solves for the time at which the fitted curve crosses 1/e.

    Args:
        time: The timesteps corresponding to each value.
        value: Values of the relaxation parameter.
        sigma: Optional per-point uncertainty, used to weight the fit.
        value_width: Width of the window around 1/e selected for fitting.

    Returns:
        relaxation_time (float): The relaxation time for the given quantity
        error_min (float): The minimum error value
        error_max (float): The maximum error value
    """
    exp_value = 1/np.exp(1)
    # Select only the points whose value lies within value_width of 1/e.
    fit_region = np.logical_and((exp_value - value_width/2) < value,
                                (exp_value + value_width/2) > value)
    logger.debug('Num elements: %d', np.sum(fit_region))
    # Initial estimate: the time of the sample closest to 1/e.
    zero_est = time[np.argmin(np.abs(value - exp_value))]
    if sigma is not None:
        sigma = sigma[fit_region]
    popt, pcov = curve_fit(
        _exponential_decay,
        time[fit_region],
        value[fit_region],
        p0=[1., 1/zero_est],
        sigma=sigma,
    )
    # Two standard deviations of the fitted parameters.
    perr = 2*np.sqrt(np.diag(pcov))
    logger.debug('Fit Parameters: %s', popt)
    def find_root(a, b):
        # Solve a*exp(-b*t) - 1/e == 0 for t via Newton's method, seeded at
        # the sample-based estimate and using the analytic derivative.
        return newton(
            _exponential_decay,
            args=(a, b, -exp_value),
            x0=zero_est,
            fprime=_ddx_exponential_decay,
            maxiter=100,
            tol=1e-4
        )
    val_mean: float = find_root(*popt)
    val_min: float = find_root(*(popt-perr))
    val_max: float = find_root(*(popt+perr))
    # NOTE(review): the docstring promises (time, error_min, error_max) but
    # the last term below is val_max - val_min, not val_max - val_mean —
    # confirm which asymmetric error was intended.
    return val_mean, val_mean - val_min, val_max - val_min
| mit |
gsi-upm/soba | projects/oldProyects/workStress/model/time.py | 1 | 1570 | from mesa import Agent, Model
import configuration.settings
class Time(Agent):
    """Simulation clock agent.

    Tracks day/hour/minute/second counters advanced by ``step`` and keeps a
    decimal HH.MM representation in ``clock`` (e.g. 9.15 == 09:15).
    """

    def __init__(self):
        # NOTE(review): Agent.__init__ is intentionally not called, matching
        # the original implementation — confirm mesa does not require it.
        self.timeByStep = configuration.settings.time_by_step
        self.day = 0
        self.hour = 8
        self.minute = 0
        self.seg = 0
        self.clock = 0.0

    def step(self):
        """Advance the clock by one simulation step of ``timeByStep`` seconds.

        Rollovers cascade only when the coarser unit actually overflowed,
        preserving the original nested behaviour (assumes timeByStep <= 60
        — TODO confirm).
        """
        self.seg += self.timeByStep
        if self.seg > 59:
            self.seg -= 60
            self.minute += 1
            if self.minute > 59:
                self.minute -= 60
                self.hour += 1
                if self.hour > 23:
                    self.hour -= 24
                    self.day += 1
        self.clock = (self.hour * 100 + self.minute) / 100
        print('Day: ', (self.day + 1), ' - Hour: ', self.clock)

    def getCorrectHour(self, hour):
        """Normalise an HH.MM value whose minute part overflowed past .59."""
        frac = float('0' + str(hour - int(hour))[1:])
        if frac > 0.59:
            return round((int(hour) + 1) + (frac - 0.60), 2)
        return round(hour, 2)

    def getDownCorrectHour(self, hour):
        """Normalise an HH.MM value downwards when the minute part exceeds .59."""
        frac = float('0' + str(hour - int(hour))[1:])
        if frac > 0.59:
            return round(int(hour) + (frac - 0.40), 2)
        return round(hour, 2)

    def getMinuteFromHours(self, hour):
        """Convert an HH.MM decimal value to a total number of minutes."""
        frac = float('0' + str(hour - int(hour))[1:])
        whole = float(int(hour))
        return frac * 100 + whole * 60
travisfcollins/gnuradio | gr-filter/python/filter/pfb.py | 27 | 19928 | #!/usr/bin/env python
#
# Copyright 2009,2010,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import optfir, math
from gnuradio import gr, fft
import filter_swig as filter
try:
from gnuradio import blocks
except ImportError:
import blocks_swig as blocks
class channelizer_ccf(gr.hier_block2):
    '''
    Make a Polyphase Filter channelizer (complex in, complex out, floating-point taps)

    This simplifies the interface by allowing a single input stream to connect to this block.
    It will then output a stream for each channel.
    '''
    def __init__(self, numchans, taps=None, oversample_rate=1, atten=100):
        # One complex input stream in; one complex output stream per channel.
        gr.hier_block2.__init__(self, "pfb_channelizer_ccf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(numchans, numchans, gr.sizeof_gr_complex))
        self._nchans = numchans
        self._oversample_rate = oversample_rate
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            # Create a filter that covers the full bandwidth of the input signal
            bw = 0.4
            tb = 0.2
            ripple = 0.1
            made = False
            # optfir can fail to converge for a tight ripple spec; relax the
            # ripple in 0.01 dB increments until a design succeeds.
            while not made:
                try:
                    self._taps = optfir.low_pass(1, self._nchans, bw, bw+tb, ripple, atten)
                    made = True
                except RuntimeError:
                    ripple += 0.01
                    made = False
                    print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))
                    # Build in an exit strategy; if we've come this far, it ain't working.
                    if(ripple >= 1.0):
                        raise RuntimeError("optfir could not generate an appropriate filter.")
        # Deinterleave the single input into numchans streams feeding the PFB.
        self.s2ss = blocks.stream_to_streams(gr.sizeof_gr_complex, self._nchans)
        self.pfb = filter.pfb_channelizer_ccf(self._nchans, self._taps,
                                              self._oversample_rate)
        self.connect(self, self.s2ss)
        for i in xrange(self._nchans):
            self.connect((self.s2ss,i), (self.pfb,i))
            self.connect((self.pfb,i), (self,i))
    def set_channel_map(self, newmap):
        """Remap which PFB output carries which channel."""
        self.pfb.set_channel_map(newmap)
    def set_taps(self, taps):
        """Replace the prototype filter taps at runtime."""
        self.pfb.set_taps(taps)
    def taps(self):
        """Return the prototype filter taps currently in use."""
        return self.pfb.taps()
    def declare_sample_delay(self, delay):
        """Declare the sample delay for tag propagation through the PFB."""
        self.pfb.declare_sample_delay(delay)
class interpolator_ccf(gr.hier_block2):
    '''
    Make a Polyphase Filter interpolator (complex in, complex out, floating-point taps)

    The block takes a single complex stream in and outputs a single complex
    stream out. As such, it requires no extra glue to handle the input/output
    streams. This block is provided to be consistent with the interface to the
    other PFB block.
    '''
    def __init__(self, interp, taps=None, atten=100):
        gr.hier_block2.__init__(self, "pfb_interpolator_ccf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self._interp = interp
        self._taps = taps
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            # Create a filter that covers the full bandwidth of the input signal
            bw = 0.4
            tb = 0.2
            ripple = 0.99
            made = False
            # optfir can fail to converge; relax the ripple spec until a
            # design succeeds or the spec becomes hopeless.
            while not made:
                try:
                    self._taps = optfir.low_pass(self._interp, self._interp, bw, bw+tb, ripple, atten)
                    made = True
                except RuntimeError:
                    ripple += 0.01
                    made = False
                    print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))
                    # Build in an exit strategy; if we've come this far, it ain't working.
                    if(ripple >= 1.0):
                        raise RuntimeError("optfir could not generate an appropriate filter.")
        self.pfb = filter.pfb_interpolator_ccf(self._interp, self._taps)
        self.connect(self, self.pfb)
        self.connect(self.pfb, self)
    def set_taps(self, taps):
        """Replace the prototype filter taps at runtime."""
        self.pfb.set_taps(taps)
    def declare_sample_delay(self, delay):
        """Declare the sample delay for tag propagation through the PFB."""
        self.pfb.declare_sample_delay(delay)
class decimator_ccf(gr.hier_block2):
    '''
    Make a Polyphase Filter decimator (complex in, complex out, floating-point taps)

    This simplifies the interface by allowing a single input stream to connect to this block.
    It will then output a stream that is the decimated output stream.
    '''
    def __init__(self, decim, taps=None, channel=0, atten=100,
                 use_fft_rotators=True, use_fft_filters=True):
        gr.hier_block2.__init__(self, "pfb_decimator_ccf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex),
                                gr.io_signature(1, 1, gr.sizeof_gr_complex))
        self._decim = decim
        self._channel = channel
        if (taps is not None) and (len(taps) > 0):
            self._taps = taps
        else:
            # Create a filter that covers the full bandwidth of the input signal
            bw = 0.4
            tb = 0.2
            ripple = 0.1
            made = False
            # optfir can fail to converge; relax the ripple spec until a
            # design succeeds or the spec becomes hopeless.
            while not made:
                try:
                    self._taps = optfir.low_pass(1, self._decim, bw, bw+tb, ripple, atten)
                    made = True
                except RuntimeError:
                    ripple += 0.01
                    made = False
                    print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))
                    # Build in an exit strategy; if we've come this far, it ain't working.
                    if(ripple >= 1.0):
                        raise RuntimeError("optfir could not generate an appropriate filter.")
        # Deinterleave the input into decim streams feeding the PFB arms.
        self.s2ss = blocks.stream_to_streams(gr.sizeof_gr_complex, self._decim)
        self.pfb = filter.pfb_decimator_ccf(self._decim, self._taps, self._channel,
                                            use_fft_rotators, use_fft_filters)
        self.connect(self, self.s2ss)
        for i in xrange(self._decim):
            self.connect((self.s2ss,i), (self.pfb,i))
        self.connect(self.pfb, self)
    def set_taps(self, taps):
        """Replace the prototype filter taps at runtime."""
        self.pfb.set_taps(taps)
    def set_channel(self, chan):
        """Select which channel of the input band the decimator extracts."""
        self.pfb.set_channel(chan)
    def declare_sample_delay(self, delay):
        """Declare the sample delay for tag propagation through the PFB."""
        self.pfb.declare_sample_delay(delay)
class arb_resampler_ccf(gr.hier_block2):
    '''
    Polyphase filterbank arbitrary resampler: one complex stream in, one
    complex stream out, floating-point taps.

    Wraps filter.pfb_arb_resampler_ccf so that no extra glue is needed for
    the single input/output stream; provided for interface consistency with
    the other PFB convenience blocks.
    '''
    def __init__(self, rate, taps=None, flt_size=32, atten=100):
        gr.hier_block2.__init__(self, "pfb_arb_resampler_ccf",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature

        self._rate = rate
        self._size = flt_size

        if taps is None or len(taps) == 0:
            self._taps = self._design_default_taps(atten)
        else:
            self._taps = taps

        self.pfb = filter.pfb_arb_resampler_ccf(self._rate, self._taps, self._size)
        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    def _design_default_taps(self, atten):
        # Design a low-pass covering the full bandwidth of the output signal.
        # For rate >= 1 we must keep images out of the output, so we filter
        # to less than the channel half-width of 0.5.  For rate < 1 we must
        # filter to less than half the output signal's bandwidth (0.5*rate)
        # to avoid aliasing.
        frac = 0.80
        if self._rate < 1:
            half = 0.5 * self._rate
            cutoff = frac * half
            transition = (frac / 2.0) * half
            # As the bandwidth factor drops, optfir has a harder time
            # converging; the firdes design gives better results here.
            return filter.firdes.low_pass_2(self._size, self._size, cutoff,
                                            transition, atten,
                                            filter.firdes.WIN_BLACKMAN_HARRIS)

        half = 0.5
        cutoff = frac * half
        transition = (frac / 2.0) * half
        ripple = 0.1
        designed = None
        while designed is None:
            try:
                designed = optfir.low_pass(self._size, self._size, cutoff,
                                           cutoff + transition, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))

                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError("optfir could not generate an appropriate filter.")
        return designed

    # Note -- set_taps not implemented in base class yet
    def set_taps(self, taps):
        """Replace the resampler's filter taps."""
        self.pfb.set_taps(taps)

    def set_rate(self, rate):
        """Change the resampling rate at runtime."""
        self.pfb.set_rate(rate)

    def declare_sample_delay(self, delay):
        """Forward the declared sample delay to the underlying PFB block."""
        self.pfb.declare_sample_delay(delay)
class arb_resampler_fff(gr.hier_block2):
    '''
    Polyphase filterbank arbitrary resampler: one float stream in, one
    float stream out, floating-point taps.

    Wraps filter.pfb_arb_resampler_fff so that no extra glue is needed for
    the single input/output stream; provided for interface consistency with
    the other PFB convenience blocks.
    '''
    def __init__(self, rate, taps=None, flt_size=32, atten=100):
        gr.hier_block2.__init__(self, "pfb_arb_resampler_fff",
                                gr.io_signature(1, 1, gr.sizeof_float), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_float)) # Output signature

        self._rate = rate
        self._size = flt_size

        if taps is None or len(taps) == 0:
            self._taps = self._design_default_taps(atten)
        else:
            self._taps = taps

        self.pfb = filter.pfb_arb_resampler_fff(self._rate, self._taps, self._size)
        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    def _design_default_taps(self, atten):
        # Design a low-pass covering the full bandwidth of the signal.
        # For rate >= 1 we must keep images out of the output, so we filter
        # to less than the channel half-width of 0.5.  For rate < 1 we must
        # filter to less than half the output signal's bandwidth (0.5*rate)
        # to avoid aliasing.
        frac = 0.80
        if self._rate < 1:
            half = 0.5 * self._rate
            cutoff = frac * half
            transition = (frac / 2.0) * half
            # As the bandwidth factor drops, optfir has a harder time
            # converging; the firdes design gives better results here.
            return filter.firdes.low_pass_2(self._size, self._size, cutoff,
                                            transition, atten,
                                            filter.firdes.WIN_BLACKMAN_HARRIS)

        half = 0.5
        cutoff = frac * half
        transition = (frac / 2.0) * half
        ripple = 0.1
        designed = None
        while designed is None:
            try:
                designed = optfir.low_pass(self._size, self._size, cutoff,
                                           cutoff + transition, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))

                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError("optfir could not generate an appropriate filter.")
        return designed

    # Note -- set_taps not implemented in base class yet
    def set_taps(self, taps):
        """Replace the resampler's filter taps."""
        self.pfb.set_taps(taps)

    def set_rate(self, rate):
        """Change the resampling rate at runtime."""
        self.pfb.set_rate(rate)

    def declare_sample_delay(self, delay):
        """Forward the declared sample delay to the underlying PFB block."""
        self.pfb.declare_sample_delay(delay)
class arb_resampler_ccc(gr.hier_block2):
    '''
    Polyphase filterbank arbitrary resampler: one complex stream in, one
    complex stream out, complex taps.

    Wraps filter.pfb_arb_resampler_ccc so that no extra glue is needed for
    the single input/output stream; provided for interface consistency with
    the other PFB convenience blocks.
    '''
    def __init__(self, rate, taps=None, flt_size=32, atten=100):
        gr.hier_block2.__init__(self, "pfb_arb_resampler_ccc",
                                gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
                                gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature

        self._rate = rate
        self._size = flt_size

        if taps is None or len(taps) == 0:
            self._taps = self._design_default_taps(atten)
        else:
            self._taps = taps

        self.pfb = filter.pfb_arb_resampler_ccc(self._rate, self._taps, self._size)
        self.connect(self, self.pfb)
        self.connect(self.pfb, self)

    def _design_default_taps(self, atten):
        # Design a low-pass covering the full bandwidth of the input signal,
        # relaxing the passband ripple until optfir converges.
        cutoff = 0.4
        transition = 0.2
        ripple = 0.1
        designed = None
        while designed is None:
            try:
                designed = optfir.low_pass(self._size, self._size, cutoff,
                                           cutoff + transition, ripple, atten)
            except RuntimeError:
                ripple += 0.01
                print("Warning: set ripple to %.4f dB. If this is a problem, adjust the attenuation or create your own filter taps." % (ripple))

                # Build in an exit strategy; if we've come this far, it ain't working.
                if ripple >= 1.0:
                    raise RuntimeError("optfir could not generate an appropriate filter.")
        return designed

    # Note -- set_taps not implemented in base class yet
    def set_taps(self, taps):
        """Replace the resampler's filter taps."""
        self.pfb.set_taps(taps)

    def set_rate(self, rate):
        """Change the resampling rate at runtime."""
        self.pfb.set_rate(rate)

    def declare_sample_delay(self, delay):
        """Forward the declared sample delay to the underlying PFB block."""
        self.pfb.declare_sample_delay(delay)
class channelizer_hier_ccf(gr.hier_block2):
    """
    Make a Polyphase Filter channelizer (complex in, complex out, floating-point taps)

    Args:
        n_chans: The number of channels to split into.
        n_filterbanks: The number of filterbank blocks to use (default=2).
        taps: The taps to use. If this is `None` then taps are generated using optfir.low_pass.
        outchans: Which channels to output streams for (a list of integers) (default is all channels).
        atten: Stop band attenuation.
        bw: The fraction of the channel you want to keep.
        tb: Transition band with as fraction of channel width.
        ripple: Pass band ripple in dB.
    """

    def __init__(self, n_chans, n_filterbanks=1, taps=None, outchans=None,
                 atten=100, bw=1.0, tb=0.2, ripple=0.1):
        if n_filterbanks > n_chans:
            n_filterbanks = n_chans
        # BUGFIX: normalize outchans to a concrete list.  On Python 3,
        # range(...) is a lazy sequence and a list never compares equal to
        # it, so the original `outchans != range(n_chans)` tests were True
        # for any user-supplied list -- inserting the selector block even
        # when all channels were requested.
        if outchans is None:
            outchans = list(range(n_chans))
        else:
            outchans = list(outchans)
        want_all_chans = (outchans == list(range(n_chans)))

        gr.hier_block2.__init__(
            self, "pfb_channelizer_hier_ccf",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(len(outchans), len(outchans), gr.sizeof_gr_complex))

        if taps is None:
            taps = optfir.low_pass(1, n_chans, bw, bw+tb, ripple, atten)
        taps = list(taps)

        # Zero-pad the prototype so its length is an exact multiple of n_chans.
        extra_taps = int(math.ceil(1.0*len(taps)/n_chans)*n_chans - len(taps))
        taps = taps + [0] * extra_taps

        # Make taps for each channel (polyphase decomposition of the prototype).
        chantaps = [list(reversed(taps[i: len(taps): n_chans])) for i in range(0, n_chans)]

        # Convert the input stream into a stream of vectors.
        self.s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, n_chans)

        # Create a mapping to separate out each filterbank (a group of
        # channels to be processed together) and a list of sets of taps
        # for each filterbank.
        low_cpp = n_chans // n_filterbanks
        extra = n_chans - low_cpp*n_filterbanks
        cpps = [low_cpp+1]*extra + [low_cpp]*(n_filterbanks-extra)
        splitter_mapping = []
        filterbanktaps = []
        total = 0
        for cpp in cpps:
            splitter_mapping.append([(0, i) for i in range(total, total+cpp)])
            filterbanktaps.append(chantaps[total: total+cpp])
            total += cpp
        assert total == n_chans

        # Split the stream of vectors in n_filterbanks streams of vectors.
        self.splitter = blocks.vector_map(gr.sizeof_gr_complex, [n_chans],
                                          splitter_mapping)

        # Create the filterbanks.  (Renamed the comprehension variable so it
        # no longer shadows the `taps` local above.)
        self.fbs = [filter.filterbank_vcvcf(banktaps) for banktaps in filterbanktaps]

        # Combine the streams of vectors back into a single stream of vectors.
        combiner_mapping = [[]]
        for i, cpp in enumerate(cpps):
            for j in range(cpp):
                combiner_mapping[0].append((i, j))
        self.combiner = blocks.vector_map(gr.sizeof_gr_complex, cpps,
                                          combiner_mapping)

        # Add the final FFT to the channelizer.
        self.fft = fft.fft_vcc(n_chans, forward=True, window=[1.0]*n_chans)

        # Select the desired channels (only needed when a subset is requested).
        if not want_all_chans:
            selector_mapping = [[(0, i) for i in outchans]]
            self.selector = blocks.vector_map(gr.sizeof_gr_complex, [n_chans],
                                              selector_mapping)

        # Convert stream of vectors to a normal stream.
        self.v2ss = blocks.vector_to_streams(gr.sizeof_gr_complex, len(outchans))

        self.connect(self, self.s2v, self.splitter)
        for i in range(0, n_filterbanks):
            self.connect((self.splitter, i), self.fbs[i], (self.combiner, i))
        self.connect(self.combiner, self.fft)
        if not want_all_chans:
            self.connect(self.fft, self.selector, self.v2ss)
        else:
            self.connect(self.fft, self.v2ss)
        for i in range(0, len(outchans)):
            self.connect((self.v2ss, i), (self, i))
| gpl-3.0 |
imrehg/arduino-navspark | app/src/processing/app/i18n/python/requests/compat.py | 289 | 2433 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import charade as chardet
import sys
# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)

#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)

#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)

#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)

#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)

#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)

#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)

#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)

#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4)   # I'm assuming this is not by choice.

# ---------
# Platforms
# ---------

# Syntax sugar.
_ver = sys.version.lower()

is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)

# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))

# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()

# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower())   # Complete guess.
# BUGFIX: sys.platform on Solaris starts with 'sunos' (e.g. 'sunos5');
# the previous test looked for the typo 'solar==' and could never match.
is_solaris = ('sunos' in str(sys.platform).lower())   # Complete guess.
# Prefer the (faster, externally maintained) simplejson when installed;
# fall back to the standard-library json module.
try:
    import simplejson as json
except ImportError:
    import json

# ---------
# Specifics
# ---------

if is_py2:
    # Python 2: pull url/cookie/IO helpers from their 2.x locations and
    # rebind the text/bytes names so the rest of the package can use one
    # consistent set (`str` is deliberately rebound to `unicode` here).
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)


elif is_py3:
    # Python 3: same names from their 3.x locations; the type aliases are
    # identity bindings kept so both branches export the same interface.
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
| lgpl-2.1 |
SujaySKumar/django | django/contrib/gis/gdal/field.py | 355 | 6739 | from ctypes import byref, c_int
from datetime import date, datetime, time
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_Fld_* routines are relevant here.
class Field(GDALBase):
    """
    This class wraps an OGR Field, and needs to be instantiated
    from a Feature object.
    """

    def __init__(self, feat, index):
        """
        Initializes on the feature object and the integer index of
        the field within the feature.
        """
        # Setting the feature pointer and index.
        self._feat = feat
        self._index = index

        # Getting the pointer for this field.
        fld_ptr = capi.get_feat_field_defn(feat.ptr, index)
        if not fld_ptr:
            raise GDALException('Cannot create OGR Field, invalid pointer given.')
        self.ptr = fld_ptr

        # Setting the class depending upon the OGR Field Type (OFT):
        # the instance is re-branded in place as the concrete subclass
        # registered for its OGR type code in OGRFieldTypes.
        self.__class__ = OGRFieldTypes[self.type]

        # OFTReal with no precision should be an OFTInteger.
        if isinstance(self, OFTReal) and self.precision == 0:
            self.__class__ = OFTInteger
            self._double = True

    def __str__(self):
        "Returns the string representation of the Field."
        return str(self.value).strip()

    # #### Field Methods ####
    def as_double(self):
        "Retrieves the Field's value as a double (float)."
        return capi.get_field_as_double(self._feat.ptr, self._index)

    def as_int(self, is_64=False):
        "Retrieves the Field's value as an integer."
        if is_64:
            return capi.get_field_as_integer64(self._feat.ptr, self._index)
        else:
            return capi.get_field_as_integer(self._feat.ptr, self._index)

    def as_string(self):
        "Retrieves the Field's value as a string."
        string = capi.get_field_as_string(self._feat.ptr, self._index)
        return force_text(string, encoding=self._feat.encoding, strings_only=True)

    def as_datetime(self):
        "Retrieves the Field's value as a tuple of date & time components."
        # Note: the components are returned as ctypes c_int objects --
        # callers read `.value` on each element (see OFTDate/OFTTime below).
        yy, mm, dd, hh, mn, ss, tz = [c_int() for i in range(7)]
        status = capi.get_field_as_datetime(
            self._feat.ptr, self._index, byref(yy), byref(mm), byref(dd),
            byref(hh), byref(mn), byref(ss), byref(tz))
        if status:
            return (yy, mm, dd, hh, mn, ss, tz)
        else:
            raise GDALException('Unable to retrieve date & time information from the field.')

    # #### Field Properties ####
    @property
    def name(self):
        "Returns the name of this Field."
        name = capi.get_field_name(self.ptr)
        return force_text(name, encoding=self._feat.encoding, strings_only=True)

    @property
    def precision(self):
        "Returns the precision of this Field."
        return capi.get_field_precision(self.ptr)

    @property
    def type(self):
        "Returns the OGR type of this Field."
        return capi.get_field_type(self.ptr)

    @property
    def type_name(self):
        "Return the OGR field type name for this Field."
        return capi.get_field_type_name(self.type)

    @property
    def value(self):
        "Returns the value of this Field."
        # Default is to get the field as a string.
        return self.as_string()

    @property
    def width(self):
        "Returns the width of this Field."
        return capi.get_field_width(self.ptr)
# ### The Field sub-classes for each OGR Field type. ###
class OFTInteger(Field):
    # _double is flipped on by Field.__init__ when an OFTReal field with
    # zero precision is coerced into this class.
    _double = False
    _bit64 = False

    @property
    def value(self):
        "Returns an integer contained in this field."
        if not self._double:
            return self.as_int(self._bit64)
        # Really an OFTReal with no precision: read as a double and cast
        # to a Python int (to prevent overflow).
        return int(self.as_double())

    @property
    def type(self):
        """
        GDAL uses OFTReals to represent OFTIntegers in created
        shapefiles -- forcing the type here since the underlying field
        type may actually be OFTReal.
        """
        return 0
class OFTReal(Field):
    @property
    def value(self):
        "Returns a float contained in this field."
        return self.as_double()
# String & Binary fields, just subclasses
class OFTString(Field):
    # Plain string field; inherits the default string `value` from Field.
    pass
class OFTWideString(Field):
    # Wide string field; behaves like Field's default string access.
    pass
class OFTBinary(Field):
    # Binary field marker class; no specialized accessors defined here.
    pass
# OFTDate, OFTTime, OFTDateTime fields.
class OFTDate(Field):
    @property
    def value(self):
        "Returns a Python `date` object for the OFTDate field."
        try:
            # Only the first three (year, month, day) components are needed.
            yy, mm, dd = self.as_datetime()[:3]
            return date(yy.value, mm.value, dd.value)
        except (ValueError, GDALException):
            return None
class OFTDateTime(Field):
    @property
    def value(self):
        "Returns a Python `datetime` object for this OFTDateTime field."
        # TODO: Adapt timezone information.
        #  See http://lists.osgeo.org/pipermail/gdal-dev/2006-February/007990.html
        #  The `tz` variable has values of: 0=unknown, 1=localtime (ambiguous),
        #  100=GMT, 104=GMT+1, 80=GMT-5, etc.
        try:
            # `tz` is unpacked but intentionally discarded; a naive
            # datetime is returned (see TODO above).
            yy, mm, dd, hh, mn, ss, tz = self.as_datetime()
            return datetime(yy.value, mm.value, dd.value, hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            return None
class OFTTime(Field):
    @property
    def value(self):
        "Returns a Python `time` object for this OFTTime field."
        try:
            # Only the hour/minute/second components are needed.
            hh, mn, ss = self.as_datetime()[3:6]
            return time(hh.value, mn.value, ss.value)
        except (ValueError, GDALException):
            return None
class OFTInteger64(OFTInteger):
    # 64-bit variant: makes OFTInteger.value call as_int(is_64=True).
    _bit64 = True
# List fields are also just subclasses
class OFTIntegerList(Field):
    # List-typed field marker class; no specialized accessors defined here.
    pass
class OFTRealList(Field):
    # List-typed field marker class; no specialized accessors defined here.
    pass
class OFTStringList(Field):
    # List-typed field marker class; no specialized accessors defined here.
    pass
class OFTWideStringList(Field):
    # List-typed field marker class; no specialized accessors defined here.
    pass
class OFTInteger64List(Field):
    # List-typed field marker class; no specialized accessors defined here.
    pass
# Class mapping dictionary for OFT Types and reverse mapping.
# Field.__init__ uses this to re-brand each instance as the subclass
# matching its OGR type code.
OGRFieldTypes = {
    0: OFTInteger,
    1: OFTIntegerList,
    2: OFTReal,
    3: OFTRealList,
    4: OFTString,
    5: OFTStringList,
    6: OFTWideString,
    7: OFTWideStringList,
    8: OFTBinary,
    9: OFTDate,
    10: OFTTime,
    11: OFTDateTime,
    # New 64-bit integer types in GDAL 2
    12: OFTInteger64,
    13: OFTInteger64List,
}
# Reverse lookup: Field subclass -> OGR type code.
ROGRFieldTypes = {cls: num for num, cls in OGRFieldTypes.items()}
| bsd-3-clause |
oliver-sanders/cylc | cylc/flow/scripts/cylc_register.py | 1 | 3234 | #!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2019 NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc [prep] register [OPTIONS] ARGS
Register the name REG for the suite definition in PATH. The suite server
program can then be started, stopped, and targeted by name REG. (Note that
"cylc run" can also register suites on the fly).
Registration creates a suite run directory "~/cylc-run/REG/" containing a
".service/source" symlink to the suite definition PATH. The .service directory
will also be used for server authentication files at run time.
Suite names can be hierarchical, corresponding to the path under ~/cylc-run.
% cylc register dogs/fido PATH
Register PATH/suite.rc as dogs/fido, with run directory ~/cylc-run/dogs/fido.
% cylc register dogs/fido
Register $PWD/suite.rc as dogs/fido.
% cylc register
Register $PWD/suite.rc as the parent directory name: $(basename $PWD).
The same suite can be registered with multiple names; this results in multiple
suite run directories that link to the same suite definition.
To "unregister" a suite, delete or rename its run directory (renaming it under
~/cylc-run effectively re-registers the original suite with the new name).
Use of "--redirect" is required to allow an existing name (and run directory)
to be associated with a different suite definition. This is potentially
dangerous because the new suite will overwrite files in the existing run
directory. You should consider deleting or renaming an existing run directory
rather than just re-use it with another suite."""
import sys

from cylc.flow.remote import remrun

# NOTE(review): remrun() presumably re-executes this command on a remote
# host when one is configured, in which case the local process is done and
# exits with success -- confirm against cylc.flow.remote.
if remrun():
    sys.exit(0)

from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.suite_files import register
from cylc.flow.terminal import cli_function
def get_option_parser():
    """Build the command-line option parser for this command."""
    arg_docs = [
        ("[REG]", "Suite name"),
        ("[PATH]", "Suite definition directory (defaults to $PWD)"),
    ]
    parser = COP(__doc__, argdoc=arg_docs)

    parser.add_option(
        "--redirect",
        action="store_true", dest="redirect", default=False,
        help="Allow an existing suite name and run directory"
             " to be used with another suite.")

    parser.add_option(
        "--run-dir",
        action="store", dest="rundir", metavar="RUNDIR", default=None,
        help="Symlink $HOME/cylc-run/REG to RUNDIR/REG.")

    return parser
@cli_function(get_option_parser)
def main(parser, opts, reg=None, src=None):
    """CLI entry point: register suite REG for the definition at SRC."""
    # cli_function handles argument parsing/error reporting; delegate the
    # actual registration to cylc.flow.suite_files.register.
    register(reg, src, redirect=opts.redirect, rundir=opts.rundir)


if __name__ == "__main__":
    main()
| gpl-3.0 |
TeamTwisted/external_chromium_org | third_party/closure_linter/closure_linter/statetracker.py | 74 | 37897 | #!/usr/bin/env python
#
# Copyright 2007 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Light weight EcmaScript state tracker that reads tokens and tracks state."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
import re
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class DocFlag(object):
  """Generic doc flag object.

  Attributes:
    flag_type: param, return, define, type, etc.
    flag_token: The flag token.
    type_start_token: The first token specifying the flag type,
      including braces.
    type_end_token: The last token specifying the flag type,
      including braces.
    type: The type spec.
    name_token: The token specifying the flag name.
    name: The flag name
    description_start_token: The first token in the description.
    description_end_token: The end token in the description.
    description: The description.
  """

  # Please keep these lists alphabetized.

  # The list of standard jsdoc tags is from
  # NOTE(review): the original comment ended mid-sentence here; the source
  # reference appears to have been lost.
  STANDARD_DOC = frozenset([
      'author',
      'bug',
      'classTemplate',
      'consistentIdGenerator',
      'const',
      'constructor',
      'define',
      'deprecated',
      'dict',
      'enum',
      'export',
      'expose',
      'extends',
      'externs',
      'fileoverview',
      'idGenerator',
      'implements',
      'implicitCast',
      'interface',
      'lends',
      'license',
      'ngInject',  # This annotation is specific to AngularJS.
      'noalias',
      'nocompile',
      'nosideeffects',
      'override',
      'owner',
      'package',
      'param',
      'preserve',
      'private',
      'protected',
      'public',
      'return',
      'see',
      'stableIdGenerator',
      'struct',
      'supported',
      'template',
      'this',
      'type',
      'typedef',
      'unrestricted',
      ])

  ANNOTATION = frozenset(['preserveTry', 'suppress'])

  LEGAL_DOC = STANDARD_DOC | ANNOTATION

  # Includes all Closure Compiler @suppress types.
  # Not all of these annotations are interpreted by Closure Linter.
  #
  # Specific cases:
  # - accessControls is supported by the compiler at the expression
  #   and method level to suppress warnings about private/protected
  #   access (method level applies to all references in the method).
  #   The linter mimics the compiler behavior.
  SUPPRESS_TYPES = frozenset([
      'accessControls',
      'ambiguousFunctionDecl',
      'checkRegExp',
      'checkStructDictInheritance',
      'checkTypes',
      'checkVars',
      'const',
      'constantProperty',
      'deprecated',
      'duplicate',
      'es5Strict',
      'externsValidation',
      'extraProvide',
      'extraRequire',
      'fileoverviewTags',
      'globalThis',
      'internetExplorerChecks',
      'invalidCasts',
      'missingProperties',
      'missingProvide',
      'missingRequire',
      'missingReturn',
      'nonStandardJsDocs',
      'strictModuleDepCheck',
      'suspiciousCode',
      'tweakValidation',
      'typeInvalidation',
      'undefinedNames',
      'undefinedVars',
      'underscore',
      'unknownDefines',
      'unnecessaryCasts',
      'unusedPrivateMembers',
      'uselessCode',
      'visibility',
      'with'])

  HAS_DESCRIPTION = frozenset([
      'define', 'deprecated', 'desc', 'fileoverview', 'license', 'param',
      'preserve', 'return', 'supported'])

  HAS_TYPE = frozenset([
      'define', 'enum', 'extends', 'implements', 'param', 'return', 'type',
      'suppress', 'const', 'package', 'private', 'protected', 'public'])

  CAN_OMIT_TYPE = frozenset(['enum', 'const', 'package', 'private',
                             'protected', 'public'])

  TYPE_ONLY = frozenset(['enum', 'extends', 'implements', 'suppress', 'type',
                         'const', 'package', 'private', 'protected', 'public'])

  HAS_NAME = frozenset(['param'])

  EMPTY_COMMENT_LINE = re.compile(r'^\s*\*?\s*$')
  EMPTY_STRING = re.compile(r'^\s*$')

  def __init__(self, flag_token):
    """Creates the DocFlag object and attaches it to the given start token.

    Args:
      flag_token: The starting token of the flag.
    """
    self.flag_token = flag_token
    self.flag_type = flag_token.string.strip().lstrip('@')

    # Extract type, if applicable.
    self.type = None
    self.type_start_token = None
    self.type_end_token = None
    if self.flag_type in self.HAS_TYPE:
      brace = tokenutil.SearchUntil(flag_token, [Type.DOC_START_BRACE],
                                    Type.FLAG_ENDING_TYPES)
      if brace:
        end_token, contents = _GetMatchingEndBraceAndContents(brace)
        self.type = contents
        self.type_start_token = brace
        self.type_end_token = end_token
      elif (self.flag_type in self.TYPE_ONLY and
            flag_token.next.type not in Type.FLAG_ENDING_TYPES and
            flag_token.line_number == flag_token.next.line_number):
        # b/10407058. If the flag is expected to be followed by a type then
        # search for type in same line only. If no token after flag in same
        # line then conclude that no type is specified.
        self.type_start_token = flag_token.next
        self.type_end_token, self.type = _GetEndTokenAndContents(
            self.type_start_token)
      if self.type is not None:
        self.type = self.type.strip()

    # Extract name, if applicable.
    self.name_token = None
    self.name = None
    if self.flag_type in self.HAS_NAME:
      # Handle bad case, name could be immediately after flag token.
      self.name_token = _GetNextPartialIdentifierToken(flag_token)

      # Handle good case, if found token is after type start, look for
      # a identifier (substring to cover cases like [cnt] b/4197272) after
      # type end, since types contain identifiers.
      if (self.type and self.name_token and
          tokenutil.Compare(self.name_token, self.type_start_token) > 0):
        self.name_token = _GetNextPartialIdentifierToken(self.type_end_token)

      if self.name_token:
        self.name = self.name_token.string

    # Extract description, if applicable.  The search starts after whichever
    # of the type/name annotations appears last in the token stream.
    self.description_start_token = None
    self.description_end_token = None
    self.description = None
    if self.flag_type in self.HAS_DESCRIPTION:
      search_start_token = flag_token
      if self.name_token and self.type_end_token:
        if tokenutil.Compare(self.type_end_token, self.name_token) > 0:
          search_start_token = self.type_end_token
        else:
          search_start_token = self.name_token
      elif self.name_token:
        search_start_token = self.name_token
      elif self.type:
        search_start_token = self.type_end_token

      interesting_token = tokenutil.Search(search_start_token,
          Type.FLAG_DESCRIPTION_TYPES | Type.FLAG_ENDING_TYPES)
      if interesting_token.type in Type.FLAG_DESCRIPTION_TYPES:
        self.description_start_token = interesting_token
        self.description_end_token, self.description = (
            _GetEndTokenAndContents(interesting_token))
class DocComment(object):
  """JavaScript doc comment object.

  Attributes:
    ordered_params: Ordered list of parameters documented.
    start_token: The token that starts the doc comment.
    end_token: The token that ends the doc comment.
    suppressions: Map of suppression type to the token that added it.
  """

  def __init__(self, start_token):
    """Create the doc comment object.

    Args:
      start_token: The first token in the doc comment.
    """
    self.__flags = []
    self.start_token = start_token
    self.end_token = None
    self.suppressions = {}
    self.invalidated = False

  @property
  def ordered_params(self):
    """Gives the list of parameter names as a list of strings."""
    params = []
    for flag in self.__flags:
      if flag.flag_type == 'param' and flag.name:
        params.append(flag.name)
    return params

  def Invalidate(self):
    """Indicate that the JSDoc is well-formed but we had problems parsing it.

    This is a short-circuiting mechanism so that we don't emit false
    positives about well-formed doc comments just because we don't support
    hot new syntaxes.
    """
    self.invalidated = True

  def IsInvalidated(self):
    """Test whether Invalidate() has been called."""
    return self.invalidated

  def AddSuppression(self, token):
    """Add a new error suppression flag.

    Args:
      token: The suppression flag token.
    """
    #TODO(user): Error if no braces
    brace = tokenutil.SearchUntil(token, [Type.DOC_START_BRACE],
                                  [Type.DOC_FLAG])
    if brace:
      end_token, contents = _GetMatchingEndBraceAndContents(brace)
      # A single @suppress may name several types separated by '|'.
      for suppression in contents.split('|'):
        self.suppressions[suppression] = token

  def SuppressionOnly(self):
    """Returns whether this comment contains only suppression flags."""
    if not self.__flags:
      return False

    for flag in self.__flags:
      if flag.flag_type != 'suppress':
        return False

    return True

  def AddFlag(self, flag):
    """Add a new document flag.

    Args:
      flag: DocFlag object.
    """
    self.__flags.append(flag)

  def InheritsDocumentation(self):
    """Test if the jsdoc implies documentation inheritance.

    Returns:
      True if documentation may be pulled off the superclass.
    """
    return self.HasFlag('inheritDoc') or self.HasFlag('override')

  def HasFlag(self, flag_type):
    """Test if the given flag has been set.

    Args:
      flag_type: The type of the flag to check.

    Returns:
      True if the flag is set.
    """
    for flag in self.__flags:
      if flag.flag_type == flag_type:
        return True
    return False

  def GetFlag(self, flag_type):
    """Gets the last flag of the given type.

    Args:
      flag_type: The type of the flag to get.

    Returns:
      The last instance of the given flag type in this doc comment.
    """
    for flag in reversed(self.__flags):
      if flag.flag_type == flag_type:
        return flag

  def GetDocFlags(self):
    """Return the doc flags for this comment."""
    return list(self.__flags)

  def _YieldDescriptionTokens(self):
    # Yield the comment's body tokens, stopping at the comment end, the
    # first doc flag, or the first non-comment token.
    for token in self.start_token:
      if (token is self.end_token or
          token.type is javascripttokens.JavaScriptTokenType.DOC_FLAG or
          token.type not in javascripttokens.JavaScriptTokenType.COMMENT_TYPES):
        return

      if token.type not in [
          javascripttokens.JavaScriptTokenType.START_DOC_COMMENT,
          javascripttokens.JavaScriptTokenType.END_DOC_COMMENT,
          javascripttokens.JavaScriptTokenType.DOC_PREFIX]:
        yield token

  @property
  def description(self):
    return tokenutil.TokensToString(
        self._YieldDescriptionTokens())

  def GetTargetIdentifier(self):
    """Returns the identifier (as a string) that this is a comment for.

    Note that this method uses GetIdentifierForToken to get the full
    identifier, even if broken up by whitespace, newlines, or comments,
    and thus could be longer than GetTargetToken().string.

    Returns:
      The identifier for the token this comment is for.
    """
    token = self.GetTargetToken()
    if token:
      return tokenutil.GetIdentifierForToken(token)

  def GetTargetToken(self):
    """Get this comment's target token.

    Returns:
      The token that is the target of this comment, or None if there isn't one.
    """

    # File overviews describe the file, not a token.
    if self.HasFlag('fileoverview'):
      return

    skip_types = frozenset([
        Type.WHITESPACE,
        Type.BLANK_LINE,
        Type.START_PAREN])

    target_types = frozenset([
        Type.FUNCTION_NAME,
        Type.IDENTIFIER,
        Type.SIMPLE_LVALUE])

    token = self.end_token.next
    while token:
      if token.type in target_types:
        return token

      # Handles the case of a comment on "var foo = ...'
      if token.IsKeyword('var'):
        next_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES)

        if (next_code_token and
            next_code_token.IsType(Type.SIMPLE_LVALUE)):
          return next_code_token

        return

      # Handles the case of a comment on "function foo () {}"
      if token.type is Type.FUNCTION_DECLARATION:
        next_code_token = tokenutil.CustomSearch(
            token,
            lambda t: t.type not in Type.NON_CODE_TYPES)

        if next_code_token.IsType(Type.FUNCTION_NAME):
          return next_code_token

        return

      # Skip types will end the search.
      if token.type not in skip_types:
        return

      token = token.next

  def CompareParameters(self, params):
    """Computes the edit distance and list from the function params to the docs.

    Uses the Levenshtein edit distance algorithm, with code modified from
    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python

    Args:
      params: The parameter list for the function declaration.

    Returns:
      The edit distance, the edit list.
    """
    source_len, target_len = len(self.ordered_params), len(params)
    edit_lists = [[]]
    distance = [[]]
    for i in range(target_len+1):
      edit_lists[0].append(['I'] * i)
      distance[0].append(i)

    for j in range(1, source_len+1):
      edit_lists.append([['D'] * j])
      distance.append([j])

    for i in range(source_len):
      for j in range(target_len):
        cost = 1
        if self.ordered_params[i] == params[j]:
          cost = 0

        deletion = distance[i][j+1] + 1
        insertion = distance[i+1][j] + 1
        substitution = distance[i][j] + cost

        edit_list = None
        best = None

        if deletion <= insertion and deletion <= substitution:
          # Deletion is best.
          # NOTE(review): unlike the insertion/substitution branches below,
          # this branch never appends edit_list to edit_lists[i+1] -- that
          # looks suspicious; verify against the upstream closure-linter
          # source before relying on the edit-list output for this case.
          best = deletion
          edit_list = list(edit_lists[i][j+1])
          edit_list.append('D')

        elif insertion <= substitution:
          # Insertion is best.
          best = insertion
          edit_list = list(edit_lists[i+1][j])
          edit_list.append('I')
          edit_lists[i+1].append(edit_list)

        else:
          # Substitution is best.
          best = substitution
          edit_list = list(edit_lists[i][j])
          if cost:
            edit_list.append('S')
          else:
            edit_list.append('=')
          edit_lists[i+1].append(edit_list)

        distance[i+1].append(best)

    return distance[source_len][target_len], edit_lists[source_len][target_len]

  def __repr__(self):
    """Returns a string representation of this object.

    Returns:
      A string representation of this object.
    """
    return '<DocComment: %s, %s>' % (
        str(self.ordered_params), str(self.__flags))
#
# Helper methods used by DocFlag and DocComment to parse out flag information.
#
def _GetMatchingEndBraceAndContents(start_brace):
  """Returns the matching end brace and contents between the two braces.

  If any FLAG_ENDING_TYPE token is encountered before a matching end brace,
  then that token is used as the matching ending token. Contents will have all
  comment prefixes stripped out of them, and all comment prefixes in between
  the start and end tokens will be split out into separate DOC_PREFIX tokens.

  Args:
    start_brace: The DOC_START_BRACE token immediately before desired contents.

  Returns:
    The matching ending token (DOC_END_BRACE or FLAG_ENDING_TYPE) and a string
    of the contents between the matching tokens, minus any comment prefixes.
  """
  # Net brace depth; the start brace itself contributes the initial 1 but is
  # not considered part of the type string.
  brace_balance = 1
  pieces = []

  current = start_brace.next
  while brace_balance:
    if current.type == Type.DOC_START_BRACE:
      brace_balance += 1
    elif current.type == Type.DOC_END_BRACE:
      brace_balance -= 1

    if current.type != Type.DOC_PREFIX:
      pieces.append(current.string)

    if current.type in Type.FLAG_ENDING_TYPES:
      break

    current = current.next

  # Don't include the end token (end brace, end doc comment, etc.) in type.
  current = current.previous
  return current, ''.join(pieces[:-1])
def _GetNextPartialIdentifierToken(start_token):
  """Returns the first token having identifier as substring after a token.

  Searches each token after the start to see if it contains an identifier.
  If found, token is returned. If no identifier is found returns None.
  Search is abandoned when a FLAG_ENDING_TYPE token is found.

  Args:
    start_token: The token to start searching after.

  Returns:
    The token found containing identifier, None otherwise.
  """
  identifier_pattern = javascripttokenizer.JavaScriptTokenizer.IDENTIFIER

  current = start_token.next
  while current:
    if current.type in Type.FLAG_ENDING_TYPES:
      break
    if (current.type == Type.COMMENT and
        identifier_pattern.search(current.string) is not None):
      return current
    current = current.next

  return None
def _GetEndTokenAndContents(start_token):
  """Returns last content token and all contents before FLAG_ENDING_TYPE token.

  Comment prefixes are split into DOC_PREFIX tokens and stripped from the
  returned contents.

  Args:
    start_token: The token immediately before the first content token.

  Returns:
    The last content token and a string of all contents including start and
    end tokens, with comment prefixes stripped.
  """
  iterator = start_token
  last_line = iterator.line_number
  last_token = None
  contents = ''
  # doc_depth tracks nesting of inline doc flags ({@...}); while inside one,
  # a FLAG_ENDING_TYPE token must not terminate the description.
  doc_depth = 0
  while not iterator.type in Type.FLAG_ENDING_TYPES or doc_depth > 0:
    if (iterator.IsFirstInLine() and
        DocFlag.EMPTY_COMMENT_LINE.match(iterator.line)):
      # If we have a blank comment line, consider that an implicit
      # ending of the description. This handles a case like:
      #
      # * @return {boolean} True
      # *
      # * Note: This is a sentence.
      #
      # The note is not part of the @return description, but there was
      # no definitive ending token. Rather there was a line containing
      # only a doc comment prefix or whitespace.
      break

    # b/2983692
    # don't prematurely match against a @flag if inside a doc flag
    # need to think about what is the correct behavior for unterminated
    # inline doc flags
    if (iterator.type == Type.DOC_START_BRACE and
        iterator.next.type == Type.DOC_INLINE_FLAG):
      doc_depth += 1
    elif (iterator.type == Type.DOC_END_BRACE and
        doc_depth > 0):
      doc_depth -= 1

    if iterator.type in Type.FLAG_DESCRIPTION_TYPES:
      contents += iterator.string
      last_token = iterator

    iterator = iterator.next
    # Each line break between content tokens contributes one newline to the
    # accumulated contents.
    if iterator.line_number != last_line:
      contents += '\n'
      last_line = iterator.line_number

  end_token = last_token
  if DocFlag.EMPTY_STRING.match(contents):
    contents = None
  else:
    # Strip trailing newline.
    contents = contents[:-1]

  return end_token, contents
class Function(object):
  """Data about a JavaScript function.

  Attributes:
    block_depth: Block depth the function began at.
    doc: The DocComment associated with the function.
    has_return: If the function has a return value.
    has_this: If the function references the 'this' object.
    is_assigned: If the function is part of an assignment.
    is_constructor: If the function is a constructor.
    name: The name of the function, whether given in the function keyword or
        as the lvalue the function is assigned to.
    start_token: First token of the function (the function' keyword token).
    end_token: Last token of the function (the closing '}' token).
    parameters: List of parameter names.
  """

  def __init__(self, block_depth, is_assigned, doc, name):
    # Identity and position information.
    self.name = name
    self.block_depth = block_depth
    self.is_assigned = is_assigned

    # Documentation-derived attributes.  Note that these intentionally stay
    # falsy (None, not False) when there is no doc comment.
    self.doc = doc
    self.is_constructor = doc and doc.HasFlag('constructor')
    self.is_interface = doc and doc.HasFlag('interface')

    # Facts discovered later while scanning the function body.
    self.has_return = False
    self.has_throw = False
    self.has_this = False

    # Token bookkeeping, filled in by the state tracker.
    self.start_token = None
    self.end_token = None
    self.parameters = None
class StateTracker(object):
  """EcmaScript state tracker.

  Tracks block depth, function names, etc. within an EcmaScript token stream.
  """

  # Markers pushed onto the block-type stack for each open '{'.
  OBJECT_LITERAL = 'o'
  CODE = 'c'

  def __init__(self, doc_flag=DocFlag):
    """Initializes a JavaScript token stream state tracker.

    Args:
      doc_flag: An optional custom DocFlag used for validating
          documentation flags.
    """
    self._doc_flag = doc_flag
    self.Reset()

  def Reset(self):
    """Resets the state tracker to prepare for processing a new page."""
    self._block_depth = 0
    self._is_block_close = False
    self._paren_depth = 0
    self._function_stack = []
    self._functions_by_name = {}
    self._last_comment = None
    self._doc_comment = None
    self._cumulative_params = None
    self._block_types = []
    self._last_non_space_token = None
    self._last_line = None
    self._first_token = None
    self._documented_identifiers = set()
    # Stack of variable names in scope; '' entries delimit function scopes.
    self._variables_in_scope = []

  def InFunction(self):
    """Returns true if the current token is within a function.

    Returns:
      True if the current token is within a function.
    """
    return bool(self._function_stack)

  def InConstructor(self):
    """Returns true if the current token is within a constructor.

    Returns:
      True if the current token is within a constructor.
    """
    return self.InFunction() and self._function_stack[-1].is_constructor

  def InInterfaceMethod(self):
    """Returns true if the current token is within an interface method.

    Returns:
      True if the current token is within an interface method.
    """
    if self.InFunction():
      if self._function_stack[-1].is_interface:
        return True
      else:
        # A method defined on Foo.prototype is an interface method when the
        # enclosing Foo function is flagged @interface.
        name = self._function_stack[-1].name
        prototype_index = name.find('.prototype.')
        if prototype_index != -1:
          class_function_name = name[0:prototype_index]
          if (class_function_name in self._functions_by_name and
              self._functions_by_name[class_function_name].is_interface):
            return True
    return False

  def InTopLevelFunction(self):
    """Returns true if the current token is within a top level function.

    Returns:
      True if the current token is within a top level function.
    """
    return len(self._function_stack) == 1 and self.InTopLevel()

  def InAssignedFunction(self):
    """Returns true if the current token is within a function variable.

    Returns:
      True if if the current token is within a function variable
    """
    return self.InFunction() and self._function_stack[-1].is_assigned

  def IsFunctionOpen(self):
    """Returns true if the current token is a function block open.

    Returns:
      True if the current token is a function block open.
    """
    return (self._function_stack and
            self._function_stack[-1].block_depth == self._block_depth - 1)

  def IsFunctionClose(self):
    """Returns true if the current token is a function block close.

    Returns:
      True if the current token is a function block close.
    """
    return (self._function_stack and
            self._function_stack[-1].block_depth == self._block_depth)

  def InBlock(self):
    """Returns true if the current token is within a block.

    Returns:
      True if the current token is within a block.
    """
    return bool(self._block_depth)

  def IsBlockClose(self):
    """Returns true if the current token is a block close.

    Returns:
      True if the current token is a block close.
    """
    return self._is_block_close

  def InObjectLiteral(self):
    """Returns true if the current token is within an object literal.

    Returns:
      True if the current token is within an object literal.
    """
    return self._block_depth and self._block_types[-1] == self.OBJECT_LITERAL

  def InObjectLiteralDescendant(self):
    """Returns true if the current token has an object literal ancestor.

    Returns:
      True if the current token has an object literal ancestor.
    """
    return self.OBJECT_LITERAL in self._block_types

  def InParentheses(self):
    """Returns true if the current token is within parentheses.

    Returns:
      True if the current token is within parentheses.
    """
    return bool(self._paren_depth)

  def ParenthesesDepth(self):
    """Returns the number of parens surrounding the token.

    Returns:
      The number of parenthesis surrounding the token.
    """
    return self._paren_depth

  def BlockDepth(self):
    """Returns the number of blocks in which the token is nested.

    Returns:
      The number of blocks in which the token is nested.
    """
    return self._block_depth

  def FunctionDepth(self):
    """Returns the number of functions in which the token is nested.

    Returns:
      The number of functions in which the token is nested.
    """
    return len(self._function_stack)

  def InTopLevel(self):
    """Whether we are at the top level in the class.

    This function call is language specific.  In some languages like
    JavaScript, a function is top level if it is not inside any parenthesis.
    In languages such as ActionScript, a function is top level if it is directly
    within a class.
    """
    raise TypeError('Abstract method InTopLevel not implemented')

  def GetBlockType(self, token):
    """Determine the block type given a START_BLOCK token.

    Code blocks come after parameters, keywords  like else, and closing parens.

    Args:
      token: The current token. Can be assumed to be type START_BLOCK.

    Returns:
      Code block type for current token.
    """
    raise TypeError('Abstract method GetBlockType not implemented')

  def GetParams(self):
    """Returns the accumulated input params as an array.

    In some EcmaScript languages, input params are specified like
    (param:Type, param2:Type2, ...)
    in other they are specified just as
    (param, param2)
    We handle both formats for specifying parameters here and leave
    it to the compilers for each language to detect compile errors.
    This allows more code to be reused between lint checkers for various
    EcmaScript languages.

    Returns:
      The accumulated input params as an array.
    """
    params = []
    if self._cumulative_params:
      params = re.compile(r'\s+').sub('', self._cumulative_params).split(',')
      # Strip out the type from parameters of the form name:Type.
      # NOTE: on Python 2 map() returns a list here.
      params = map(lambda param: param.split(':')[0], params)

    return params

  def GetLastComment(self):
    """Return the last plain comment that could be used as documentation.

    Returns:
      The last plain comment that could be used as documentation.
    """
    return self._last_comment

  def GetDocComment(self):
    """Return the most recent applicable documentation comment.

    Returns:
      The last applicable documentation comment.
    """
    return self._doc_comment

  def HasDocComment(self, identifier):
    """Returns whether the identifier has been documented yet.

    Args:
      identifier: The identifier.

    Returns:
      Whether the identifier has been documented yet.
    """
    return identifier in self._documented_identifiers

  def InDocComment(self):
    """Returns whether the current token is in a doc comment.

    Returns:
      Whether the current token is in a doc comment.
    """
    return self._doc_comment and self._doc_comment.end_token is None

  def GetDocFlag(self):
    """Returns the current documentation flags.

    Returns:
      The current documentation flags.
    """
    return self._doc_flag

  def IsTypeToken(self, t):
    """Returns whether the given token lies inside a doc flag's type annotation.

    Args:
      t: The token to check.

    Returns:
      True if t is strictly between the type_start_token and type_end_token of
      the nearest preceding doc flag in the current doc comment.
    """
    if self.InDocComment() and t.type not in (Type.START_DOC_COMMENT,
        Type.DOC_FLAG, Type.DOC_INLINE_FLAG, Type.DOC_PREFIX):
      f = tokenutil.SearchUntil(t, [Type.DOC_FLAG], [Type.START_DOC_COMMENT],
                                None, True)
      if (f and f.attached_object.type_start_token is not None and
          f.attached_object.type_end_token is not None):
        return (tokenutil.Compare(t, f.attached_object.type_start_token) > 0 and
                tokenutil.Compare(t, f.attached_object.type_end_token) < 0)
    return False

  def GetFunction(self):
    """Return the function the current code block is a part of.

    Returns:
      The current Function object.
    """
    if self._function_stack:
      return self._function_stack[-1]

  def GetBlockDepth(self):
    """Return the block depth.

    Returns:
      The current block depth.
    """
    return self._block_depth

  def GetLastNonSpaceToken(self):
    """Return the last non whitespace token."""
    return self._last_non_space_token

  def GetLastLine(self):
    """Return the last line."""
    return self._last_line

  def GetFirstToken(self):
    """Return the very first token in the file."""
    return self._first_token

  def IsVariableInScope(self, token_string):
    """Checks if string is variable in current scope.

    For given string it checks whether the string is a defined variable
    (including function param) in current state.

    E.g. if variables defined (variables in current scope) is docs
    then docs, docs.length etc will be considered as variable in current
    scope. This will help in avoiding extra goog.require for variables.

    Args:
      token_string: String to check if its is a variable in current scope.

    Returns:
      true if given string is a variable in current scope.
    """
    for variable in self._variables_in_scope:
      if (token_string == variable
          or token_string.startswith(variable + '.')):
        return True
    return False

  def HandleToken(self, token, last_non_space_token):
    """Handles the given token and updates state.

    Args:
      token: The token to handle.
      last_non_space_token: The last non-whitespace token preceding this one.
    """
    self._is_block_close = False

    if not self._first_token:
      self._first_token = token

    # Track block depth.
    # NOTE: 'type' shadows the builtin; kept as-is for compatibility.
    type = token.type
    if type == Type.START_BLOCK:
      self._block_depth += 1

      # Subclasses need to handle block start very differently because
      # whether a block is a CODE or OBJECT_LITERAL block varies significantly
      # by language.
      self._block_types.append(self.GetBlockType(token))

      # When entering a function body, record its parameters.
      if self.InFunction():
        function = self._function_stack[-1]
        if self._block_depth == function.block_depth + 1:
          function.parameters = self.GetParams()

    # Track block depth.
    elif type == Type.END_BLOCK:
      self._is_block_close = not self.InObjectLiteral()
      self._block_depth -= 1
      self._block_types.pop()

    # Track parentheses depth.
    elif type == Type.START_PAREN:
      self._paren_depth += 1

    # Track parentheses depth.
    elif type == Type.END_PAREN:
      self._paren_depth -= 1

    elif type == Type.COMMENT:
      self._last_comment = token.string

    elif type == Type.START_DOC_COMMENT:
      self._last_comment = None
      self._doc_comment = DocComment(token)

    elif type == Type.END_DOC_COMMENT:
      self._doc_comment.end_token = token

    elif type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
      flag = self._doc_flag(token)
      token.attached_object = flag
      self._doc_comment.AddFlag(flag)

      if flag.flag_type == 'suppress':
        self._doc_comment.AddSuppression(token)

    elif type == Type.FUNCTION_DECLARATION:
      last_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES, None,
                                         True)
      doc = None
      # Only functions outside of parens are eligible for documentation.
      if not self._paren_depth:
        doc = self._doc_comment

      name = ''
      is_assigned = last_code and (last_code.IsOperator('=') or
          last_code.IsOperator('||') or last_code.IsOperator('&&') or
          (last_code.IsOperator(':') and not self.InObjectLiteral()))

      if is_assigned:
        # TODO(robbyw): This breaks for x[2] = ...
        # Must use loop to find full function name in the case of line-wrapped
        # declarations (bug 1220601) like:
        # my.function.foo.
        #   bar = function() ...
        identifier = tokenutil.Search(last_code, Type.SIMPLE_LVALUE, None, True)
        while identifier and identifier.type in (
            Type.IDENTIFIER, Type.SIMPLE_LVALUE):
          name = identifier.string + name
          # Traverse behind us, skipping whitespace and comments.
          while True:
            identifier = identifier.previous
            if not identifier or not identifier.type in Type.NON_CODE_TYPES:
              break

      else:
        next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
        while next_token and next_token.IsType(Type.FUNCTION_NAME):
          name += next_token.string
          next_token = tokenutil.Search(next_token, Type.FUNCTION_NAME, 2)

      function = Function(self._block_depth, is_assigned, doc, name)
      function.start_token = token

      self._function_stack.append(function)
      self._functions_by_name[name] = function

      # Add a delimiter in stack for scope variables to define start of
      # function. This helps in popping variables of this function when
      # function declaration ends.
      self._variables_in_scope.append('')

    elif type == Type.START_PARAMETERS:
      self._cumulative_params = ''

    elif type == Type.PARAMETERS:
      self._cumulative_params += token.string
      self._variables_in_scope.extend(self.GetParams())

    elif type == Type.KEYWORD and token.string == 'return':
      next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
      # A bare 'return;' does not count as returning a value.
      if not next_token.IsType(Type.SEMICOLON):
        function = self.GetFunction()
        if function:
          function.has_return = True

    elif type == Type.KEYWORD and token.string == 'throw':
      function = self.GetFunction()
      if function:
        function.has_throw = True

    elif type == Type.KEYWORD and token.string == 'var':
      function = self.GetFunction()
      next_token = tokenutil.Search(token, [Type.IDENTIFIER,
                                            Type.SIMPLE_LVALUE])

      if next_token:
        if next_token.type == Type.SIMPLE_LVALUE:
          self._variables_in_scope.append(next_token.values['identifier'])
        else:
          self._variables_in_scope.append(next_token.string)

    elif type == Type.SIMPLE_LVALUE:
      identifier = token.values['identifier']
      jsdoc = self.GetDocComment()
      if jsdoc:
        self._documented_identifiers.add(identifier)

      self._HandleIdentifier(identifier, True)

    elif type == Type.IDENTIFIER:
      self._HandleIdentifier(token.string, False)

      # Detect documented non-assignments.
      next_token = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
      if next_token and next_token.IsType(Type.SEMICOLON):
        if (self._last_non_space_token and
            self._last_non_space_token.IsType(Type.END_DOC_COMMENT)):
          self._documented_identifiers.add(token.string)

  def _HandleIdentifier(self, identifier, is_assignment):
    """Process the given identifier.

    Currently checks if it references 'this' and annotates the function
    accordingly.

    Args:
      identifier: The identifer to process.
      is_assignment: Whether the identifer is being written to.
    """
    if identifier == 'this' or identifier.startswith('this.'):
      function = self.GetFunction()
      if function:
        function.has_this = True

  def HandleAfterToken(self, token):
    """Handle updating state after a token has been checked.

    This function should be used for destructive state changes such as
    deleting a tracked object.

    Args:
      token: The token to handle.
    """
    type = token.type
    if type == Type.SEMICOLON or type == Type.END_PAREN or (
        type == Type.END_BRACKET and
        self._last_non_space_token.type not in (
            Type.SINGLE_QUOTE_STRING_END, Type.DOUBLE_QUOTE_STRING_END)):
      # We end on any numeric array index, but keep going for string based
      # array indices so that we pick up manually exported identifiers.
      self._doc_comment = None
      self._last_comment = None

    elif type == Type.END_BLOCK:
      self._doc_comment = None
      self._last_comment = None

      if self.InFunction() and self.IsFunctionClose():
        # TODO(robbyw): Detect the function's name for better errors.
        function = self._function_stack.pop()
        function.end_token = token

        # Pop all variables till delimiter ('') those were defined in the
        # function being closed so make them out of scope.
        while self._variables_in_scope and self._variables_in_scope[-1]:
          self._variables_in_scope.pop()

        # Pop delimiter
        if self._variables_in_scope:
          self._variables_in_scope.pop()

    elif type == Type.END_PARAMETERS and self._doc_comment:
      self._doc_comment = None
      self._last_comment = None

    if not token.IsAnyType(Type.WHITESPACE, Type.BLANK_LINE):
      self._last_non_space_token = token

    self._last_line = token.line
| bsd-3-clause |
seoulcoin/seoulcoin | qa/rpc-tests/txn_doublespend.py | 152 | 4968 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with malleable transactions
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from decimal import Decimal
from util import *
import os
import shutil
class TxnMallTest(BitcoinTestFramework):
    """Checks wallet accounting when a transaction is double-spent (malleated).

    A raw transaction is prepared but withheld, the same coins are then spent
    via the wallet, and finally the withheld double-spend is mined on the
    other half of a split network; after reconnecting, the wallet's
    transactions must show as conflicted and balances must reflect only the
    mutated send.
    """

    def add_options(self, parser):
        # --mineblock additionally exercises the case where the wallet
        # transactions get 1 confirmation before the double-spend is mined.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        """Runs the double-spend scenario end to end over the node RPCs."""
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        self.nodes[0].move("", "foo", 1220)
        self.nodes[0].move("", "bar", 30)
        assert_equal(self.nodes[0].getbalance(""), 0)

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # First: use raw transaction API to send 1210 BTC to node1_address,
        # but don't broadcast:
        (total_in, inputs) = gather_inputs(self.nodes[0], 1210)
        change_address = self.nodes[0].getnewaddress("foo")
        outputs = {}
        outputs[change_address] = 40
        outputs[node1_address] = 1210
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)

        # Create two transaction from node[0] to node[1]; the
        # second must spend change from the first because the first
        # spends all mature inputs:
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 1210, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)

        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].setgenerate(True, 1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus 1210, minus 20, and minus transaction fees:
        expected = starting_balance
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo"), 1220+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar"), 30+tx2["amount"]+tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Now give doublespend to miner (node2, on the other half of the
        # split network, has never seen txid1/txid2):
        mutated_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].setgenerate(True, 1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].setgenerate(True, 1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -1)
        assert_equal(tx2["confirmations"], -1)

        # Node0's total balance should be starting balance, plus 100BTC for
        # two more matured blocks, minus 1210 for the double-spend:
        expected = starting_balance + 100 - 1210
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)

        # foo account should be debited, but bar account should not:
        assert_equal(self.nodes[0].getbalance("foo"), 1220-1210)
        assert_equal(self.nodes[0].getbalance("bar"), 30)

        # Node1's "from" account balance should be just the mutated send:
        assert_equal(self.nodes[1].getbalance("from0"), 1210)
if __name__ == '__main__':
    # Run the double-spend regression test when invoked directly.
    TxnMallTest().main()
| mit |
PeterPetrik/QGIS | python/plugins/processing/algs/gdal/gdalinfo.py | 19 | 5661 | # -*- coding: utf-8 -*-
"""
***************************************************************************
gdalinfo.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterString,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBoolean,
QgsProcessingParameterFileDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
# Root directory of the processing plugin; used below to locate bundled icons.
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class gdalinfo(GdalAlgorithm):
    """QGIS Processing algorithm wrapping the 'gdalinfo' command-line tool.

    Builds a gdalinfo command line from the user's parameter choices, runs it,
    and writes the captured output into an HTML file.
    """

    # Parameter identifiers used in the processing framework.
    INPUT = 'INPUT'
    MIN_MAX = 'MIN_MAX'
    STATS = 'STATS'
    NO_GCP = 'NOGCP'
    NO_METADATA = 'NO_METADATA'
    EXTRA = 'EXTRA'
    OUTPUT = 'OUTPUT'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declares the algorithm's input parameters and output destination."""
        self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
                                                            self.tr('Input layer')))
        self.addParameter(QgsProcessingParameterBoolean(self.MIN_MAX,
                                                        self.tr('Force computation of the actual min/max values for each band'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.STATS,
                                                        self.tr('Read and display image statistics (force computation if necessary)'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.NO_GCP,
                                                        self.tr('Suppress GCP info'),
                                                        defaultValue=False))
        self.addParameter(QgsProcessingParameterBoolean(self.NO_METADATA,
                                                        self.tr('Suppress metadata info'),
                                                        defaultValue=False))
        # Free-form extra switches; advanced because they are passed verbatim.
        extra_param = QgsProcessingParameterString(self.EXTRA,
                                                   self.tr('Additional command-line parameters'),
                                                   defaultValue=None,
                                                   optional=True)
        extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
        self.addParameter(extra_param)
        self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT,
                                                                self.tr('Layer information'),
                                                                self.tr('HTML files (*.html)')))

    def name(self):
        return 'gdalinfo'

    def displayName(self):
        return self.tr('Raster information')

    def group(self):
        return self.tr('Raster miscellaneous')

    def groupId(self):
        return 'rastermiscellaneous'

    def icon(self):
        return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-info.png'))

    def commandName(self):
        return 'gdalinfo'

    def getConsoleCommands(self, parameters, context, feedback, executing=True):
        """Translates the parameter values into a gdalinfo argument list.

        Returns:
            [command name, escaped argument string] as expected by GdalUtils.
        """
        arguments = []
        if self.parameterAsBoolean(parameters, self.MIN_MAX, context):
            arguments.append('-mm')
        if self.parameterAsBoolean(parameters, self.STATS, context):
            arguments.append('-stats')
        if self.parameterAsBoolean(parameters, self.NO_GCP, context):
            arguments.append('-nogcp')
        if self.parameterAsBoolean(parameters, self.NO_METADATA, context):
            arguments.append('-nomd')
        if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''):
            # NOTE(review): the extra string is appended as a single argument,
            # not split on whitespace — confirm GdalUtils.escapeAndJoin keeps
            # multiple switches usable.
            extra = self.parameterAsString(parameters, self.EXTRA, context)
            arguments.append(extra)
        raster = self.parameterAsRasterLayer(parameters, self.INPUT, context)
        if raster is None:
            raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
        arguments.append(raster.source())
        return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]

    def processAlgorithm(self, parameters, context, feedback):
        """Runs gdalinfo and saves its console output as a <pre> HTML file."""
        console_output = GdalUtils.runGdal(self.getConsoleCommands(parameters, context, feedback), feedback)
        output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
        with open(output, 'w') as f:
            f.write('<pre>')
            # NOTE(review): the first entry of console_output is skipped —
            # presumably the echoed command line; confirm against runGdal.
            for s in console_output[1:]:
                f.write(str(s))
            f.write('</pre>')
        return {self.OUTPUT: output}
| gpl-2.0 |
Royal-Society-of-New-Zealand/NZ-ORCID-Hub | orcid_api_v3/models/organization_v20.py | 1 | 5232 | # coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.disambiguated_organization_v20 import DisambiguatedOrganizationV20 # noqa: F401,E501
from orcid_api_v3.models.organization_address_v20 import OrganizationAddressV20 # noqa: F401,E501
class OrganizationV20(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'name': 'str',
        'address': 'OrganizationAddressV20',
        'disambiguated_organization': 'DisambiguatedOrganizationV20'
    }

    attribute_map = {
        'name': 'name',
        'address': 'address',
        'disambiguated_organization': 'disambiguated-organization'
    }

    def __init__(self, name=None, address=None, disambiguated_organization=None):  # noqa: E501
        """OrganizationV20 - a model defined in Swagger.

        ``name`` and ``address`` are required (their setters raise
        ``ValueError`` on ``None``); ``disambiguated_organization`` is
        optional.
        """  # noqa: E501
        self._name = None
        self._address = None
        self._disambiguated_organization = None
        self.discriminator = None
        self.name = name
        self.address = address
        if disambiguated_organization is not None:
            self.disambiguated_organization = disambiguated_organization

    @property
    def name(self):
        """Gets the name of this OrganizationV20.  # noqa: E501

        :return: The name of this OrganizationV20.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this OrganizationV20.

        :param name: The name of this OrganizationV20.  # noqa: E501
        :type: str
        :raises ValueError: if ``name`` is ``None`` (required field)
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    @property
    def address(self):
        """Gets the address of this OrganizationV20.  # noqa: E501

        :return: The address of this OrganizationV20.  # noqa: E501
        :rtype: OrganizationAddressV20
        """
        return self._address

    @address.setter
    def address(self, address):
        """Sets the address of this OrganizationV20.

        :param address: The address of this OrganizationV20.  # noqa: E501
        :type: OrganizationAddressV20
        :raises ValueError: if ``address`` is ``None`` (required field)
        """
        if address is None:
            raise ValueError("Invalid value for `address`, must not be `None`")  # noqa: E501
        self._address = address

    @property
    def disambiguated_organization(self):
        """Gets the disambiguated_organization of this OrganizationV20.  # noqa: E501

        :return: The disambiguated_organization of this OrganizationV20.  # noqa: E501
        :rtype: DisambiguatedOrganizationV20
        """
        return self._disambiguated_organization

    @disambiguated_organization.setter
    def disambiguated_organization(self, disambiguated_organization):
        """Sets the disambiguated_organization of this OrganizationV20.

        :param disambiguated_organization: The disambiguated_organization of this OrganizationV20.  # noqa: E501
        :type: DisambiguatedOrganizationV20
        """
        self._disambiguated_organization = disambiguated_organization

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}

        # Iterate declared attributes directly; the Python 2 `six.iteritems`
        # shim is unnecessary on Python 3.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated-code pattern: merge dict items when the model subclasses
        # dict (never true for this class, kept for template fidelity).
        if issubclass(OrganizationV20, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, OrganizationV20):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| mit |
RueLaLaTech/savory-pie | savory_pie/django/validators.py | 2 | 27662 | import collections
import datetime
import re
import savory_pie
class ValidationError(Exception):
    """Raised when resource validation fails.

    ``resource`` is the resource whose data failed validation; ``errors`` is
    a dict mapping dotted field keys to lists of error messages, as produced
    by :func:`validate`.
    """
    def __init__(self, resource, errors):
        self.resource = resource  # the resource that failed validation
        self.errors = errors  # dict: dotted key -> list of error messages
def validate(ctx, key, resource, source_dict):
    """
    Descend through a resource, including its fields and any related resources
    or submodels, looking for validation errors in any resources or models whose
    validators flag issues with content therein.

    Parameters:

        ``ctx``
            the API context; its ``formatter`` converts attribute names to
            their public (JSON) spellings and coerces submitted values

        ``key``
            the current path fragment of the dictionary key which will be used to store
            any errors found in the returned dict -- in the initial call to validate,
            this should probably be the name of the class being validated e.g. "user"

        ``resource``
            the ModelResource instance whose data is to be validated

        ``source_dict``
            the incoming dict mapping public field names to submitted values

    Returns:

        a dict mapping dotted keys (representing resources or fields) to
        validation errors
    """
    key = ctx.formatter.convert_to_public_property(key)
    error_dict = {}
    if source_dict and resource:
        # NOTE: collections.Iterable moved to collections.abc in Python 3.3+
        if hasattr(resource, 'fields') and \
                isinstance(resource.fields, collections.Iterable):
            for field in resource.fields:
                if not hasattr(field, 'name'):
                    continue
                fieldname = ctx.formatter.convert_to_public_property(field.name)
                if fieldname in source_dict:
                    value = source_dict[fieldname]
                    if isinstance(value, list):
                        # RelatedManagerField validator might want to examine parent dict
                        class SourceDictList(list):
                            def __init__(self, source_dicts, parent_dict):
                                self[:] = source_dicts
                                self.parent_dict = parent_dict
                        value = SourceDictList(value, source_dict)
                    # ignore validation if value hasn't changed
                    if hasattr(resource, 'model'):
                        try:
                            orig_value = getattr(resource.model, field.name, None)
                            if orig_value == ctx.formatter.to_python_value(type(orig_value), value):
                                continue
                        except Exception:
                            # coercion failures fall through to normal validation
                            pass
                    # attempt to validate field; a field may carry one
                    # validator or an iterable of them
                    if hasattr(field, 'validator') and field.validator:
                        if isinstance(field.validator, collections.Iterable):
                            for validator in field.validator:
                                validator.find_errors(error_dict, ctx, key, resource, field, value)
                        else:
                            field.validator.find_errors(error_dict, ctx, key, resource, field, value)
                    # related resources recursively validate their own subtree
                    if hasattr(field, 'validate_resource'):
                        error_dict.update(field.validate_resource(ctx, key, resource, value))
        # resource-level validators see the whole source dict
        if hasattr(resource, 'validators') and \
                isinstance(resource.validators, collections.Iterable):
            for validator in resource.validators:
                validator.find_errors(error_dict, ctx, key, resource, source_dict)
    return error_dict
class BaseValidator(object):
    """
    Validators are used to determine that the values of model fields are acceptable
    according to programmatically specifiable criteria::

        class ValidationTestResource(resources.ModelResource):
            parent_resource_path = 'users'
            model_class = User

            validators = [
                DatetimeFieldSequenceValidator('start_date', 'end_date')
            ]

            fields = [
                fields.AttributeField(attribute='name', type=str,
                    validator=StringFieldExactMatchValidator('Bob')),
                fields.AttributeField(attribute='age', type=int,
                    validator=(IntFieldMinValidator(21, 'too young to drink'),
                               IntFieldPrimeValidator(100))),
                # A field can take either a single validator,
                # or a list or tuple of multiple validators.
                fields.AttributeField(attribute='before', type=datetime),
                fields.AttributeField(attribute='after', type=datetime),
                fields.AttributeField(attribute='systolic_bp', type=int,
                    validator=IntFieldRangeValidator(100, 120,
                        'blood pressure out of range')),
            ]

    When you apply *BaseValidator.validate* to an instance of ValidationTestResource,
    it will check to see if all the criteria are satisfied, and will return a dict giving
    all violations as key-value pairs, where the keys are dotted Python names for the
    model or field in question, and the values are lists of error messages. So if several
    criteria fail to be met, you might see something like this::

        {
            'savory_pie.tests.django.test_validators.ValidationTestResource':
                ['Datetimes are not in expected sequence.'],
            'savory_pie.tests.django.test_validators.ValidationTestResource.age':
                ['too young to drink',
                 'This should be a prime number.'],
            'savory_pie.tests.django.test_validators.ValidationTestResource.name':
                ['This should exactly match the expected value.'],
            'savory_pie.tests.django.test_validators.ValidationTestResource.systolic_bp':
                ['blood pressure out of range']
        }

    You can write your own validators, like *IntFieldPrimeValidator* above::

        class IntFieldPrimeValidator(FieldValidator):

            error_message = 'This should be a prime number.'

            def __init__(self, maxprime):
                self._primes = _primes = [2, 3, 5, 7]

                def test_prime(x, _primes=_primes):
                    for p in _primes:
                        if p * p > x:
                            return True
                        if (x % p) == 0:
                            return False

                for x in range(11, maxprime + 1, 2):
                    if test_prime(x):
                        _primes.append(x)

            def check_value(self, value):
                return value in self._primes

    As a general rule, a validator has a *find_errors* method which makes calls to the
    *check_value* method, and if errors are found, they are stored in a dict, keyed by
    the dotted name of the non-compliant model or field.
    """

    error_message = 'Validation failure message goes here'
    """
    The error message should give a clear description of the nature of the validation
    failure, if one occurs.
    """

    json_name = 'What the front end calls this validator'
    """
    This should be a name understood by the front-end developers as referring to this
    particular validator so that they can wire up JavaScript to validate HTML forms in
    the browser.
    """

    null = False
    """
    Ignore null values for any fields which should be validated.
    """

    def __init__(self, *args, **kwargs):
        # Allow per-instance overrides of the class-level defaults; any
        # remaining kwargs become part of the front-end schema.
        self.error_message = kwargs.pop('error_message', self.error_message)
        self.null = kwargs.pop('null', False)
        self.populate_schema(**kwargs)

    def _add_error(self, error_dict, key, error):
        # Append to the existing error list for this key, creating it if absent.
        error_dict.setdefault(key, []).append(error)

    def populate_schema(self, **kwargs):
        """
        Every validator *MUST* call this method in its constructor. The *kwargs*
        should be name-value pairs for any parameters required for validation. If the
        constructor sets error_message, that should happen *before* the call to this
        method.
        """
        self._schema = schema = {
            'name': self.json_name,
            'text': self.error_message
        }
        schema.update(kwargs)

    def to_schema(self):
        """
        Subclasses are expected to overload this method with a string used in
        the front end for HTML form validation, for example in the context of
        something like `jQuery-Validation-Engine`_.

        .. _`jQuery-Validation-Engine`: https://github.com/posabsolute/jQuery-Validation-Engine

        Returns:

            a string representing the constraints on this resource or field, in a form
            that's useful on the front end, e.g. JavaScript
        """
        return self._schema

    def check_value(self, value):
        """
        Extend this method to test whatever needs testing on a model or field. Return
        True if the value is OK, False if it's unacceptable.
        """
        return False
# Resource Validators
class ResourceValidator(BaseValidator):
    """
    Base class for validators that operate on whole ModelResource instances.
    These typically examine relationships between several field values, since
    the individual fields are validated separately by field validators.
    """

    def find_errors(self, error_dict, ctx, key, resource, source_dict):
        """
        Record ``self.error_message`` under ``key`` when the incoming source
        dict fails this validator's ``check_value`` test.
        """
        passed = self.check_value(source_dict)
        if not passed:
            self._add_error(error_dict, key, self.error_message)
class DatetimeFieldSequenceValidator(ResourceValidator):
    """
    Test several AttributeFields of type 'datetime' to make sure their values
    appear in chronological order (each value no later than the next).

    Parameters:

        ``*date_fields``
            a list of names of AttributeFields of type 'datetime' which are
            required to be in chronological sequence

        ``error_message``
            optional: the message to appear in the error dictionary if this
            condition is not met
    """

    json_name = 'dates_in_sequence'

    error_message = 'Datetimes are not in expected sequence.'

    def __init__(self, *args, **kwargs):
        # expose the participating field names to the front-end schema
        kwargs['fields'] = ','.join(args)
        super(DatetimeFieldSequenceValidator, self).__init__(**kwargs)
        self._date_fields = args

    def find_errors(self, error_dict, ctx, key, resource, source_dict):
        """
        Verify that specified datetime fields exist, and are in chronological sequence
        as expected.
        """
        values = []
        for attr in self._date_fields:
            public_attr = ctx.formatter.convert_to_public_property(attr)
            if self.null and source_dict.get(public_attr) is None:
                # null=True: skip the whole check when any field is absent/None
                return
            elif public_attr not in source_dict:
                self._add_error(error_dict, key,
                                'Cannot find datetime field "' + attr + '"')
                return
            values.append(ctx.formatter.to_python_value(datetime.datetime,
                                                        source_dict[public_attr]))
        # compare each adjacent pair; the first out-of-order pair is enough
        for before, after in zip(values[:-1], values[1:]):
            if before > after:
                self._add_error(error_dict, key, self.error_message)
                return
class RequiredFieldValidator(ResourceValidator):
    """
    Require that a particular field of a resource has a (truthy) value.

    Parameters:

        ``field``
            the name of the required field

        ``error_message``
            optional override for the message recorded when the field is missing
    """

    json_name = 'required_field'

    error_message = 'This field is required'

    def __init__(self, field, *args, **kwargs):
        self.field = field
        super(RequiredFieldValidator, self).__init__(**kwargs)

    def find_errors(self, error_dict, ctx, key, resource, source_dict):
        public_name = ctx.formatter.convert_to_public_property(self.field)
        # guard clause: a truthy value satisfies the requirement
        if source_dict.get(public_name):
            return
        self._add_error(error_dict, key, '%s: %s' % (self.error_message, public_name))
class RequiredTogetherValidator(ResourceValidator):
    """
    Require a group of fields to be supplied all-or-nothing: when any field
    in the set has a non-None value, every field in the set must have one.

    Parameters:

        ``*fields``
            names of the savory_pie Fields that form the group

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'required_together'

    error_message = 'This set of fields is required if any one is provided.'

    def __init__(self, *args, **kwargs):
        # advertise the participating field names to the front-end schema
        kwargs['fields'] = ','.join(args)
        super(RequiredTogetherValidator, self).__init__(**kwargs)
        self._fields = args

    def find_errors(self, error_dict, ctx, key, resource, source_dict):
        collected = {}
        for name in self._fields:
            public_name = ctx.formatter.convert_to_public_property(name)
            if not self.null and public_name not in source_dict:
                self._add_error(error_dict, key, 'Cannot find field "' + name + '"')
                return
            collected[name] = source_dict.get(public_name)
        supplied = [v for v in collected.values() if v is not None]
        # an error only arises when the group is *partially* supplied
        if supplied and len(supplied) != len(collected):
            self._add_error(error_dict, key, self.error_message)
class UniqueTogetherValidator(ResourceValidator):
    """
    Test a tuple of fields to ensure their proposed values represent a unique set
    within the database. This validator is similar to Django ORM's 'unique together'
    constraint, but differs in that it accepts only a single level of fields:

    https://docs.djangoproject.com/en/dev/ref/models/options/#unique-together

    Parameters:

        ``*fields``
            a list of names of savory_pie Fields, which as a set should be unique

        ``error_message``
            optional: the message to appear in the error dictionary if this
            condition is not met
    """

    json_name = 'unique_together'

    error_message = 'This set of fields must be unique.'

    def __init__(self, *args, **kwargs):
        # expose the participating field names to the front-end schema
        kwargs['fields'] = ','.join(args)
        super(UniqueTogetherValidator, self).__init__(**kwargs)
        self._fields = args

    def find_errors(self, error_dict, ctx, key, resource, source_dict):
        # Build one ORM filter per participating field, then query for an
        # existing row matching the whole set of values.
        filters = []
        for attr in self._fields:
            public_attr = ctx.formatter.convert_to_public_property(attr)
            if self.null and source_dict.get(public_attr) is None:
                # null=True: skip the whole check when any field is absent/None
                return
            elif public_attr not in source_dict:
                self._add_error(error_dict, key, 'Cannot find field "' + attr + '"')
                return
            for field in resource.fields:
                if attr == getattr(field, 'name', None):
                    try:
                        if field.__class__ == savory_pie.django.fields.SubModelResourceField:
                            # sub-resources are matched by primary key (taken
                            # from the resource URI) or, failing that, by name
                            if 'resourceUri' in source_dict[public_attr]:
                                pk = source_dict[public_attr]['resourceUri'].split('/')[-1]
                                filters.append({'{}__pk'.format(attr): pk})
                            elif 'name' in source_dict[public_attr]:
                                filters.append({'{}__name'.format(attr): source_dict[public_attr]['name']})
                            else:
                                #TODO allow lookup by fields other than id/name?
                                return
                        elif issubclass(field.__class__, savory_pie.django.fields.AttributeField):
                            filters.append({attr: source_dict[public_attr]})
                    except Exception:
                        # NOTE(review): broad except silently skips fields that
                        # fail introspection -- confirm this is intentional
                        pass
        if filters and hasattr(resource, 'model'):
            try:
                qset = resource.model.__class__.objects.all()
                for f in filters:
                    qset = qset.filter(**f)
                if len(qset):
                    # if validation fails because we're re-saving an existing object, ignore
                    if len(qset) == 1 and resource.model.pk and qset[0].pk == resource.model.pk:
                        return
                    self._add_error(error_dict, key, self.error_message)
            except Exception:
                # NOTE(review): database errors are swallowed, so the
                # uniqueness check silently passes on failure -- confirm
                pass
class UniquePairedFieldValidator(ResourceValidator):
    """
    Test a pair of fields (a, b), such that for a given a, only one b can exist. However,
    this _unique_ combination of fields can exist unlimited times.

    Parameters:

        ``*fields``
            a pair of savory_pie Fields (a, b), such that for a given a,
            only one b can exist

        ``error_message``
            optional: the message to appear in the error dictionary if this
            condition is not met
    """

    json_name = 'unique_paired_field'

    error_message = 'First field is already present in another pair.'

    def __init__(self, *args, **kwargs):
        # expose the participating field names to the front-end schema
        kwargs['fields'] = ','.join(args)
        super(UniquePairedFieldValidator, self).__init__(**kwargs)
        self._fields = args

    def find_errors(self, error_dict, ctx, key, resource, source_dict):
        # Build one ORM filter per field; filters[0] is used to match field a
        # and filters[1] to exclude field b below.
        filters = []
        for attr in self._fields:
            public_attr = ctx.formatter.convert_to_public_property(attr)
            if self.null and source_dict.get(public_attr) is None:
                # null=True: skip the whole check when any field is absent/None
                return
            elif public_attr not in source_dict:
                self._add_error(error_dict, key, 'Cannot find field "' + attr + '"')
                return
            for field in resource.fields:
                if attr == getattr(field, 'name', None):
                    try:
                        if field.__class__ == savory_pie.django.fields.SubModelResourceField:
                            # sub-resources are matched by pk (from resourceUri) or by name
                            if 'resourceUri' in source_dict[public_attr]:
                                pk = source_dict[public_attr]['resourceUri'].split('/')[-1]
                                filters.append({'{}__pk'.format(attr): pk})
                            elif 'name' in source_dict[public_attr]:
                                filters.append({'{}__name'.format(attr): source_dict[public_attr]['name']})
                            else:
                                #TODO allow lookup by fields other than id/name?
                                return
                        elif issubclass(field.__class__, savory_pie.django.fields.AttributeField):
                            filters.append({attr: source_dict[public_attr]})
                    except Exception:
                        # NOTE(review): introspection failures are silently ignored
                        pass
        if filters and hasattr(resource, 'model'):
            # NOTE(review): this assumes exactly two filters were built; when
            # fewer are produced, the IndexError below is swallowed by the
            # broad except and the check silently passes -- confirm intended.
            try:
                qset = resource.model.__class__.objects.filter(**filters[0]).exclude(**filters[1])
                if len(qset):
                    self._add_error(error_dict, key, self.error_message)
            except Exception:
                pass
# Field Validators
class FieldValidator(BaseValidator):
    """
    Base class for all validators of fields: AttributeField, URIResourceField,
    SubObjectResourceField, IterableField
    """

    def find_errors(self, error_dict, ctx, key, resource, field, value):
        """
        Search for validation errors in a field of a database model.

        Errors are recorded in ``error_dict`` under the dotted key
        ``<key>.<public field name>``.
        """
        fieldname = ctx.formatter.convert_to_public_property(field.name)
        value = ctx.formatter.to_python_value(field._type, value)
        if value is None:
            if self.null:
                # null=True: a missing value is acceptable
                return
            # NOTE(review): there is no `return` after recording this error,
            # so a None value also falls through to check_value(None) below
            # and may record self.error_message as a second error -- confirm
            # that double-reporting is intended.
            self._add_error(error_dict, key + '.' + fieldname, '{} is required'.format(fieldname))
        if not self.check_value(value):
            self._add_error(error_dict, key + '.' + fieldname, self.error_message)
class StringFieldZipcodeValidator(FieldValidator):
    """
    Validate that a string field holds a US zipcode: five digits with an
    optional "-1234" extension.

    **TODO**:

        Handle international postal codes, some are six digits???
    """

    json_name = 'us_zipcode'

    error_message = 'This should be a zipcode.'

    pattern = re.compile(r'^\d{5}(-\d{4})?$')

    def check_value(self, value):
        """
        Return a truthy match object when *value* looks like a US zipcode.
        """
        try:
            match = self.pattern.match(value)
        except TypeError:
            # non-string values (e.g. None, int) cannot be zipcodes
            return False
        return match
class StringFieldExactMatchValidator(FieldValidator):
    """
    Validate that a string field exactly matches an expected value
    (case-sensitive).

    Parameters:

        ``expected``
            the case-sensitive string value that we expect to see

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'exact_string'

    error_message = 'This should exactly match the expected value.'

    def __init__(self, expected, **kwargs):
        self._expected = expected
        kwargs['expected'] = expected
        super(StringFieldExactMatchValidator, self).__init__(**kwargs)

    def check_value(self, value):
        """
        Return True only when *value* equals the expected string exactly.
        """
        return self._expected == value
class StringFieldMaxLengthValidator(FieldValidator):
    """
    Test an AttributeField of type 'str' to make sure it does not exceed the
    expected length.

    Parameters:

        ``expected_length``
            the maximum length for an allowable string

        ``error_message``
            optional: the message to appear in the error dictionary if this
            condition is not met
    """

    json_name = 'string_maxlen'

    error_message = 'This should not exceed the expected string length.'

    def __init__(self, expected_length, **kwargs):
        kwargs['expected_length'] = expected_length
        super(StringFieldMaxLengthValidator, self).__init__(**kwargs)
        self._expected_length = expected_length

    def check_value(self, value):
        """
        Verify that the value is a string whose length doesn't exceed the maximum.
        """
        # The original `isinstance(value, unicode)` fallback raised NameError
        # on Python 3 for any non-str value; build the accepted text types in
        # a way that works on both Python 2 (str + unicode) and Python 3 (str).
        try:
            text_types = (str, unicode)  # noqa: F821 -- Python 2 only
        except NameError:
            text_types = (str,)
        return isinstance(value, text_types) and len(value) <= self._expected_length
class IntFieldMinValidator(FieldValidator):
    """
    Validate that an integer field is no smaller than a specified minimum.

    Parameters:

        ``min``
            the smallest acceptable value (inclusive)

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'int_min'

    error_message = 'This value should be greater than or equal to the minimum.'

    def __init__(self, _min, **kwargs):
        self._min = _min
        kwargs['min'] = _min
        super(IntFieldMinValidator, self).__init__(**kwargs)

    def check_value(self, intvalue):
        """
        Return True when the value is an int (exact type, so bool is
        rejected) greater than or equal to the minimum.
        """
        if type(intvalue) is not int:
            return False
        return intvalue >= self._min
class IntFieldMaxValidator(FieldValidator):
    """
    Validate that an integer field is no greater than a specified maximum.

    Parameters:

        ``max``
            the largest acceptable value (inclusive)

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'int_max'

    error_message = 'This value should be less than or equal to the maximum.'

    def __init__(self, _max, **kwargs):
        self._max = _max
        kwargs['max'] = _max
        super(IntFieldMaxValidator, self).__init__(**kwargs)

    def check_value(self, intvalue):
        """
        Return True when the value is an int (exact type, so bool is
        rejected) less than or equal to the maximum.
        """
        if type(intvalue) is not int:
            return False
        return intvalue <= self._max
class IntFieldRangeValidator(FieldValidator):
    """
    Validate that an integer field falls within a given range, inclusive at
    both ends.

    Parameters:

        ``min``
            the bottom of the range

        ``max``
            the top of the range

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'int_range'

    error_message = 'This value should be within the allowed integer range.'

    def __init__(self, _min, _max, **kwargs):
        self._min = _min
        self._max = _max
        kwargs.update({'min': _min, 'max': _max})
        super(IntFieldRangeValidator, self).__init__(**kwargs)

    def check_value(self, intvalue):
        """
        Return True when the value is an int (exact type, so bool is
        rejected) inside [min, max].
        """
        return type(intvalue) is int and self._min <= intvalue <= self._max
class DatetimeFieldMinValidator(FieldValidator):
    """
    Validate that a datetime field is no earlier than a specified minimum.

    Parameters:

        ``min``
            the earliest acceptable datetime (inclusive)

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'datetime_min'

    error_message = 'This value should be no earlier than the minimum datetime.'

    def __init__(self, _min, **kwargs):
        self._min = _min
        # the schema is JSON-bound, so publish the bound as an ISO string
        kwargs['min'] = _min.isoformat()
        super(DatetimeFieldMinValidator, self).__init__(**kwargs)

    def check_value(self, datetimevalue):
        """
        Return True when the value is a datetime.datetime (exact type) that
        is no earlier than the minimum.
        """
        if type(datetimevalue) is not datetime.datetime:
            return False
        return datetimevalue >= self._min
class DatetimeFieldMaxValidator(FieldValidator):
    """
    Validate that a datetime field is no later than a specified maximum.

    Parameters:

        ``max``
            the latest acceptable datetime (inclusive)

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'datetime_max'

    error_message = 'This value should be no later than the maximum datetime.'

    def __init__(self, _max, **kwargs):
        self._max = _max
        # the schema is JSON-bound, so publish the bound as an ISO string
        kwargs['max'] = _max.isoformat()
        super(DatetimeFieldMaxValidator, self).__init__(**kwargs)

    def check_value(self, datetimevalue):
        """
        Return True when the value is a datetime.datetime (exact type) that
        is no later than the maximum.
        """
        if type(datetimevalue) is not datetime.datetime:
            return False
        return datetimevalue <= self._max
class DateFieldMinValidator(FieldValidator):
    """
    Validate that a date field is no earlier than a specified minimum.

    Parameters:

        ``min``
            the earliest acceptable date (inclusive)

        ``error_message``
            optional override for the message recorded when the check fails
    """

    json_name = 'date_min'

    error_message = 'This value should be no earlier than the minimum date.'

    def __init__(self, _min, **kwargs):
        self._min = _min
        # the schema is JSON-bound, so publish the bound as an ISO string
        kwargs['min'] = _min.isoformat()
        super(DateFieldMinValidator, self).__init__(**kwargs)

    def check_value(self, datevalue):
        """
        Return True when the value is a datetime.date (exact type) that is
        no earlier than the minimum.
        """
        if type(datevalue) is not datetime.date:
            return False
        return datevalue >= self._min
| mit |
def topological_sort_as_sets(dependency_graph):
    """Variation of Kahn's algorithm (1962) that returns sets.

    Takes a dependency graph as a dictionary of node => dependencies.

    Yields sets of items in topological order, where the first set contains
    all nodes without dependencies, and each following set contains all
    nodes that may depend on the nodes only in the previously yielded sets.
    """
    remaining = dependency_graph.copy()
    while remaining:
        # everything whose dependencies have all been emitted already
        ready = {node for node, deps in remaining.items() if len(deps) == 0}
        if not ready:
            raise ValueError('Cyclic dependency in graph: {}'.format(
                ', '.join(repr(x) for x in remaining.items())))
        yield ready
        # drop the emitted nodes, both as keys and from dependency sets
        remaining = {
            node: deps - ready
            for node, deps in remaining.items()
            if node not in ready
        }


def stable_topological_sort(l, dependency_graph):
    """Order *l* topologically while keeping the original relative order of
    items that belong to the same dependency layer."""
    ordered = []
    for layer in topological_sort_as_sets(dependency_graph):
        ordered.extend(node for node in l if node in layer)
    return ordered
| gpl-3.0 |
sql-machine-learning/sqlflow | python/runtime/tensorflow/keras_example_reg.py | 1 | 4694 | # Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tensorflow as tf
from runtime.tensorflow.estimator_example import datasource
from runtime.tensorflow.predict import pred
from runtime.tensorflow.train import train
# NOTE: this file is used by train_predict_test.py, do **NOT** delete!
# Training and validation queries against the demo housing database.
select = "select * from housing.train"
validation_select = "select * from housing.test"

# Boston-housing style input columns: f1 .. f13.
feature_column_names = ["f{}".format(i) for i in range(1, 14)]

feature_column_names_map = {
    "feature_columns": list(feature_column_names)
}
# tf.feature_column inputs for the model: one scalar numeric column per
# input feature f1..f13.
feature_columns = {
    "feature_columns": [
        tf.feature_column.numeric_column("f%d" % i, shape=[1])
        for i in range(1, 14)
    ]
}
# Columns of the housing dataset that hold integer values; every other
# column is float32 (matches the per-column literals in the generated code).
_INT64_FEATURES = frozenset({2, 4, 9, 10, 11})

# Metadata for each input column f1..f13. The generated code spelled out one
# literal dict per column; they differ only in dtype, so build them here.
feature_metas = {
    "f%d" % i: {
        "feature_name": "f%d" % i,
        "dtype": "int64" if i in _INT64_FEATURES else "float32",
        "delimiter": "",
        "shape": [1],
        "is_sparse": False,  # generated code wrote the literal `"false" == "true"`
    }
    for i in range(1, 14)
}
# Metadata for the regression target column.
label_meta = {
    "feature_name": "target",
    "dtype": "float32",
    "delimiter": "",
    "shape": [],  # scalar label
    "is_sparse": False,  # generated code wrote the literal `"false" == "true"`
}
if __name__ == "__main__":
    # Train a Keras DNN regressor on the housing training set, run batch
    # prediction into housing.predict, then remove the saved model directory.
    train(datasource=datasource,
          estimator_string="sqlflow_models.DNNRegressor",
          select=select,
          validation_select=validation_select,
          feature_columns=feature_columns,
          feature_column_names=feature_column_names,
          feature_metas=feature_metas,
          label_meta=label_meta,
          model_params={"hidden_units": [10, 20]},
          # NOTE(review): CategoricalAccuracy is a classification metric but
          # this model is a regressor -- confirm a regression metric (e.g.
          # MeanSquaredError) was not intended here.
          validation_metrics=["CategoricalAccuracy"],
          save="myregmodel_keras",
          batch_size=1,
          epoch=3,
          verbose=0)
    pred(datasource=datasource,
         estimator_string="sqlflow_models.DNNRegressor",
         select=validation_select,
         result_table="housing.predict",
         feature_columns=feature_columns,
         feature_column_names=feature_column_names,
         feature_column_names_map=feature_column_names_map,
         train_label_name=label_meta["feature_name"],
         result_col_name=label_meta["feature_name"],
         feature_metas=feature_metas,
         model_params={"hidden_units": [10, 20]},
         save="myregmodel_keras",
         batch_size=1)
    # clean up the model checkpoint written by train()/pred()
    shutil.rmtree("myregmodel_keras")
| apache-2.0 |
ahmed-mahran/hue | desktop/core/ext-py/Babel-0.9.6/babel/tests/__init__.py | 61 | 1033 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
import unittest
def suite():
    """Aggregate the test suites of every babel sub-package into one suite."""
    from babel.tests import core, dates, localedata, numbers, support, util
    from babel.messages import tests as messages

    suite = unittest.TestSuite()
    # keep the historical ordering of the sub-suites
    for module in (core, dates, localedata, messages, numbers, support, util):
        suite.addTest(module.suite())
    return suite
if __name__ == '__main__':
    # run the aggregated suite when this module is executed directly
    unittest.main(defaultTest='suite')
| apache-2.0 |
Addepar/buck | third-party/py/unittest2/unittest2/compatibility.py | 176 | 2100 | import os
import sys
try:
    from functools import wraps
except ImportError:
    # only needed for Python 2.4
    # Degraded stand-in: ignores the wrapped callable entirely and returns
    # the decorated function unchanged (no metadata copying).
    def wraps(_):
        def _wraps(func):
            return func
        return _wraps
__unittest = True
def _relpath_nt(path, start=os.path.curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_list = os.path.abspath(start).split(os.path.sep)
    path_list = os.path.abspath(path).split(os.path.sep)
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = os.path.splitunc(path)
        unc_start, rest = os.path.splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                             % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_list[0], start_list[0]))
    # Count the leading components shared by start and path,
    # case-insensitively (Windows filesystem semantics).
    shared = 0
    for start_part, path_part in zip(start_list, path_list):
        if start_part.lower() != path_part.lower():
            break
        shared += 1

    rel_list = [os.path.pardir] * (len(start_list) - shared) + path_list[shared:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
# default to posixpath definition
def _relpath_posix(path, start=os.path.curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")

    start_parts = os.path.abspath(start).split(os.path.sep)
    path_parts = os.path.abspath(path).split(os.path.sep)

    # number of leading components shared by both absolute paths
    shared = len(os.path.commonprefix([start_parts, path_parts]))

    climb = [os.path.pardir] * (len(start_parts) - shared)
    rel_parts = climb + path_parts[shared:]
    if not rel_parts:
        return os.path.curdir
    return os.path.join(*rel_parts)


# Pick the implementation matching the active os.path module; anything
# other than ntpath gets the POSIX variant.
if os.path is sys.modules.get('ntpath'):
    relpath = _relpath_nt
else:
    relpath = _relpath_posix
| apache-2.0 |
AsiganTheSunk/python-torcurl | torcurl/listeners/ExitRelayListener.py | 1 | 1750 | #!/usr/bin/env python
import functools
import os
from stem import StreamStatus
from stem.control import EventType, Controller
from time import sleep
from ExitRelay import ExitRelay
class ExitRelayListener():
    """Attach a stem STREAM-event listener to the local Tor controller and
    persist details of the exit relay used for each successful stream."""

    def __init__(self):
        print("TorPyCurl Status: Tracking Tor Exit...")
        self.controller = Controller.from_port()
        # SECURITY NOTE(review): hard-coded controller password; this should
        # come from configuration or the environment, not source code.
        self.controller.authenticate('ultramegachachi')
        # bind the controller as the first argument of the event callback
        stream_listener = functools.partial(self.stream_event, self.controller)
        self.controller.add_event_listener(stream_listener, EventType.STREAM)
        # presumably gives the listener time to observe stream events before
        # the constructor returns -- TODO confirm the fixed 10s is intended
        sleep(10)

    def stream_event(self, controller, event):
        """Handle a stem STREAM event: for a stream that SUCCEEDED over a
        known circuit, look up its exit relay and save an ExitRelay record."""
        if event.status == StreamStatus.SUCCEEDED and event.circ_id:
            circ = controller.get_circuit(event.circ_id)
            # the last hop of the circuit path is the exit relay
            exit_fingerprint = circ.path[-1][0]
            exit_relay = controller.get_network_status(exit_fingerprint)
            #print("Exit relay for our connection to %s" % (event.target))
            #print(" address: %s:%i" % (exit_relay.address, exit_relay.or_port))
            #print(" fingerprint: %s" % exit_relay.fingerprint)
            #print(" nickname: %s" % exit_relay.nickname)
            #print(" locale: %s" % controller.get_info("ip-to-country/%s" % exit_relay.address, 'unknown'))
            result = ExitRelay(exit_relay=str(event.target),
                               exit_address=(str(exit_relay.address) + ':' + str(exit_relay.or_port)),
                               exit_fingerprint=str(exit_relay.fingerprint), exit_nickname=str(exit_relay.nickname),
                               exit_locale=str(controller.get_info('ip-to-country/%s' % exit_relay.address, 'unknown')))
            result.save()
            #print(result)
            return
return | mit |
happy5214/pywikibot-core | pywikibot/data/mysql.py | 5 | 2289 | # -*- coding: utf-8 -*-
"""Miscellaneous helper functions for mysql queries."""
#
# (C) Pywikibot team, 2016-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
# Requires oursql <https://pythonhosted.org/oursql/> or
# MySQLdb <https://sourceforge.net/projects/mysql-python/>
try:
import oursql as mysqldb
except ImportError:
import MySQLdb as mysqldb
import pywikibot
from pywikibot import config2 as config
def mysql_query(query, params=(), dbname=None, encoding='utf-8', verbose=None):
    """
    Yield rows from a MySQL query.

    An example query that yields all ns0 pages might look like::

        SELECT
         page_namespace,
         page_title,
        FROM page
        WHERE page_namespace = 0;

    @param query: MySQL query to execute
    @type query: str
    @param params: input parametes for the query, if needed
    @type params: tuple
    @param dbname: db name
    @type dbname: str
    @param encoding: encoding used by the database
    @type encoding: str
    @param verbose: if True, print query to be executed;
        if None, config.verbose_output will be used.
    @type verbose: None or bool
    @return: generator which yield tuples
    """
    if verbose is None:
        verbose = config.verbose_output

    if config.db_connect_file is None:
        conn = mysqldb.connect(config.db_hostname,
                               db=config.db_name_format.format(dbname),
                               user=config.db_username,
                               passwd=config.db_password,
                               port=config.db_port)
    else:
        conn = mysqldb.connect(config.db_hostname,
                               db=config.db_name_format.format(dbname),
                               read_default_file=config.db_connect_file,
                               port=config.db_port)

    try:
        cursor = conn.cursor()
        try:
            if verbose:
                pywikibot.output('Executing query:\n%s' % query)
            # The underlying DB-API drivers expect byte strings here.
            query = query.encode(encoding)
            params = tuple(p.encode(encoding) for p in params)

            if params:
                cursor.execute(query, params)
            else:
                cursor.execute(query)

            for row in cursor:
                yield row
        finally:
            cursor.close()
    finally:
        # Fix for a resource leak: the previous version only closed the
        # cursor/connection after a *complete* iteration, so an exception
        # during execute() or an abandoned generator leaked the connection.
        # The finally clauses run when the generator is closed or collected.
        conn.close()
| mit |
dougluce/ansible-modules-core | cloud/amazon/rds_param_group.py | 19 | 11052 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds_param_group
version_added: "1.5"
short_description: manage RDS parameter groups
description:
- Creates, modifies, and deletes RDS parameter groups. This module has a dependency on python-boto >= 2.5.
options:
state:
description:
- Specifies whether the group should be present or absent.
required: true
default: present
aliases: []
choices: [ 'present' , 'absent' ]
name:
description:
- Database parameter group identifier.
required: true
default: null
aliases: []
description:
description:
- Database parameter group description. Only set when a new group is added.
required: false
default: null
aliases: []
engine:
description:
- The type of database for this group. Required for state=present.
required: false
default: null
aliases: []
choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0']
immediate:
description:
- Whether to apply the changes immediately, or after the next reboot of any associated instances.
required: false
default: null
aliases: []
params:
description:
- Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
required: false
default: null
aliases: []
choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0']
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
default: null
aliases: [ 'aws_region', 'ec2_region' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
requirements: [ "boto" ]
author: Scott Anderson
'''
EXAMPLES = '''
# Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
- rds_param_group:
state: present
name: norwegian_blue
description: 'My Fancy Ex Parrot Group'
engine: 'mysql5.6'
params:
auto_increment_increment: "42K"
# Remove a parameter group
- rds_param_group:
state: absent
name: norwegian_blue
'''
import sys
import time
VALID_ENGINES = [
'mysql5.1',
'mysql5.5',
'mysql5.6',
'oracle-ee-11.2',
'oracle-se-11.2',
'oracle-se1-11.2',
'postgres9.3',
'sqlserver-ee-10.5',
'sqlserver-ee-11.0',
'sqlserver-ex-10.5',
'sqlserver-ex-11.0',
'sqlserver-se-10.5',
'sqlserver-se-11.0',
'sqlserver-web-10.5',
'sqlserver-web-11.0',
]
try:
import boto.rds
from boto.exception import BotoServerError
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
# returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group)
class NotModifiableError(StandardError):
    """Raised when an attempt is made to change a read-only RDS parameter."""

    def __init__(self, error_message, *args):
        super(NotModifiableError, self).__init__(error_message, *args)
        self.error_message = error_message

    def __str__(self):
        return 'NotModifiableError: %s' % self.error_message

    # repr and str render identically for this exception.
    __repr__ = __str__
INT_MODIFIERS = {
'K': 1024,
'M': pow(1024, 2),
'G': pow(1024, 3),
'T': pow(1024, 4),
}
TRUE_VALUES = ('on', 'true', 'yes', '1',)
def set_parameter(param, value, immediate):
    """
    Allows setting parameters with 10M = 10* 1024 * 1024 and so on.

    :param param: a boto RDS Parameter object (provides .type, .value, .apply)
    :param value: desired value; strings may carry a K/M/G/T suffix
    :param immediate: forwarded to param.apply() - apply now vs. next reboot
    """
    converted_value = value
    if param.type == 'string':
        converted_value = str(value)
    elif param.type == 'integer':
        # NOTE: basestring - this module is Python 2 only.
        if isinstance(value, basestring):
            try:
                # Expand a trailing K/M/G/T multiplier, e.g. '10M' -> 10485760.
                for modifier in INT_MODIFIERS.keys():
                    if value.endswith(modifier):
                        converted_value = int(value[:-1]) * INT_MODIFIERS[modifier]
                converted_value = int(converted_value)
            except ValueError:
                # may be based on a variable (ie. {foo*3/4}) so
                # just pass it on through to boto
                converted_value = str(value)
        elif type(value) == bool:
            # RDS represents integer-typed booleans as 0/1.
            converted_value = 1 if value else 0
        else:
            converted_value = int(value)
    elif param.type == 'boolean':
        if isinstance(value, basestring):
            # Any value not in TRUE_VALUES is treated as False.
            converted_value = value in TRUE_VALUES
        else:
            converted_value = bool(value)

    param.value = converted_value
    param.apply(immediate)
def modify_group(group, params, immediate=False):
    """ Set all of the params in a group to the provided new params. Raises NotModifiableError if any of the
        params to be changed are read only.

    Returns a tuple ``(changed, remaining)``: ``changed`` maps each updated
    parameter name to its {'old', 'new'} values, ``remaining`` contains the
    requested parameters that were not found in this parameter group.
    """
    changed = {}
    new_params = dict(params)

    for key in new_params.keys():
        # Python 2 idiom; ``group`` is dict-like (boto ParameterGroup).
        if group.has_key(key):
            param = group[key]

            new_value = new_params[key]

            try:
                old_value = param.value
            except ValueError:
                # some versions of boto have problems with retrieving
                # integer values from params that may have their value
                # based on a variable (ie. {foo*3/4}), so grab it in a
                # way that bypasses the property functions
                old_value = param._value

            if old_value != new_value:
                if not param.is_modifiable:
                    raise NotModifiableError('Parameter %s is not modifiable.' % key)

                changed[key] = {'old': param.value, 'new': new_value}

                set_parameter(param, new_value, immediate)

            # Safe while iterating: .keys() returns a list copy in Python 2.
            del new_params[key]

    return changed, new_params
def main():
    """Entry point: create, update or delete an RDS parameter group
    according to the module's ``state`` argument."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
            state = dict(required=True,  choices=['present', 'absent']),
            name = dict(required=True),
            engine = dict(required=False, choices=VALID_ENGINES),
            description = dict(required=False),
            params = dict(required=False, aliases=['parameters'], type='dict'),
            immediate = dict(required=False, type='bool'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    state = module.params.get('state')
    group_name = module.params.get('name').lower()
    group_engine = module.params.get('engine')
    group_description = module.params.get('description')
    group_params = module.params.get('params') or {}
    immediate = module.params.get('immediate') or False

    # Cross-parameter validation: 'present' needs the full group definition,
    # while 'absent' must not carry creation-only options.
    if state == 'present':
        for required in ['name', 'description', 'engine', 'params']:
            if not module.params.get(required):
                module.fail_json(msg = str("Parameter %s required for state='present'" % required))
    else:
        for not_allowed in ['description', 'engine', 'params']:
            if module.params.get(not_allowed):
                module.fail_json(msg = str("Parameter %s not allowed for state='absent'" % not_allowed))

    # Retrieve any AWS settings from the environment.
    ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)

    if not region:
        module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION."))

    try:
        conn = boto.rds.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key)
    except boto.exception.BotoServerError, e:
        module.fail_json(msg = e.error_message)

    # Used to enrich the error message if creation succeeded but a later
    # parameter change failed.
    group_was_added = False

    try:
        changed = False

        try:
            all_groups = conn.get_all_dbparameter_groups(group_name, max_records=100)
            exists = len(all_groups) > 0
        except BotoServerError, e:
            # Treat "not found" as non-existence; anything else is fatal.
            if e.error_code != 'DBParameterGroupNotFound':
                module.fail_json(msg = e.error_message)
            exists = False

        if state == 'absent':
            if exists:
                conn.delete_parameter_group(group_name)
                changed = True
        else:
            changed = {}
            if not exists:
                new_group = conn.create_parameter_group(group_name, engine=group_engine, description=group_description)
                group_was_added = True

            # If a "Marker" is present, this group has more attributes remaining to check. Get the next batch, but only
            # if there are parameters left to set.
            marker = None
            while len(group_params):
                next_group = conn.get_all_dbparameters(group_name, marker=marker)
                changed_params, group_params = modify_group(next_group, group_params, immediate)
                changed.update(changed_params)

                if hasattr(next_group, 'Marker'):
                    marker = next_group.Marker
                else:
                    break

    except BotoServerError, e:
        module.fail_json(msg = e.error_message)

    except NotModifiableError, e:
        msg = e.error_message
        if group_was_added:
            msg = '%s The group "%s" was added first.' % (msg, group_name)
        module.fail_json(msg=msg)

    module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
Adel-Magebinary/odoo | addons/share/wizard/__init__.py | 448 | 1067 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import share_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rgommers/scipy | scipy/_lib/tests/test_warnings.py | 22 | 3761 | """
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all. This file was adapted
from NumPy.
"""
import os
from pathlib import Path
import ast
import tokenize
import scipy
import pytest
class ParseCall(ast.NodeVisitor):
    """Collect the dotted-name parts of a call target, left to right.

    After visiting e.g. the ``func`` node of ``warnings.warn(...)``,
    ``self.ls`` is ``['warnings', 'warn']``.
    """

    def __init__(self):
        self.ls = []

    def visit_Attribute(self, node):
        # Recurse into the value first so names accumulate outermost-first.
        self.generic_visit(node)
        self.ls.append(node.attr)

    def visit_Name(self, node):
        self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
    """AST visitor that records suspicious warning-related calls.

    Collects, as ``filename:lineno`` strings:

    - ``bad_filters``: ``simplefilter``/``filterwarnings`` calls whose first
      positional argument is the string literal ``"ignore"``
    - ``bad_stacklevels``: ``warn``/``warnings.warn`` calls that pass neither
      a third positional argument nor a ``stacklevel`` keyword
    """
    def __init__(self, filename):
        super().__init__()
        self.__filename = filename
        self.bad_filters = []
        self.bad_stacklevels = []

    def visit_Call(self, node):
        # Resolve the dotted name being called (e.g. ['warnings', 'warn']).
        p = ParseCall()
        p.visit(node.func)
        ast.NodeVisitor.generic_visit(self, node)

        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
            # NOTE(review): ``node.args[0].s`` assumes the first argument is a
            # string literal; a variable argument would raise AttributeError.
            if node.args[0].s == "ignore":
                self.bad_filters.append(
                    "{}:{}".format(self.__filename, node.lineno))

        if p.ls[-1] == 'warn' and (
                len(p.ls) == 1 or p.ls[-2] == 'warnings'):

            if self.__filename == "_lib/tests/test_warnings.py":
                # This file
                return

            # See if stacklevel exists:
            # three positional args means stacklevel was passed positionally
            # (warn(message, category, stacklevel)).
            if len(node.args) == 3:
                return
            args = {kw.arg for kw in node.keywords}
            if "stacklevel" not in args:
                self.bad_stacklevels.append(
                    "{}:{}".format(self.__filename, node.lineno))
@pytest.fixture(scope="session")
def warning_calls():
    # combined "ignore" and stacklevel error
    # Scan every .py file under the installed scipy package once per test
    # session and collect offending warning calls via FindFuncs.
    base = Path(scipy.__file__).parent

    bad_filters = []
    bad_stacklevels = []

    for path in base.rglob("*.py"):
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g., LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read(), filename=str(path))
            # Report paths relative to the package root for readable output.
            finder = FindFuncs(path.relative_to(base))
            finder.visit(tree)
            bad_filters.extend(finder.bad_filters)
            bad_stacklevels.extend(finder.bad_stacklevels)

    return bad_filters, bad_stacklevels
@pytest.mark.slow
def test_warning_calls_filters(warning_calls):
    """Fail if any 'ignore' warning filter remains outside known exemptions."""
    bad_filters, _ = warning_calls

    # There is still one simplefilter occurrence in optimize.py that could be
    # removed, and the filterwarnings calls in sparse are needed.
    exempt_fragments = (
        'optimize.py',
        os.path.join('sparse', '__init__.py'),
        os.path.join('sparse', 'sputils.py'),
    )
    remaining = [entry for entry in bad_filters
                 if not any(fragment in entry for fragment in exempt_fragments)]

    if remaining:
        raise AssertionError(
            "warning ignore filter should not be used, instead, use\n"
            "numpy.testing.suppress_warnings (in tests only);\n"
            "found in:\n    {}".format(
                "\n    ".join(remaining)))
@pytest.mark.slow
@pytest.mark.xfail(reason="stacklevels currently missing")
def test_warning_calls_stacklevels(warning_calls):
    """Fail when 'ignore' filters or missing stacklevels are found."""
    bad_filters, bad_stacklevels = warning_calls

    sections = []
    if bad_filters:
        # Trailing blank line kept to match the original message layout.
        sections.append(
            "warning ignore filter should not be used, instead, use\n"
            "numpy.testing.suppress_warnings (in tests only);\n"
            "found in:\n    {}".format("\n    ".join(bad_filters)) + "\n\n")
    if bad_stacklevels:
        sections.append(
            "warnings should have an appropriate stacklevel:\n    {}".format(
                "\n    ".join(bad_stacklevels)))

    msg = "".join(sections)
    if msg:
        raise AssertionError(msg)
| bsd-3-clause |
cakeboss893/volatility | volatility/plugins/getsids.py | 44 | 6517 | # Volatility
# Copyright (C) 2008-2013 Volatility Foundation
#
# Additional Authors:
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#
# Based heavily upon the getsids plugin by Moyix
# http://kurtz.cs.wesleyan.edu/%7Ebdolangavitt/memory/getsids.py
"""
@author: AAron Walters and Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: awalters@4tphi.net,bdolangavitt@wesleyan.edu
@organization: Volatility Foundation
"""
import volatility.plugins.taskmods as taskmods
import re
def find_sid_re(sid_string, sid_re_list):
    """Return the friendly name for the first pattern in ``sid_re_list``
    (an iterable of ``(compiled_regex, name)`` pairs) that matches
    ``sid_string``, or None when nothing matches."""
    for pattern, friendly_name in sid_re_list:
        if pattern.search(sid_string):
            return friendly_name
    return None
well_known_sid_re = [
(re.compile(r'S-1-5-[0-9-]+-500'), 'Administrator'),
(re.compile(r'S-1-5-[0-9-]+-501'), 'Guest'),
(re.compile(r'S-1-5-[0-9-]+-502'), 'KRBTGT'),
(re.compile(r'S-1-5-[0-9-]+-512'), 'Domain Admins'),
(re.compile(r'S-1-5-[0-9-]+-513'), 'Domain Users'),
(re.compile(r'S-1-5-[0-9-]+-514'), 'Domain Guests'),
(re.compile(r'S-1-5-[0-9-]+-515'), 'Domain Computers'),
(re.compile(r'S-1-5-[0-9-]+-516'), 'Domain Controllers'),
(re.compile(r'S-1-5-[0-9-]+-517'), 'Cert Publishers'),
(re.compile(r'S-1-5-[0-9-]+-520'), 'Group Policy Creator Owners'),
(re.compile(r'S-1-5-[0-9-]+-533'), 'RAS and IAS Servers'),
(re.compile(r'S-1-5-5-[0-9]+-[0-9]+'), 'Logon Session'),
(re.compile(r'S-1-5-21-[0-9-]+-518'), 'Schema Admins'),
(re.compile(r'S-1-5-21-[0-9-]+-519'), 'Enterprise Admins'),
(re.compile(r'S-1-5-21-[0-9-]+-553'), 'RAS Servers'),
]
well_known_sids = {
'S-1-0': 'Null Authority',
'S-1-0-0': 'Nobody',
'S-1-1': 'World Authority',
'S-1-1-0': 'Everyone',
'S-1-2': 'Local Authority',
'S-1-2-0': 'Local (Users with the ability to log in locally)',
'S-1-2-1': 'Console Logon (Users who are logged onto the physical console)',
'S-1-3': 'Creator Authority',
'S-1-3-0': 'Creator Owner',
'S-1-3-1': 'Creator Group',
'S-1-3-2': 'Creator Owner Server',
'S-1-3-3': 'Creator Group Server',
'S-1-3-4': 'Owner Rights',
'S-1-4': 'Non-unique Authority',
'S-1-5': 'NT Authority',
'S-1-5-1': 'Dialup',
'S-1-5-2': 'Network',
'S-1-5-3': 'Batch',
'S-1-5-4': 'Interactive',
'S-1-5-6': 'Service',
'S-1-5-7': 'Anonymous',
'S-1-5-8': 'Proxy',
'S-1-5-9': 'Enterprise Domain Controllers',
'S-1-5-10': 'Principal Self',
'S-1-5-11': 'Authenticated Users',
'S-1-5-12': 'Restricted Code',
'S-1-5-13': 'Terminal Server Users',
'S-1-5-14': 'Remote Interactive Logon',
'S-1-5-15': 'This Organization',
'S-1-5-17': 'This Organization (Used by the default IIS user)',
'S-1-5-18': 'Local System',
'S-1-5-19': 'NT Authority',
'S-1-5-20': 'NT Authority',
'S-1-5-32-544': 'Administrators',
'S-1-5-32-545': 'Users',
'S-1-5-32-546': 'Guests',
'S-1-5-32-547': 'Power Users',
'S-1-5-32-548': 'Account Operators',
'S-1-5-32-549': 'Server Operators',
'S-1-5-32-550': 'Print Operators',
'S-1-5-32-551': 'Backup Operators',
'S-1-5-32-552': 'Replicators',
'S-1-5-32-554': 'BUILTIN\Pre-Windows 2000 Compatible Access',
'S-1-5-32-555': 'BUILTIN\Remote Desktop Users',
'S-1-5-32-556': 'BUILTIN\Network Configuration Operators',
'S-1-5-32-557': 'BUILTIN\Incoming Forest Trust Builders',
'S-1-5-32-558': 'BUILTIN\Performance Monitor Users',
'S-1-5-32-559': 'BUILTIN\Performance Log Users',
'S-1-5-32-560': 'BUILTIN\Windows Authorization Access Group',
'S-1-5-32-561': 'BUILTIN\Terminal Server License Servers',
'S-1-5-32-562': 'BUILTIN\Distributed COM Users',
'S-1-5-32-568': 'BUILTIN\IIS IUSRS',
'S-1-5-32-569': 'Cryptographic Operators',
'S-1-5-32-573': 'BUILTIN\Event Log Readers',
'S-1-5-32-574': 'BUILTIN\Certificate Service DCOM Access',
'S-1-5-33': 'Write Restricted',
'S-1-5-64-10': 'NTLM Authentication',
'S-1-5-64-14': 'SChannel Authentication',
'S-1-5-64-21': 'Digest Authentication',
'S-1-5-80': 'NT Service',
'S-1-5-86-1544737700-199408000-2549878335-3519669259-381336952': 'WMI (Local Service)',
'S-1-5-86-615999462-62705297-2911207457-59056572-3668589837': 'WMI (Network Service)',
'S-1-5-1000': 'Other Organization',
'S-1-16-0': 'Untrusted Mandatory Level',
'S-1-16-4096': 'Low Mandatory Level',
'S-1-16-8192': 'Medium Mandatory Level',
'S-1-16-8448': 'Medium Plus Mandatory Level',
'S-1-16-12288': 'High Mandatory Level',
'S-1-16-16384': 'System Mandatory Level',
'S-1-16-20480': 'Protected Process Mandatory Level',
'S-1-16-28672': 'Secure Process Mandatory Level',
}
class GetSIDs(taskmods.DllList):
    """Print the SIDs owning each process"""

    # Declare meta information associated with this plugin
    meta_info = {}
    meta_info['author'] = 'Brendan Dolan-Gavitt'
    meta_info['copyright'] = 'Copyright (c) 2007,2008 Brendan Dolan-Gavitt'
    meta_info['contact'] = 'bdolangavitt@wesleyan.edu'
    meta_info['license'] = 'GNU General Public License 2.0'
    meta_info['url'] = 'http://moyix.blogspot.com/'
    meta_info['os'] = 'WIN_32_XP_SP2'
    meta_info['version'] = '1.0'

    def render_text(self, outfd, data):
        """Renders the sids as text.

        For each task, writes one line per SID in the task's token,
        annotated with a friendly name when the SID is a well-known one
        (exact match in ``well_known_sids`` first, then the regex patterns
        in ``well_known_sid_re``).
        """
        for task in data:
            token = task.get_token()

            if not token:
                outfd.write("{0} ({1}): Token unreadable\n".format(task.ImageFileName, int(task.UniqueProcessId)))
                continue
            for sid_string in token.get_sids():
                if sid_string in well_known_sids:
                    sid_name = " ({0})".format(well_known_sids[sid_string])
                else:
                    # Fall back to the pattern table (domain-relative SIDs).
                    sid_name_re = find_sid_re(sid_string, well_known_sid_re)
                    if sid_name_re:
                        sid_name = " ({0})".format(sid_name_re)
                    else:
                        sid_name = ""

                outfd.write("{0} ({1}): {2}{3}\n".format(task.ImageFileName, task.UniqueProcessId, sid_string, sid_name))
| gpl-2.0 |
jameskeaveney/ElecSus | elecsus/libs/RRFittingRoutine.py | 1 | 6567 | # Copyright 2014-2019 M. A. Zentile, J. Keaveney, L. Weller, D. Whiting,
# C. S. Adams and I. G. Hughes.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Random restart fitting routine.
Fit by taking a random sample around parameters and then
fit using Marquardt-Levenberg.
Complete rebuild of the original RR fitting module now using lmfit
Author: JK
Last updated 2018-02-21 MAZ
"""
# py 2.7 compatibility
from __future__ import (division, print_function, absolute_import)
import numpy as np
import matplotlib.pyplot as plt
import warnings
import sys
import copy
import psutil
from multiprocessing import Pool
import MLFittingRoutine as ML
import lmfit as lm
from spectra import get_spectra
p_dict_bounds_default = {'lcell':1e-3,'Bfield':100., 'T':20.,
'GammaBuf':20., 'shift':100.,
# Polarisation of light
'theta0':10., 'E_x':0.05, 'E_y':0.05, 'E_phase':0.01,
# B-field angle w.r.t. light k-vector
'Btheta':10*3.14/180, 'Bphi':10*3.14/180,
'DoppTemp':20.,
'rb85frac':1, 'K40frac':1, 'K41frac':1,
}
def evaluate(args):
    """Worker for the multiprocessing pool.

    ``args`` is a 5-tuple ``(data, E_in, p_dict, p_dict_bools, data_type)``.
    Runs one Marquardt-Levenberg fit and returns ``(redchi, best_params)``,
    i.e. the reduced chi-squared value and the best-fit parameters.
    """
    data, E_in, p_dict, p_dict_bools, data_type = args[0], args[1], args[2], args[3], args[4]

    best_params, result = ML.ML_fit(data, E_in, p_dict, p_dict_bools, data_type)

    # Only picklable values are returned (the lmfit result object is not).
    return result.redchi, best_params
def RR_fit(data,E_in,p_dict,p_dict_bools,p_dict_bounds=None,no_evals=None,data_type='S0',verbose=False):
    """
    Random restart fitting method.

    data: an Nx2 iterable for the x and y data to be fitted
    E_in: the initial electric field input. See docstring for the spectra.py module for details.
    no_evals: The number of randomly-selected start points for downhill fitting. Defaults to nFitParams**2 + 5 where nFitParams is
        the number of varying fit parameters
    p_dict: dictionary containing all the calculation (initial) parameters
    p_dict_bools: dictionary with the same keys as p_dict, with Boolean values representing each parameter that is to be varied in the fitting
    p_dict_bounds: dictionary with the same keys as p_dict, with values that are pairs of min/max values that each parameter can take.
        NOTE: this works slightly differently to p_dict_bounds in the other fitting methods. In RR fitting, the bounds
        select the range in parameter space that is randomly explored as starting parameters for a downhill fit, rather than being
        strict bounds on the fit parameters.
    data_type: Data type to fit experimental data to. Can be one of:
        'S0', 'S1', 'S2', 'S3', 'Ix', 'Iy', ...
    verbose: Boolean - more print statements provided as the program progresses

    Returns (best_values, final_result): the best-fit parameter dictionary and
    the lmfit result object from the final downhill fit.
    """
    if p_dict_bounds is None:
        p_dict_bounds = p_dict_bounds_default

    print('Starting Random Restart Fitting Routine')

    x = np.array(data[0])
    y = np.array(data[1])

    # Fold the electric-field description into the parameter dictionary.
    p_dict['E_x'] = E_in[0]
    p_dict['E_y'] = E_in[1][0]
    p_dict['E_phase'] = E_in[1][1]

    # count number of fit parameters
    nFitParams = 0
    for key in p_dict_bools:
        if p_dict_bools[key]: nFitParams += 1

    # default number of iterations based on number of fit parameters
    if no_evals == None:
        no_evals = nFitParams**2 + 5 # 2**(3+2*nFitParams)

    # Create random array of starting parameters based on parameter ranges given in p_dict range dictionary
    # Scattered uniformly over the parameter space

    #clone the parameter dictionary
    p_dict_list = []
    for i in range(no_evals):
        p_dict_list.append(copy.deepcopy(p_dict))

    # Perturb each varying parameter uniformly within +/- its bound.
    for key in p_dict_bools:
        if p_dict_bools[key]==True:
            start_vals = p_dict[key]
            #print start_vals
            for i in range(len(p_dict_list)):
                p_dict_list[i][key] = start_vals + np.random.uniform(-1,1) * p_dict_bounds[key]

    if verbose:
        print('List of initial parameter dictionaries:')
        for pd in p_dict_list:
            print(pd)
        #print p_dict_list
        print('\n\n')

    #Do parallel ML fitting by utilising multiple cores
    po = Pool() # Pool() uses all cores, Pool(3) uses 3 cores for example.

    ## use lower process priority so computer is still responsive while calculating!!
    # parent = psutil.Process()
    # parent.nice(psutil.BELOW_NORMAL_PRIORITY_CLASS)
    # for child in parent.children():
    #     child.nice(psutil.IDLE_PRIORITY_CLASS)

    args_list = [(data, E_in, p_dict_list[k], p_dict_bools, data_type) for k in range(no_evals)]

    Res = po.map_async(evaluate,args_list)

    result = Res.get()
    po.close()
    po.join()

    if verbose: print('RR calculation complete')

    #Find best fit
    result = np.array(result)
    #print result
    #result = result.astype(np.float64)
    lineMin = np.argmin(result[:,0]) ## pick the fit with the lowest cost value
    best_values = result[lineMin][1] # best parameter dictionary

    if verbose:
        print('\n\n\n')
        print(best_values)

    p_dict_best = copy.deepcopy(p_dict)
    p_dict_best.update(best_values)

    # Finally run the ML fitting one more time, using the best parameters
    # (so we get the final_result object, which cannot be pickled and therefore isn't supported in multiprocessing)
    best_values, final_result = ML.ML_fit(data, E_in, p_dict_best, p_dict_bools, data_type)

    # return best fit parameters, and the lmfit result object
    return best_values, final_result
def test_fit():
    """Smoke test / demo: fit synthetic noisy Rb D2 'S1' spectra with RR_fit
    and plot the data alongside the best fit."""
    p_dict = {'Elem':'Rb','Dline':'D2','T':80.,'lcell':2e-3,'Bfield':600.,'Btheta':0.,
              'Bphi':0.,'GammaBuf':0.,'shift':0.}

    # only need to specify parameters that are varied
    p_dict_bools = {'T':True,'Bfield':True,'E_x':True}
    p_dict_bounds = {'T':10,'Bfield':100,'E_x':0.01}

    # Field expressed both as a vector and in the [real, [mag, phase]] form
    # expected by RR_fit.
    E_in = np.array([0.7,0.7,0])
    E_in_angle = [E_in[0].real,[abs(E_in[1]),np.angle(E_in[1])]]
    print(E_in_angle)

    # Synthetic data: calculated spectrum plus Gaussian noise.
    x = np.linspace(-10000,10000,100)
    [y] = get_spectra(x,E_in,p_dict,outputs=['S1']) + np.random.randn(len(x))*0.015

    data = [x,y.real]

    best_params, result = RR_fit(data, E_in_angle, p_dict, p_dict_bools, p_dict_bounds, no_evals = 8, data_type='S1')

    report = result.fit_report()
    fit = result.best_fit

    print(report)

    plt.plot(x,y,'ko')
    plt.plot(x,fit,'r-',lw=2)
    plt.show()
if __name__ == '__main__':
test_fit()
| apache-2.0 |
vmax-feihu/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/prototypes/predicates.py | 623 | 1777 | """
This module houses the GEOS ctypes prototype functions for the
unary and binary predicate operations on geometries.
"""
from ctypes import c_char, c_char_p, c_double
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
## Binary & unary predicate functions ##
def binary_predicate(func, *args):
    """For GEOS binary predicate functions.

    Configures the ctypes prototype: two geometry pointers plus any extra
    argument types, a char return checked by ``check_predicate``.
    """
    func.argtypes = [GEOM_PTR, GEOM_PTR] + list(args)
    func.restype = c_char
    func.errcheck = check_predicate
    return func
def unary_predicate(func):
    """For GEOS unary predicate functions.

    Configures the ctypes prototype: one geometry pointer argument, a char
    return checked by ``check_predicate``.
    """
    for attr, value in (('argtypes', [GEOM_PTR]),
                        ('restype', c_char),
                        ('errcheck', check_predicate)):
        setattr(func, attr, value)
    return func
## Unary Predicates ##
geos_hasz = unary_predicate(GEOSFunc('GEOSHasZ'))
geos_isempty = unary_predicate(GEOSFunc('GEOSisEmpty'))
geos_isring = unary_predicate(GEOSFunc('GEOSisRing'))
geos_issimple = unary_predicate(GEOSFunc('GEOSisSimple'))
geos_isvalid = unary_predicate(GEOSFunc('GEOSisValid'))
## Binary Predicates ##
geos_contains = binary_predicate(GEOSFunc('GEOSContains'))
geos_crosses = binary_predicate(GEOSFunc('GEOSCrosses'))
geos_disjoint = binary_predicate(GEOSFunc('GEOSDisjoint'))
geos_equals = binary_predicate(GEOSFunc('GEOSEquals'))
geos_equalsexact = binary_predicate(GEOSFunc('GEOSEqualsExact'), c_double)
geos_intersects = binary_predicate(GEOSFunc('GEOSIntersects'))
geos_overlaps = binary_predicate(GEOSFunc('GEOSOverlaps'))
geos_relatepattern = binary_predicate(GEOSFunc('GEOSRelatePattern'), c_char_p)
geos_touches = binary_predicate(GEOSFunc('GEOSTouches'))
geos_within = binary_predicate(GEOSFunc('GEOSWithin'))
| apache-2.0 |
vijaylbais/boto | boto/dynamodb/layer2.py | 135 | 33814 | # Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.dynamodb.layer1 import Layer1
from boto.dynamodb.table import Table
from boto.dynamodb.schema import Schema
from boto.dynamodb.item import Item
from boto.dynamodb.batch import BatchList, BatchWriteList
from boto.dynamodb.types import get_dynamodb_type, Dynamizer, \
LossyFloatDynamizer, NonBooleanDynamizer
class TableGenerator(object):
    """
    This is an object that wraps up the table_generator function.
    The only real reason to have this is that we want to be able
    to accumulate and return the ConsumedCapacityUnits element that
    is part of each response.
    :ivar last_evaluated_key: A sequence representing the key(s)
        of the item last evaluated, or None if no additional
        results are available.
    :ivar remaining: The remaining quantity of results requested.
    :ivar table: The table to which the call was made.
    """
    def __init__(self, table, callable, remaining, item_class, kwargs):
        # ``callable`` is the bound Layer1 method (query or scan) invoked
        # once per "page" of results; ``kwargs`` are passed through to it.
        self.table = table
        self.callable = callable
        # -1 is the sentinel for "no overall result limit requested".
        self.remaining = -1 if remaining is None else remaining
        self.item_class = item_class
        self.kwargs = kwargs
        self._consumed_units = 0.0
        self.last_evaluated_key = None
        self._count = 0
        self._scanned_count = 0
        # Populated lazily by the first access to ``self.response``.
        self._response = None
    @property
    def count(self):
        """
        The total number of items retrieved thus far. This value changes with
        iteration and even when issuing a call with count=True, it is necessary
        to complete the iteration to assert an accurate count value.
        """
        # Accessing ``response`` forces at least one request to be issued.
        self.response
        return self._count
    @property
    def scanned_count(self):
        """
        As above, but representing the total number of items scanned by
        DynamoDB, without regard to any filters.
        """
        self.response
        return self._scanned_count
    @property
    def consumed_units(self):
        """
        Returns a float representing the ConsumedCapacityUnits accumulated.
        """
        self.response
        return self._consumed_units
    @property
    def response(self):
        """
        The current response to the call from DynamoDB.
        """
        return self.next_response() if self._response is None else self._response
    def next_response(self):
        """
        Issue a call and return the result. You can invoke this method
        while iterating over the TableGenerator in order to skip to the
        next "page" of results.
        """
        # preserve any existing limit in case the user alters self.remaining
        limit = self.kwargs.get('limit')
        if (self.remaining > 0 and (limit is None or limit > self.remaining)):
            self.kwargs['limit'] = self.remaining
        self._response = self.callable(**self.kwargs)
        self.kwargs['limit'] = limit
        # Accumulate bookkeeping counters across pages.
        self._consumed_units += self._response.get('ConsumedCapacityUnits', 0.0)
        self._count += self._response.get('Count', 0)
        self._scanned_count += self._response.get('ScannedCount', 0)
        # at the expense of a possibly gratuitous dynamize, ensure that
        # early generator termination won't result in bad LEK values
        if 'LastEvaluatedKey' in self._response:
            lek = self._response['LastEvaluatedKey']
            esk = self.table.layer2.dynamize_last_evaluated_key(lek)
            self.kwargs['exclusive_start_key'] = esk
            lektuple = (lek['HashKeyElement'],)
            if 'RangeKeyElement' in lek:
                lektuple += (lek['RangeKeyElement'],)
            self.last_evaluated_key = lektuple
        else:
            self.last_evaluated_key = None
        return self._response
    def __iter__(self):
        # ``remaining`` counts down to 0 when a max_results limit was set;
        # -1 (no limit) never reaches 0, so pagination continues until the
        # service stops returning a LastEvaluatedKey.
        while self.remaining != 0:
            response = self.response
            for item in response.get('Items', []):
                self.remaining -= 1
                yield self.item_class(self.table, attrs=item)
                if self.remaining == 0:
                    break
                # The caller may have advanced pages manually by invoking
                # next_response() while iterating; detect that and restart
                # item iteration on the new page.
                if response is not self._response:
                    break
            else:
                # Item loop exhausted normally: fetch the next page if the
                # service indicated more results, otherwise stop.
                if self.last_evaluated_key is not None:
                    self.next_response()
                    continue
                break
            if response is not self._response:
                continue
            break
class Layer2(object):
    """
    High-level interface to Amazon DynamoDB (original 2011-era API).

    Wraps :class:`boto.dynamodb.layer1.Layer1`, which performs the raw
    HTTP/JSON calls, and converts between Python values and the DynamoDB
    wire format using a ``Dynamizer``.
    """
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 debug=0, security_token=None, region=None,
                 validate_certs=True, dynamizer=LossyFloatDynamizer,
                 profile_name=None):
        self.layer1 = Layer1(aws_access_key_id, aws_secret_access_key,
                             is_secure, port, proxy, proxy_port,
                             debug, security_token, region,
                             validate_certs=validate_certs,
                             profile_name=profile_name)
        # The dynamizer encodes/decodes between Python types and the
        # DynamoDB wire representation; see also use_decimals().
        self.dynamizer = dynamizer()
    def use_decimals(self, use_boolean=False):
        """
        Use the ``decimal.Decimal`` type for encoding/decoding numeric types.
        By default, ints/floats are used to represent numeric types
        ('N', 'NS') received from DynamoDB. Using the ``Decimal``
        type is recommended to prevent loss of precision.
        """
        # Eventually this should be made the default dynamizer.
        self.dynamizer = Dynamizer() if use_boolean else NonBooleanDynamizer()
    def dynamize_attribute_updates(self, pending_updates):
        """
        Convert a set of pending item updates into the structure
        required by Layer1.
        """
        d = {}
        for attr_name in pending_updates:
            action, value = pending_updates[attr_name]
            if value is None:
                # DELETE without an attribute value
                d[attr_name] = {"Action": action}
            else:
                d[attr_name] = {"Action": action,
                                "Value": self.dynamizer.encode(value)}
        return d
    def dynamize_item(self, item):
        """
        Convert an item's attributes into the encoded attribute/value
        mapping required by Layer1.
        """
        d = {}
        for attr_name in item:
            d[attr_name] = self.dynamizer.encode(item[attr_name])
        return d
    def dynamize_range_key_condition(self, range_key_condition):
        """
        Convert a layer2 range_key_condition parameter into the
        structure required by Layer1.
        """
        return range_key_condition.to_dict()
    def dynamize_scan_filter(self, scan_filter):
        """
        Convert a layer2 scan_filter parameter into the
        structure required by Layer1.
        """
        d = None
        if scan_filter:
            d = {}
            for attr_name in scan_filter:
                condition = scan_filter[attr_name]
                d[attr_name] = condition.to_dict()
        return d
    def dynamize_expected_value(self, expected_value):
        """
        Convert an expected_value parameter into the data structure
        required for Layer1.
        """
        d = None
        if expected_value:
            d = {}
            for attr_name in expected_value:
                attr_value = expected_value[attr_name]
                # True/False are shorthand for "attribute must (not) exist";
                # any other value asserts the attribute's exact value.
                if attr_value is True:
                    attr_value = {'Exists': True}
                elif attr_value is False:
                    attr_value = {'Exists': False}
                else:
                    val = self.dynamizer.encode(expected_value[attr_name])
                    attr_value = {'Value': val}
                d[attr_name] = attr_value
        return d
    def dynamize_last_evaluated_key(self, last_evaluated_key):
        """
        Convert a last_evaluated_key parameter into the data structure
        required for Layer1.
        """
        d = None
        if last_evaluated_key:
            hash_key = last_evaluated_key['HashKeyElement']
            d = {'HashKeyElement': self.dynamizer.encode(hash_key)}
            if 'RangeKeyElement' in last_evaluated_key:
                range_key = last_evaluated_key['RangeKeyElement']
                d['RangeKeyElement'] = self.dynamizer.encode(range_key)
        return d
    def build_key_from_values(self, schema, hash_key, range_key=None):
        """
        Build a Key structure to be used for accessing items
        in Amazon DynamoDB. This method takes the supplied hash_key
        and optional range_key and validates them against the
        schema. If there is a mismatch, a TypeError is raised.
        Otherwise, a Python dict version of a Amazon DynamoDB Key
        data structure is returned.
        :type hash_key: int|float|str|unicode|Binary
        :param hash_key: The hash key of the item you are looking for.
            The type of the hash key should match the type defined in
            the schema.
        :type range_key: int|float|str|unicode|Binary
        :param range_key: The range key of the item you are looking for.
            This should be supplied only if the schema requires a
            range key. The type of the range key should match the
            type defined in the schema.
        """
        dynamodb_key = {}
        dynamodb_value = self.dynamizer.encode(hash_key)
        # The encoded value is a single-entry dict keyed by the wire type
        # (e.g. 'S', 'N'); compare that type tag against the schema.
        if list(dynamodb_value.keys())[0] != schema.hash_key_type:
            msg = 'Hashkey must be of type: %s' % schema.hash_key_type
            raise TypeError(msg)
        dynamodb_key['HashKeyElement'] = dynamodb_value
        if range_key is not None:
            dynamodb_value = self.dynamizer.encode(range_key)
            if list(dynamodb_value.keys())[0] != schema.range_key_type:
                msg = 'RangeKey must be of type: %s' % schema.range_key_type
                raise TypeError(msg)
            dynamodb_key['RangeKeyElement'] = dynamodb_value
        return dynamodb_key
    def new_batch_list(self):
        """
        Return a new, empty :class:`boto.dynamodb.batch.BatchList`
        object.
        """
        return BatchList(self)
    def new_batch_write_list(self):
        """
        Return a new, empty :class:`boto.dynamodb.batch.BatchWriteList`
        object.
        """
        return BatchWriteList(self)
    def list_tables(self, limit=None):
        """
        Return a list of the names of all tables associated with the
        current account and region.
        :type limit: int
        :param limit: The maximum number of tables to return.
        """
        tables = []
        start_table = None
        while not limit or len(tables) < limit:
            this_round_limit = None
            if limit:
                this_round_limit = limit - len(tables)
                # The ListTables API returns at most 100 names per request,
                # so clamp the per-call limit and paginate.
                this_round_limit = min(this_round_limit, 100)
            result = self.layer1.list_tables(limit=this_round_limit, start_table=start_table)
            tables.extend(result.get('TableNames', []))
            start_table = result.get('LastEvaluatedTableName', None)
            if not start_table:
                break
        return tables
    def describe_table(self, name):
        """
        Retrieve information about an existing table.
        :type name: str
        :param name: The name of the desired table.
        """
        return self.layer1.describe_table(name)
    def table_from_schema(self, name, schema):
        """
        Create a Table object from a schema.
        This method will create a Table object without
        making any API calls. If you know the name and schema
        of the table, you can use this method instead of
        ``get_table``.
        Example usage::
            table = layer2.table_from_schema(
                'tablename',
                Schema.create(hash_key=('foo', 'N')))
        :type name: str
        :param name: The name of the table.
        :type schema: :class:`boto.dynamodb.schema.Schema`
        :param schema: The schema associated with the table.
        :rtype: :class:`boto.dynamodb.table.Table`
        :return: A Table object representing the table.
        """
        return Table.create_from_schema(self, name, schema)
    def get_table(self, name):
        """
        Retrieve the Table object for an existing table.
        :type name: str
        :param name: The name of the desired table.
        :rtype: :class:`boto.dynamodb.table.Table`
        :return: A Table object representing the table.
        """
        response = self.layer1.describe_table(name)
        return Table(self, response)
    # Alias kept for backwards compatibility with older boto code.
    lookup = get_table
    def create_table(self, name, schema, read_units, write_units):
        """
        Create a new Amazon DynamoDB table.
        :type name: str
        :param name: The name of the desired table.
        :type schema: :class:`boto.dynamodb.schema.Schema`
        :param schema: The Schema object that defines the schema used
            by this table.
        :type read_units: int
        :param read_units: The value for ReadCapacityUnits.
        :type write_units: int
        :param write_units: The value for WriteCapacityUnits.
        :rtype: :class:`boto.dynamodb.table.Table`
        :return: A Table object representing the new Amazon DynamoDB table.
        """
        response = self.layer1.create_table(name, schema.dict,
                                            {'ReadCapacityUnits': read_units,
                                             'WriteCapacityUnits': write_units})
        return Table(self, response)
    def update_throughput(self, table, read_units, write_units):
        """
        Update the ProvisionedThroughput for the Amazon DynamoDB Table.
        :type table: :class:`boto.dynamodb.table.Table`
        :param table: The Table object whose throughput is being updated.
        :type read_units: int
        :param read_units: The new value for ReadCapacityUnits.
        :type write_units: int
        :param write_units: The new value for WriteCapacityUnits.
        """
        response = self.layer1.update_table(table.name,
                                            {'ReadCapacityUnits': read_units,
                                             'WriteCapacityUnits': write_units})
        table.update_from_response(response)
    def delete_table(self, table):
        """
        Delete this table and all items in it. After calling this
        the Table objects status attribute will be set to 'DELETING'.
        :type table: :class:`boto.dynamodb.table.Table`
        :param table: The Table object that is being deleted.
        """
        response = self.layer1.delete_table(table.name)
        table.update_from_response(response)
    def create_schema(self, hash_key_name, hash_key_proto_value,
                      range_key_name=None, range_key_proto_value=None):
        """
        Create a Schema object used when creating a Table.
        :type hash_key_name: str
        :param hash_key_name: The name of the HashKey for the schema.
        :type hash_key_proto_value: int|long|float|str|unicode|Binary
        :param hash_key_proto_value: A sample or prototype of the type
            of value you want to use for the HashKey. Alternatively,
            you can also just pass in the Python type (e.g. int, float, etc.).
        :type range_key_name: str
        :param range_key_name: The name of the RangeKey for the schema.
            This parameter is optional.
        :type range_key_proto_value: int|long|float|str|unicode|Binary
        :param range_key_proto_value: A sample or prototype of the type
            of value you want to use for the RangeKey. Alternatively,
            you can also pass in the Python type (e.g. int, float, etc.)
            This parameter is optional.
        """
        hash_key = (hash_key_name, get_dynamodb_type(hash_key_proto_value))
        if range_key_name and range_key_proto_value is not None:
            range_key = (range_key_name,
                         get_dynamodb_type(range_key_proto_value))
        else:
            range_key = None
        return Schema.create(hash_key, range_key)
    def get_item(self, table, hash_key, range_key=None,
                 attributes_to_get=None, consistent_read=False,
                 item_class=Item):
        """
        Retrieve an existing item from the table.
        :type table: :class:`boto.dynamodb.table.Table`
        :param table: The Table object from which the item is retrieved.
        :type hash_key: int|long|float|str|unicode|Binary
        :param hash_key: The HashKey of the requested item. The
            type of the value must match the type defined in the
            schema for the table.
        :type range_key: int|long|float|str|unicode|Binary
        :param range_key: The optional RangeKey of the requested item.
            The type of the value must match the type defined in the
            schema for the table.
        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.
        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued. Otherwise, an eventually consistent
            request is issued.
        :type item_class: Class
        :param item_class: Allows you to override the class used
            to generate the items. This should be a subclass of
            :class:`boto.dynamodb.item.Item`
        """
        key = self.build_key_from_values(table.schema, hash_key, range_key)
        response = self.layer1.get_item(table.name, key,
                                        attributes_to_get, consistent_read,
                                        object_hook=self.dynamizer.decode)
        # NOTE(review): if the item does not exist the response has no
        # 'Item' key and this raises KeyError -- confirm callers rely on
        # that behavior before changing it.
        item = item_class(table, hash_key, range_key, response['Item'])
        if 'ConsumedCapacityUnits' in response:
            item.consumed_units = response['ConsumedCapacityUnits']
        return item
    def batch_get_item(self, batch_list):
        """
        Return a set of attributes for a multiple items in
        multiple tables using their primary keys.
        :type batch_list: :class:`boto.dynamodb.batch.BatchList`
        :param batch_list: A BatchList object which consists of a
            list of :class:`boto.dynamodb.batch.Batch` objects.
            Each Batch object contains the information about one
            batch of objects that you wish to retrieve in this
            request.
        """
        request_items = batch_list.to_dict()
        return self.layer1.batch_get_item(request_items,
                                          object_hook=self.dynamizer.decode)
    def batch_write_item(self, batch_list):
        """
        Performs multiple Puts and Deletes in one batch.
        :type batch_list: :class:`boto.dynamodb.batch.BatchWriteList`
        :param batch_list: A BatchWriteList object which consists of a
            list of :class:`boto.dynamodb.batch.BatchWrite` objects.
            Each Batch object contains the information about one
            batch of objects that you wish to put or delete.
        """
        request_items = batch_list.to_dict()
        return self.layer1.batch_write_item(request_items,
                                            object_hook=self.dynamizer.decode)
    def put_item(self, item, expected_value=None, return_values=None):
        """
        Store a new item or completely replace an existing item
        in Amazon DynamoDB.
        :type item: :class:`boto.dynamodb.item.Item`
        :param item: The Item to write to Amazon DynamoDB.
        :type expected_value: dict
        :param expected_value: A dictionary of name/value pairs that you expect.
            This dictionary should have name/value pairs where the name
            is the name of the attribute and the value is either the value
            you are expecting or False if you expect the attribute not to
            exist.
        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        """
        expected_value = self.dynamize_expected_value(expected_value)
        response = self.layer1.put_item(item.table.name,
                                        self.dynamize_item(item),
                                        expected_value, return_values,
                                        object_hook=self.dynamizer.decode)
        if 'ConsumedCapacityUnits' in response:
            item.consumed_units = response['ConsumedCapacityUnits']
        return response
    def update_item(self, item, expected_value=None, return_values=None):
        """
        Commit pending item updates to Amazon DynamoDB.
        :type item: :class:`boto.dynamodb.item.Item`
        :param item: The Item to update in Amazon DynamoDB. It is expected
            that you would have called the add_attribute, put_attribute
            and/or delete_attribute methods on this Item prior to calling
            this method. Those queued changes are what will be updated.
        :type expected_value: dict
        :param expected_value: A dictionary of name/value pairs that you
            expect. This dictionary should have name/value pairs where the
            name is the name of the attribute and the value is either the
            value you are expecting or False if you expect the attribute
            not to exist.
        :type return_values: str
        :param return_values: Controls the return of attribute name/value pairs
            before they were updated. Possible values are: None, 'ALL_OLD',
            'UPDATED_OLD', 'ALL_NEW' or 'UPDATED_NEW'. If 'ALL_OLD' is
            specified and the item is overwritten, the content of the old item
            is returned. If 'ALL_NEW' is specified, then all the attributes of
            the new version of the item are returned. If 'UPDATED_NEW' is
            specified, the new versions of only the updated attributes are
            returned.
        """
        expected_value = self.dynamize_expected_value(expected_value)
        key = self.build_key_from_values(item.table.schema,
                                         item.hash_key, item.range_key)
        attr_updates = self.dynamize_attribute_updates(item._updates)
        response = self.layer1.update_item(item.table.name, key,
                                           attr_updates,
                                           expected_value, return_values,
                                           object_hook=self.dynamizer.decode)
        # The queued updates have been applied; clear them so a subsequent
        # save does not replay them.
        item._updates.clear()
        if 'ConsumedCapacityUnits' in response:
            item.consumed_units = response['ConsumedCapacityUnits']
        return response
    def delete_item(self, item, expected_value=None, return_values=None):
        """
        Delete the item from Amazon DynamoDB.
        :type item: :class:`boto.dynamodb.item.Item`
        :param item: The Item to delete from Amazon DynamoDB.
        :type expected_value: dict
        :param expected_value: A dictionary of name/value pairs that you expect.
            This dictionary should have name/value pairs where the name
            is the name of the attribute and the value is either the value
            you are expecting or False if you expect the attribute not to
            exist.
        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before they were changed. Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        """
        expected_value = self.dynamize_expected_value(expected_value)
        key = self.build_key_from_values(item.table.schema,
                                         item.hash_key, item.range_key)
        return self.layer1.delete_item(item.table.name, key,
                                       expected=expected_value,
                                       return_values=return_values,
                                       object_hook=self.dynamizer.decode)
    def query(self, table, hash_key, range_key_condition=None,
              attributes_to_get=None, request_limit=None,
              max_results=None, consistent_read=False,
              scan_index_forward=True, exclusive_start_key=None,
              item_class=Item, count=False):
        """
        Perform a query on the table.
        :type table: :class:`boto.dynamodb.table.Table`
        :param table: The Table object that is being queried.
        :type hash_key: int|long|float|str|unicode|Binary
        :param hash_key: The HashKey of the requested item. The
            type of the value must match the type defined in the
            schema for the table.
        :type range_key_condition: :class:`boto.dynamodb.condition.Condition`
        :param range_key_condition: A Condition object.
            Condition object can be one of the following types:
            EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN
            The only condition which expects or will accept two
            values is 'BETWEEN', otherwise a single value should
            be passed to the Condition constructor.
        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.
        :type request_limit: int
        :param request_limit: The maximum number of items to retrieve
            from Amazon DynamoDB on each request. You may want to set
            a specific request_limit based on the provisioned throughput
            of your table. The default behavior is to retrieve as many
            results as possible per request.
        :type max_results: int
        :param max_results: The maximum number of results that will
            be retrieved from Amazon DynamoDB in total. For example,
            if you only wanted to see the first 100 results from the
            query, regardless of how many were actually available, you
            could set max_results to 100 and the generator returned
            from the query method will only yield 100 results max.
        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued. Otherwise, an eventually consistent
            request is issued.
        :type scan_index_forward: bool
        :param scan_index_forward: Specified forward or backward
            traversal of the index. Default is forward (True).
        :type count: bool
        :param count: If True, Amazon DynamoDB returns a total
            number of items for the Query operation, even if the
            operation has no matching items for the assigned filter.
            If count is True, the actual items are not returned and
            the count is accessible as the ``count`` attribute of
            the returned object.
        :type exclusive_start_key: list or tuple
        :param exclusive_start_key: Primary key of the item from
            which to continue an earlier query. This would be
            provided as the LastEvaluatedKey in that query.
        :type item_class: Class
        :param item_class: Allows you to override the class used
            to generate the items. This should be a subclass of
            :class:`boto.dynamodb.item.Item`
        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
        """
        if range_key_condition:
            rkc = self.dynamize_range_key_condition(range_key_condition)
        else:
            rkc = None
        if exclusive_start_key:
            esk = self.build_key_from_values(table.schema,
                                             *exclusive_start_key)
        else:
            esk = None
        # The TableGenerator performs the actual (paginated) requests
        # lazily, as it is iterated.
        kwargs = {'table_name': table.name,
                  'hash_key_value': self.dynamizer.encode(hash_key),
                  'range_key_conditions': rkc,
                  'attributes_to_get': attributes_to_get,
                  'limit': request_limit,
                  'count': count,
                  'consistent_read': consistent_read,
                  'scan_index_forward': scan_index_forward,
                  'exclusive_start_key': esk,
                  'object_hook': self.dynamizer.decode}
        return TableGenerator(table, self.layer1.query,
                              max_results, item_class, kwargs)
    def scan(self, table, scan_filter=None,
             attributes_to_get=None, request_limit=None, max_results=None,
             exclusive_start_key=None, item_class=Item, count=False):
        """
        Perform a scan of DynamoDB.
        :type table: :class:`boto.dynamodb.table.Table`
        :param table: The Table object that is being scanned.
        :type scan_filter: A dict
        :param scan_filter: A dictionary where the key is the
            attribute name and the value is a
            :class:`boto.dynamodb.condition.Condition` object.
            Valid Condition objects include:
             * EQ - equal (1)
             * NE - not equal (1)
             * LE - less than or equal (1)
             * LT - less than (1)
             * GE - greater than or equal (1)
             * GT - greater than (1)
             * NOT_NULL - attribute exists (0, use None)
             * NULL - attribute does not exist (0, use None)
             * CONTAINS - substring or value in list (1)
             * NOT_CONTAINS - absence of substring or value in list (1)
             * BEGINS_WITH - substring prefix (1)
             * IN - exact match in list (N)
             * BETWEEN - >= first value, <= second value (2)
        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned. Otherwise, all attributes will be returned.
        :type request_limit: int
        :param request_limit: The maximum number of items to retrieve
            from Amazon DynamoDB on each request. You may want to set
            a specific request_limit based on the provisioned throughput
            of your table. The default behavior is to retrieve as many
            results as possible per request.
        :type max_results: int
        :param max_results: The maximum number of results that will
            be retrieved from Amazon DynamoDB in total. For example,
            if you only wanted to see the first 100 results from the
            query, regardless of how many were actually available, you
            could set max_results to 100 and the generator returned
            from the query method will only yield 100 results max.
        :type count: bool
        :param count: If True, Amazon DynamoDB returns a total
            number of items for the Scan operation, even if the
            operation has no matching items for the assigned filter.
            If count is True, the actual items are not returned and
            the count is accessible as the ``count`` attribute of
            the returned object.
        :type exclusive_start_key: list or tuple
        :param exclusive_start_key: Primary key of the item from
            which to continue an earlier query. This would be
            provided as the LastEvaluatedKey in that query.
        :type item_class: Class
        :param item_class: Allows you to override the class used
            to generate the items. This should be a subclass of
            :class:`boto.dynamodb.item.Item`
        :rtype: :class:`boto.dynamodb.layer2.TableGenerator`
        """
        if exclusive_start_key:
            esk = self.build_key_from_values(table.schema,
                                             *exclusive_start_key)
        else:
            esk = None
        kwargs = {'table_name': table.name,
                  'scan_filter': self.dynamize_scan_filter(scan_filter),
                  'attributes_to_get': attributes_to_get,
                  'limit': request_limit,
                  'count': count,
                  'exclusive_start_key': esk,
                  'object_hook': self.dynamizer.decode}
        return TableGenerator(table, self.layer1.scan,
                              max_results, item_class, kwargs)
| mit |
mrknow/filmkodi | plugin.video.mrknow/mylib/xbmcplugin.py | 4 | 7203 | # coding: utf-8
"""
Functions for Kodi plugins
"""
# Sort-method constants accepted by addSortMethod().  The numeric values
# mirror the SortMethod enumeration in Kodi's C++ core (see FileItem.h);
# they must not be renumbered or callers talking to the real binary
# module will select the wrong sort order.
SORT_METHOD_ALBUM = 13
SORT_METHOD_ALBUM_IGNORE_THE = 14
SORT_METHOD_ARTIST = 11
SORT_METHOD_ARTIST_IGNORE_THE = 12
SORT_METHOD_BITRATE = 40
SORT_METHOD_CHANNEL = 38
SORT_METHOD_COUNTRY = 16
SORT_METHOD_DATE = 3
SORT_METHOD_DATEADDED = 19
SORT_METHOD_DATE_TAKEN = 41
SORT_METHOD_DRIVE_TYPE = 6
SORT_METHOD_DURATION = 8
SORT_METHOD_EPISODE = 22
SORT_METHOD_FILE = 5
SORT_METHOD_FULLPATH = 32
SORT_METHOD_GENRE = 15
SORT_METHOD_LABEL = 1
SORT_METHOD_LABEL_IGNORE_FOLDERS = 33
SORT_METHOD_LABEL_IGNORE_THE = 2
SORT_METHOD_LASTPLAYED = 34
SORT_METHOD_LISTENERS = 36
SORT_METHOD_MPAA_RATING = 28
SORT_METHOD_NONE = 0
SORT_METHOD_PLAYCOUNT = 35
SORT_METHOD_PLAYLIST_ORDER = 21
SORT_METHOD_PRODUCTIONCODE = 26
SORT_METHOD_PROGRAM_COUNT = 20
SORT_METHOD_SIZE = 4
SORT_METHOD_SONG_RATING = 27
SORT_METHOD_STUDIO = 30
SORT_METHOD_STUDIO_IGNORE_THE = 31
SORT_METHOD_TITLE = 9
SORT_METHOD_TITLE_IGNORE_THE = 10
SORT_METHOD_TRACKNUM = 7
SORT_METHOD_UNSORTED = 37
SORT_METHOD_VIDEO_RATING = 18
SORT_METHOD_VIDEO_RUNTIME = 29
SORT_METHOD_VIDEO_SORT_TITLE = 24
SORT_METHOD_VIDEO_SORT_TITLE_IGNORE_THE = 25
SORT_METHOD_VIDEO_TITLE = 23
SORT_METHOD_VIDEO_YEAR = 17
# Module metadata mirrored from the real (binary) xbmcplugin module.
__author__ = 'Team Kodi <http://kodi.tv>'
__credits__ = 'Team Kodi'
__date__ = 'Fri May 01 16:22:19 BST 2015'
__platform__ = 'ALL'
__version__ = '2.20.0'
def addDirectoryItem(handle, url, listitem, isFolder=False, totalItems=0):
    """Pass one directory entry back to Kodi.

    :param handle: integer - handle the plugin was started with.
    :param url: string - url of the entry; a plugin:// url denotes a
        nested virtual directory.
    :param listitem: ListItem - item to add.
    :param isFolder: bool - True if the entry is a folder.
    :param totalItems: integer - total number of items that will be
        passed (used for the progress bar).
    :return: True on success.

    Example::

        if not xbmcplugin.addDirectoryItem(int(sys.argv[1]), 'F:\\Trailers\\300.mov', listitem, totalItems=50):
            break
    """
    # Stub mirroring the real binary module; always reports success.
    return True
def addDirectoryItems(handle, items, totalItems=0):
    """Pass a list of directory entries back to Kodi in one call.

    :param handle: integer - handle the plugin was started with.
    :param items: list - tuples of (url, listitem[, isFolder]) to add.
    :param totalItems: integer - total number of items that will be
        passed (used for the progress bar).
    :return: True on success.

    .. note::
        For large lists this is faster than repeated
        addDirectoryItem() calls; it may be invoked several times to
        add items in chunks.

    Example::

        if not xbmcplugin.addDirectoryItems(int(sys.argv[1]), [(url, listitem, False,)]:
            raise
    """
    # Stub mirroring the real binary module; always reports success.
    return True
def endOfDirectory(handle, succeeded=True, updateListing=False, cacheToDisc=True):
    """Signal Kodi that the virtual directory listing is complete.

    :param handle: integer - handle the plugin was started with.
    :param succeeded: bool - True if the script completed successfully.
    :param updateListing: bool - True to refresh the current listing
        instead of treating this folder as a subfolder.
    :param cacheToDisc: bool - True to allow Kodi to cache this folder.

    Example::

        xbmcplugin.endOfDirectory(int(sys.argv[1]), cacheToDisc=False)
    """
    # Stub mirroring the real binary module; no return value.
    return None
def setResolvedUrl(handle, succeeded, listitem):
    """Tell Kodi which playable url the file plugin resolved to.

    :param handle: integer - handle the plugin was started with.
    :param succeeded: bool - True if the script completed successfully.
    :param listitem: ListItem - item the plugin resolved to for playback.

    Example::

        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
    """
    # Stub mirroring the real binary module; no return value.
    return None
def addSortMethod(handle, sortMethod, label2Mask=''):
    """Register a sorting method for the media list.

    :param handle: integer - handle the plugin was started with.
    :param sortMethod: integer - one of this module's SORT_METHOD_*
        constants (values mirror FileItem.h in the Kodi sources).
    :param label2Mask: string - label mask for the second label,
        defaults to '%D'.  Applies to: ``SORT_METHOD_NONE``,
        ``SORT_METHOD_UNSORTED``, ``SORT_METHOD_VIDEO_TITLE``,
        ``SORT_METHOD_TRACKNUM``, ``SORT_METHOD_FILE``,
        ``SORT_METHOD_TITLE``, ``SORT_METHOD_TITLE_IGNORE_THE``,
        ``SORT_METHOD_LABEL``, ``SORT_METHOD_LABEL_IGNORE_THE``

    Example::

        xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_TITLE)
    """
    # Stub mirroring the real binary module; no return value.
    return None
def getSetting(handle, id):
    """Return the value of an addon setting as a string.

    :param handle: integer - handle the plugin was started with.
    :param id: string - id of the setting to read.
    :return: the setting value (this stub always returns '').

    Example::

        apikey = xbmcplugin.getSetting(int(sys.argv[1]), 'apikey')
    """
    # Stub mirroring the real binary module; real values come from Kodi.
    return ""
def setSetting(handle, id, value):
    """Store an addon setting for the currently running plugin.

    :param handle: integer - handle the plugin was started with.
    :param id: string - id of the setting to write.
    :param value: string or unicode - value of the setting.

    Example::

        xbmcplugin.setSetting(int(sys.argv[1]), id='username', value='teamxbmc')
    """
    # Stub mirroring the real binary module; no return value.
    return None
def setContent(handle, content):
    """Declare the content type of the plugin's listing.

    :param handle: integer - handle the plugin was started with.
    :param content: string - content type (eg. movies).

    .. note::
        Valid values: files, songs, artists, albums, movies, tvshows,
        episodes, musicvideos

    Example::

        xbmcplugin.setContent(int(sys.argv[1]), 'movies')
    """
    # Stub mirroring the real binary module; no return value.
    return None
def setPluginCategory(handle, category):
    """Set the plugin's sub-category name for skins to display.

    :param handle: integer - handle the plugin was started with.
    :param category: string or unicode - plugin sub category.

    Example::

        xbmcplugin.setPluginCategory(int(sys.argv[1]), 'Comedy')
    """
    # Stub mirroring the real binary module; no return value.
    return None
def setPluginFanart(handle, image=None, color1=None, color2=None, color3=None):
    """Set the plugin's fanart image and colors for skins to display.

    :param handle: integer - handle the plugin was started with.
    :param image: string - path to the fanart image.
    :param color1: hexstring - color1 (e.g. '0xFFFFFFFF').
    :param color2: hexstring - color2 (e.g. '0xFFFF3300').
    :param color3: hexstring - color3 (e.g. '0xFF000000').

    Example::

        xbmcplugin.setPluginFanart(int(sys.argv[1]),
            'special://home/addons/plugins/video/Apple movie trailers II/fanart.png', color2='0xFFFF3300')
    """
    # Stub mirroring the real binary module; no return value.
    return None
def setProperty(handle, key, value):
    """Set a container property for this plugin.

    :param handle: integer - handle the plugin was started with.
    :param key: string - property name (not case sensitive).
    :param value: string or unicode - property value.

    Example::

        xbmcplugin.setProperty(int(sys.argv[1]), 'Emulator', 'M.A.M.E.')
    """
    # Stub mirroring the real binary module; no return value.
    return None
| apache-2.0 |
NicovincX2/Python-3.5 | Analyse (mathématiques)/Analyse fonctionnelle/Équation différentielle/Orbite/eccentric_orbits_compare.py | 1 | 1065 | # -*- coding: utf-8 -*-
import os
# compare Euler's method and the Euler-Cromer method for eccentric orbits
#
# M. Zingale (2013-02-19)
import numpy
import pylab
import math
from orbit import *
# eccentric orbit: semi-major axis a = 1.0, eccentricity e = 0.6
# (NOTE(review): the original comment said "circular orbit", but e = 0.6
# is clearly eccentric)
o = orbit(1.0, 0.6) # eccentricity = 0.6
# period (from Kepler's third law, via the orbit helper class)
P = o.keplerPeriod()
# integrate one full period with each scheme at the same fixed timestep,
# so any difference in the plotted trajectories is due to the integrator
histEuler = o.intEuler(0.0125, P)
histEC = o.intEulerCromer(0.0125, P)
#histRK2 = o.intRK2(0.0125, P)
pylab.plot(histEuler.x, histEuler.y, label="Euler's method", color="k")
# mark the Sun: a slightly larger black star drawn under a yellow star
# gives the yellow marker a black outline
pylab.scatter([0], [0], s=250, marker=(20, 1), color="k")
pylab.scatter([0], [0], s=200, marker=(20, 1), color="y")
# draw a vertical line that the semi-major axis should fall on
yy = numpy.linspace(-2.0, 2.0, 100)
pylab.plot(0.0 * yy, yy, ls=":", color="0.5")
pylab.plot(histEC.x, histEC.y, label="Euler-Cromer method", color="b")
leg = pylab.legend()
ltext = leg.get_texts()
pylab.setp(ltext, fontsize='small')
leg.draw_frame(0)
pylab.xlim(-2, 2)
pylab.ylim(-2, 2)
ax = pylab.gca()
# equal aspect so the orbit shape is not distorted by the axes
ax.set_aspect("equal", "datalim")
pylab.savefig("orbit-eccentric.png")
os.system("pause")  # Windows-only: keeps the console window open
| gpl-3.0 |
eLBati/odoo | addons/account_asset/wizard/__init__.py | 445 | 1122 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_asset_change_duration
import wizard_asset_compute
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
camptocamp/c2c-rd-addons | c2c_product_price_unit/__openerp__.py | 4 | 3386 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: pure data read by the module loader; the
# 'description' text below is runtime metadata shown in the addon list.
{ 'sequence': 500,
'name': 'Product Price Unit',
'version': '0.7',
'category': 'Warehouse Management',
'description': """
Attention the module must be installed and upgraded immediately to complete all modifications
This module allows to define price units
* price per unit (default)
* price per 100
* price per 1000
* price in cents ...
Example: gasoline is quoted 1 liter i= 115,5 cents or 1,115 €
diodes 4.99€/1000 Units
The module hides the original price fields and fills these with converted values.
Advantage - no change of the underlying logic of the base model for computation.
Users of the group "Product Price Unit Manager" will see the orginal price fields
Tested with sales/purchase installed
Forms Layout not optimized for group "Product Price Unit Manager" - this is mainly for debugging
ToDo:
* all onchange on product_id must return default price_unit_id (done for SO/PO,INV)
* all wizards must transfer price_unit_id and unit_price_pu (many do not work now)
* all "create" must transfer price_unit_id and unit_price_pu
* defaults for
* price_unit_id
* price_unit_pu (from price_unit)
* functions using price_unit must probably use price_unit_pu
* c2c_product.price_unit.xml must be loaded in product.py
before running the update statements
currenty the module must be updated immediately to fill price_unit_id
* Product
* Button Udate standard price (average costing)
* Request For Quotation
* Form
* Report
* Purchase Order
* Report
* Leads
* Quatations
* Sales Order
* Form small layout issue
* Report
* Warehouse
* Forms
* Reports
* Price Lists
""",
'author': 'ChriCar Beteiligungs- und Beratungs- GmbH',
'depends': ['product', 'purchase', 'hr_expense','account_anglo_saxon', 'c2c_stock_accounting', 'c2c_product_price_unit_id'],
# XML views loaded on install/upgrade
'data': [
'purchase_view.xml',
'sale_view.xml',
'stock_view.xml',
'account_invoice_view.xml',
'wizard/stock_partial_picking_view.xml',
],
#'data': ['product_view.xml'],
'demo_xml': [],
# deliberately marked not installable
'installable': False,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
daenamkim/ansible | lib/ansible/modules/network/vyos/vyos_linkagg.py | 7 | 7773 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Metadata consumed by Ansible's doc/tooling pipeline (not user-facing).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_linkagg
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage link aggregation groups on VyOS network devices
description:
- This module provides declarative management of link aggregation groups
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the link aggregation group.
required: true
mode:
description:
- Mode of the link aggregation group.
choices: ['802.3ad', 'active-backup', 'broadcast',
'round-robin', 'transmit-load-balance',
'adaptive-load-balance', 'xor-hash', 'on']
members:
description:
- List of members of the link aggregation group.
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure link aggregation group
vyos_linkagg:
name: bond0
members:
- eth0
- eth1
- name: remove configuration
vyos_linkagg:
name: bond0
state: absent
- name: Create aggregate of linkagg definitions
vyos_linkagg:
aggregate:
- { name: bond0, members: [eth1] }
- { name: bond1, members: [eth2] }
- name: Remove aggregate of linkagg definitions
vyos_linkagg:
aggregate:
- name: bond0
- name: bond1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces bonding bond0
- set interfaces ethernet eth0 bond-group 'bond0'
- set interfaces ethernet eth1 bond-group 'bond0'
"""
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import load_config, run_commands
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def search_obj_in_list(name, lst):
    """Return the first dict in *lst* whose 'name' equals *name*, else None."""
    matches = (entry for entry in lst if entry['name'] == name)
    return next(matches, None)
def map_obj_to_commands(updates, module):
    """Translate desired vs. actual link-agg state into VyOS commands.

    :param updates: tuple (want, have) of lists of linkagg dicts with keys
        'name', 'mode', 'members', 'state'.
    :param module: AnsibleModule instance (unused here; kept for the common
        mapper signature shared by the vyos_* modules).
    :returns: ordered list of 'set'/'delete' configuration command strings.
    """
    want, have = updates
    commands = []
    for desired in want:
        name = desired['name']
        members = desired.get('members') or []
        mode = desired['mode']
        if mode == 'on':
            # 'on' is an accepted alias for LACP
            mode = '802.3ad'
        state = desired['state']
        existing = search_obj_in_list(name, have)
        if state == 'absent':
            if existing:
                # detach every current member first, then drop the bond
                for member in existing['members']:
                    commands.append('delete interfaces ethernet %s bond-group' % member)
                commands.append('delete interfaces bonding %s' % name)
            continue
        if not existing:
            # bond does not exist yet: create it with all requested members
            commands.append('set interfaces bonding %s mode %s' % (name, mode))
            for member in members:
                commands.append('set interfaces ethernet %s bond-group %s' % (member, name))
            if state == 'down':
                commands.append('set interfaces bonding %s disable' % name)
        else:
            # bond exists: reconcile mode, membership and admin state
            if mode != existing['mode']:
                commands.append('set interfaces bonding %s mode %s' % (name, mode))
            for member in set(members) - set(existing['members']):
                commands.append('set interfaces ethernet %s bond-group %s' % (member, name))
            if state == 'down' and existing['state'] == 'up':
                commands.append('set interfaces bonding %s disable' % name)
            elif state == 'up' and existing['state'] == 'down':
                commands.append('delete interfaces bonding %s disable' % name)
    return commands
def map_config_to_obj(module):
    """Read the device's bonding state and normalise it to linkagg dicts.

    Runs 'show interfaces bonding slaves' and parses each data row into
    {'name', 'mode', 'members', 'state'}; the first output line is the
    column header and is skipped.
    """
    output = run_commands(module, ['show interfaces bonding slaves'])
    bonds = []
    for row in output[0].splitlines()[1:]:
        columns = row.split()
        # columns beyond index 3 are the slave interfaces, if any
        if len(columns) > 4:
            slave_ifaces = columns[4:]
        else:
            slave_ifaces = []
        bonds.append({'name': columns[0],
                      'mode': columns[1],
                      'members': slave_ifaces,
                      'state': columns[2]})
    return bonds
def map_params_to_obj(module):
    """Build the list of desired link-agg objects from module parameters.

    With an ``aggregate`` list, each entry's unset keys inherit the
    top-level module parameter of the same name (entries are filled in
    place, matching the established behaviour); otherwise a single object
    is built from the top-level parameters.
    """
    aggregate = module.params.get('aggregate')
    if not aggregate:
        return [{
            'name': module.params['name'],
            'mode': module.params['mode'],
            'members': module.params['members'],
            'state': module.params['state'],
        }]
    objs = []
    for item in aggregate:
        for key in item:
            if item.get(key) is None:
                item[key] = module.params[key]
        objs.append(item.copy())
    return objs
def main():
    """Module entry point: build the argument spec, diff desired state
    against the device, and push any required commands."""
    mode_choices = ['802.3ad', 'active-backup', 'broadcast',
                    'round-robin', 'transmit-load-balance',
                    'adaptive-load-balance', 'xor-hash', 'on']
    element_spec = {
        'name': dict(),
        'mode': dict(choices=mode_choices, default='802.3ad'),
        'members': dict(type='list'),
        'state': dict(default='present',
                      choices=['present', 'absent', 'up', 'down']),
    }
    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)
    # aggregate entries inherit defaults from the top-level parameters,
    # so strip defaults from the per-entry spec
    remove_default_spec(aggregate_spec)
    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )
    argument_spec.update(element_spec)
    argument_spec.update(vyos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'aggregate']],
                           mutually_exclusive=[['name', 'aggregate']],
                           supports_check_mode=True)
    warnings = list()
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)
    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands
    if commands:
        # in check mode, build but do not commit the candidate config
        load_config(module, commands, commit=not module.check_mode)
        result['changed'] = True
    module.exit_json(**result)
# Standard Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_06_01/aio/operations/_express_route_ports_operations.py | 1 | 30168 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsOperations:
"""ExpressRoutePortsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # The pipeline client, configuration and (de)serializers are injected
        # by the generated service client; this class never constructs them.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        **kwargs
    ) -> None:
        """Send the initial DELETE request; long-running polling is handled
        by :meth:`begin_delete`."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all acceptable for an async ARM delete
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # no saved state: fire the initial DELETE; 'cls' is overridden so
            # the raw pipeline response is returned for the poller to consume
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # delete returns no body; only invoke the custom callback if given
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        # delete LROs resolve their final state via the Location header
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        **kwargs
    ) -> "_models.ExpressRoutePort":
        """Retrieves the requested ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of ExpressRoutePort.
        :type express_route_port_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRoutePort, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_06_01.models.ExpressRoutePort
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        parameters: "_models.ExpressRoutePort",
        **kwargs
    ) -> "_models.ExpressRoutePort":
        """Send the initial PUT request; long-running polling is handled by
        :meth:`begin_create_or_update`."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ExpressRoutePort')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing resource, 201 = created a new one
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        parameters: "_models.ExpressRoutePort",
        **kwargs
    ) -> AsyncLROPoller["_models.ExpressRoutePort"]:
        """Creates or updates the specified ExpressRoutePort resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param parameters: Parameters supplied to the create ExpressRoutePort operation.
        :type parameters: ~azure.mgmt.network.v2019_06_01.models.ExpressRoutePort
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRoutePort or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.ExpressRoutePort]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # no saved state: fire the initial PUT; 'cls' is overridden so the
            # raw pipeline response is returned for the poller to consume
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # the final response body is the resulting ExpressRoutePort
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        # create/update LROs resolve via the Azure-AsyncOperation header
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        parameters: "_models.TagsObject",
        **kwargs
    ) -> "_models.ExpressRoutePort":
        """Send the initial PATCH request updating resource tags; polling is
        handled by :meth:`begin_update_tags`."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        parameters: "_models.TagsObject",
        **kwargs
    ) -> AsyncLROPoller["_models.ExpressRoutePort"]:
        """Update ExpressRoutePort tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param parameters: Parameters supplied to update ExpressRoutePort resource tags.
        :type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRoutePort or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.ExpressRoutePort]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePort"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # no saved state: fire the initial PATCH; 'cls' is overridden so
            # the raw pipeline response is returned for the poller to consume
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                express_route_port_name=express_route_port_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # the final response body is the updated ExpressRoutePort
            deserialized = self._deserialize('ExpressRoutePort', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs
) -> AsyncIterable["_models.ExpressRoutePortListResult"]:
    """List all the ExpressRoutePort resources in the specified resource group.

    Returns a lazy async pager; no HTTP request is made until the result
    is iterated.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ExpressRoutePortListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortListResult"]
    # Well-known HTTP statuses mapped to azure-core exception types; callers
    # may extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the GET request for one page: the first page is built from the
        # templated metadata URL; later pages reuse next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link already embeds every query parameter it needs.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, page items).
        deserialized = self._deserialize('ExpressRoutePortListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page; anything other than HTTP 200 raises.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
def list(
    self,
    **kwargs
) -> AsyncIterable["_models.ExpressRoutePortListResult"]:
    """List all the ExpressRoutePort resources in the specified subscription.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExpressRoutePortListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ExpressRoutePortListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRoutePortListResult"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    def _build_request(next_link=None):
        # Every request carries the Accept header.
        headers = {}  # type: Dict[str, Any]
        headers['Accept'] = self._serialize.header("accept", accept, 'str')
        if next_link:
            # Continuation URLs come back fully formed from the service.
            query_parameters = {}  # type: Dict[str, Any]
            return self._client.get(next_link, query_parameters, headers)
        # First page: expand the templated metadata URL and add api-version.
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(url, query_parameters, headers)

    async def _unpack_page(pipeline_response):
        # One page of results -> (continuation token, async iterable of items).
        page = self._deserialize('ExpressRoutePortListResult', pipeline_response)
        elements = page.value
        if cls:
            elements = cls(elements)
        return page.next_link or None, AsyncList(elements)

    async def _fetch_page(next_link=None):
        # Run one request through the pipeline; non-200 responses raise.
        request = _build_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(_fetch_page, _unpack_page)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePorts'} # type: ignore
| mit |
yonglehou/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # Undo the whitening rescaling so the reconstruction uses the
            # components at their original scale.
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        # BUG FIX: the abstract placeholder was declared as ``fit(X, y=None)``
        # without ``self``, so its signature did not match the instance-method
        # contract subclasses must implement. Adding ``self`` is backward
        # compatible: the method is abstract and never called directly.
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

    def transform(self, X, y=None):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------
        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)  # doctest: +SKIP
        IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
        >>> ipca.transform(X)  # doctest: +SKIP
        """
        check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)

        X = check_array(X)
        if self.mean_ is not None:
            X = X - self.mean_
        X_transformed = fast_dot(X, self.components_.T)
        if self.whiten:
            # Whitening scales projected coordinates to unit variance.
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X, y=None):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
| bsd-3-clause |
ArchiFleKs/magnum | magnum/objects/magnum_service.py | 2 | 6026 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_versionedobjects import fields
from magnum.db import api as dbapi
from magnum.objects import base
@base.MagnumObjectRegistry.register
class MagnumService(base.MagnumPersistentObject, base.MagnumObject):
    """Versioned object wrapping one row of the magnum_service table.

    On every remotable method the ``context`` argument exists only because
    the RPC indirection API requires context as the first argument; set the
    context when instantiating the object instead, e.g. MagnumService(context).
    """

    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'last_seen_up': fields.DateTimeField(nullable=True),
        'forced_down': fields.BooleanField(),
        'report_count': fields.IntegerField(),
    }

    @staticmethod
    def _from_db_object(magnum_service, db_magnum_service):
        """Copy every known field from a DB row onto the given object."""
        for field_name in magnum_service.fields:
            setattr(magnum_service, field_name, db_magnum_service[field_name])
        magnum_service.obj_reset_changes()
        return magnum_service

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Convert a list of DB rows into a list of MagnumService objects."""
        converted = []
        for db_object in db_objects:
            converted.append(
                MagnumService._from_db_object(cls(context), db_object))
        return converted

    @base.remotable_classmethod
    def get_by_host_and_binary(cls, context, host, binary):
        """Find a magnum_service based on its hostname and binary.

        :param host: The host on which the binary is running.
        :param binary: The name of the binary.
        :param context: Security context.
        :returns: a :class:`MagnumService` object, or None if no row matches.
        """
        db_row = cls.dbapi.get_magnum_service_by_host_and_binary(host, binary)
        if db_row is None:
            return None
        return MagnumService._from_db_object(cls(context), db_row)

    @base.remotable_classmethod
    def list(cls, context, limit=None, marker=None,
             sort_key=None, sort_dir=None):
        """Return a list of MagnumService objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`MagnumService` objects.
        """
        db_rows = cls.dbapi.get_magnum_service_list(limit=limit,
                                                    marker=marker,
                                                    sort_key=sort_key,
                                                    sort_dir=sort_dir)
        return MagnumService._from_db_object_list(db_rows, cls, context)

    @base.remotable
    def create(self, context=None):
        """Create a MagnumService record in the DB from the pending changes."""
        pending = self.obj_get_changes()
        db_row = self.dbapi.create_magnum_service(pending)
        self._from_db_object(self, db_row)

    @base.remotable
    def destroy(self, context=None):
        """Delete this MagnumService's row from the DB."""
        self.dbapi.destroy_magnum_service(self.id)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Persist any changed fields of this MagnumService, column by column."""
        pending = self.obj_get_changes()
        self.dbapi.update_magnum_service(self.id, pending)
        self.obj_reset_changes()

    @base.remotable
    def report_state_up(self, context=None):
        """Bump report_count and save, marking the service as alive."""
        self.report_count += 1
        self.save()
| apache-2.0 |
laszlocsomor/tensorflow | tensorflow/python/keras/applications/vgg19/__init__.py | 74 | 1127 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""VGG19 Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.applications.vgg19 import decode_predictions
from tensorflow.python.keras._impl.keras.applications.vgg19 import preprocess_input
from tensorflow.python.keras._impl.keras.applications.vgg19 import VGG19
del absolute_import
del division
del print_function
| apache-2.0 |
Matt-Deacalion/django | django/contrib/contenttypes/migrations/0002_remove_content_type_name.py | 582 | 1168 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def add_legacy_name(apps, schema_editor):
    """Repopulate ContentType.name from each model's object_name.

    Serves as the reverse of dropping the legacy ``name`` column: content
    types whose model can no longer be resolved fall back to the stored
    model string.
    """
    content_type_model = apps.get_model('contenttypes', 'ContentType')
    for content_type in content_type_model.objects.all():
        try:
            model = apps.get_model(content_type.app_label, content_type.model)
        except LookupError:
            content_type.name = content_type.model
        else:
            content_type.name = model._meta.object_name
        content_type.save()
class Migration(migrations.Migration):
    # Removes the legacy ``name`` column from ContentType. The column is made
    # nullable first so existing rows stay valid while it is phased out; the
    # RunPython step is a no-op forwards and repopulates names when reversed.

    dependencies = [
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='contenttype',
            options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
        ),
        migrations.AlterField(
            model_name='contenttype',
            name='name',
            field=models.CharField(max_length=100, null=True),
        ),
        migrations.RunPython(
            # Forwards: nothing to do; reverse: restore names for downgrade.
            migrations.RunPython.noop,
            add_legacy_name,
            hints={'model_name': 'contenttype'},
        ),
        migrations.RemoveField(
            model_name='contenttype',
            name='name',
        ),
    ]
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/contrib/learn/python/learn/tests/dataframe/batch_test.py | 33 | 2344 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for learn.dataframe.transforms.batch."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.dataframe.transforms import batch
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
class BatchTestCase(tf.test.TestCase):
    """Test class for Batch transform."""

    def testBatch(self):
        """Re-batching a NumpySource stream preserves element order."""
        initial_batch_size = 7
        final_batch_size = 13
        iterations = 50
        # Source emits np.arange(1000, 2000) in batches of 7; the Batch
        # transform re-chunks that stream into batches of 13.
        numpy_cols = in_memory_source.NumpySource(np.arange(1000, 2000),
                                                  batch_size=initial_batch_size)()
        index_column = numpy_cols.index
        value_column = numpy_cols.value
        batcher = batch.Batch(batch_size=final_batch_size,
                              output_names=["index", "value"])
        batched = batcher([index_column, value_column])
        cache = {}
        # Build the graph tensors for both output columns from a shared cache.
        index_tensor = batched.index.build(cache)
        value_tensor = batched.value.build(cache)
        with self.test_session() as sess:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            for i in range(iterations):
                # Both streams are contiguous integer ranges, so the i-th
                # output batch is fully determined by i and the batch size.
                expected_index = range(i * final_batch_size, (i + 1) * final_batch_size)
                expected_value = range(1000 + i * final_batch_size, 1000 +
                                       (i + 1) * final_batch_size)
                actual_index, actual_value = sess.run([index_tensor, value_tensor])
                np.testing.assert_array_equal(expected_index, actual_index)
                np.testing.assert_array_equal(expected_value, actual_value)
            coord.request_stop()
            coord.join(threads)
# Allow running this test module directly.
if __name__ == "__main__":
    tf.test.main()
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v6/googleads-py/tests/unit/gapic/googleads.v6/services/test_customer_manager_link_service.py | 1 | 40962 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v6.enums.types import manager_link_status
from google.ads.googleads.v6.resources.types import customer_manager_link
from google.ads.googleads.v6.services.services.customer_manager_link_service import CustomerManagerLinkServiceClient
from google.ads.googleads.v6.services.services.customer_manager_link_service import transports
from google.ads.googleads.v6.services.types import customer_manager_link_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair for mTLS test fixtures."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return the client's default endpoint, substituting localhost hosts.

    If the default endpoint is localhost, the default mtls endpoint would be
    the same; returning a distinct non-localhost value lets the tests produce
    a different mtls endpoint for endpoint testing purposes.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to *.mtls.* ones."""
    convert = CustomerManagerLinkServiceClient._get_default_mtls_endpoint
    cases = [
        # (input endpoint, expected mtls endpoint)
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-Google hostnames pass through untouched.
        ("api.example.com", "api.example.com"),
    ]
    for given, expected in cases:
        assert convert(given) == expected
def test_customer_manager_link_service_client_from_service_account_info():
    """from_service_account_info builds credentials from an in-memory dict."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = CustomerManagerLinkServiceClient.from_service_account_info(info)
        assert client.transport._credentials == creds
        # The default (non-mTLS) host should be selected.
        assert client.transport._host == 'googleads.googleapis.com:443'
def test_customer_manager_link_service_client_from_service_account_file():
    """from_service_account_file and its _json alias load credentials from a path."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = CustomerManagerLinkServiceClient.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds

        # The _json spelling is an alias of the same constructor.
        client = CustomerManagerLinkServiceClient.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds

        assert client.transport._host == 'googleads.googleapis.com:443'
def test_customer_manager_link_service_client_get_transport_class():
    """get_transport_class returns the gRPC transport by default and by name."""
    transport = CustomerManagerLinkServiceClient.get_transport_class()
    assert transport == transports.CustomerManagerLinkServiceGrpcTransport

    transport = CustomerManagerLinkServiceClient.get_transport_class("grpc")
    assert transport == transports.CustomerManagerLinkServiceGrpcTransport
@mock.patch.object(CustomerManagerLinkServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomerManagerLinkServiceClient))
def test_customer_manager_link_service_client_client_options():
    """Client construction honors explicit transports, client_options, and env vars."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.CustomerManagerLinkServiceClient.get_transport_class') as gtc:
        transport = transports.CustomerManagerLinkServiceGrpcTransport(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = CustomerManagerLinkServiceClient(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.CustomerManagerLinkServiceClient.get_transport_class') as gtc:
        client = CustomerManagerLinkServiceClient(transport="grpc")
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = CustomerManagerLinkServiceClient(client_options=options)
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
    # is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = CustomerManagerLinkServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
            grpc_transport.return_value = None
            client = CustomerManagerLinkServiceClient()
            grpc_transport.assert_called_once_with(
                ssl_channel_credentials=None,
                credentials=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = CustomerManagerLinkServiceClient()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = CustomerManagerLinkServiceClient()
@mock.patch.object(CustomerManagerLinkServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomerManagerLinkServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_customer_manager_link_service_client_mtls_env_auto(use_client_cert_env):
    """In "auto" mTLS mode, endpoint and cert choice follow env var + cert presence."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
            ssl_channel_creds = mock.Mock()
            with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
                grpc_transport.return_value = None
                client = CustomerManagerLinkServiceClient(client_options=options)

                if use_client_cert_env == "false":
                    expected_ssl_channel_creds = None
                    expected_host = client.DEFAULT_ENDPOINT
                else:
                    expected_ssl_channel_creds = ssl_channel_creds
                    expected_host = client.DEFAULT_MTLS_ENDPOINT

                grpc_transport.assert_called_once_with(
                    ssl_channel_credentials=expected_ssl_channel_creds,
                    credentials=None,
                    host=expected_host,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
                        # NOTE(review): client.DEFAULT_* below reads the class
                        # attribute patched by the decorator, so referencing the
                        # client from the previous section is intentional here.
                        if use_client_cert_env == "false":
                            is_mtls_mock.return_value = False
                            ssl_credentials_mock.return_value = None
                            expected_host = client.DEFAULT_ENDPOINT
                            expected_ssl_channel_creds = None
                        else:
                            is_mtls_mock.return_value = True
                            ssl_credentials_mock.return_value = mock.Mock()
                            expected_host = client.DEFAULT_MTLS_ENDPOINT
                            expected_ssl_channel_creds = ssl_credentials_mock.return_value

                        grpc_transport.return_value = None
                        client = CustomerManagerLinkServiceClient()
                        grpc_transport.assert_called_once_with(
                            ssl_channel_credentials=expected_ssl_channel_creds,
                            credentials=None,
                            host=expected_host,
                            client_info=transports.base.DEFAULT_CLIENT_INFO,
                        )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
            with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
                with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
                    is_mtls_mock.return_value = False
                    grpc_transport.return_value = None
                    client = CustomerManagerLinkServiceClient()
                    grpc_transport.assert_called_once_with(
                        ssl_channel_credentials=None,
                        credentials=None,
                        host=client.DEFAULT_ENDPOINT,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                    )
def test_customer_manager_link_service_client_client_options_from_dict():
    """client_options may be passed as a plain dict instead of ClientOptions."""
    with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceGrpcTransport.__init__') as grpc_transport:
        grpc_transport.return_value = None
        client = CustomerManagerLinkServiceClient(
            client_options={'api_endpoint': 'squid.clam.whelk'}
        )
        grpc_transport.assert_called_once_with(
            ssl_channel_credentials=None,
            credentials=None,
            host="squid.clam.whelk",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
def test_get_customer_manager_link(transport: str = 'grpc', request_type=customer_manager_link_service.GetCustomerManagerLinkRequest):
    """get_customer_manager_link forwards the request and deserializes the reply."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_customer_manager_link),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = customer_manager_link.CustomerManagerLink(
            resource_name='resource_name_value',
            manager_customer='manager_customer_value',
            manager_link_id=1556,
            status=manager_link_status.ManagerLinkStatusEnum.ManagerLinkStatus.UNKNOWN,
        )
        response = client.get_customer_manager_link(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == customer_manager_link_service.GetCustomerManagerLinkRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, customer_manager_link.CustomerManagerLink)
    assert response.resource_name == 'resource_name_value'
    assert response.manager_customer == 'manager_customer_value'
    assert response.manager_link_id == 1556
    assert response.status == manager_link_status.ManagerLinkStatusEnum.ManagerLinkStatus.UNKNOWN
def test_get_customer_manager_link_from_dict():
    """Re-run the happy-path test with a dict request to exercise request coercion."""
    test_get_customer_manager_link(request_type=dict)
def test_get_customer_manager_link_field_headers():
    """The request's resource_name must be echoed in the x-goog-request-params metadata header."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = customer_manager_link_service.GetCustomerManagerLinkRequest()
    request.resource_name = 'resource_name/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_customer_manager_link),
            '__call__') as call:
        call.return_value = customer_manager_link.CustomerManagerLink()
        client.get_customer_manager_link(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'resource_name=resource_name/value',
    ) in kw['metadata']
def test_get_customer_manager_link_flattened():
    """Flattened keyword arguments are packed into the request object sent over the wire."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.get_customer_manager_link),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = customer_manager_link.CustomerManagerLink()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_customer_manager_link(
            resource_name='resource_name_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].resource_name == 'resource_name_value'
def test_get_customer_manager_link_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_customer_manager_link(
            customer_manager_link_service.GetCustomerManagerLinkRequest(),
            resource_name='resource_name_value',
        )
def test_mutate_customer_manager_link(transport: str = 'grpc', request_type=customer_manager_link_service.MutateCustomerManagerLinkRequest):
    """mutate_customer_manager_link sends the request and returns the stubbed mutate response."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.mutate_customer_manager_link),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = customer_manager_link_service.MutateCustomerManagerLinkResponse(
        )
        response = client.mutate_customer_manager_link(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == customer_manager_link_service.MutateCustomerManagerLinkRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, customer_manager_link_service.MutateCustomerManagerLinkResponse)
def test_mutate_customer_manager_link_from_dict():
    """Re-run the mutate happy-path test with a dict request to exercise request coercion."""
    test_mutate_customer_manager_link(request_type=dict)
def test_mutate_customer_manager_link_field_headers():
    """The request's customer_id must be echoed in the x-goog-request-params metadata header."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = customer_manager_link_service.MutateCustomerManagerLinkRequest()
    request.customer_id = 'customer_id/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.mutate_customer_manager_link),
            '__call__') as call:
        call.return_value = customer_manager_link_service.MutateCustomerManagerLinkResponse()
        client.mutate_customer_manager_link(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'customer_id=customer_id/value',
    ) in kw['metadata']
def test_mutate_customer_manager_link_flattened():
    """Flattened customer_id/operations kwargs are packed into the outgoing request object."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.mutate_customer_manager_link),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = customer_manager_link_service.MutateCustomerManagerLinkResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.mutate_customer_manager_link(
            customer_id='customer_id_value',
            operations=[customer_manager_link_service.CustomerManagerLinkOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].customer_id == 'customer_id_value'
        assert args[0].operations == [customer_manager_link_service.CustomerManagerLinkOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_customer_manager_link_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.mutate_customer_manager_link(
            customer_manager_link_service.MutateCustomerManagerLinkRequest(),
            customer_id='customer_id_value',
            operations=[customer_manager_link_service.CustomerManagerLinkOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
        )
def test_move_manager_link(transport: str = 'grpc', request_type=customer_manager_link_service.MoveManagerLinkRequest):
    """move_manager_link sends the request and returns the stubbed move response."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.move_manager_link),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = customer_manager_link_service.MoveManagerLinkResponse(
            resource_name='resource_name_value',
        )
        response = client.move_manager_link(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == customer_manager_link_service.MoveManagerLinkRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, customer_manager_link_service.MoveManagerLinkResponse)
    assert response.resource_name == 'resource_name_value'
def test_move_manager_link_from_dict():
    """Re-run the move happy-path test with a dict request to exercise request coercion."""
    test_move_manager_link(request_type=dict)
def test_move_manager_link_field_headers():
    """The request's customer_id must be echoed in the x-goog-request-params metadata header."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = customer_manager_link_service.MoveManagerLinkRequest()
    request.customer_id = 'customer_id/value'
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.move_manager_link),
            '__call__') as call:
        call.return_value = customer_manager_link_service.MoveManagerLinkResponse()
        client.move_manager_link(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        'x-goog-request-params',
        'customer_id=customer_id/value',
    ) in kw['metadata']
def test_move_manager_link_flattened():
    """Flattened kwargs (customer_id, previous link, new manager) are packed into the request."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
            type(client.transport.move_manager_link),
            '__call__') as call:
        # Designate an appropriate return value for the call.
        call.return_value = customer_manager_link_service.MoveManagerLinkResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.move_manager_link(
            customer_id='customer_id_value',
            previous_customer_manager_link='previous_customer_manager_link_value',
            new_manager='new_manager_value',
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0].customer_id == 'customer_id_value'
        assert args[0].previous_customer_manager_link == 'previous_customer_manager_link_value'
        assert args[0].new_manager == 'new_manager_value'
def test_move_manager_link_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.move_manager_link(
            customer_manager_link_service.MoveManagerLinkRequest(),
            customer_id='customer_id_value',
            previous_customer_manager_link='previous_customer_manager_link_value',
            new_manager='new_manager_value',
        )
def test_credentials_transport_error():
    """Supplying both credentials and a ready transport instance must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.CustomerManagerLinkServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = CustomerManagerLinkServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
def test_transport_instance():
    """A client built from a custom transport instance must expose that same instance."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.CustomerManagerLinkServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = CustomerManagerLinkServiceClient(transport=transport)
    assert client.transport is transport
def test_transport_get_channel():
    """The transport must expose a usable grpc_channel attribute."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.CustomerManagerLinkServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    channel = transport.grpc_channel
    assert channel
def test_transport_grpc_default():
    """When no transport is specified, the client defaults to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    assert isinstance(
        client.transport,
        transports.CustomerManagerLinkServiceGrpcTransport,
    )
@pytest.mark.parametrize("transport_class", [
    transports.CustomerManagerLinkServiceGrpcTransport,
])
def test_transport_adc(transport_class):
    """Transports constructed with no credentials must fall back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_customer_manager_link_service_base_transport():
    """Every RPC method on the abstract base transport must raise NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.CustomerManagerLinkServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'get_customer_manager_link',
        'mutate_customer_manager_link',
        'move_manager_link',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
def test_customer_manager_link_service_base_transport_with_adc():
    """The base transport must fall back to ADC when no credentials are supplied."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v6.services.services.customer_manager_link_service.transports.CustomerManagerLinkServiceTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.CustomerManagerLinkServiceTransport()
        adc.assert_called_once()
def test_customer_manager_link_service_auth_adc():
    """Client construction without credentials must call ADC with the AdWords scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        CustomerManagerLinkServiceClient()
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/adwords',
        ))
def test_customer_manager_link_service_transport_auth_adc():
    """Transport construction without credentials must call ADC with the AdWords scope."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transports.CustomerManagerLinkServiceGrpcTransport(host="squid.clam.whelk")
        adc.assert_called_once_with(scopes=(
            'https://www.googleapis.com/auth/adwords',
        ))
def test_customer_manager_link_service_host_no_port():
    """An endpoint without a port must be normalized to the default :443."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
    )
    assert client.transport._host == 'googleads.googleapis.com:443'
def test_customer_manager_link_service_host_with_port():
    """An endpoint with an explicit port must be kept verbatim."""
    client = CustomerManagerLinkServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
    )
    assert client.transport._host == 'googleads.googleapis.com:8000'
def test_customer_manager_link_service_grpc_transport_channel():
    """A caller-supplied gRPC channel must be adopted verbatim.

    When ``channel=`` is given, the transport should use that channel,
    still normalize the host with the default :443 port, and create no
    SSL channel credentials of its own.
    """
    channel = grpc.insecure_channel('http://localhost/')
    # Check that channel is used if provided.
    transport = transports.CustomerManagerLinkServiceGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` identity check instead of `== None` (PEP 8 / E711).
    assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.CustomerManagerLinkServiceGrpcTransport])
def test_customer_manager_link_service_transport_channel_mtls_with_client_cert_source(
    transport_class
):
    """Deprecated mTLS args (api_mtls_endpoint + client_cert_source) still build an mTLS channel.

    Verifies the DeprecationWarning, the SSL credentials created from the
    client cert callback, and the exact create_channel invocation.
    """
    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
        with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, 'default') as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/adwords',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.CustomerManagerLinkServiceGrpcTransport,])
def test_customer_manager_link_service_transport_channel_mtls_with_adc(
    transport_class
):
    """Deprecated mTLS endpoint with no cert callback must use ADC-derived SSL credentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=(
                    'https://www.googleapis.com/auth/adwords',
                ),
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_customer_path():
    """customer_path() should expand the ``customers/{customer_id}`` template."""
    cid = "squid"
    assert CustomerManagerLinkServiceClient.customer_path(cid) == f"customers/{cid}"
def test_parse_customer_path():
    """parse_customer_path() must be the exact inverse of customer_path()."""
    fields = {
        "customer_id": "clam",
    }
    rendered = CustomerManagerLinkServiceClient.customer_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_customer_path(rendered) == fields
def test_customer_manager_link_path():
    """customer_manager_link_path() should expand its three-field resource template."""
    cid = "whelk"
    mgr_cid = "octopus"
    link_id = "oyster"
    want = f"customers/{cid}/customerManagerLinks/{mgr_cid}~{link_id}"
    got = CustomerManagerLinkServiceClient.customer_manager_link_path(cid, mgr_cid, link_id)
    assert want == got
def test_parse_customer_manager_link_path():
    """parse_customer_manager_link_path() must invert customer_manager_link_path()."""
    fields = {
        "customer_id": "nudibranch",
        "manager_customer_id": "cuttlefish",
        "manager_link_id": "mussel",
    }
    rendered = CustomerManagerLinkServiceClient.customer_manager_link_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_customer_manager_link_path(rendered) == fields
def test_common_billing_account_path():
    """common_billing_account_path() renders ``billingAccounts/{billing_account}``."""
    account = "winkle"
    assert (
        CustomerManagerLinkServiceClient.common_billing_account_path(account)
        == f"billingAccounts/{account}"
    )
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path() must invert common_billing_account_path()."""
    fields = {
        "billing_account": "nautilus",
    }
    rendered = CustomerManagerLinkServiceClient.common_billing_account_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_common_billing_account_path(rendered) == fields
def test_common_folder_path():
    """common_folder_path() renders ``folders/{folder}``."""
    folder_id = "scallop"
    assert CustomerManagerLinkServiceClient.common_folder_path(folder_id) == f"folders/{folder_id}"
def test_parse_common_folder_path():
    """parse_common_folder_path() must invert common_folder_path()."""
    fields = {
        "folder": "abalone",
    }
    rendered = CustomerManagerLinkServiceClient.common_folder_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_common_folder_path(rendered) == fields
def test_common_organization_path():
    """common_organization_path() renders ``organizations/{organization}``."""
    org = "squid"
    assert (
        CustomerManagerLinkServiceClient.common_organization_path(org)
        == f"organizations/{org}"
    )
def test_parse_common_organization_path():
    """parse_common_organization_path() must invert common_organization_path()."""
    fields = {
        "organization": "clam",
    }
    rendered = CustomerManagerLinkServiceClient.common_organization_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_common_organization_path(rendered) == fields
def test_common_project_path():
    """common_project_path() renders ``projects/{project}``."""
    proj = "whelk"
    assert CustomerManagerLinkServiceClient.common_project_path(proj) == f"projects/{proj}"
def test_parse_common_project_path():
    """parse_common_project_path() must invert common_project_path()."""
    fields = {
        "project": "octopus",
    }
    rendered = CustomerManagerLinkServiceClient.common_project_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_common_project_path(rendered) == fields
def test_common_location_path():
    """common_location_path() renders ``projects/{project}/locations/{location}``."""
    proj = "oyster"
    loc = "nudibranch"
    assert (
        CustomerManagerLinkServiceClient.common_location_path(proj, loc)
        == f"projects/{proj}/locations/{loc}"
    )
def test_parse_common_location_path():
    """parse_common_location_path() must invert common_location_path()."""
    fields = {
        "project": "cuttlefish",
        "location": "mussel",
    }
    rendered = CustomerManagerLinkServiceClient.common_location_path(**fields)
    # Round-trip: parsing the rendered path yields the original fields.
    assert CustomerManagerLinkServiceClient.parse_common_location_path(rendered) == fields
def test_client_withDEFAULT_CLIENT_INFO():
    """client_info must reach _prep_wrapped_messages for both client- and transport-level construction."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Construction via the client wrapper.
    with mock.patch.object(transports.CustomerManagerLinkServiceTransport, '_prep_wrapped_messages') as prep:
        client = CustomerManagerLinkServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Direct construction of the concrete transport class.
    with mock.patch.object(transports.CustomerManagerLinkServiceTransport, '_prep_wrapped_messages') as prep:
        transport_class = CustomerManagerLinkServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
| apache-2.0 |
joshwatson/binaryninja-api | python/examples/kaitai/shapefile_main.py | 1 | 43907 | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from .kaitaistruct import __version__ as ks_version, KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
import collections
# Fail fast if the installed Kaitai Struct runtime is older than the
# 0.7 API this generated parser targets.
if parse_version(ks_version) < parse_version('0.7'):
    raise Exception("Incompatible Kaitai Struct Python API: 0.7 or later is required, but you have %s" % (ks_version))
class ShapefileMain(KaitaiStruct):
    class ShapeType(Enum):
        """Numeric shape-type codes stored in shapefile headers and records."""
        null_shape = 0
        point = 1
        poly_line = 3
        polygon = 5
        multi_point = 8
        point_z = 11
        poly_line_z = 13
        polygon_z = 15
        multi_point_z = 18
        point_m = 21
        poly_line_m = 23
        polygon_m = 25
        multi_point_m = 28
        multi_patch = 31
    class PartType(Enum):
        """Part-type codes used by MultiPatch geometry parts."""
        triangle_strip = 0
        triangle_fan = 1
        outer_ring = 2
        inner_ring = 3
        first_ring = 4
        ring = 5
SEQ_FIELDS = ["header", "records"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['header']['start'] = self._io.pos()
self.header = self._root.FileHeader(self._io, self, self._root)
self.header._read()
self._debug['header']['end'] = self._io.pos()
self._debug['records']['start'] = self._io.pos()
self.records = []
i = 0
while not self._io.is_eof():
if not 'arr' in self._debug['records']:
self._debug['records']['arr'] = []
self._debug['records']['arr'].append({'start': self._io.pos()})
_t_records = self._root.Record(self._io, self, self._root)
_t_records._read()
self.records.append(_t_records)
self._debug['records']['arr'][len(self.records) - 1]['end'] = self._io.pos()
i += 1
self._debug['records']['end'] = self._io.pos()
class MultiPointM(KaitaiStruct):
SEQ_FIELDS = ["bounding_box", "number_of_points", "points", "m_range", "m_values"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['bounding_box']['start'] = self._io.pos()
self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
self.bounding_box._read()
self._debug['bounding_box']['end'] = self._io.pos()
self._debug['number_of_points']['start'] = self._io.pos()
self.number_of_points = self._io.read_s4le()
self._debug['number_of_points']['end'] = self._io.pos()
self._debug['points']['start'] = self._io.pos()
self.points = [None] * (self.number_of_points)
for i in range(self.number_of_points):
if not 'arr' in self._debug['points']:
self._debug['points']['arr'] = []
self._debug['points']['arr'].append({'start': self._io.pos()})
_t_points = self._root.Point(self._io, self, self._root)
_t_points._read()
self.points[i] = _t_points
self._debug['points']['arr'][i]['end'] = self._io.pos()
self._debug['points']['end'] = self._io.pos()
self._debug['m_range']['start'] = self._io.pos()
self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
self.m_range._read()
self._debug['m_range']['end'] = self._io.pos()
self._debug['m_values']['start'] = self._io.pos()
self.m_values = [None] * (self.number_of_points)
for i in range(self.number_of_points):
if not 'arr' in self._debug['m_values']:
self._debug['m_values']['arr'] = []
self._debug['m_values']['arr'].append({'start': self._io.pos()})
self.m_values[i] = self._io.read_f8le()
self._debug['m_values']['arr'][i]['end'] = self._io.pos()
self._debug['m_values']['end'] = self._io.pos()
class BoundingBoxXYZM(KaitaiStruct):
SEQ_FIELDS = ["x", "y", "z", "m"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['x']['start'] = self._io.pos()
self.x = self._root.BoundsMinMax(self._io, self, self._root)
self.x._read()
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._root.BoundsMinMax(self._io, self, self._root)
self.y._read()
self._debug['y']['end'] = self._io.pos()
self._debug['z']['start'] = self._io.pos()
self.z = self._root.BoundsMinMax(self._io, self, self._root)
self.z._read()
self._debug['z']['end'] = self._io.pos()
self._debug['m']['start'] = self._io.pos()
self.m = self._root.BoundsMinMax(self._io, self, self._root)
self.m._read()
self._debug['m']['end'] = self._io.pos()
class Point(KaitaiStruct):
SEQ_FIELDS = ["x", "y"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['x']['start'] = self._io.pos()
self.x = self._io.read_f8le()
self._debug['x']['end'] = self._io.pos()
self._debug['y']['start'] = self._io.pos()
self.y = self._io.read_f8le()
self._debug['y']['end'] = self._io.pos()
class Polygon(KaitaiStruct):
SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "points"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['bounding_box']['start'] = self._io.pos()
self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
self.bounding_box._read()
self._debug['bounding_box']['end'] = self._io.pos()
self._debug['number_of_parts']['start'] = self._io.pos()
self.number_of_parts = self._io.read_s4le()
self._debug['number_of_parts']['end'] = self._io.pos()
self._debug['number_of_points']['start'] = self._io.pos()
self.number_of_points = self._io.read_s4le()
self._debug['number_of_points']['end'] = self._io.pos()
self._debug['parts']['start'] = self._io.pos()
self.parts = [None] * (self.number_of_parts)
for i in range(self.number_of_parts):
if not 'arr' in self._debug['parts']:
self._debug['parts']['arr'] = []
self._debug['parts']['arr'].append({'start': self._io.pos()})
self.parts[i] = self._io.read_s4le()
self._debug['parts']['arr'][i]['end'] = self._io.pos()
self._debug['parts']['end'] = self._io.pos()
self._debug['points']['start'] = self._io.pos()
self.points = [None] * (self.number_of_points)
for i in range(self.number_of_points):
if not 'arr' in self._debug['points']:
self._debug['points']['arr'] = []
self._debug['points']['arr'].append({'start': self._io.pos()})
_t_points = self._root.Point(self._io, self, self._root)
_t_points._read()
self.points[i] = _t_points
self._debug['points']['arr'][i]['end'] = self._io.pos()
self._debug['points']['end'] = self._io.pos()
class BoundsMinMax(KaitaiStruct):
SEQ_FIELDS = ["min", "max"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['min']['start'] = self._io.pos()
self.min = self._io.read_f8le()
self._debug['min']['end'] = self._io.pos()
self._debug['max']['start'] = self._io.pos()
self.max = self._io.read_f8le()
self._debug['max']['end'] = self._io.pos()
class PolyLine(KaitaiStruct):
SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "points"]
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._debug = collections.defaultdict(dict)
def _read(self):
self._debug['bounding_box']['start'] = self._io.pos()
self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
self.bounding_box._read()
self._debug['bounding_box']['end'] = self._io.pos()
self._debug['number_of_parts']['start'] = self._io.pos()
self.number_of_parts = self._io.read_s4le()
self._debug['number_of_parts']['end'] = self._io.pos()
self._debug['number_of_points']['start'] = self._io.pos()
self.number_of_points = self._io.read_s4le()
self._debug['number_of_points']['end'] = self._io.pos()
self._debug['parts']['start'] = self._io.pos()
self.parts = [None] * (self.number_of_parts)
for i in range(self.number_of_parts):
if not 'arr' in self._debug['parts']:
self._debug['parts']['arr'] = []
self._debug['parts']['arr'].append({'start': self._io.pos()})
self.parts[i] = self._io.read_s4le()
self._debug['parts']['arr'][i]['end'] = self._io.pos()
self._debug['parts']['end'] = self._io.pos()
self._debug['points']['start'] = self._io.pos()
self.points = [None] * (self.number_of_points)
for i in range(self.number_of_points):
if not 'arr' in self._debug['points']:
self._debug['points']['arr'] = []
self._debug['points']['arr'].append({'start': self._io.pos()})
_t_points = self._root.Point(self._io, self, self._root)
_t_points._read()
self.points[i] = _t_points
self._debug['points']['arr'][i]['end'] = self._io.pos()
self._debug['points']['end'] = self._io.pos()
class MultiPointZ(KaitaiStruct):
    """MultiPointZ shape: XY bounding box, point count, XY points,
    then a Z range with per-point Z values and an M range with
    per-point M values (generated Kaitai debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_points", "points", "z_range", "z_values", "m_range", "m_values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
        self._debug['z_range']['start'] = self._io.pos()
        self.z_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.z_range._read()
        self._debug['z_range']['end'] = self._io.pos()
        self._debug['z_values']['start'] = self._io.pos()
        # One f8le Z value per point, parallel to self.points.
        self.z_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['z_values']:
                self._debug['z_values']['arr'] = []
            self._debug['z_values']['arr'].append({'start': self._io.pos()})
            self.z_values[i] = self._io.read_f8le()
            self._debug['z_values']['arr'][i]['end'] = self._io.pos()
        self._debug['z_values']['end'] = self._io.pos()
        self._debug['m_range']['start'] = self._io.pos()
        self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.m_range._read()
        self._debug['m_range']['end'] = self._io.pos()
        self._debug['m_values']['start'] = self._io.pos()
        self.m_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['m_values']:
                self._debug['m_values']['arr'] = []
            self._debug['m_values']['arr'].append({'start': self._io.pos()})
            self.m_values[i] = self._io.read_f8le()
            self._debug['m_values']['arr'][i]['end'] = self._io.pos()
        self._debug['m_values']['end'] = self._io.pos()
class PolyLineZ(KaitaiStruct):
    """PolyLineZ shape: XY bounding box, part indices, XY points, plus
    Z range/values and M range/values parallel to the point list
    (generated Kaitai debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "points", "z_range", "z_values", "m_range", "m_values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_parts']['start'] = self._io.pos()
        self.number_of_parts = self._io.read_s4le()
        self._debug['number_of_parts']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['parts']['start'] = self._io.pos()
        # parts[i] is the index into self.points where part i begins.
        self.parts = [None] * (self.number_of_parts)
        for i in range(self.number_of_parts):
            if not 'arr' in self._debug['parts']:
                self._debug['parts']['arr'] = []
            self._debug['parts']['arr'].append({'start': self._io.pos()})
            self.parts[i] = self._io.read_s4le()
            self._debug['parts']['arr'][i]['end'] = self._io.pos()
        self._debug['parts']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
        self._debug['z_range']['start'] = self._io.pos()
        self.z_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.z_range._read()
        self._debug['z_range']['end'] = self._io.pos()
        self._debug['z_values']['start'] = self._io.pos()
        self.z_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['z_values']:
                self._debug['z_values']['arr'] = []
            self._debug['z_values']['arr'].append({'start': self._io.pos()})
            self.z_values[i] = self._io.read_f8le()
            self._debug['z_values']['arr'][i]['end'] = self._io.pos()
        self._debug['z_values']['end'] = self._io.pos()
        self._debug['m_range']['start'] = self._io.pos()
        self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.m_range._read()
        self._debug['m_range']['end'] = self._io.pos()
        self._debug['m_values']['start'] = self._io.pos()
        self.m_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['m_values']:
                self._debug['m_values']['arr'] = []
            self._debug['m_values']['arr'].append({'start': self._io.pos()})
            self.m_values[i] = self._io.read_f8le()
            self._debug['m_values']['arr'][i]['end'] = self._io.pos()
        self._debug['m_values']['end'] = self._io.pos()
class PolygonZ(KaitaiStruct):
    """PolygonZ shape: same wire layout as PolyLineZ -- XY bounding box,
    ring start indices, XY points, then Z range/values and M
    range/values (generated Kaitai debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "points", "z_range", "z_values", "m_range", "m_values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_parts']['start'] = self._io.pos()
        self.number_of_parts = self._io.read_s4le()
        self._debug['number_of_parts']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['parts']['start'] = self._io.pos()
        self.parts = [None] * (self.number_of_parts)
        for i in range(self.number_of_parts):
            if not 'arr' in self._debug['parts']:
                self._debug['parts']['arr'] = []
            self._debug['parts']['arr'].append({'start': self._io.pos()})
            self.parts[i] = self._io.read_s4le()
            self._debug['parts']['arr'][i]['end'] = self._io.pos()
        self._debug['parts']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
        self._debug['z_range']['start'] = self._io.pos()
        self.z_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.z_range._read()
        self._debug['z_range']['end'] = self._io.pos()
        self._debug['z_values']['start'] = self._io.pos()
        self.z_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['z_values']:
                self._debug['z_values']['arr'] = []
            self._debug['z_values']['arr'].append({'start': self._io.pos()})
            self.z_values[i] = self._io.read_f8le()
            self._debug['z_values']['arr'][i]['end'] = self._io.pos()
        self._debug['z_values']['end'] = self._io.pos()
        self._debug['m_range']['start'] = self._io.pos()
        self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.m_range._read()
        self._debug['m_range']['end'] = self._io.pos()
        self._debug['m_values']['start'] = self._io.pos()
        self.m_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['m_values']:
                self._debug['m_values']['arr'] = []
            self._debug['m_values']['arr'].append({'start': self._io.pos()})
            self.m_values[i] = self._io.read_f8le()
            self._debug['m_values']['arr'][i]['end'] = self._io.pos()
        self._debug['m_values']['end'] = self._io.pos()
class BoundingBoxXY(KaitaiStruct):
    """Axis-aligned XY bounding box: one BoundsMinMax for x, then one
    for y. self._debug maps each field name to the byte-offset span it
    occupied in the stream.
    """
    SEQ_FIELDS = ["x", "y"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        # Read the two axes in stream order, bracketing each with offsets.
        for axis in ("x", "y"):
            self._debug[axis]['start'] = self._io.pos()
            bounds = self._root.BoundsMinMax(self._io, self, self._root)
            bounds._read()
            setattr(self, axis, bounds)
            self._debug[axis]['end'] = self._io.pos()
class PointM(KaitaiStruct):
    """Measured point: three little-endian float64 fields x, y, m, read
    in that order. self._debug records each field's byte-offset span.
    """
    SEQ_FIELDS = ["x", "y", "m"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        # All three fields are f8le scalars; loop instead of unrolling.
        for name in ("x", "y", "m"):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_f8le())
            self._debug[name]['end'] = self._io.pos()
class PolygonM(KaitaiStruct):
    """PolygonM shape: XY bounding box, ring start indices, XY points,
    then an M range with per-point M values (generated Kaitai
    debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "points", "m_range", "m_values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_parts']['start'] = self._io.pos()
        self.number_of_parts = self._io.read_s4le()
        self._debug['number_of_parts']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['parts']['start'] = self._io.pos()
        self.parts = [None] * (self.number_of_parts)
        for i in range(self.number_of_parts):
            if not 'arr' in self._debug['parts']:
                self._debug['parts']['arr'] = []
            self._debug['parts']['arr'].append({'start': self._io.pos()})
            self.parts[i] = self._io.read_s4le()
            self._debug['parts']['arr'][i]['end'] = self._io.pos()
        self._debug['parts']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
        self._debug['m_range']['start'] = self._io.pos()
        self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.m_range._read()
        self._debug['m_range']['end'] = self._io.pos()
        self._debug['m_values']['start'] = self._io.pos()
        # One f8le measure per point, parallel to self.points.
        self.m_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['m_values']:
                self._debug['m_values']['arr'] = []
            self._debug['m_values']['arr'].append({'start': self._io.pos()})
            self.m_values[i] = self._io.read_f8le()
            self._debug['m_values']['arr'][i]['end'] = self._io.pos()
        self._debug['m_values']['end'] = self._io.pos()
class RecordHeader(KaitaiStruct):
    """Per-record header: record_number then content_length, both
    big-endian int32. self._debug records each field's byte-offset span.
    """
    SEQ_FIELDS = ["record_number", "content_length"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        # Both header fields are s4be scalars read in declaration order.
        for name in ("record_number", "content_length"):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_s4be())
            self._debug[name]['end'] = self._io.pos()
class MultiPoint(KaitaiStruct):
    """MultiPoint shape: XY bounding box, point count, and that many XY
    points (generated Kaitai debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_points", "points"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
class FileHeader(KaitaiStruct):
    """Main file header: magic file code, five reserved fields, file
    length (big-endian), fixed version bytes, shape type enum, and an
    XYZM bounding box (generated Kaitai debug-mode reader).

    ensure_fixed_contents() validates magic/reserved bytes and raises
    if the stream does not match them exactly.
    """
    SEQ_FIELDS = ["file_code", "unused_field_1", "unused_field_2", "unused_field_3", "unused_field_4", "unused_field_5", "file_length", "version", "shape_type", "bounding_box"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['file_code']['start'] = self._io.pos()
        self.file_code = self._io.ensure_fixed_contents(b"\x00\x00\x27\x0A")
        self._debug['file_code']['end'] = self._io.pos()
        self._debug['unused_field_1']['start'] = self._io.pos()
        self.unused_field_1 = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['unused_field_1']['end'] = self._io.pos()
        self._debug['unused_field_2']['start'] = self._io.pos()
        self.unused_field_2 = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['unused_field_2']['end'] = self._io.pos()
        self._debug['unused_field_3']['start'] = self._io.pos()
        self.unused_field_3 = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['unused_field_3']['end'] = self._io.pos()
        self._debug['unused_field_4']['start'] = self._io.pos()
        self.unused_field_4 = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['unused_field_4']['end'] = self._io.pos()
        self._debug['unused_field_5']['start'] = self._io.pos()
        self.unused_field_5 = self._io.ensure_fixed_contents(b"\x00\x00\x00\x00")
        self._debug['unused_field_5']['end'] = self._io.pos()
        self._debug['file_length']['start'] = self._io.pos()
        self.file_length = self._io.read_s4be()
        self._debug['file_length']['end'] = self._io.pos()
        self._debug['version']['start'] = self._io.pos()
        # 0x000003E8 little-endian == version 1000.
        self.version = self._io.ensure_fixed_contents(b"\xE8\x03\x00\x00")
        self._debug['version']['end'] = self._io.pos()
        self._debug['shape_type']['start'] = self._io.pos()
        self.shape_type = KaitaiStream.resolve_enum(self._root.ShapeType, self._io.read_s4le())
        self._debug['shape_type']['end'] = self._io.pos()
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXYZM(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
class PointZ(KaitaiStruct):
    """3D measured point: four little-endian float64 fields x, y, z, m,
    read in that order. self._debug records each field's offset span.
    """
    SEQ_FIELDS = ["x", "y", "z", "m"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        # All four fields are f8le scalars; loop instead of unrolling.
        for name in ("x", "y", "z", "m"):
            self._debug[name]['start'] = self._io.pos()
            setattr(self, name, self._io.read_f8le())
            self._debug[name]['end'] = self._io.pos()
class Record(KaitaiStruct):
    """One file record: a RecordHeader immediately followed by its
    RecordContents. self._debug records each field's offset span.
    """
    SEQ_FIELDS = ["header", "contents"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root or self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        # Both members are sub-structures parsed from the same stream.
        for name, factory in (("header", self._root.RecordHeader),
                              ("contents", self._root.RecordContents)):
            self._debug[name]['start'] = self._io.pos()
            node = factory(self._io, self, self._root)
            node._read()
            setattr(self, name, node)
            self._debug[name]['end'] = self._io.pos()
class RecordContents(KaitaiStruct):
    """Record payload: a shape-type enum followed by the shape body
    whose concrete class is chosen by that enum (generated Kaitai
    debug-mode reader).

    For ShapeType.null_shape no parameters follow, so shape_parameters
    is left unset in that case.
    """
    SEQ_FIELDS = ["shape_type", "shape_parameters"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['shape_type']['start'] = self._io.pos()
        self.shape_type = KaitaiStream.resolve_enum(self._root.ShapeType, self._io.read_s4le())
        self._debug['shape_type']['end'] = self._io.pos()
        if self.shape_type != self._root.ShapeType.null_shape:
            self._debug['shape_parameters']['start'] = self._io.pos()
            # Dispatch on the enum to the matching shape parser; every
            # branch constructs the sub-struct and reads it in place.
            _on = self.shape_type
            if _on == self._root.ShapeType.poly_line_z:
                self.shape_parameters = self._root.PolyLineZ(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.multi_patch:
                self.shape_parameters = self._root.MultiPatch(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.poly_line_m:
                self.shape_parameters = self._root.PolyLineM(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.polygon:
                self.shape_parameters = self._root.Polygon(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.polygon_z:
                self.shape_parameters = self._root.PolygonZ(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.point_z:
                self.shape_parameters = self._root.PointZ(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.poly_line:
                self.shape_parameters = self._root.PolyLine(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.point_m:
                self.shape_parameters = self._root.PointM(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.polygon_m:
                self.shape_parameters = self._root.PolygonM(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.multi_point:
                self.shape_parameters = self._root.MultiPoint(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.point:
                self.shape_parameters = self._root.Point(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.multi_point_m:
                self.shape_parameters = self._root.MultiPointM(self._io, self, self._root)
                self.shape_parameters._read()
            elif _on == self._root.ShapeType.multi_point_z:
                self.shape_parameters = self._root.MultiPointZ(self._io, self, self._root)
                self.shape_parameters._read()
            self._debug['shape_parameters']['end'] = self._io.pos()
class MultiPatch(KaitaiStruct):
    """MultiPatch shape: XY bounding box, part start indices, per-part
    PartType enums, XY points, then Z range/values and M range/values
    parallel to the point list (generated Kaitai debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "part_types", "points", "z_range", "z_values", "m_range", "m_values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_parts']['start'] = self._io.pos()
        self.number_of_parts = self._io.read_s4le()
        self._debug['number_of_parts']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['parts']['start'] = self._io.pos()
        self.parts = [None] * (self.number_of_parts)
        for i in range(self.number_of_parts):
            if not 'arr' in self._debug['parts']:
                self._debug['parts']['arr'] = []
            self._debug['parts']['arr'].append({'start': self._io.pos()})
            self.parts[i] = self._io.read_s4le()
            self._debug['parts']['arr'][i]['end'] = self._io.pos()
        self._debug['parts']['end'] = self._io.pos()
        self._debug['part_types']['start'] = self._io.pos()
        # One PartType enum per part, parallel to self.parts.
        self.part_types = [None] * (self.number_of_parts)
        for i in range(self.number_of_parts):
            if not 'arr' in self._debug['part_types']:
                self._debug['part_types']['arr'] = []
            self._debug['part_types']['arr'].append({'start': self._io.pos()})
            self.part_types[i] = KaitaiStream.resolve_enum(self._root.PartType, self._io.read_s4le())
            self._debug['part_types']['arr'][i]['end'] = self._io.pos()
        self._debug['part_types']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
        self._debug['z_range']['start'] = self._io.pos()
        self.z_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.z_range._read()
        self._debug['z_range']['end'] = self._io.pos()
        self._debug['z_values']['start'] = self._io.pos()
        self.z_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['z_values']:
                self._debug['z_values']['arr'] = []
            self._debug['z_values']['arr'].append({'start': self._io.pos()})
            self.z_values[i] = self._io.read_f8le()
            self._debug['z_values']['arr'][i]['end'] = self._io.pos()
        self._debug['z_values']['end'] = self._io.pos()
        self._debug['m_range']['start'] = self._io.pos()
        self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.m_range._read()
        self._debug['m_range']['end'] = self._io.pos()
        self._debug['m_values']['start'] = self._io.pos()
        self.m_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['m_values']:
                self._debug['m_values']['arr'] = []
            self._debug['m_values']['arr'].append({'start': self._io.pos()})
            self.m_values[i] = self._io.read_f8le()
            self._debug['m_values']['arr'][i]['end'] = self._io.pos()
        self._debug['m_values']['end'] = self._io.pos()
class PolyLineM(KaitaiStruct):
    """PolyLineM shape: XY bounding box, part start indices, XY points,
    then an M range with per-point M values (generated Kaitai
    debug-mode reader).
    """
    SEQ_FIELDS = ["bounding_box", "number_of_parts", "number_of_points", "parts", "points", "m_range", "m_values"]
    def __init__(self, _io, _parent=None, _root=None):
        self._io = _io
        self._parent = _parent
        self._root = _root if _root else self
        self._debug = collections.defaultdict(dict)
    def _read(self):
        self._debug['bounding_box']['start'] = self._io.pos()
        self.bounding_box = self._root.BoundingBoxXY(self._io, self, self._root)
        self.bounding_box._read()
        self._debug['bounding_box']['end'] = self._io.pos()
        self._debug['number_of_parts']['start'] = self._io.pos()
        self.number_of_parts = self._io.read_s4le()
        self._debug['number_of_parts']['end'] = self._io.pos()
        self._debug['number_of_points']['start'] = self._io.pos()
        self.number_of_points = self._io.read_s4le()
        self._debug['number_of_points']['end'] = self._io.pos()
        self._debug['parts']['start'] = self._io.pos()
        self.parts = [None] * (self.number_of_parts)
        for i in range(self.number_of_parts):
            if not 'arr' in self._debug['parts']:
                self._debug['parts']['arr'] = []
            self._debug['parts']['arr'].append({'start': self._io.pos()})
            self.parts[i] = self._io.read_s4le()
            self._debug['parts']['arr'][i]['end'] = self._io.pos()
        self._debug['parts']['end'] = self._io.pos()
        self._debug['points']['start'] = self._io.pos()
        self.points = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['points']:
                self._debug['points']['arr'] = []
            self._debug['points']['arr'].append({'start': self._io.pos()})
            _t_points = self._root.Point(self._io, self, self._root)
            _t_points._read()
            self.points[i] = _t_points
            self._debug['points']['arr'][i]['end'] = self._io.pos()
        self._debug['points']['end'] = self._io.pos()
        self._debug['m_range']['start'] = self._io.pos()
        self.m_range = self._root.BoundsMinMax(self._io, self, self._root)
        self.m_range._read()
        self._debug['m_range']['end'] = self._io.pos()
        self._debug['m_values']['start'] = self._io.pos()
        # One f8le measure per point, parallel to self.points.
        self.m_values = [None] * (self.number_of_points)
        for i in range(self.number_of_points):
            if not 'arr' in self._debug['m_values']:
                self._debug['m_values']['arr'] = []
            self._debug['m_values']['arr'].append({'start': self._io.pos()})
            self.m_values[i] = self._io.read_f8le()
            self._debug['m_values']['arr'][i]['end'] = self._io.pos()
        self._debug['m_values']['end'] = self._io.pos()
| mit |
bzero/statsmodels | statsmodels/regression/tests/test_regression.py | 18 | 38246 | """
Test functions for models.regression
"""
# TODO: Test for LM
from statsmodels.compat.python import long, lrange
import warnings
import pandas
import numpy as np
from numpy.testing import (assert_almost_equal, assert_approx_equal, assert_,
assert_raises, assert_equal, assert_allclose)
from scipy.linalg import toeplitz
from statsmodels.tools.tools import add_constant, categorical
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker
from statsmodels.datasets import longley
from scipy.stats import t as student_t
# Decimal-place tolerances passed to assert_almost_equal in the test
# classes below; subclasses override per-statistic decimal_* attributes
# with one of these to loosen or tighten a specific comparison.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_7 = 7
DECIMAL_0 = 0
class CheckRegressionResults(object):
    """
    Mixin of regression-result checks.

    Subclasses set ``cls.res1`` (a statsmodels fit) and ``cls.res2``
    (reference results from Rmodelwrap or a statistical package such as
    R, Stata, or SAS, stored in model_results).  Each check compares one
    attribute of the two, to the precision given by the corresponding
    class-level ``decimal_*`` attribute (overridable per subclass).
    """
    decimal_params = DECIMAL_4
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                            self.decimal_params)
    decimal_standarderrors = DECIMAL_4
    def test_standarderrors(self):
        assert_almost_equal(self.res1.bse,self.res2.bse,
                            self.decimal_standarderrors)
    decimal_confidenceintervals = DECIMAL_4
    def test_confidenceintervals(self):
        #NOTE: stata rounds residuals (at least) to sig digits so approx_equal
        conf1 = self.res1.conf_int()
        conf2 = self.res2.conf_int()
        for i in range(len(conf1)):
            assert_approx_equal(conf1[i][0], conf2[i][0],
                                self.decimal_confidenceintervals)
            assert_approx_equal(conf1[i][1], conf2[i][1],
                                self.decimal_confidenceintervals)
    decimal_conf_int_subset = DECIMAL_4
    def test_conf_int_subset(self):
        # conf_int(cols=...) must agree with slicing the full interval;
        # skipped for single-parameter models where cols=(1,2) is invalid.
        if len(self.res1.params) > 1:
            ci1 = self.res1.conf_int(cols=(1,2))
            ci2 = self.res1.conf_int()[1:3]
            assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset)
        else:
            pass
    decimal_scale = DECIMAL_4
    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale,
                            self.decimal_scale)
    decimal_rsquared = DECIMAL_4
    def test_rsquared(self):
        assert_almost_equal(self.res1.rsquared, self.res2.rsquared,
                            self.decimal_rsquared)
    decimal_rsquared_adj = DECIMAL_4
    def test_rsquared_adj(self):
        assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj,
                            self.decimal_rsquared_adj)
    def test_degrees(self):
        # Degrees of freedom are integers, so compare exactly.
        assert_equal(self.res1.model.df_model, self.res2.df_model)
        assert_equal(self.res1.model.df_resid, self.res2.df_resid)
    decimal_ess = DECIMAL_4
    def test_ess(self):
        #Explained Sum of Squares
        assert_almost_equal(self.res1.ess, self.res2.ess,
                            self.decimal_ess)
    decimal_ssr = DECIMAL_4
    def test_sumof_squaredresids(self):
        assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr)
    decimal_mse_resid = DECIMAL_4
    def test_mse_resid(self):
        #Mean squared error of residuals
        # NOTE(review): this body compares mse_model while test_mse_model
        # below compares mse_resid -- names and bodies look swapped.  Not
        # changed here because subclasses tune decimal_mse_resid /
        # decimal_mse_model against the current pairing; confirm before
        # fixing.
        assert_almost_equal(self.res1.mse_model, self.res2.mse_model,
                            self.decimal_mse_resid)
    decimal_mse_model = DECIMAL_4
    def test_mse_model(self):
        assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid,
                            self.decimal_mse_model)
    decimal_mse_total = DECIMAL_4
    def test_mse_total(self):
        assert_almost_equal(self.res1.mse_total, self.res2.mse_total,
                            self.decimal_mse_total, err_msg="Test class %s" % self)
    decimal_fvalue = DECIMAL_4
    def test_fvalue(self):
        #didn't change this, not sure it should complain -inf not equal -inf
        #if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)):
        assert_almost_equal(self.res1.fvalue, self.res2.fvalue,
                            self.decimal_fvalue)
    decimal_loglike = DECIMAL_4
    def test_loglike(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike)
    decimal_aic = DECIMAL_4
    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)
    decimal_bic = DECIMAL_4
    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)
    decimal_pvalues = DECIMAL_4
    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
                            self.decimal_pvalues)
    decimal_wresid = DECIMAL_4
    def test_wresid(self):
        assert_almost_equal(self.res1.wresid, self.res2.wresid,
                            self.decimal_wresid)
    decimal_resids = DECIMAL_4
    def test_resids(self):
        assert_almost_equal(self.res1.resid, self.res2.resid,
                            self.decimal_resids)
    decimal_norm_resids = DECIMAL_4
    def test_norm_resids(self):
        assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson,
                            self.decimal_norm_resids)
class TestOLS(CheckRegressionResults):
    """OLS on the Longley dataset (constant appended) versus stored
    reference results; also compares the QR-based fit against a fit
    with manually supplied Q/R factors, plus robust-SE and edge-case
    checks.
    """
    @classmethod
    def setupClass(cls):
        from .results.results_regression import Longley
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        res2 = Longley()
        res2.wresid = res1.wresid # workaround hack
        cls.res1 = res1
        cls.res2 = res2
        # Second fit via QR, and a third where Q, R and the derived
        # normalized_cov_params/rank are injected by hand.
        res_qr = OLS(data.endog, data.exog).fit(method="qr")
        model_qr = OLS(data.endog, data.exog)
        Q, R = np.linalg.qr(data.exog)
        model_qr.exog_Q, model_qr.exog_R = Q, R
        model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
        model_qr.rank = np_matrix_rank(R)
        res_qr2 = model_qr.fit(method="qr")
        cls.res_qr = res_qr
        cls.res_qr_manual = res_qr2
    def test_eigenvalues(self):
        # Relative difference of eigenvalues between the two QR fits
        # should be ~0.
        eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals)
        eigenval_perc_diff /= self.res_qr.eigenvals
        zeros = np.zeros_like(eigenval_perc_diff)
        assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7)
    # Robust error tests. Compare values computed with SAS
    def test_HC0_errors(self):
        #They are split up because the copied results do not have any DECIMAL_4
        #places for the last place.
        assert_almost_equal(self.res1.HC0_se[:-1],
                            self.res2.HC0_se[:-1], DECIMAL_4)
        assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1])
    def test_HC1_errors(self):
        assert_almost_equal(self.res1.HC1_se[:-1],
                            self.res2.HC1_se[:-1], DECIMAL_4)
        assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1])
    def test_HC2_errors(self):
        assert_almost_equal(self.res1.HC2_se[:-1],
                            self.res2.HC2_se[:-1], DECIMAL_4)
        assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1])
    def test_HC3_errors(self):
        assert_almost_equal(self.res1.HC3_se[:-1],
                            self.res2.HC3_se[:-1], DECIMAL_4)
        assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])
    def test_qr_params(self):
        assert_almost_equal(self.res1.params,
                            self.res_qr.params, 6)
    def test_qr_normalized_cov_params(self):
        #todo: need assert_close
        assert_almost_equal(np.ones_like(self.res1.normalized_cov_params),
                            self.res1.normalized_cov_params /
                            self.res_qr.normalized_cov_params, 5)
    def test_missing(self):
        # missing='drop' should remove the three NaN observations (16 -> 13).
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        data.endog[[3, 7, 14]] = np.nan
        mod = OLS(data.endog, data.exog, missing='drop')
        assert_equal(mod.endog.shape[0], 13)
        assert_equal(mod.exog.shape[0], 13)
    def test_rsquared_adj_overfit(self):
        # Test that if df_resid = 0, rsquared_adj = 0.
        # This is a regression test for user issue:
        # https://github.com/statsmodels/statsmodels/issues/868
        with warnings.catch_warnings(record=True):
            x = np.random.randn(5)
            y = np.random.randn(5, 6)
            results = OLS(x, y).fit()
            rsquared_adj = results.rsquared_adj
            assert_equal(rsquared_adj, np.nan)
    def test_qr_alternatives(self):
        assert_allclose(self.res_qr.params, self.res_qr_manual.params,
                        rtol=5e-12)
    def test_norm_resid(self):
        # resid_pearson should equal wresid scaled by sqrt(ssr/df_resid).
        resid = self.res1.wresid
        norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid)
        model_norm_resid = self.res1.resid_pearson
        assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7)
    def test_norm_resid_zero_variance(self):
        # Regressing y on itself gives a (near-)zero scale; resid_pearson
        # must still match wresid instead of dividing by zero.
        with warnings.catch_warnings(record=True):
            y = self.res1.model.endog
            res = OLS(y,y).fit()
            assert_allclose(res.scale, 0, atol=1e-20)
            assert_allclose(res.wresid, res.resid_pearson, atol=5e-11)
class TestRTO(CheckRegressionResults):
    """Longley regression through the origin vs. stored reference results."""

    @classmethod
    def setupClass(cls):
        from .results.results_regression import LongleyRTO
        data = longley.load()
        cls.res1 = OLS(data.endog, data.exog).fit()
        cls.res2 = LongleyRTO()
        # The stored results lack wresid; reuse the fitted ones (workaround).
        cls.res2.wresid = cls.res1.wresid
        cls.res_qr = OLS(data.endog, data.exog).fit(method="qr")
class TestFtest(object):
    """
    Tests f_test against the corresponding RegressionResults attributes.
    """

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = OLS(data.endog, data.exog).fit()
        # All six slope coefficients jointly zero (constant excluded).
        restriction = np.identity(7)[:-1, :]
        cls.Ftest = cls.res1.f_test(restriction)

    def test_F(self):
        assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4)

    def test_p(self):
        assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4)

    def test_Df_denom(self):
        assert_equal(self.Ftest.df_denom, self.res1.model.df_resid)

    def test_Df_num(self):
        assert_equal(self.Ftest.df_num, 6)
class TestFTest2(object):
    """
    Joint test that coef(GNP) == coef(UNEMP) and coef(POP) == coef(YEAR)
    for the Longley dataset.  Ftest1 is from statsmodels; reference values
    come from Rpy using R's car library.
    """

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        contrast = [[0, 1, -1, 0, 0, 0, 0],
                    [0, 0, 0, 0, 1, -1, 0]]
        cls.Ftest1 = res1.f_test(contrast)
        # The same restriction expressed as a hypothesis string.
        cls.NewFtest1 = res1.f_test('x2 = x3, x5 = x6')

    def test_new_ftest(self):
        assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue)

    def test_fvalue(self):
        assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4)

    def test_pvalue(self):
        assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459,
                            DECIMAL_4)

    def test_df_denom(self):
        assert_equal(self.Ftest1.df_denom, 9)

    def test_df_num(self):
        assert_equal(self.Ftest1.df_num, 2)
class TestFtestQ(object):
    """
    Joint hypothesis test Rb = q.  The coefficient restrictions are
    essentially made up; reference values taken from Stata.
    """

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        R = np.array([[0, 1, 1, 0, 0, 0, 0],
                      [0, 1, 0, 1, 0, 0, 0],
                      [0, 1, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 1, 0, 0],
                      [0, 0, 0, 0, 0, 1, 0]])
        q = np.array([0, 0, 0, 1, 0])
        cls.Ftest1 = res1.f_test((R, q))

    def test_fvalue(self):
        assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5)

    def test_pvalue(self):
        assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10)

    def test_df_denom(self):
        assert_equal(self.Ftest1.df_denom, 9)

    def test_df_num(self):
        assert_equal(self.Ftest1.df_num, 5)
class TestTtest(object):
    """
    Individual t-tests: is each coefficient significantly different
    from zero?
    """

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = OLS(data.endog, data.exog).fit()
        cls.Ttest = cls.res1.t_test(np.identity(7))
        # The same hypotheses written out as a string.
        hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0'
        cls.NewTTest = cls.res1.t_test(hyp)

    def test_new_tvalue(self):
        assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue)

    def test_tvalue(self):
        assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4)

    def test_sd(self):
        assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4)

    def test_pvalue(self):
        # Two-sided p-values from the t-distribution survivor function.
        expected = student_t.sf(np.abs(self.res1.tvalues),
                                self.res1.model.df_resid) * 2
        assert_almost_equal(self.Ttest.pvalue, expected, DECIMAL_4)

    def test_df_denom(self):
        assert_equal(self.Ttest.df_denom, self.res1.model.df_resid)

    def test_effect(self):
        assert_almost_equal(self.Ttest.effect, self.res1.params)
class TestTtest2(object):
    """
    t-test of the hypothesis that the POP and YEAR coefficients are equal.
    Reference values from RPy using the 'car' package.
    """

    @classmethod
    def setupClass(cls):
        contrast = np.zeros(7)
        contrast[4:6] = [1, -1]
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.Ttest1 = OLS(data.endog, data.exog).fit().t_test(contrast)

    def test_tvalue(self):
        assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284,
                            DECIMAL_4)

    def test_sd(self):
        assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4)

    def test_pvalue(self):
        # Two-sided p-value.
        assert_almost_equal(self.Ttest1.pvalue, 2 * 0.0015163772380932246,
                            DECIMAL_4)

    def test_df_denom(self):
        assert_equal(self.Ttest1.df_denom, 9)

    def test_effect(self):
        assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4)
class TestGLS(object):
    """
    GLS with an AR(1)-style Toeplitz covariance on the Longley data.
    These test results were obtained by replication with R.
    """

    @classmethod
    def setupClass(cls):
        from .results.results_regression import LongleyGls
        data = longley.load()
        exog = add_constant(
            np.column_stack((data.exog[:, 1], data.exog[:, 4])),
            prepend=False)
        # Estimate the serial correlation from the OLS residuals (by
        # assumption) and build the implied covariance rho**|i-j|.
        ols_fit = OLS(data.endog, exog).fit()
        rho = np.corrcoef(ols_fit.resid[1:], ols_fit.resid[:-1])[0][1]
        sigma = rho ** toeplitz(np.arange(16))
        cls.res1 = GLS(data.endog, exog, sigma=sigma).fit()
        cls.res2 = LongleyGls()
        # Kept around for test_missing.
        cls.sigma = sigma
        cls.exog = exog
        cls.endog = data.endog

    def test_aic(self):
        # NOTE(review): the +2 offset presumably reconciles differing
        # log-likelihood/parameter-count conventions vs. R -- confirm.
        assert_approx_equal(self.res1.aic + 2, self.res2.aic, 3)

    def test_bic(self):
        assert_approx_equal(self.res1.bic, self.res2.bic, 2)

    def test_loglike(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0)

    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1)

    def test_resid(self):
        assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4)

    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4)

    def test_tvalues(self):
        assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4)

    def test_standarderrors(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)

    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                            DECIMAL_4)

    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)

    def test_missing(self):
        # Copy first so the NaNs do not leak into the other test methods.
        endog = self.endog.copy()
        endog[[4, 7, 14]] = np.nan
        mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop')
        assert_equal(mod.endog.shape[0], 13)
        assert_equal(mod.exog.shape[0], 13)
        assert_equal(mod.sigma.shape, (13, 13))
class TestGLS_alt_sigma(CheckRegressionResults):
    """
    GLS with no sigma argument must be equivalent to OLS.
    """

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.endog = data.endog
        cls.exog = data.exog
        cls.res1 = GLS(data.endog, data.exog).fit()
        cls.res2 = OLS(data.endog, data.exog).fit()
        # NOTE(review): deliberately left unfitted in the original -- only
        # construction with a scalar sigma is exercised here.
        cls.res3 = GLS(data.endog, data.exog, sigma=1)
        # self.res2.conf_int = self.res2.conf_int()

    def test_wrong_size_sigma_1d(self):
        nobs = len(self.endog)
        assert_raises(ValueError, GLS, self.endog, self.exog,
                      sigma=np.ones(nobs - 1))

    def test_wrong_size_sigma_2d(self):
        nobs = len(self.endog)
        assert_raises(ValueError, GLS, self.endog, self.exog,
                      sigma=np.ones((nobs - 1, nobs - 1)))

    # def check_confidenceintervals(self, conf1, conf2):
    #     assert_almost_equal(conf1, conf2, DECIMAL_4)
class TestLM(object):
    """Compare compare_lm_test against hand-computed LM statistics."""

    @classmethod
    def setupClass(cls):
        # TODO: Test HAC method
        exog = np.random.randn(100, 3)
        beta = np.ones((3, 1))
        endog = np.dot(exog, beta) + np.random.randn(100, 1)
        # Cases: homoskedastic, and HC0 (restricted model keeps only the
        # first regressor).
        cls.res1_full = OLS(endog, exog).fit()
        cls.res1_restricted = OLS(endog, exog[:, 0]).fit()
        cls.res2_full = cls.res1_full.get_robustcov_results('HC0')
        cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0')
        cls.X = exog
        cls.Y = endog

    def test_LM_homoskedastic(self):
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        X = self.X
        # Homoskedastic score covariance: sigma2_hat * X'X / n.
        S = np.dot(resid, resid) / n * np.dot(X.T, X) / n
        s = np.mean(X * resid[:, None], 0)
        LMstat = n * np.dot(np.dot(s, np.linalg.inv(S)), s.T)
        LMstat2 = self.res1_full.compare_lm_test(self.res1_restricted)[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)

    def test_LM_heteroskedastic_nodemean(self):
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        scores = self.X * resid[:, None]
        S = np.dot(scores.T, scores) / n
        s = np.mean(scores, 0)
        LMstat = n * np.dot(np.dot(s, np.linalg.inv(S)), s.T)
        LMstat2 = self.res2_full.compare_lm_test(
            self.res2_restricted, demean=False)[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)

    def test_LM_heteroskedastic_demean(self):
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        scores = self.X * resid[:, None]
        demeaned = scores - scores.mean(0)
        S = np.dot(demeaned.T, demeaned) / n
        s = np.mean(scores, 0)
        LMstat = n * np.dot(np.dot(s, np.linalg.inv(S)), s.T)
        LMstat2 = self.res2_full.compare_lm_test(self.res2_restricted)[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)

    def test_LM_heteroskedastic_LRversion(self):
        # Scores from the restricted fit, covariance from the full fit.
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        s = np.mean(self.X * resid[:, None], 0)
        scores_full = self.X * self.res1_full.wresid[:, None]
        S = np.dot(scores_full.T, scores_full) / n
        LMstat = n * np.dot(np.dot(s, np.linalg.inv(S)), s.T)
        LMstat2 = self.res2_full.compare_lm_test(
            self.res2_restricted, use_lr=True)[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)

    def test_LM_nonnested(self):
        # Comparing against a *larger* model must be rejected.
        assert_raises(ValueError, self.res2_restricted.compare_lm_test,
                      self.res2_full)
class TestOLS_GLS_WLS_equivalence(object):
    """OLS, unit-weight WLS and (scaled) identity-sigma GLS must agree."""

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        y, X = data.endog, data.exog
        w = np.ones(y.shape[0])
        cls.results = [
            OLS(y, X).fit(),
            WLS(y, X, w).fit(),
            GLS(y, X, 100 * w).fit(),
            GLS(y, X, np.diag(0.1 * w)).fit(),
        ]

    def test_ll(self):
        # llf, AIC and BIC are invariant to rescaling weights / sigma.
        ref = self.results[0]
        llf = np.array([r.llf for r in self.results])
        assert_almost_equal(llf, np.ones_like(llf) * ref.llf, DECIMAL_7)
        aic = np.array([r.aic for r in self.results])
        assert_almost_equal(aic, np.ones_like(aic) * ref.aic, DECIMAL_7)
        bic = np.array([r.bic for r in self.results])
        assert_almost_equal(bic, np.ones_like(bic) * ref.bic, DECIMAL_7)

    def test_params(self):
        params = np.array([r.params for r in self.results])
        expected = np.array([self.results[0].params] * len(self.results))
        assert_allclose(params, expected)

    def test_ss(self):
        bse = np.array([r.bse for r in self.results])
        expected = np.array([self.results[0].bse] * len(self.results))
        assert_allclose(bse, expected)

    def test_rsquared(self):
        rsq = np.array([r.rsquared for r in self.results])
        expected = np.array([self.results[0].rsquared] * len(self.results))
        assert_almost_equal(rsq, expected, DECIMAL_7)
class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence):
    """Same invariance checks with non-trivial (random) weights.

    The inherited test methods are reused; only setup and the rsquared
    check differ.
    """

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        y, X = data.endog, data.exog
        np.random.seed(5)
        w = np.random.uniform(0.5, 1, y.shape[0])
        w_inv = 1. / w
        cls.results = [
            WLS(y, X, w).fit(),
            WLS(y, X, 0.01 * w).fit(),
            GLS(y, X, 100 * w_inv).fit(),
            GLS(y, X, np.diag(0.1 * w_inv)).fit(),
        ]

    def test_rsquared(self):
        # TODO: WLS rsquared is ok, GLS might have wrong centered_tss
        # WLS and GLS disagree on rsquared; only check that each is
        # invariant to rescaling of its own weights / sigma.
        assert_almost_equal(self.results[1].rsquared,
                            self.results[0].rsquared, DECIMAL_7)
        assert_almost_equal(self.results[3].rsquared,
                            self.results[2].rsquared, DECIMAL_7)
class TestNonFit(object):
    """Model-level attributes must be available before fit() is called."""

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.endog = data.endog
        cls.exog = data.exog
        cls.ols_model = OLS(data.endog, data.exog)

    def test_df_resid(self):
        # nobs - k_exog == 16 - 7 == 9 for Longley with a constant.
        # Fixed: the computed df_resid was previously unused and the
        # assertion compared against the Python-2-only literal long(9).
        df_resid = self.endog.shape[0] - self.exog.shape[1]
        assert_equal(self.ols_model.df_resid, df_resid)
class TestWLS_CornerCases(object):
    """Degenerate WLS inputs: one observation, badly shaped weights."""

    @classmethod
    def setupClass(cls):
        cls.exog = np.ones((1,))
        cls.endog = np.ones((1,))
        cls.wls_res = WLS(cls.endog, cls.exog, weights=1).fit()

    def test_wrong_size_weights(self):
        bad_weights = np.ones((10, 10))
        assert_raises(ValueError, WLS, self.endog, self.exog,
                      weights=bad_weights)
class TestWLSExogWeights(CheckRegressionResults):
    #Test WLS with Greene's credit card data
    #reg avgexp age income incomesq ownrent [aw=1/incomesq]
    def __init__(self):
        # Uses __init__ (not setupClass), so the in-place corrections to
        # res2 below are re-applied to a fresh CCardWLS on every run.
        from .results.results_regression import CCardWLS
        from statsmodels.datasets.ccard import load
        dta = load()
        dta.exog = add_constant(dta.exog, prepend=False)
        nobs = 72.
        weights = 1 / dta.exog[:, 2]
        # Rescale for comparison with Stata's analytic weights.
        aweights = (weights * nobs) / weights.sum()
        self.res1 = WLS(dta.endog, dta.exog, weights=aweights).fit()
        self.res2 = CCardWLS()
        self.res2.wresid = aweights ** .5 * self.res2.resid
        # Correction because we use a different definition for loglike/llf.
        corr_ic = 2 * (self.res1.llf - self.res2.llf)
        self.res2.aic -= corr_ic
        self.res2.bic -= corr_ic
        self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights))
def test_wls_example():
    # Example from the WLS docstring; there was a note about a bug which
    # should be fixed now.  Reference values taken from R's lm summary.
    endog = [1, 3, 4, 5, 2, 3, 4]
    exog = add_constant(lrange(1, 8), prepend=False)
    res = WLS(endog, exog, weights=lrange(1, 8)).fit()
    assert_almost_equal(res.fvalue, 0.127337843215, 6)
    assert_almost_equal(res.scale, 2.44608530786 ** 2, 6)
def test_wls_tss():
    # WLS on collapsed data with frequency weights must report the same
    # centered total sum of squares as OLS on the expanded data.
    y_full = np.array([22, 22, 22, 23, 23, 23])
    x_full = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]]
    ols_mod = OLS(y_full, add_constant(x_full, prepend=False)).fit()
    y_small = np.array([22, 22, 23.])
    x_small = [[1, 0], [1, 1], [0, 1]]
    freq = np.array([2, 1, 3.])
    wls_mod = WLS(y_small, add_constant(x_small, prepend=False),
                  weights=freq).fit()
    assert_equal(ols_mod.centered_tss, wls_mod.centered_tss)
class TestWLSScalarVsArray(CheckRegressionResults):
    """A scalar weight and an equal-valued weight array fit identically."""

    @classmethod
    def setupClass(cls):
        from statsmodels.datasets.longley import load
        dta = load()
        dta.exog = add_constant(dta.exog, prepend=True)
        cls.res1 = WLS(dta.endog, dta.exog, weights=1. / 3).fit()
        cls.res2 = WLS(dta.endog, dta.exog,
                       weights=[1 / 3.] * len(dta.endog)).fit()
#class TestWLS_GLS(CheckRegressionResults):
# @classmethod
# def setupClass(cls):
# from statsmodels.datasets.ccard import load
# data = load()
# cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit()
# cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit()
#
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2(), DECIMAL_4)
def test_wls_missing():
    from statsmodels.datasets.ccard import load
    data = load()
    # Poke NaNs into the response; missing='drop' must then remove those
    # rows from endog, exog and the weights alike (72 -> 70 observations).
    data.endog[[10, 25]] = np.nan
    mod = WLS(data.endog, data.exog, weights=1 / data.exog[:, 2],
              missing='drop')
    assert_equal(mod.endog.shape[0], 70)
    assert_equal(mod.exog.shape[0], 70)
    assert_equal(mod.weights.shape[0], 70)
class TestWLS_OLS(CheckRegressionResults):
    """Unweighted WLS must coincide with OLS on the Longley data."""

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = OLS(data.endog, data.exog).fit()
        cls.res2 = WLS(data.endog, data.exog).fit()

    def check_confidenceintervals(self, conf1, conf2):
        # conf2 is the bound conf_int method; call it before comparing.
        assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_OLS(CheckRegressionResults):
    """Default GLS must coincide with OLS on the Longley data."""

    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = GLS(data.endog, data.exog).fit()
        cls.res2 = OLS(data.endog, data.exog).fit()

    def check_confidenceintervals(self, conf1, conf2):
        # conf2 is the bound conf_int method; call it before comparing.
        assert_almost_equal(conf1, conf2(), DECIMAL_4)
#TODO: test AR
# why the two-stage in AR?
#class test_ar(object):
# from statsmodels.datasets.sunspots import load
# data = load()
# model = AR(data.endog, rho=4).fit()
# R_res = RModel(data.endog, aic="FALSE", order_max=4)
# def test_params(self):
# assert_almost_equal(self.model.rho,
# pass
# def test_order(self):
# In R this can be defined or chosen by minimizing the AIC if aic=True
# pass
class TestYuleWalker(object):
    """Yule-Walker AR(4) MLE estimates vs. values computed in R."""

    @classmethod
    def setupClass(cls):
        from statsmodels.datasets.sunspots import load
        data = load()
        cls.rho, cls.sigma = yule_walker(data.endog, order=4, method="mle")
        cls.R_params = [1.2831003105694765, -0.45240924374091945,
                        -0.20770298557575195, 0.047943648089542337]

    def test_params(self):
        assert_almost_equal(self.rho, self.R_params, DECIMAL_4)
class TestDataDimensions(CheckRegressionResults):
    """Fits with (n,) vs. (n, 1) shaped endog/exog must be equivalent."""

    @classmethod
    def setupClass(cls):
        np.random.seed(54321)
        cls.endog_n_ = np.random.uniform(0, 20, size=30)
        cls.endog_n_one = cls.endog_n_[:, None]
        cls.exog_n_ = np.random.uniform(0, 20, size=30)
        cls.exog_n_one = cls.exog_n_[:, None]
        cls.degen_exog = cls.exog_n_one[:-1]
        # NOTE(review): df_model is bumped by hand -- presumably to offset
        # the automatic constant adjustment for these constant-free fits;
        # confirm against OLS.df_model semantics.
        cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
        cls.mod1.df_model += 1
        cls.res1 = cls.mod1.fit()
        # Subclasses override mod2/res2 with differently shaped inputs;
        # recreating them here adds a little per-subclass overhead.
        cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
        cls.mod2.df_model += 1
        cls.res2 = cls.mod2.fit()

    def check_confidenceintervals(self, conf1, conf2):
        assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_large_data(TestDataDimensions):
    """GLS with a trivial sigma must match OLS on a larger random problem."""

    @classmethod
    def setupClass(cls):
        nobs = 1000
        endog = np.random.randn(nobs, 1)
        exog = np.random.randn(nobs, 20)
        cls.gls_res = GLS(endog, exog, sigma=np.ones_like(endog)).fit()
        cls.gls_res_scalar = GLS(endog, exog, sigma=1).fit()
        cls.gls_res_none = GLS(endog, exog).fit()
        cls.ols_res = OLS(endog, exog).fit()

    def test_large_equal_params(self):
        assert_almost_equal(self.ols_res.params, self.gls_res.params,
                            DECIMAL_7)

    def test_large_equal_loglike(self):
        assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7)

    def test_large_equal_params_none(self):
        assert_almost_equal(self.gls_res.params, self.gls_res_none.params,
                            DECIMAL_7)
class TestNxNx(TestDataDimensions):
    """Both endog and exog passed as flat (n,) arrays."""

    @classmethod
    def setupClass(cls):
        super(TestNxNx, cls).setupClass()
        model = OLS(cls.endog_n_, cls.exog_n_)
        model.df_model += 1
        cls.mod2 = model
        cls.res2 = model.fit()
class TestNxOneNx(TestDataDimensions):
    """(n, 1) endog combined with flat (n,) exog."""

    @classmethod
    def setupClass(cls):
        super(TestNxOneNx, cls).setupClass()
        model = OLS(cls.endog_n_one, cls.exog_n_)
        model.df_model += 1
        cls.mod2 = model
        cls.res2 = model.fit()
class TestNxNxOne(TestDataDimensions):
    """Flat (n,) endog combined with (n, 1) exog."""

    @classmethod
    def setupClass(cls):
        super(TestNxNxOne, cls).setupClass()
        model = OLS(cls.endog_n_, cls.exog_n_one)
        model.df_model += 1
        cls.mod2 = model
        cls.res2 = model.fit()
def test_bad_size():
    # endog and exog of different lengths must be rejected.
    np.random.seed(54321)
    data = np.random.uniform(0, 20, 31)
    assert_raises(ValueError, OLS, data, data[1:])
def test_const_indicator():
    # A full set of dummies (implicit constant, hasconst=True) must yield
    # the same R-squared as the explicit-constant parameterization.
    np.random.seed(12345)
    dummies = categorical(np.random.randint(0, 3, size=30), drop=True)
    y = np.dot(dummies, [1., 2., 3.]) + np.random.normal(size=30)
    res_explicit = OLS(y, add_constant(dummies[:, 1:], prepend=True)).fit()
    res_implicit = OLS(y, dummies, hasconst=True).fit()
    assert_almost_equal(res_explicit.rsquared, res_implicit.rsquared, 12)
def test_706():
    # Regression test for gh-706: a single-regressor pandas Series must
    # still produce a DataFrame from conf_int.
    y = pandas.Series(np.random.randn(10))
    x = pandas.Series(np.ones(10))
    conf_int = OLS(y, x).fit().conf_int()
    np.testing.assert_equal(conf_int.shape, (1, 2))
    np.testing.assert_(isinstance(conf_int, pandas.DataFrame))
def test_summary():
    """Regression test (gh-734): the LaTeX summary output stays stable."""
    # test 734
    import re
    dta = longley.load_pandas()
    X = dta.exog
    X["constant"] = 1
    y = dta.endog
    with warnings.catch_warnings(record=True):
        res = OLS(y, X).fit()
        table = res.summary().as_latex()
    # replace the date and time (run-dependent) with fixed values so the
    # comparison against the stored snapshot below is deterministic
    table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&",
                   " Sun, 07 Apr 2013 &", table)
    table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&",
                   " 13:46:07 &", table)
    # Snapshot of the expected LaTeX output.
    expected = """\\begin{center}
\\begin{tabular}{lclc}
\\toprule
\\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\
\\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\
\\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\
\\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\
\\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\
\\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\
\\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\
\\textbf{Df Model:} & 6 & \\textbf{ } & \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lcccccc}
 & \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[0.025} & \\textbf{0.975]} \\\\
\\midrule
\\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 & 207.153 \\\\
\\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 & 0.040 \\\\
\\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 & -0.915 \\\\
\\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 & -0.549 \\\\
\\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 & 0.460 \\\\
\\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 & 2859.515 \\\\
\\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 & -1.47e+06 \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lclc}
\\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\
\\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\
\\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\
\\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\
\\bottomrule
\\end{tabular}
%\\caption{OLS Regression Results}
\\end{center}"""
    assert_equal(table, expected)
class TestRegularizedFit(object):
    """Tests for OLS.fit_regularized against glmnet results from R."""

    # Make sure there are no issues when there are no selected variables.
    def test_empty_model(self):
        # A huge penalty selects nothing; all estimates collapse to zero.
        np.random.seed(742)
        n = 100
        endog = np.random.normal(size=n)
        exog = np.random.normal(size=(n, 3))
        result = OLS(endog, exog).fit_regularized(alpha=1000)
        assert_equal(result.params, 0.)
        assert_equal(result.bse, 0.)

    def test_regularized(self):
        import os
        from . import glmnet_r_results

        cur_dir = os.path.dirname(os.path.abspath(__file__))
        data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"),
                          delimiter=",")
        # Each rslt_* vector is laid out as (n, p, L1_wt, lam, params...).
        for name in (x for x in dir(glmnet_r_results)
                     if x.startswith("rslt_")):
            vec = getattr(glmnet_r_results, name)
            n = vec[0]
            p = vec[1]
            L1_wt = float(vec[2])
            lam = float(vec[3])
            params = vec[4:].astype(np.float64)
            endog = data[0:int(n), 0]
            exog = data[0:int(n), 1:(int(p) + 1)]
            # Standardize both sides the same way glmnet does.
            endog = endog - endog.mean()
            endog /= endog.std(ddof=1)
            exog = exog - exog.mean(0)
            exog /= exog.std(0, ddof=1)
            rslt = OLS(endog, exog).fit_regularized(L1_wt=L1_wt, alpha=lam)
            assert_almost_equal(rslt.params, params, decimal=3)
            # Smoke test for summary
            rslt.summary()
def test_formula_missing_cat():
    """Regression test for gh-805: NaN in a categorical formula term.

    Fitting with the NaN row present must be equivalent to fitting after
    dropping it, and missing='raise' must surface a PatsyError.
    """
    # gh-805
    import statsmodels.api as sm
    from statsmodels.formula.api import ols
    from patsy import PatsyError
    dta = sm.datasets.grunfeld.load_pandas().data
    # Fixed: the deprecated (and since removed) .ix indexer is replaced by
    # the equivalent label-based .loc assignment.
    dta.loc[0, 'firm'] = np.nan
    mod = ols(formula='value ~ invest + capital + firm + year',
              data=dta.dropna())
    res = mod.fit()
    mod2 = ols(formula='value ~ invest + capital + firm + year',
               data=dta)
    res2 = mod2.fit()
    assert_almost_equal(res.params.values, res2.params.values)
    assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year',
                  data=dta, missing='raise')
def test_missing_formula_predict():
    # see 2171: predicting through the formula interface should cope with a
    # NaN row appended to the training data.
    nsample = 30
    frame = pandas.DataFrame({'x': np.linspace(0, 10, nsample)})
    nan_row = pandas.DataFrame({'x': np.array([np.nan])})
    frame = pandas.concat([frame, nan_row])
    beta = np.array([1, 0.1])
    frame['y'] = beta[0] + beta[1] * frame['x'] + np.random.normal(
        size=nsample + 1)
    fit = OLS.from_formula('y ~ x', data=frame).fit()
    # Smoke test: predicting on the non-NaN rows must not raise.
    fit.predict(exog=frame[:-1])
def test_fvalue_implicit_constant():
    """With an implicit constant (a full dummy set), the overall F statistic
    is not defined under a robust covariance: ``fvalue``/``f_pvalue`` must be
    NaN and ``summary()`` must still render without raising."""
    nobs = 100
    np.random.seed(2)
    x = np.random.randn(nobs, 1)
    # Two exhaustive dummy columns -> an implicit constant in the design.
    x = ((x > 0) == [True, False]).astype(int)
    y = x.sum(1) + np.random.randn(nobs)
    # NOTE(review): the original also computed unused weights
    # ``w = 1 + 0.25 * np.random.rand(nobs)`` -- presumably intended for the
    # WLS fit below; removed here as dead code (nothing after it consumed
    # the random stream inside this function).
    from statsmodels.regression.linear_model import OLS, WLS

    res = OLS(y, x).fit(cov_type='HC1')
    assert_(np.isnan(res.fvalue))
    assert_(np.isnan(res.f_pvalue))
    res.summary()

    res = WLS(y, x).fit(cov_type='HC1')
    assert_(np.isnan(res.fvalue))
    assert_(np.isnan(res.f_pvalue))
    res.summary()
# Run this module's tests under nose when executed directly; -x stops on
# first failure and --pdb/--pdb-failure drop into the debugger.
if __name__=="__main__":
    import nose
    # run_module_suite()
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
    # nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
| bsd-3-clause |
bayesimpact/bob-emploi | frontend/server/modules/test/create_your_company_test.py | 1 | 9054 | """Unit tests for the module create-your-company."""
import datetime
import unittest
from unittest import mock
from bob_emploi.common.python import now
from bob_emploi.frontend.api import user_pb2
from bob_emploi.frontend.server.test import base_test
from bob_emploi.frontend.server.test import scoring_test
class AdviceCreateYourCompanyTestCase(scoring_test.ScoringModelTestBase):
    """Unit tests for the "Create your company" scoring model."""

    # ID of the scoring model under test, resolved by ScoringModelTestBase.
    model_id = 'advice-create-your-company'

    def test_atypic_profile(self) -> None:
        """Test the scoring function before the events with an atypic profile."""
        persona = self._random_persona().clone()
        persona.user_profile.frustrations.append(user_pb2.ATYPIC_PROFILE)
        score = self._score_persona(persona)
        # An atypic-profile frustration is expected to score 2.
        self.assertEqual(2, score, msg=f'Fail for "{persona.name}"')

    def test_not_really_needed_yet(self) -> None:
        """Test the scoring function for someone that has just started their search."""
        persona = self._random_persona().clone()
        self.now = datetime.datetime(2018, 1, 25)
        # Clear frustrations so only the search duration drives the score.
        del persona.user_profile.frustrations[:]
        persona.project.job_search_has_not_started = False
        # NOTE(review): the start date (2018-12-14) is *after* self.now
        # (2018-01-25); 2017-12-14 may have been intended -- confirm.
        persona.project.job_search_started_at.FromDatetime(datetime.datetime(2018, 12, 14))
        score = self._score_persona(persona)
        self.assertEqual(1, score, msg=f'Fail for "{persona.name}"')
class EndpointTestCase(base_test.ServerTestCase):
    """Unit tests for the project/.../create-your-company endpoint."""

    def setUp(self) -> None:
        super().setUp()
        # Register the advice module so the endpoint can resolve it.
        self._db.advice_modules.insert_one({
            'adviceId': 'create-your-company',
            'triggerScoringModel': 'advice-create-your-company',
            'extraDataFieldName': 'create_your_company_data',
            'isReadyForProd': True,
        })

    def test_close_to_city_with_events(self) -> None:
        """Test close to a city with multiple events."""
        self._db.cities.insert_one({
            '_id': '69266',
            'latitude': 45.7667,
            'longitude': 4.88333,
        })
        self._db.adie_events.insert_many([
            {
                'title': 'Create your company',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
            },
            {
                'title': 'Work as a freelance',
                'cityName': 'Lyon',
            },
            {
                'title': 'Entrepreneur in Paris',
                'cityName': 'Paris',
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        # Only the Lyon events should be returned, matched by city name.
        self.assertEqual('Lyon', data.get('closeByEvents', {}).get('city'))
        self.assertEqual(
            ['Create your company', 'Work as a freelance'],
            [event.get('title') for event in data.get('closeByEvents', {}).get('events')])

    def test_related_testimonials(self) -> None:
        """Test when testimonials related to the user's project exist."""
        self._db.adie_testimonials.insert_many([
            {
                'author_name': 'Bob',
                'author_job_name': 'coach',
                'link': 'www.here.org',
                'image_link': 'www.image.org',
                'description': 'I will help you',
                'filters': [],
                'preferred_job_group_ids': ['A1', 'B2'],
            },
            {
                'author_name': 'Bill',
                'author_job_name': 'witch',
                'link': 'www.away.org',
                'image_link': 'www.no-image.org',
                'description': 'I will put a spell on you',
                'filters': [],
                'preferred_job_group_ids': ['A2', 'B1'],
            },
            {
                'author_name': 'Lola',
                'author_job_name': 'driver',
                'link': 'www.there.org',
                'image_link': 'www.this-image.org',
                'description': 'I will try to help you',
                'filters': [],
                'preferred_job_group_ids': ['A12', 'B3'],
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"targetJob": {"jobGroup": {"romeId": "A1234"}}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        # Only Bob (A1) and Lola (A12) match the romeId prefix "A1234".
        self.assertEqual(2, len(data.get('relatedTestimonials', []).get('testimonials', [])))
        self.assertEqual(
            ['Bob', 'Lola'],
            [testimonial.get('authorName') for testimonial in data.get(
                'relatedTestimonials', []).get('testimonials', [])])

    def test_far_from_any_city_with_events(self) -> None:
        """Test far from any city with events."""
        self._db.cities.insert_one({
            '_id': '67462',
            # Sélestat: closer to Dijon than to Lyon.
            'latitude': 48.2667,
            'longitude': 7.45,
        })
        self._db.adie_events.insert_many([
            {
                'title': 'Create your company',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
            },
            {
                'title': 'Entrepreneur in Dijon',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "67462"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertEqual({'closeByEvents'}, data.keys())
        # Events come back ordered by distance from the user's city.
        self.assertEqual(
            ['Entrepreneur in Dijon', 'Create your company'],
            [event.get('title') for event in data.get('closeByEvents', {}).get('events')])

    def test_no_location(self) -> None:
        """Test a city whose coordinates are not stored in the DB."""
        self._db.adie_events.insert_many([
            {
                'title': 'Create your company',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
            },
            {
                'title': 'Entrepreneur in Dijon',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertEqual({'closeByEvents'}, data.keys())
        # Without coordinates no distance ordering is guaranteed, so the
        # titles are compared as a set.
        self.assertEqual(
            {'Entrepreneur in Dijon', 'Create your company'},
            {event.get('title') for event in data.get('closeByEvents', {}).get('events')})

    def test_no_events(self) -> None:
        """Test without any events."""
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertFalse(data.get('closeByEvents'))

    # Freeze "now" so start-date filtering below is deterministic.
    @mock.patch(now.__name__ + '.get', mock.MagicMock(
        return_value=datetime.datetime(2018, 5, 9)))
    def test_start_date(self) -> None:
        """Test events with start dates."""
        self._db.adie_events.insert_many([
            {
                'title': 'Past date',
                'cityName': 'Lyon',
                'latitude': 45.7589,
                'longitude': 4.84139,
                'startDate': '2018-05-02',
            },
            {
                'title': 'No date',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
            },
            {
                'title': 'Today',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
                'startDate': '2018-05-09',
            },
            {
                'title': 'Future date',
                'cityName': 'Dijon',
                'latitude': 47.322047,
                'longitude': 5.04148,
                'startDate': '2018-06-01',
            },
        ])
        response = self.app.post(
            '/api/advice/create-your-company',
            data='{"projects": [{"city": {"cityId": "69266"}}]}',
            content_type='application/json')
        data = self.json_from_response(response)
        self.assertEqual({'closeByEvents'}, data.keys())
        # Only the already-started event ("Past date") is filtered out.
        self.assertEqual(
            {'No date', 'Today', 'Future date'},
            {event.get('title') for event in data.get('closeByEvents', {}).get('events')})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
MoRgUiJu/morguiju.repo | plugin.video.pelisalacarta/core/downloadtools.py | 2 | 51361 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# --------------------------------------------------------------------------------
# Download Tools - Original based from code of VideoMonkey XBMC Plugin
#---------------------------------------------------------------------------------
import os.path
import re
import socket
import sys
import time
import urllib
import urllib2
import config
import logger
# Map of HTML named character entities (name without the surrounding '&' and
# ';') to the corresponding unicode character, used to decode entities found
# in scraped page text.
entitydefs = {
    'AElig': u'\u00C6', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1'
    'Aacute': u'\u00C1', # latin capital letter A with acute, U+00C1 ISOlat1'
    'Acirc': u'\u00C2', # latin capital letter A with circumflex, U+00C2 ISOlat1'
    'Agrave': u'\u00C0', # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1'
    'Alpha': u'\u0391', # greek capital letter alpha, U+0391'
    'Aring': u'\u00C5', # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1'
    'Atilde': u'\u00C3', # latin capital letter A with tilde, U+00C3 ISOlat1'
    'Auml': u'\u00C4', # latin capital letter A with diaeresis, U+00C4 ISOlat1'
    'Beta': u'\u0392', # greek capital letter beta, U+0392'
    'Ccedil': u'\u00C7', # latin capital letter C with cedilla, U+00C7 ISOlat1'
    'Chi': u'\u03A7', # greek capital letter chi, U+03A7'
    'Dagger': u'\u2021', # double dagger, U+2021 ISOpub'
    'Delta': u'\u0394', # greek capital letter delta, U+0394 ISOgrk3'
    'ETH': u'\u00D0', # latin capital letter ETH, U+00D0 ISOlat1'
    'Eacute': u'\u00C9', # latin capital letter E with acute, U+00C9 ISOlat1'
    'Ecirc': u'\u00CA', # latin capital letter E with circumflex, U+00CA ISOlat1'
    'Egrave': u'\u00C8', # latin capital letter E with grave, U+00C8 ISOlat1'
    'Epsilon': u'\u0395', # grek capital letter epsilon, U+0395'
    'Eta': u'\u0397', # greek capital letter eta, U+0397'
    'Euml': u'\u00CB', # latin capital letter E with diaeresis, U+00CB ISOlat1'
    'Gamma': u'\u0393', # greek capital letter gamma, U+0393 ISOgrk3'
    'Iacute': u'\u00CD', # latin capital letter I with acute, U+00CD ISOlat1'
    'Icirc': u'\u00CE', # latin capital letter I with circumflex, U+00CE ISOlat1'
    'Igrave': u'\u00CC', # latin capital letter I with grave, U+00CC ISOlat1'
    'Iota': u'\u0399', # greek capital letter iota, U+0399'
    'Iuml': u'\u00CF', # latin capital letter I with diaeresis, U+00CF ISOlat1'
    'Kappa': u'\u039A', # greek capital letter kappa, U+039A'
    'Lambda': u'\u039B', # greek capital letter lambda, U+039B ISOgrk3'
    'Mu': u'\u039C', # greek capital letter mu, U+039C'
    'Ntilde': u'\u00D1', # latin capital letter N with tilde, U+00D1 ISOlat1'
    'Nu': u'\u039D', # greek capital letter nu, U+039D'
    'OElig': u'\u0152', # latin capital ligature OE, U+0152 ISOlat2'
    'Oacute': u'\u00D3', # latin capital letter O with acute, U+00D3 ISOlat1'
    'Ocirc': u'\u00D4', # latin capital letter O with circumflex, U+00D4 ISOlat1'
    'Ograve': u'\u00D2', # latin capital letter O with grave, U+00D2 ISOlat1'
    'Omega': u'\u03A9', # greek capital letter omega, U+03A9 ISOgrk3'
    'Omicron': u'\u039F', # greek capital letter omicron, U+039F'
    'Oslash': u'\u00D8', # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1'
    'Otilde': u'\u00D5', # latin capital letter O with tilde, U+00D5 ISOlat1'
    'Ouml': u'\u00D6', # latin capital letter O with diaeresis, U+00D6 ISOlat1'
    'Phi': u'\u03A6', # greek capital letter phi, U+03A6 ISOgrk3'
    'Pi': u'\u03A0', # greek capital letter pi, U+03A0 ISOgrk3'
    'Prime': u'\u2033', # double prime = seconds = inches, U+2033 ISOtech'
    'Psi': u'\u03A8', # greek capital letter psi, U+03A8 ISOgrk3'
    'Rho': u'\u03A1', # greek capital letter rho, U+03A1'
    'Scaron': u'\u0160', # latin capital letter S with caron, U+0160 ISOlat2'
    'Sigma': u'\u03A3', # greek capital letter sigma, U+03A3 ISOgrk3'
    'THORN': u'\u00DE', # latin capital letter THORN, U+00DE ISOlat1'
    'Tau': u'\u03A4', # greek capital letter tau, U+03A4'
    'Theta': u'\u0398', # greek capital letter theta, U+0398 ISOgrk3'
    'Uacute': u'\u00DA', # latin capital letter U with acute, U+00DA ISOlat1'
    'Ucirc': u'\u00DB', # latin capital letter U with circumflex, U+00DB ISOlat1'
    'Ugrave': u'\u00D9', # latin capital letter U with grave, U+00D9 ISOlat1'
    'Upsilon': u'\u03A5', # greek capital letter upsilon, U+03A5 ISOgrk3'
    'Uuml': u'\u00DC', # latin capital letter U with diaeresis, U+00DC ISOlat1'
    'Xi': u'\u039E', # greek capital letter xi, U+039E ISOgrk3'
    'Yacute': u'\u00DD', # latin capital letter Y with acute, U+00DD ISOlat1'
    'Yuml': u'\u0178', # latin capital letter Y with diaeresis, U+0178 ISOlat2'
    'Zeta': u'\u0396', # greek capital letter zeta, U+0396'
    'aacute': u'\u00E1', # latin small letter a with acute, U+00E1 ISOlat1'
    'acirc': u'\u00E2', # latin small letter a with circumflex, U+00E2 ISOlat1'
    'acute': u'\u00B4', # acute accent = spacing acute, U+00B4 ISOdia'
    'aelig': u'\u00E6', # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1'
    'agrave': u'\u00E0', # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1'
    'alefsym': u'\u2135', # alef symbol = first transfinite cardinal, U+2135 NEW'
    'alpha': u'\u03B1', # greek small letter alpha, U+03B1 ISOgrk3'
    'amp': u'\u0026', # ampersand, U+0026 ISOnum'
    'and': u'\u2227', # logical and = wedge, U+2227 ISOtech'
    'ang': u'\u2220', # angle, U+2220 ISOamso'
    'aring': u'\u00E5', # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1'
    'asymp': u'\u2248', # almost equal to = asymptotic to, U+2248 ISOamsr'
    'atilde': u'\u00E3', # latin small letter a with tilde, U+00E3 ISOlat1'
    'auml': u'\u00E4', # latin small letter a with diaeresis, U+00E4 ISOlat1'
    'bdquo': u'\u201E', # double low-9 quotation mark, U+201E NEW'
    'beta': u'\u03B2', # greek small letter beta, U+03B2 ISOgrk3'
    'brvbar': u'\u00A6', # broken bar = broken vertical bar, U+00A6 ISOnum'
    'bull': u'\u2022', # bullet = black small circle, U+2022 ISOpub'
    'cap': u'\u2229', # intersection = cap, U+2229 ISOtech'
    'ccedil': u'\u00E7', # latin small letter c with cedilla, U+00E7 ISOlat1'
    'cedil': u'\u00B8', # cedilla = spacing cedilla, U+00B8 ISOdia'
    'cent': u'\u00A2', # cent sign, U+00A2 ISOnum'
    'chi': u'\u03C7', # greek small letter chi, U+03C7 ISOgrk3'
    'circ': u'\u02C6', # modifier letter circumflex accent, U+02C6 ISOpub'
    'clubs': u'\u2663', # black club suit = shamrock, U+2663 ISOpub'
    'cong': u'\u2245', # approximately equal to, U+2245 ISOtech'
    'copy': u'\u00A9', # copyright sign, U+00A9 ISOnum'
    'crarr': u'\u21B5', # downwards arrow with corner leftwards = carriage return, U+21B5 NEW'
    'cup': u'\u222A', # union = cup, U+222A ISOtech'
    'curren': u'\u00A4', # currency sign, U+00A4 ISOnum'
    'dArr': u'\u21D3', # downwards double arrow, U+21D3 ISOamsa'
    'dagger': u'\u2020', # dagger, U+2020 ISOpub'
    'darr': u'\u2193', # downwards arrow, U+2193 ISOnum'
    'deg': u'\u00B0', # degree sign, U+00B0 ISOnum'
    'delta': u'\u03B4', # greek small letter delta, U+03B4 ISOgrk3'
    'diams': u'\u2666', # black diamond suit, U+2666 ISOpub'
    'divide': u'\u00F7', # division sign, U+00F7 ISOnum'
    'eacute': u'\u00E9', # latin small letter e with acute, U+00E9 ISOlat1'
    'ecirc': u'\u00EA', # latin small letter e with circumflex, U+00EA ISOlat1'
    'egrave': u'\u00E8', # latin small letter e with grave, U+00E8 ISOlat1'
    'empty': u'\u2205', # empty set = null set = diameter, U+2205 ISOamso'
    'emsp': u'\u2003', # em space, U+2003 ISOpub'
    'ensp': u'\u2002', # en space, U+2002 ISOpub'
    'epsilon': u'\u03B5', # greek small letter epsilon, U+03B5 ISOgrk3'
    'equiv': u'\u2261', # identical to, U+2261 ISOtech'
    'eta': u'\u03B7', # greek small letter eta, U+03B7 ISOgrk3'
    'eth': u'\u00F0', # latin small letter eth, U+00F0 ISOlat1'
    'euml': u'\u00EB', # latin small letter e with diaeresis, U+00EB ISOlat1'
    'euro': u'\u20AC', # euro sign, U+20AC NEW'
    'exist': u'\u2203', # there exists, U+2203 ISOtech'
    'fnof': u'\u0192', # latin small f with hook = function = florin, U+0192 ISOtech'
    'forall': u'\u2200', # for all, U+2200 ISOtech'
    'frac12': u'\u00BD', # vulgar fraction one half = fraction one half, U+00BD ISOnum'
    'frac14': u'\u00BC', # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum'
    'frac34': u'\u00BE', # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum'
    'frasl': u'\u2044', # fraction slash, U+2044 NEW'
    'gamma': u'\u03B3', # greek small letter gamma, U+03B3 ISOgrk3'
    'ge': u'\u2265', # greater-than or equal to, U+2265 ISOtech'
    'gt': u'\u003E', # greater-than sign, U+003E ISOnum'
    'hArr': u'\u21D4', # left right double arrow, U+21D4 ISOamsa'
    'harr': u'\u2194', # left right arrow, U+2194 ISOamsa'
    'hearts': u'\u2665', # black heart suit = valentine, U+2665 ISOpub'
    'hellip': u'\u2026', # horizontal ellipsis = three dot leader, U+2026 ISOpub'
    'iacute': u'\u00ED', # latin small letter i with acute, U+00ED ISOlat1'
    'icirc': u'\u00EE', # latin small letter i with circumflex, U+00EE ISOlat1'
    'iexcl': u'\u00A1', # inverted exclamation mark, U+00A1 ISOnum'
    'igrave': u'\u00EC', # latin small letter i with grave, U+00EC ISOlat1'
    'image': u'\u2111', # blackletter capital I = imaginary part, U+2111 ISOamso'
    'infin': u'\u221E', # infinity, U+221E ISOtech'
    'int': u'\u222B', # integral, U+222B ISOtech'
    'iota': u'\u03B9', # greek small letter iota, U+03B9 ISOgrk3'
    'iquest': u'\u00BF', # inverted question mark = turned question mark, U+00BF ISOnum'
    'isin': u'\u2208', # element of, U+2208 ISOtech'
    'iuml': u'\u00EF', # latin small letter i with diaeresis, U+00EF ISOlat1'
    'kappa': u'\u03BA', # greek small letter kappa, U+03BA ISOgrk3'
    'lArr': u'\u21D0', # leftwards double arrow, U+21D0 ISOtech'
    'lambda': u'\u03BB', # greek small letter lambda, U+03BB ISOgrk3'
    'lang': u'\u2329', # left-pointing angle bracket = bra, U+2329 ISOtech'
    'laquo': u'\u00AB', # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum'
    'larr': u'\u2190', # leftwards arrow, U+2190 ISOnum'
    'lceil': u'\u2308', # left ceiling = apl upstile, U+2308 ISOamsc'
    'ldquo': u'\u201C', # left double quotation mark, U+201C ISOnum'
    'le': u'\u2264', # less-than or equal to, U+2264 ISOtech'
    'lfloor': u'\u230A', # left floor = apl downstile, U+230A ISOamsc'
    'lowast': u'\u2217', # asterisk operator, U+2217 ISOtech'
    'loz': u'\u25CA', # lozenge, U+25CA ISOpub'
    'lrm': u'\u200E', # left-to-right mark, U+200E NEW RFC 2070'
    'lsaquo': u'\u2039', # single left-pointing angle quotation mark, U+2039 ISO proposed'
    'lsquo': u'\u2018', # left single quotation mark, U+2018 ISOnum'
    'lt': u'\u003C', # less-than sign, U+003C ISOnum'
    'macr': u'\u00AF', # macron = spacing macron = overline = APL overbar, U+00AF ISOdia'
    'mdash': u'\u2014', # em dash, U+2014 ISOpub'
    'micro': u'\u00B5', # micro sign, U+00B5 ISOnum'
    'middot': u'\u00B7', # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum'
    'minus': u'\u2212', # minus sign, U+2212 ISOtech'
    'mu': u'\u03BC', # greek small letter mu, U+03BC ISOgrk3'
    'nabla': u'\u2207', # nabla = backward difference, U+2207 ISOtech'
    'nbsp': u'\u00A0', # no-break space = non-breaking space, U+00A0 ISOnum'
    'ndash': u'\u2013', # en dash, U+2013 ISOpub'
    'ne': u'\u2260', # not equal to, U+2260 ISOtech'
    'ni': u'\u220B', # contains as member, U+220B ISOtech'
    'not': u'\u00AC', # not sign, U+00AC ISOnum'
    'notin': u'\u2209', # not an element of, U+2209 ISOtech'
    'nsub': u'\u2284', # not a subset of, U+2284 ISOamsn'
    'ntilde': u'\u00F1', # latin small letter n with tilde, U+00F1 ISOlat1'
    'nu': u'\u03BD', # greek small letter nu, U+03BD ISOgrk3'
    'oacute': u'\u00F3', # latin small letter o with acute, U+00F3 ISOlat1'
    'ocirc': u'\u00F4', # latin small letter o with circumflex, U+00F4 ISOlat1'
    'oelig': u'\u0153', # latin small ligature oe, U+0153 ISOlat2'
    'ograve': u'\u00F2', # latin small letter o with grave, U+00F2 ISOlat1'
    'oline': u'\u203E', # overline = spacing overscore, U+203E NEW'
    'omega': u'\u03C9', # greek small letter omega, U+03C9 ISOgrk3'
    'omicron': u'\u03BF', # greek small letter omicron, U+03BF NEW'
    'oplus': u'\u2295', # circled plus = direct sum, U+2295 ISOamsb'
    'or': u'\u2228', # logical or = vee, U+2228 ISOtech'
    'ordf': u'\u00AA', # feminine ordinal indicator, U+00AA ISOnum'
    'ordm': u'\u00BA', # masculine ordinal indicator, U+00BA ISOnum'
    'oslash': u'\u00F8', # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1'
    'otilde': u'\u00F5', # latin small letter o with tilde, U+00F5 ISOlat1'
    'otimes': u'\u2297', # circled times = vector product, U+2297 ISOamsb'
    'ouml': u'\u00F6', # latin small letter o with diaeresis, U+00F6 ISOlat1'
    'para': u'\u00B6', # pilcrow sign = paragraph sign, U+00B6 ISOnum'
    'part': u'\u2202', # partial differential, U+2202 ISOtech'
    'permil': u'\u2030', # per mille sign, U+2030 ISOtech'
    'perp': u'\u22A5', # up tack = orthogonal to = perpendicular, U+22A5 ISOtech'
    'phi': u'\u03C6', # greek small letter phi, U+03C6 ISOgrk3'
    'pi': u'\u03C0', # greek small letter pi, U+03C0 ISOgrk3'
    'piv': u'\u03D6', # greek pi symbol, U+03D6 ISOgrk3'
    'plusmn': u'\u00B1', # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum'
    'pound': u'\u00A3', # pound sign, U+00A3 ISOnum'
    'prime': u'\u2032', # prime = minutes = feet, U+2032 ISOtech'
    'prod': u'\u220F', # n-ary product = product sign, U+220F ISOamsb'
    'prop': u'\u221D', # proportional to, U+221D ISOtech'
    'psi': u'\u03C8', # greek small letter psi, U+03C8 ISOgrk3'
    'quot': u'\u0022', # quotation mark = APL quote, U+0022 ISOnum'
    'rArr': u'\u21D2', # rightwards double arrow, U+21D2 ISOtech'
    'radic': u'\u221A', # square root = radical sign, U+221A ISOtech'
    'rang': u'\u232A', # right-pointing angle bracket = ket, U+232A ISOtech'
    'raquo': u'\u00BB', # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum'
    'rarr': u'\u2192', # rightwards arrow, U+2192 ISOnum'
    'rceil': u'\u2309', # right ceiling, U+2309 ISOamsc'
    'rdquo': u'\u201D', # right double quotation mark, U+201D ISOnum'
    'real': u'\u211C', # blackletter capital R = real part symbol, U+211C ISOamso'
    'reg': u'\u00AE', # registered sign = registered trade mark sign, U+00AE ISOnum'
    'rfloor': u'\u230B', # right floor, U+230B ISOamsc'
    'rho': u'\u03C1', # greek small letter rho, U+03C1 ISOgrk3'
    'rlm': u'\u200F', # right-to-left mark, U+200F NEW RFC 2070'
    'rsaquo': u'\u203A', # single right-pointing angle quotation mark, U+203A ISO proposed'
    'rsquo': u'\u2019', # right single quotation mark, U+2019 ISOnum'
    'sbquo': u'\u201A', # single low-9 quotation mark, U+201A NEW'
    'scaron': u'\u0161', # latin small letter s with caron, U+0161 ISOlat2'
    'sdot': u'\u22C5', # dot operator, U+22C5 ISOamsb'
    'sect': u'\u00A7', # section sign, U+00A7 ISOnum'
    'shy': u'\u00AD', # soft hyphen = discretionary hyphen, U+00AD ISOnum'
    'sigma': u'\u03C3', # greek small letter sigma, U+03C3 ISOgrk3'
    'sigmaf': u'\u03C2', # greek small letter final sigma, U+03C2 ISOgrk3'
    'sim': u'\u223C', # tilde operator = varies with = similar to, U+223C ISOtech'
    'spades': u'\u2660', # black spade suit, U+2660 ISOpub'
    'sub': u'\u2282', # subset of, U+2282 ISOtech'
    'sube': u'\u2286', # subset of or equal to, U+2286 ISOtech'
    'sum': u'\u2211', # n-ary sumation, U+2211 ISOamsb'
    'sup': u'\u2283', # superset of, U+2283 ISOtech'
    'sup1': u'\u00B9', # superscript one = superscript digit one, U+00B9 ISOnum'
    'sup2': u'\u00B2', # superscript two = superscript digit two = squared, U+00B2 ISOnum'
    'sup3': u'\u00B3', # superscript three = superscript digit three = cubed, U+00B3 ISOnum'
    'supe': u'\u2287', # superset of or equal to, U+2287 ISOtech'
    'szlig': u'\u00DF', # latin small letter sharp s = ess-zed, U+00DF ISOlat1'
    'tau': u'\u03C4', # greek small letter tau, U+03C4 ISOgrk3'
    'there4': u'\u2234', # therefore, U+2234 ISOtech'
    'theta': u'\u03B8', # greek small letter theta, U+03B8 ISOgrk3'
    'thetasym': u'\u03D1', # greek small letter theta symbol, U+03D1 NEW'
    'thinsp': u'\u2009', # thin space, U+2009 ISOpub'
    'thorn': u'\u00FE', # latin small letter thorn with, U+00FE ISOlat1'
    'tilde': u'\u02DC', # small tilde, U+02DC ISOdia'
    'times': u'\u00D7', # multiplication sign, U+00D7 ISOnum'
    'trade': u'\u2122', # trade mark sign, U+2122 ISOnum'
    'uArr': u'\u21D1', # upwards double arrow, U+21D1 ISOamsa'
    'uacute': u'\u00FA', # latin small letter u with acute, U+00FA ISOlat1'
    'uarr': u'\u2191', # upwards arrow, U+2191 ISOnum'
    'ucirc': u'\u00FB', # latin small letter u with circumflex, U+00FB ISOlat1'
    'ugrave': u'\u00F9', # latin small letter u with grave, U+00F9 ISOlat1'
    'uml': u'\u00A8', # diaeresis = spacing diaeresis, U+00A8 ISOdia'
    'upsih': u'\u03D2', # greek upsilon with hook symbol, U+03D2 NEW'
    'upsilon': u'\u03C5', # greek small letter upsilon, U+03C5 ISOgrk3'
    'uuml': u'\u00FC', # latin small letter u with diaeresis, U+00FC ISOlat1'
    'weierp': u'\u2118', # script capital P = power set = Weierstrass p, U+2118 ISOamso'
    'xi': u'\u03BE', # greek small letter xi, U+03BE ISOgrk3'
    'yacute': u'\u00FD', # latin small letter y with acute, U+00FD ISOlat1'
    'yen': u'\u00A5', # yen sign = yuan sign, U+00A5 ISOnum'
    'yuml': u'\u00FF', # latin small letter y with diaeresis, U+00FF ISOlat1'
    'zeta': u'\u03B6', # greek small letter zeta, U+03B6 ISOgrk3'
    'zwj': u'\u200D', # zero width joiner, U+200D NEW RFC 2070'
    'zwnj': u'\u200C' # zero width non-joiner, U+200C NEW RFC 2070'
}
# Reserved/unsafe URL characters mapped to their percent-encoded form,
# for building URLs by hand.
entitydefs2 = {
    '$': '%24',
    '&': '%26',
    '+': '%2B',
    ',': '%2C',
    '/': '%2F',
    ':': '%3A',
    ';': '%3B',
    '=': '%3D',
    '?': '%3F',
    '@': '%40',
    ' ': '%20',
    '"': '%22',
    '<': '%3C',
    '>': '%3E',
    '#': '%23',
    '%': '%25',
    '{': '%7B',
    '}': '%7D',
    '|': '%7C',
    '\\': '%5C',
    '^': '%5E',
    '~': '%7E',
    '[': '%5B',
    ']': '%5D',
    '`': '%60'
}
# Groups of accented latin characters mapped to their closest ASCII
# replacement; consumed by limpia_nombre_sin_acentos() to strip accents.
entitydefs3 = {
    u'ÂÁÀÄÃÅ': u'A',
    u'âáàäãå': u'a',
    u'ÔÓÒÖÕ': u'O',
    u'ôóòöõðø': u'o',
    u'ÛÚÙÜ': u'U',
    u'ûúùüµ': u'u',
    u'ÊÉÈË': u'E',
    u'êéèë': u'e',
    u'ÎÍÌÏ': u'I',
    u'îìíï': u'i',
    u'ñ': u'n',
    u'ß': u'B',
    u'÷': u'%',
    u'ç': u'c',
    u'æ': u'ae'
}
def limpia_nombre_caracteres_especiales(s):
    """Strip characters that are not allowed in file names.

    Removes \\ / : * ? " < > | from *s*; falsy input yields ''.
    """
    if not s:
        return ''
    forbidden = set('\\/:*?"<>|')
    return ''.join(ch for ch in s if ch not in forbidden)
def limpia_nombre_sin_acentos(s):
    """Replace accented latin characters in *s* with their ASCII equivalent.

    Uses the module-level entitydefs3 table (group of accented chars ->
    replacement string); falsy input yields ''.
    """
    if not s:
        return ''
    for accented_group, replacement in entitydefs3.iteritems():
        for accented_char in accented_group:
            s = s.replace(accented_char, replacement)
    return s
def limpia_nombre_excepto_1(s):
    """Sanitize *s* for use as an xbox file name.

    Decodes the text to unicode (trying utf-8 first, then iso-8859-1),
    strips accents, keeps only characters from a whitelist of safe ASCII
    characters and returns the result encoded back as iso-8859-1 bytes.
    Falsy input yields ''.
    """
    if not s:
        return ''
    # Incoming title
    '''
    try:
        logger.info("s1="+urllib.quote_plus(s))
    except:
        logger.info("s1=no printable")
    '''
    # Convert to unicode (utf-8 first, iso-8859-1 as fallback)
    try:
        s = unicode( s, "utf-8" )
    except:
        #logger.info("no es utf-8")
        try:
            s = unicode( s, "iso-8859-1" )
        except:
            #logger.info("no es iso-8859-1")
            pass
    '''
    try:
        logger.info("s2="+urllib.quote_plus(s))
    except:
        logger.info("s2=no printable")
    '''
    # Strip accents
    s = limpia_nombre_sin_acentos(s)
    '''
    try:
        logger.info("s3="+urllib.quote_plus(s))
    except:
        logger.info("s3=no printable")
    '''
    # Drop forbidden characters (anything outside the whitelist)
    validchars = " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!#$%&'()-@[]^_`{}~."
    stripped = ''.join(c for c in s if c in validchars)
    '''
    try:
        logger.info("s4="+urllib.quote_plus(stripped))
    except:
        logger.info("s4=no printable")
    '''
    # Re-encode as iso-8859-1 bytes
    s = stripped.encode("iso-8859-1")
    '''
    try:
        logger.info("s5="+urllib.quote_plus(s))
    except:
        logger.info("s5=no printable")
    '''
    return s;
def limpia_nombre_excepto_2(s):
    """Keep only ASCII letters, digits and dots from *s*; falsy input yields ''."""
    if not s:
        return ''
    allowed = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890.")
    return ''.join(ch for ch in s if ch in allowed)
def getfilefromtitle(url,title):
    """Build the full local path where *url* should be downloaded.

    The file name is *title* plus the extension taken from the URL,
    sanitized for the current platform, joined to the configured
    download directory ("downloadpath" setting).
    """
    # Log what is about to be processed
    logger.info("pelisalacarta.core.downloadtools getfilefromtitle: title="+title )
    logger.info("pelisalacarta.core.downloadtools getfilefromtitle: url="+url )
    #logger.info("pelisalacarta.core.downloadtools downloadtitle: title="+urllib.quote_plus( title ))
    plataforma = config.get_system_platform();
    logger.info("pelisalacarta.core.downloadtools getfilefromtitle: plataforma="+plataforma)
    #nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
    import scrapertools
    # xbox file names are length-limited, so truncate and sanitize harder there
    if plataforma=="xbox":
        nombrefichero = title[:38] + scrapertools.get_filename_from_url(url)[-4:]
        nombrefichero = limpia_nombre_excepto_1(nombrefichero)
    else:
        nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
        logger.info("pelisalacarta.core.downloadtools getfilefromtitle: nombrefichero=%s" % nombrefichero)
        # These hosters always serve FLV regardless of the URL extension
        if "videobb" in url or "videozer" in url or "putlocker" in url:
            nombrefichero = title + ".flv"
        # videobam: take up to 3 chars of the real extension from the URL
        if "videobam" in url:
            nombrefichero = title+"."+url.rsplit(".",1)[1][0:3]
        logger.info("pelisalacarta.core.downloadtools getfilefromtitle: nombrefichero=%s" % nombrefichero)
    # Remove characters that are not valid in file names
    nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)
    logger.info("pelisalacarta.core.downloadtools getfilefromtitle: nombrefichero=%s" % nombrefichero)
    fullpath = os.path.join( config.get_setting("downloadpath") , nombrefichero )
    logger.info("pelisalacarta.core.downloadtools getfilefromtitle: fullpath=%s" % fullpath)
    # Resolve Kodi virtual paths ("special://...") to a real filesystem path
    if config.is_xbmc() and fullpath.startswith("special://"):
        import xbmc
        fullpath = xbmc.translatePath(fullpath)
    return fullpath
def downloadtitle(url, title):
    """Download *url* into the configured download folder, naming the file after *title*."""
    return downloadfile(url, getfilefromtitle(url, title))
def downloadbest(video_urls,title,continuar=False):
    """Try to download *title* starting with the best-quality mirror.

    video_urls is a list of (quality_label, url) pairs ordered worst-first;
    it is reversed so the highest quality is tried first.  Returns 0 on the
    first successful (non-empty) download, -1 if the user cancelled,
    -2 if every option failed.
    """
    logger.info("pelisalacarta.core.downloadtools downloadbest")
    # Reverse the list so the best quality comes first (list() makes a copy)
    invertida = list(video_urls)
    invertida.reverse()
    for elemento in invertida:
        # elemento is (quality_label, url)
        videotitle = elemento[0]
        url = elemento[1]
        logger.info("pelisalacarta.core.downloadtools Descargando opción "+title+" "+url.encode('ascii','ignore'))
        # Work out the destination file name
        try:
            fullpath = getfilefromtitle(url,title.strip())
        # If this fails the URL is useless: move on to the next option
        except:
            import traceback
            logger.info(traceback.format_exc())
            continue
        # Download
        try:
            ret = downloadfile(url,fullpath,continuar=continuar)
        # Reaching this point usually means a timeout
        except urllib2.URLError, e:
            import traceback
            logger.info(traceback.format_exc())
            ret = -2
        # The user cancelled the download
        if ret==-1:
            return -1
        else:
            # The file does not even exist: try the next option
            if not os.path.exists(fullpath):
                logger.info("[downoadtools] -> No ha descargado nada, probando con la siguiente opción si existe")
            # The file exists
            else:
                tamanyo = os.path.getsize(fullpath)
                # Zero bytes: discard it and try the next option
                if tamanyo==0:
                    logger.info("[downoadtools] -> Descargado un fichero con tamaño 0, probando con la siguiente opción si existe")
                    os.remove(fullpath)
                # Non-empty file: accept it as a good download
                else:
                    logger.info("[downoadtools] -> Descargado un fichero con tamaño %d, lo da por bueno" % tamanyo)
                    return 0
    return -2
def downloadfile(url,nombrefichero,headers=[],silent=False,continuar=False):
    """Download *url* to the local path *nombrefichero* with a progress dialog.

    headers   : extra [name, value] header pairs; more headers may be piped
                into the URL itself after a '|' separator ("url|h1=v1&h2=v2").
    silent    : when True no progress dialog is shown.
    continuar : when True and the target file already exists, resume the
                download with an HTTP Range request.
    Returns 0 when the requested range was already complete (HTTP 416),
    -1 if the user cancelled, -2 on download errors, None otherwise.
    NOTE(review): the mutable default headers=[] is shared across calls and
    is appended to when the URL carries piped headers — pre-existing quirk.
    """
    logger.info("pelisalacarta.core.downloadtools downloadfile: url="+url)
    logger.info("pelisalacarta.core.downloadtools downloadfile: nombrefichero="+nombrefichero)
    # Resolve Kodi virtual paths ("special://...") to a real filesystem path
    if config.is_xbmc() and nombrefichero.startswith("special://"):
        import xbmc
        nombrefichero = xbmc.translatePath(nombrefichero)
    try:
        # Outside XBMC this always behaves as "silent"
        from platformcode import platformtools
        # before
        #f=open(nombrefichero,"wb")
        try:
            import xbmc
            nombrefichero = xbmc.makeLegalFilename(nombrefichero)
        except:
            pass
        logger.info("pelisalacarta.core.downloadtools downloadfile: nombrefichero="+nombrefichero)
        # The file exists and a resume was requested: reopen and seek to the end
        if os.path.exists(nombrefichero) and continuar:
            #try:
            #    import xbmcvfs
            #    f = xbmcvfs.File(nombrefichero)
            #    existSize = f.size(nombrefichero)
            #except:
            f = open(nombrefichero, 'r+b')
            existSize = os.path.getsize(nombrefichero)
            logger.info("pelisalacarta.core.downloadtools downloadfile: el fichero existe, size=%d" % existSize)
            grabado = existSize
            f.seek(existSize)
        # The file already exists and no resume was requested: abort
        elif os.path.exists(nombrefichero) and not continuar:
            logger.info("pelisalacarta.core.downloadtools downloadfile: el fichero existe, no se descarga de nuevo")
            return
        # The file does not exist: start from scratch
        else:
            existSize = 0
            logger.info("pelisalacarta.core.downloadtools downloadfile: el fichero no existe")
            #try:
            #    import xbmcvfs
            #    f = xbmcvfs.File(nombrefichero,"w")
            #except:
            f = open(nombrefichero, 'wb')
            grabado = 0
        # Create the progress dialog
        if not silent:
            progreso = platformtools.dialog_progress( "plugin" , "Descargando..." , url , nombrefichero )
            # If the platform does not return a valid dialog, assume silent mode
            if progreso is None:
                silent = True
        # Extra request headers may be piped into the URL after a '|'
        if "|" in url:
            additional_headers = url.split("|")[1]
            if "&" in additional_headers:
                additional_headers = additional_headers.split("&")
            else:
                additional_headers = [ additional_headers ]
            for additional_header in additional_headers:
                logger.info("pelisalacarta.core.downloadtools additional_header: "+additional_header)
                name = re.findall( "(.*?)=.*?" , additional_header )[0]
                value = urllib.unquote_plus(re.findall( ".*?=(.*?)$" , additional_header )[0])
                headers.append( [ name,value ] )
            url = url.split("|")[0]
            logger.info("pelisalacarta.core.downloadtools downloadfile: url="+url)
        # Socket timeout: 60 seconds
        socket.setdefaulttimeout(60)
        h=urllib2.HTTPHandler(debuglevel=0)
        request = urllib2.Request(url)
        for header in headers:
            logger.info("pelisalacarta.core.downloadtools Header="+header[0]+": "+header[1])
            request.add_header(header[0],header[1])
        # When resuming, request only the missing byte range
        if existSize > 0:
            request.add_header('Range', 'bytes=%d-' % (existSize, ))
        opener = urllib2.build_opener(h)
        urllib2.install_opener(opener)
        try:
            connexion = opener.open(request)
        except urllib2.HTTPError,e:
            logger.info("pelisalacarta.core.downloadtools downloadfile: error %d (%s) al abrir la url %s" % (e.code,e.msg,url))
            #print e.code
            #print e.msg
            #print e.hdrs
            #print e.fp
            f.close()
            if not silent:
                progreso.close()
            # Error 416 means the requested range is past the end of the file,
            # i.e. the file is already complete
            if e.code==416:
                return 0
            else:
                return -2
        try:
            totalfichero = int(connexion.headers["Content-Length"])
        except:
            totalfichero = 1
        if existSize > 0:
            totalfichero = totalfichero + existSize
        logger.info("Content-Length=%s" % totalfichero)
        blocksize = 100*1024
        bloqueleido = connexion.read(blocksize)
        logger.info("Iniciando descarga del fichero, bloqueleido=%s" % len(bloqueleido))
        maxreintentos = 10
        while len(bloqueleido)>0:
            try:
                # Write the block that was just read
                #try:
                #    import xbmcvfs
                #    f.write( bloqueleido )
                #except:
                f.write(bloqueleido)
                grabado = grabado + len(bloqueleido)
                percent = int(float(grabado)*100/float(totalfichero))
                totalmb = float(float(totalfichero)/(1024*1024))
                descargadosmb = float(float(grabado)/(1024*1024))
                # Read the next block, retrying so a single timeout does not stop everything
                reintentos = 0
                while reintentos <= maxreintentos:
                    try:
                        before = time.time()
                        bloqueleido = connexion.read(blocksize)
                        after = time.time()
                        if (after - before) > 0:
                            velocidad=len(bloqueleido)/((after - before))
                            falta=totalfichero-grabado
                            if velocidad>0:
                                tiempofalta=falta/velocidad
                            else:
                                tiempofalta=0
                            #logger.info(sec_to_hms(tiempofalta))
                            if not silent:
                                #progreso.update( percent , "Descargando %.2fMB de %.2fMB (%d%%)" % ( descargadosmb , totalmb , percent),"Falta %s - Velocidad %.2f Kb/s" % ( sec_to_hms(tiempofalta) , velocidad/1024 ), os.path.basename(nombrefichero) )
                                progreso.update( percent , "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s falta " % ( descargadosmb , totalmb , percent , velocidad/1024 , sec_to_hms(tiempofalta)))
                        break
                    except:
                        reintentos = reintentos + 1
                        logger.info("ERROR en la descarga del bloque, reintento %d" % reintentos)
                        import traceback
                        logger.error( traceback.print_exc() )
                        # The user cancelled the download
                        try:
                            if progreso.iscanceled():
                                logger.info("Descarga del fichero cancelada")
                                f.close()
                                progreso.close()
                                return -1
                        except:
                            pass
                # All retries exhausted: give up on this download
                if reintentos > maxreintentos:
                    logger.info("ERROR en la descarga del fichero")
                    f.close()
                    if not silent:
                        progreso.close()
                    return -2
            except:
                import traceback
                logger.error( traceback.print_exc() )
                f.close()
                if not silent:
                    progreso.close()
                #platformtools.dialog_ok('Error al descargar' , 'Se ha producido un error' , 'al descargar el archivo')
                return -2
    except:
        # RTMP streams cannot be downloaded; anything else is logged in full
        if url.startswith("rtmp") and not silent:
            from platformcode import platformtools
            advertencia = platformtools.dialog_ok( "No puedes descargar ese vídeo","Las descargas en RTMP aún no","están soportadas")
        else:
            import traceback,sys
            from pprint import pprint
            exc_type, exc_value, exc_tb = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_tb)
            for line in lines:
                line_splits = line.split("\n")
                for line_split in line_splits:
                    logger.error(line_split)
    # Best-effort cleanup: close the file and the dialog if they were opened
    try:
        f.close()
    except:
        pass
    if not silent:
        try:
            progreso.close()
        except:
            pass
    logger.info("Fin descarga del fichero")
def downloadfileGzipped(url,pathfichero):
    """Download a gzip-compressed *url*, decompressing block by block.

    pathfichero is the destination path; when it has no base name, the name
    is taken from the Content-Disposition response header (or a placeholder).
    Returns the final file path on success, 0 for HTTP 416 (already complete),
    -1 if the user cancelled, -2 on errors.  XBMC/Kodi only (uses xbmc).
    """
    logger.info("pelisalacarta.core.downloadtools downloadfileGzipped: url="+url)
    nombrefichero = pathfichero
    logger.info("pelisalacarta.core.downloadtools downloadfileGzipped: nombrefichero="+nombrefichero)
    import xbmc
    nombrefichero = xbmc.makeLegalFilename(nombrefichero)
    logger.info("pelisalacarta.core.downloadtools downloadfileGzipped: nombrefichero="+nombrefichero)
    # Use the scheme+host of the URL as Referer
    patron = "(http://[^/]+)/.+"
    matches = re.compile(patron,re.DOTALL).findall(url)
    if len(matches):
        logger.info("pelisalacarta.core.downloadtools URL principal :"+matches[0])
        url1= matches[0]
    else:
        url1 = url
    # Browser-like request headers; gzip is explicitly accepted
    txheaders = {'User-Agent':'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)',
                 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                 'Accept-Language':'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3',
                 'Accept-Encoding':'gzip,deflate',
                 'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
                 'Keep-Alive':'115',
                 'Connection':'keep-alive',
                 'Referer':url1,
                 }
    txdata = ""
    # Create the progress dialog
    from platformcode import platformtools
    progreso = platformtools.dialog_progress( "addon" , "Descargando..." , url.split("|")[0] , nombrefichero )
    # Socket timeout (note: 10 seconds, not 60 as the download variant uses)
    socket.setdefaulttimeout(10)
    h=urllib2.HTTPHandler(debuglevel=0)
    request = urllib2.Request(url, txdata, txheaders)
    #if existSize > 0:
    #    request.add_header('Range', 'bytes=%d-' % (existSize, ))
    opener = urllib2.build_opener(h)
    urllib2.install_opener(opener)
    try:
        connexion = opener.open(request)
    except urllib2.HTTPError,e:
        logger.info("pelisalacarta.core.downloadtools downloadfile: error %d (%s) al abrir la url %s" % (e.code,e.msg,url))
        #print e.code
        #print e.msg
        #print e.hdrs
        #print e.fp
        progreso.close()
        # Error 416 means the requested range is past the end of the file,
        # i.e. the file is already complete
        if e.code==416:
            return 0
        else:
            return -2
    # No base name in the path: take it from the response headers
    nombreficheroBase = os.path.basename(nombrefichero)
    if len(nombreficheroBase) == 0:
        logger.info("Buscando nombre en el Headers de respuesta")
        nombreBase = connexion.headers["Content-Disposition"]
        logger.info(nombreBase)
        patron = 'filename="([^"]+)"'
        matches = re.compile(patron,re.DOTALL).findall(nombreBase)
        if len(matches)>0:
            titulo = matches[0]
            titulo = GetTitleFromFile(titulo)
            nombrefichero = os.path.join(pathfichero,titulo)
        else:
            logger.info("Nombre del fichero no encontrado, Colocando nombre temporal :sin_nombre.txt")
            titulo = "sin_nombre.txt"
            nombrefichero = os.path.join(pathfichero,titulo)
    totalfichero = int(connexion.headers["Content-Length"])
    # after
    f = open(nombrefichero, 'w')
    existSize = 0
    logger.info("pelisalacarta.core.downloadtools downloadfileGzipped: fichero nuevo abierto")
    #if existSize > 0:
    #    totalfichero = totalfichero + existSize
    grabado = 0
    logger.info("Content-Length=%s" % totalfichero)
    blocksize = 100*1024
    bloqueleido = connexion.read(blocksize)
    # Decompress the first block; a failure here means the payload is not gzip
    try:
        import StringIO
        compressedstream = StringIO.StringIO(bloqueleido)
        import gzip
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        bloquedata = gzipper.read()
        gzipper.close()
        xbmc.log("Iniciando descarga del fichero, bloqueleido=%s" % len(bloqueleido))
    except:
        xbmc.log( "ERROR : El archivo a descargar no esta comprimido con Gzip")
        f.close()
        progreso.close()
        return -2
    maxreintentos = 10
    while len(bloqueleido)>0:
        try:
            # Write the decompressed block
            f.write(bloquedata)
            # NOTE(review): progress is tracked on the compressed size, so
            # percentages refer to downloaded (not decompressed) bytes
            grabado = grabado + len(bloqueleido)
            percent = int(float(grabado)*100/float(totalfichero))
            totalmb = float(float(totalfichero)/(1024*1024))
            descargadosmb = float(float(grabado)/(1024*1024))
            # Read the next block, retrying so a single timeout does not stop everything
            reintentos = 0
            while reintentos <= maxreintentos:
                try:
                    before = time.time()
                    bloqueleido = connexion.read(blocksize)
                    import gzip
                    import StringIO
                    compressedstream = StringIO.StringIO(bloqueleido)
                    gzipper = gzip.GzipFile(fileobj=compressedstream)
                    bloquedata = gzipper.read()
                    gzipper.close()
                    after = time.time()
                    if (after - before) > 0:
                        velocidad=len(bloqueleido)/((after - before))
                        falta=totalfichero-grabado
                        if velocidad>0:
                            tiempofalta=falta/velocidad
                        else:
                            tiempofalta=0
                        logger.info(sec_to_hms(tiempofalta))
                        progreso.update( percent , "%.2fMB/%.2fMB (%d%%) %.2f Kb/s %s falta " % ( descargadosmb , totalmb , percent , velocidad/1024 , sec_to_hms(tiempofalta)))
                    break
                except:
                    reintentos = reintentos + 1
                    logger.info("ERROR en la descarga del bloque, reintento %d" % reintentos)
                    for line in sys.exc_info():
                        logger.error( "%s" % line )
            # The user cancelled the download
            if progreso.iscanceled():
                logger.info("Descarga del fichero cancelada")
                f.close()
                progreso.close()
                return -1
            # All retries exhausted: give up
            if reintentos > maxreintentos:
                logger.info("ERROR en la descarga del fichero")
                f.close()
                progreso.close()
                return -2
        except:
            logger.info("ERROR en la descarga del fichero")
            for line in sys.exc_info():
                logger.error( "%s" % line )
            f.close()
            progreso.close()
            return -2
    f.close()
    #print data
    progreso.close()
    logger.info("Fin descarga del fichero")
    return nombrefichero
def GetTitleFromFile(title):
    """Return a file name derived from *title*, adapted to the platform.

    On the Xbox the name is shortened to the first 38 characters plus the
    last four (the extension) and run through the sanitizer; on every other
    platform the title is returned unchanged.
    """
    logger.info("pelisalacarta.core.downloadtools GetTitleFromFile: titulo="+title)
    platform_name = config.get_system_platform()
    logger.info("pelisalacarta.core.downloadtools GetTitleFromFile: plataforma="+platform_name)
    if platform_name == "xbox":
        # Xbox file systems reject long/odd names: truncate and sanitize.
        return limpia_nombre_excepto_1(title[:38] + title[-4:])
    return title
def sec_to_hms(seconds):
    """Format a duration given in seconds as an "HH:MM:SS" string."""
    total = int(seconds)
    hours, remainder = divmod(total, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def downloadIfNotModifiedSince(url,timestamp):
    """Fetch url only if it was modified after timestamp.

    Issues a GET with an If-Modified-Since header and returns the tuple
    (updated, data): updated is True with the body in data when the server
    sent fresh content, and False with data == "" on a 304 response or any
    other URLError.
    """
    logger.info("pelisalacarta.core.downloadtools downloadIfNotModifiedSince("+url+","+time.ctime(timestamp)+")")
    # Convert the timestamp to a GMT date string for the HTTP header
    fechaFormateada = time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(timestamp))
    logger.info("fechaFormateada=%s" % fechaFormateada)
    # Check whether the resource changed
    # NOTE(review): time.clock() measures CPU time on Unix, not wall time,
    # so the duration logged below is approximate at best
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('If-Modified-Since', fechaFormateada)
    req.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12')
    updated = False
    try:
        response = urllib2.urlopen(req)
        data = response.read()
        #info = response.info()
        #logger.info( info.headers )
        # Reaching this point means the resource has changed
        updated = True
        response.close()
    except urllib2.URLError,e:
        # A 304 status means the resource has not changed
        if hasattr(e,'code'):
            logger.info("Codigo de respuesta HTTP : %d" %e.code)
            if e.code == 304:
                logger.info("No ha cambiado")
                updated = False
        # No HTTP status code available: just log the raw exception info
        else:
            for line in sys.exc_info():
                logger.error( "%s" % line )
        data=""
    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin-inicio+1))
    return updated,data
def download_all_episodes(item,channel,first_episode="",preferred_server="vidspot",filter_language=""):
    """Download every episode of a show, trying mirrors in preference order.

    item             -- series item; item.show is the show title and
                        item.extra names the channel action that lists the
                        episodes (optionally "action###payload").
    channel          -- channel module providing the listing and, when
                        available, findvideos()/play().
    first_episode    -- "SSxEE" tag; when given, episodes sorted before it
                        are skipped.
    preferred_server -- mirrors hosted on this server are tried first.
    filter_language  -- when set ("es", "lat", "vos", "vo"), only mirrors
                        in that language are attempted.
    """
    logger.info("pelisalacarta.core.downloadtools download_all_episodes, show="+item.show)
    show_title = item.show
    # Recover the listing action this was called from
    action = item.extra
    # The "###" marker means item.extra carries a payload besides the action
    if "###" in item.extra:
        action = item.extra.split("###")[0]
        item.extra = item.extra.split("###")[1]
    episode_itemlist = getattr(channel, action)(item)
    # Sort the episodes so the first_episode filter works
    episode_itemlist = sorted(episode_itemlist, key=lambda Item: Item.title)
    from core import servertools
    from core import downloadtools
    from core import scrapertools
    best_server = preferred_server
    # NOTE(review): worst_server is assigned but never used below
    worst_server = "moevideos"
    # Start downloading immediately unless a starting episode was requested
    if first_episode=="":
        empezar = True
    else:
        empezar = False
    # For each episode
    for episode_item in episode_itemlist:
        try:
            logger.info("pelisalacarta.core.downloadtools download_all_episodes, episode="+episode_item.title)
            # Episode titles must carry a "SSxEE" tag; otherwise skip them
            episode_title = scrapertools.get_match(episode_item.title,"(\d+x\d+)")
            logger.info("pelisalacarta.core.downloadtools download_all_episodes, episode="+episode_title)
        except:
            import traceback
            logger.info(traceback.format_exc())
            continue
        if first_episode!="" and episode_title==first_episode:
            empezar = True
        # Skip episodes that already exist on disk
        if episodio_ya_descargado(show_title,episode_title):
            continue
        if not empezar:
            continue
        # Extract the mirrors
        try:
            mirrors_itemlist = channel.findvideos(episode_item)
        except:
            mirrors_itemlist = servertools.find_video_items(episode_item)
        print mirrors_itemlist
        descargado = False
        # Buckets used to order mirrors: Spanish first, then Latino/VOS,
        # then everything else; inside each language the preferred server
        # goes before the rest (odd bucket = preferred, even = others)
        new_mirror_itemlist_1 = []
        new_mirror_itemlist_2 = []
        new_mirror_itemlist_3 = []
        new_mirror_itemlist_4 = []
        new_mirror_itemlist_5 = []
        new_mirror_itemlist_6 = []
        for mirror_item in mirrors_itemlist:
            # Spanish mirrors go to the front, everything else to the back
            if "(Español)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_1.append(mirror_item)
                else:
                    new_mirror_itemlist_2.append(mirror_item)
            elif "(Latino)" in mirror_item.title:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_3.append(mirror_item)
                else:
                    new_mirror_itemlist_4.append(mirror_item)
            elif "(VOS)" in mirror_item.title:
                # VOS shares the Latino buckets (same priority tier)
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_3.append(mirror_item)
                else:
                    new_mirror_itemlist_4.append(mirror_item)
            else:
                if best_server in mirror_item.title.lower():
                    new_mirror_itemlist_5.append(mirror_item)
                else:
                    new_mirror_itemlist_6.append(mirror_item)
        mirrors_itemlist = new_mirror_itemlist_1 + new_mirror_itemlist_2 + new_mirror_itemlist_3 + new_mirror_itemlist_4 + new_mirror_itemlist_5 + new_mirror_itemlist_6
        for mirror_item in mirrors_itemlist:
            logger.info("pelisalacarta.core.downloadtools download_all_episodes, mirror="+mirror_item.title)
            # Derive the language tag and its short code from the title
            if "(Español)" in mirror_item.title:
                idioma="(Español)"
                codigo_idioma="es"
            elif "(Latino)" in mirror_item.title:
                idioma="(Latino)"
                codigo_idioma="lat"
            elif "(VOS)" in mirror_item.title:
                idioma="(VOS)"
                codigo_idioma="vos"
            elif "(VO)" in mirror_item.title:
                idioma="(VO)"
                codigo_idioma="vo"
            else:
                idioma="(Desconocido)"
                codigo_idioma="desconocido"
            logger.info("pelisalacarta.core.downloadtools filter_language=#"+filter_language+"#, codigo_idioma=#"+codigo_idioma+"#")
            # Apply the language filter (empty filter accepts everything)
            if filter_language=="" or (filter_language!="" and filter_language==codigo_idioma):
                logger.info("pelisalacarta.core.downloadtools download_all_episodes, downloading mirror")
            else:
                logger.info("pelisalacarta.core.downloadtools language "+codigo_idioma+" filtered, skipping")
                continue
            # Resolve the playable item through the channel when it can
            if hasattr(channel, 'play'):
                video_items = channel.play(mirror_item)
            else:
                video_items = [mirror_item]
            if len(video_items)>0:
                video_item = video_items[0]
                # Check that the video is actually available
                video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing( video_item.server , video_item.url , video_password="" , muestra_dialogo=False)
                # Add it to the download list
                if puedes:
                    logger.info("pelisalacarta.core.downloadtools download_all_episodes, downloading mirror started...")
                    # The highest-quality video is the last one
                    # NOTE(review): mediaurl is assigned but never used
                    mediaurl = video_urls[len(video_urls)-1][1]
                    devuelve = downloadtools.downloadbest(video_urls,show_title+" "+episode_title+" "+idioma+" ["+video_item.server+"]",continuar=False)
                    if devuelve==0:
                        logger.info("pelisalacarta.core.downloadtools download_all_episodes, download ok")
                        descargado = True
                        break
                    elif devuelve==-1:
                        # User cancelled: notify (best effort) and stop everything
                        try:
                            import xbmcgui
                            advertencia = xbmcgui.Dialog()
                            resultado = advertencia.ok("plugin" , "Descarga abortada")
                        except:
                            pass
                        return
                    else:
                        logger.info("pelisalacarta.core.downloadtools download_all_episodes, download error, try another mirror")
                        continue
                else:
                    logger.info("pelisalacarta.core.downloadtools download_all_episodes, downloading mirror not available... trying next")
        if not descargado:
            logger.info("pelisalacarta.core.downloadtools download_all_episodes, EPISODIO NO DESCARGADO "+episode_title)
def episodio_ya_descargado(show_title, episode_title):
    """Return True when a file for this show/episode already exists in CWD.

    A match is a file whose name starts with the show title (case
    insensitive) and whose "SSxEE" tag equals episode_title.
    """
    import scrapertools
    prefix = show_title.lower()
    for name in os.listdir("."):
        if not name.lower().startswith(prefix):
            continue
        if scrapertools.find_single_match(name, "(\d+x\d+)") == episode_title:
            logger.info("encontrado!")
            return True
    return False
| gpl-2.0 |
st135yle/django-site | dbenv/lib/python3.4/site-packages/django/shortcuts.py | 117 | 5429 | """
This module collects helper functions and classes that "span" multiple levels
of MVC. In other words, these functions/classes introduce controlled coupling
for convenience's sake.
"""
from django.http import (
Http404, HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.template import loader
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import Promise
def render_to_response(template_name, context=None, content_type=None, status=None, using=None):
    """
    Render ``template_name`` with ``context`` (no request bound) via
    django.template.loader.render_to_string() and wrap the resulting
    string in an HttpResponse.
    """
    body = loader.render_to_string(template_name, context, using=using)
    return HttpResponse(body, content_type, status)
def render(request, template_name, context=None, content_type=None, status=None, using=None):
    """
    Render ``template_name`` with ``context`` bound to ``request`` via
    django.template.loader.render_to_string() and wrap the resulting
    string in an HttpResponse.
    """
    body = loader.render_to_string(template_name, context, request, using=using)
    return HttpResponse(body, content_type, status)
def redirect(to, *args, **kwargs):
    """
    Return an HttpResponseRedirect (or its permanent variant) pointing at
    the URL resolved from the arguments.

    ``to`` may be a model (its ``get_absolute_url()`` is used), a view
    name with optional arguments (resolved through ``urls.reverse()``),
    or a literal URL used as-is.

    A temporary (302) redirect is issued by default; pass
    ``permanent=True`` for a permanent (301) one.
    """
    permanent = kwargs.pop('permanent', False)
    redirect_class = HttpResponsePermanentRedirect if permanent else HttpResponseRedirect
    return redirect_class(resolve_url(to, *args, **kwargs))
def _get_queryset(klass):
"""
Return a QuerySet or a Manager.
Duck typing in action: any class with a `get()` method (for
get_object_or_404) or a `filter()` method (for get_list_or_404) might do
the job.
"""
# If it is a model class or anything else with ._default_manager
if hasattr(klass, '_default_manager'):
return klass._default_manager.all()
return klass
def get_object_or_404(klass, *args, **kwargs):
    """
    Call get() on the queryset derived from ``klass`` and return the
    matching object, raising Http404 when nothing matches.

    ``klass`` may be a Model, Manager, or QuerySet object; all other
    positional and keyword arguments are forwarded to the get() query.

    Note: as with get(), MultipleObjectsReturned is raised if more than
    one object is found.
    """
    # Expand model classes / managers into a queryset (duck-typed:
    # anything with a get() method works).
    if hasattr(klass, '_default_manager'):
        queryset = klass._default_manager.all()
    else:
        queryset = klass
    try:
        return queryset.get(*args, **kwargs)
    except AttributeError:
        # No get() method at all: the caller passed something unusable.
        bad_name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
        raise ValueError(
            "First argument to get_object_or_404() must be a Model, Manager, "
            "or QuerySet, not '%s'." % bad_name
        )
    except queryset.model.DoesNotExist:
        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
def get_list_or_404(klass, *args, **kwargs):
    """
    Call filter() on the queryset derived from ``klass`` and return the
    results as a list, raising Http404 when the list is empty.

    ``klass`` may be a Model, Manager, or QuerySet object; all other
    positional and keyword arguments are forwarded to the filter() query.
    """
    # Expand model classes / managers into a queryset (duck-typed:
    # anything with a filter() method works).
    if hasattr(klass, '_default_manager'):
        queryset = klass._default_manager.all()
    else:
        queryset = klass
    try:
        obj_list = list(queryset.filter(*args, **kwargs))
    except AttributeError:
        # No filter() method at all: the caller passed something unusable.
        bad_name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__
        raise ValueError(
            "First argument to get_list_or_404() must be a Model, Manager, or "
            "QuerySet, not '%s'." % bad_name
        )
    if not obj_list:
        raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)
    return obj_list
def resolve_url(to, *args, **kwargs):
    """
    Return a URL appropriate for the arguments passed.

    ``to`` may be a model (its ``get_absolute_url()`` is called), a view
    name with optional arguments (resolved through ``urls.reverse()``),
    or a literal URL, which is returned as-is.
    """
    # Model objects know their own URL.
    if hasattr(to, 'get_absolute_url'):
        return to.get_absolute_url()
    # Force lazy strings early; functions like urlparse choke on them.
    if isinstance(to, Promise):
        to = force_text(to)
    # Relative URLs pass straight through.
    if isinstance(to, six.string_types) and to.startswith(('./', '../')):
        return to
    # Otherwise attempt a reverse URL resolution.
    try:
        return reverse(to, args=args, kwargs=kwargs)
    except NoReverseMatch:
        # A failing callable, or anything URL-ish (contains '/' or '.'),
        # should surface the error rather than be echoed back.
        if callable(to) or ('/' not in to and '.' not in to):
            raise
    # Finally, fall back and assume it's a URL.
    return to
| mit |
13xforever/webserver | qa/164-RuleOr1.py | 8 | 1057 | from base import *
# Marker string the CGI prints; the test asserts it appears in the response.
MAGIC = "Rule OR: matches"
# Directory and CGI script name used to exercise the rules below.
DIR = "DirOr1"
FILE = "test.cgi"
# Server configuration: rule 1640 serves the directory with the plain file
# handler, while rule 1641 is an OR match (same directory OR unrelated
# extensions) mapped to the cgi handler; the OR rule must win for the
# request to execute the script instead of serving its source.
CONF = """
vserver!1!rule!1640!match = directory
vserver!1!rule!1640!match!directory = /%s
vserver!1!rule!1640!handler = file
vserver!1!rule!1641!match = or
vserver!1!rule!1641!match!left = directory
vserver!1!rule!1641!match!left!directory = /%s
vserver!1!rule!1641!match!right = extensions
vserver!1!rule!1641!match!right!extensions = not_cgi,other
vserver!1!rule!1641!handler = cgi
"""
# Minimal shell CGI that emits MAGIC as plain text.
CGI = """#!/bin/sh
echo "Content-Type: text/plain"
echo
echo "%s"
""" % (MAGIC)
class Test (TestBase):
    """QA test: a request matched by the OR rule must run the cgi handler."""
    def __init__ (self):
        TestBase.__init__ (self, __file__)
        self.name = "Rule or: match"
        # Request the CGI inside the directory; expect a 200 whose body
        # contains MAGIC (proving the cgi handler from the OR rule ran)
        # and which never leaks the script source.
        self.request = "GET /%s/%s HTTP/1.0\r\n" % (DIR, FILE)
        self.expected_error = 200
        self.expected_content = MAGIC
        self.forbidden_content = ["/bin/sh", "echo"]
        self.conf = CONF % (DIR, DIR)
    def Prepare (self, www):
        # Create the test directory and the executable CGI script
        # (0755 is a Python 2 octal literal) under the server root.
        d = self.Mkdir (www, DIR)
        f = self.WriteFile (d, FILE, 0755, CGI)
| gpl-2.0 |
abaldwin1/thumbor | thumbor/error_handlers/file.py | 11 | 3024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import logging.handlers
import json
import re
import time
from thumbor import __version__
class ErrorHandler(object):
    """Log thumbor errors as JSON lines to a file.

    The target path comes from the ERROR_FILE_LOGGER setting; it may
    contain a %s placeholder which is filled from a context attribute
    named by ERROR_FILE_NAME_USE_CONTEXT (plain or single-dotted path).
    """
    def __init__(self, config):
        # Fail fast on misconfiguration: a log file path is mandatory.
        if not config.ERROR_FILE_LOGGER:
            raise RuntimeError(
                "If you set USE_CUSTOM_ERROR_HANDLING to True, and you are using thumbor_file_logger.logger, " +
                "then you must specify the file path to log to with the ERROR_FILE_LOGGER configuration."
            )
        # The context attribute reference must look like "attr" or "attr.sub".
        if (config.ERROR_FILE_NAME_USE_CONTEXT and not re.search('^(\w+\.)?\w+$', config.ERROR_FILE_NAME_USE_CONTEXT)):
            raise RuntimeError(
                "ERROR_FILE_NAME_USE_CONTEXT config must reffer an attribute of context "
                "object and be form of ^(\w+.)?\w+$ : %s" % config.ERROR_FILE_NAME_USE_CONTEXT
            )
        self.file_name = config.ERROR_FILE_LOGGER
        if config.ERROR_FILE_NAME_USE_CONTEXT:
            self.use_context = config.ERROR_FILE_NAME_USE_CONTEXT
        else:
            self.use_context = None
        # The logger is created lazily on the first error (see handle_error),
        # because resolving the file name may need a request context.
        self.logger = None
    def handle_error(self, context, handler, exception):
        """Serialize the failed request and the exception as one JSON line."""
        # Create the log file lazily, resolving any %s placeholder in the
        # configured file name from the context attribute.
        if not self.logger:
            if self.use_context:
                if '.' in self.use_context:
                    # Dotted path: walk the attributes one step at a time.
                    # NOTE(review): relies on the Python 2 builtin `reduce`.
                    parts = self.use_context.split('.')
                    obj = getattr(context, parts[0], None)
                    obj = reduce(getattr, parts[1:], obj)
                else:
                    obj = getattr(context, self.use_context, None)
                file = self.file_name % obj
            else:
                file = self.file_name
            self.logger = logging.getLogger('error_handler')
            self.logger.setLevel(logging.ERROR)
            # WatchedFileHandler reopens the file if it is rotated externally.
            self.logger.addHandler(logging.handlers.WatchedFileHandler(file))
        req = handler.request
        extra = {
            'thumbor-version': __version__,
            'timestamp': time.time()
        }
        extra.update({
            'Headers': req.headers
        })
        # Normalize a raw "k=v; k2=v2" Cookie header string into a dict
        # (`basestring` check: this is Python 2 code).
        cookies_header = extra.get('Headers', {}).get('Cookie', {})
        if isinstance(cookies_header, basestring):
            cookies = {}
            for cookie in cookies_header.split(';'):
                if not cookie:
                    continue
                values = cookie.strip().split('=')
                # NOTE(review): "".join drops '=' characters inside cookie
                # values ("k=a=b" becomes "ab"); "=".join(values[1:]) would
                # preserve them — confirm intent before changing.
                key, val = values[0], "".join(values[1:])
                cookies[key] = val
        else:
            cookies = cookies_header
        extra['Headers']['Cookie'] = cookies
        # Shape loosely follows the Sentry/raven event schema.
        data = {
            'Http': {
                'url': req.full_url(),
                'method': req.method,
                'data': req.arguments,
                'body': req.body,
                'query_string': req.query
            },
            'interfaces.User': {
                'ip': req.remote_ip,
            },
            'exception': str(exception),
            'extra': extra
        }
        self.logger.error(json.dumps(data))
| mit |
alex8866/cinder | cinder/tests/api/contrib/test_volume_transfer.py | 6 | 26454 | # Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for volume transfer code.
"""
import json
from xml.dom import minidom
import webob
from cinder.api.contrib import volume_transfer
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.api import fakes
from cinder.transfer import API
import cinder.volume
LOG = logging.getLogger(__name__)
class VolumeTransferAPITestCase(test.TestCase):
"""Test Case for transfers API."""
def setUp(self):
super(VolumeTransferAPITestCase, self).setUp()
self.volume_transfer_api = API()
self.controller = volume_transfer.VolumeTransferController()
def _create_transfer(self, volume_id=1,
display_name='test_transfer'):
"""Create a transfer object."""
return self.volume_transfer_api.create(context.get_admin_context(),
volume_id,
display_name)
@staticmethod
def _create_volume(display_name='test_volume',
display_description='this is a test volume',
status='available',
size=1,
project_id='fake'):
"""Create a volume object."""
vol = {}
vol['host'] = 'fake_host'
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = project_id
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = status
return db.volume_create(context.get_admin_context(), vol)['id']
def test_show_transfer(self):
volume_id = self._create_volume(size=5)
transfer = self._create_transfer(volume_id)
LOG.debug('Created transfer with id %s' % transfer)
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' %
transfer['id'])
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(res_dict['transfer']['name'], 'test_transfer')
self.assertEqual(res_dict['transfer']['id'], transfer['id'])
self.assertEqual(res_dict['transfer']['volume_id'], volume_id)
db.transfer_destroy(context.get_admin_context(), transfer['id'])
db.volume_destroy(context.get_admin_context(), volume_id)
def test_show_transfer_xml_content_type(self):
volume_id = self._create_volume(size=5)
transfer = self._create_transfer(volume_id)
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' %
transfer['id'])
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
transfer_xml = dom.getElementsByTagName('transfer')
name = transfer_xml.item(0).getAttribute('name')
self.assertEqual(name.strip(), "test_transfer")
db.transfer_destroy(context.get_admin_context(), transfer['id'])
db.volume_destroy(context.get_admin_context(), volume_id)
def test_show_transfer_with_transfer_NotFound(self):
req = webob.Request.blank('/v2/fake/os-volume-transfer/1234')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Transfer 1234 could not be found.')
def test_list_transfers_json(self):
volume_id_1 = self._create_volume(size=5)
volume_id_2 = self._create_volume(size=5)
transfer1 = self._create_transfer(volume_id_1)
transfer2 = self._create_transfer(volume_id_2)
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['transfers'][0]), 4)
self.assertEqual(res_dict['transfers'][0]['id'], transfer1['id'])
self.assertEqual(res_dict['transfers'][0]['name'], 'test_transfer')
self.assertEqual(len(res_dict['transfers'][1]), 4)
self.assertEqual(res_dict['transfers'][1]['name'], 'test_transfer')
db.transfer_destroy(context.get_admin_context(), transfer2['id'])
db.transfer_destroy(context.get_admin_context(), transfer1['id'])
db.volume_destroy(context.get_admin_context(), volume_id_1)
db.volume_destroy(context.get_admin_context(), volume_id_2)
def test_list_transfers_xml(self):
volume_id_1 = self._create_volume(size=5)
volume_id_2 = self._create_volume(size=5)
transfer1 = self._create_transfer(volume_id_1)
transfer2 = self._create_transfer(volume_id_2)
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
transfer_list = dom.getElementsByTagName('transfer')
self.assertEqual(transfer_list.item(0).attributes.length, 3)
self.assertEqual(transfer_list.item(0).getAttribute('id'),
transfer1['id'])
self.assertEqual(transfer_list.item(1).attributes.length, 3)
self.assertEqual(transfer_list.item(1).getAttribute('id'),
transfer2['id'])
db.transfer_destroy(context.get_admin_context(), transfer2['id'])
db.transfer_destroy(context.get_admin_context(), transfer1['id'])
db.volume_destroy(context.get_admin_context(), volume_id_2)
db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_list_transfers_detail_json(self):
volume_id_1 = self._create_volume(size=5)
volume_id_2 = self._create_volume(size=5)
transfer1 = self._create_transfer(volume_id_1)
transfer2 = self._create_transfer(volume_id_2)
req = webob.Request.blank('/v2/fake/os-volume-transfer/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['transfers'][0]), 5)
self.assertEqual(res_dict['transfers'][0]['name'],
'test_transfer')
self.assertEqual(res_dict['transfers'][0]['id'], transfer1['id'])
self.assertEqual(res_dict['transfers'][0]['volume_id'], volume_id_1)
self.assertEqual(len(res_dict['transfers'][1]), 5)
self.assertEqual(res_dict['transfers'][1]['name'],
'test_transfer')
self.assertEqual(res_dict['transfers'][1]['id'], transfer2['id'])
self.assertEqual(res_dict['transfers'][1]['volume_id'], volume_id_2)
db.transfer_destroy(context.get_admin_context(), transfer2['id'])
db.transfer_destroy(context.get_admin_context(), transfer1['id'])
db.volume_destroy(context.get_admin_context(), volume_id_2)
db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_list_transfers_detail_xml(self):
volume_id_1 = self._create_volume(size=5)
volume_id_2 = self._create_volume(size=5)
transfer1 = self._create_transfer(volume_id_1)
transfer2 = self._create_transfer(volume_id_2)
req = webob.Request.blank('/v2/fake/os-volume-transfer/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
transfer_detail = dom.getElementsByTagName('transfer')
self.assertEqual(transfer_detail.item(0).attributes.length, 4)
self.assertEqual(
transfer_detail.item(0).getAttribute('name'), 'test_transfer')
self.assertEqual(
transfer_detail.item(0).getAttribute('id'), transfer1['id'])
self.assertEqual(transfer_detail.item(0).getAttribute('volume_id'),
volume_id_1)
self.assertEqual(transfer_detail.item(1).attributes.length, 4)
self.assertEqual(
transfer_detail.item(1).getAttribute('name'), 'test_transfer')
self.assertEqual(
transfer_detail.item(1).getAttribute('id'), transfer2['id'])
self.assertEqual(transfer_detail.item(1).getAttribute('volume_id'),
volume_id_2)
db.transfer_destroy(context.get_admin_context(), transfer2['id'])
db.transfer_destroy(context.get_admin_context(), transfer1['id'])
db.volume_destroy(context.get_admin_context(), volume_id_2)
db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_list_transfers_with_all_tenants(self):
volume_id_1 = self._create_volume(size=5)
volume_id_2 = self._create_volume(size=5, project_id='fake1')
transfer1 = self._create_transfer(volume_id_1)
transfer2 = self._create_transfer(volume_id_2)
req = fakes.HTTPRequest.blank('/v2/fake/os-volume-transfer?'
'all_tenants=1',
use_admin_context=True)
res_dict = self.controller.index(req)
expected = [(transfer1['id'], 'test_transfer'),
(transfer2['id'], 'test_transfer')]
ret = []
for item in res_dict['transfers']:
ret.append((item['id'], item['name']))
self.assertEqual(set(expected), set(ret))
db.transfer_destroy(context.get_admin_context(), transfer2['id'])
db.transfer_destroy(context.get_admin_context(), transfer1['id'])
db.volume_destroy(context.get_admin_context(), volume_id_1)
def test_create_transfer_json(self):
volume_id = self._create_volume(status='available', size=5)
body = {"transfer": {"display_name": "transfer1",
"volume_id": volume_id}}
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
LOG.info(res_dict)
self.assertEqual(res.status_int, 202)
self.assertIn('id', res_dict['transfer'])
self.assertIn('auth_key', res_dict['transfer'])
self.assertIn('created_at', res_dict['transfer'])
self.assertIn('name', res_dict['transfer'])
self.assertIn('volume_id', res_dict['transfer'])
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_transfer_xml(self):
volume_size = 2
volume_id = self._create_volume(status='available', size=volume_size)
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.body = ('<transfer name="transfer-001" '
'volume_id="%s"/>' % volume_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
dom = minidom.parseString(res.body)
transfer = dom.getElementsByTagName('transfer')
self.assertTrue(transfer.item(0).hasAttribute('id'))
self.assertTrue(transfer.item(0).hasAttribute('auth_key'))
self.assertTrue(transfer.item(0).hasAttribute('created_at'))
self.assertEqual(transfer.item(0).getAttribute('name'), 'transfer-001')
self.assertTrue(transfer.item(0).hasAttribute('volume_id'))
db.volume_destroy(context.get_admin_context(), volume_id)
def test_create_transfer_with_no_body(self):
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.body = json.dumps(None)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'The server could not comply with the request since'
' it is either malformed or otherwise incorrect.')
def test_create_transfer_with_body_KeyError(self):
body = {"transfer": {"display_name": "transfer1"}}
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Incorrect request body format')
def test_create_transfer_with_VolumeNotFound(self):
body = {"transfer": {"display_name": "transfer1",
"volume_id": 1234}}
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Volume 1234 could not be found.')
def test_create_transfer_with_InvalidVolume(self):
volume_id = self._create_volume(status='attached')
body = {"transfer": {"display_name": "transfer1",
"volume_id": volume_id}}
req = webob.Request.blank('/v2/fake/os-volume-transfer')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: status must be available')
db.volume_destroy(context.get_admin_context(), volume_id)
def test_delete_transfer_awaiting_transfer(self):
volume_id = self._create_volume()
transfer = self._create_transfer(volume_id)
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' %
transfer['id'])
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
# verify transfer has been deleted
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s' %
transfer['id'])
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Transfer %s could not be found.' % transfer['id'])
self.assertEqual(db.volume_get(context.get_admin_context(),
volume_id)['status'], 'available')
db.volume_destroy(context.get_admin_context(), volume_id)
def test_delete_transfer_with_transfer_NotFound(self):
req = webob.Request.blank('/v2/fake/os-volume-transfer/9999')
req.method = 'DELETE'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 404)
self.assertEqual(res_dict['itemNotFound']['code'], 404)
self.assertEqual(res_dict['itemNotFound']['message'],
'Transfer 9999 could not be found.')
def test_accept_transfer_volume_id_specified_json(self):
volume_id = self._create_volume()
transfer = self._create_transfer(volume_id)
svc = self.start_service('volume', host='fake_host')
body = {"accept": {"id": transfer['id'],
"auth_key": transfer['auth_key']}}
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
transfer['id'])
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['transfer']['id'], transfer['id'])
self.assertEqual(res_dict['transfer']['volume_id'], volume_id)
# cleanup
svc.stop()
    def test_accept_transfer_volume_id_specified_xml(self):
        """Accepting a transfer via the XML API returns 202 and echoes the
        transfer id and volume id as attributes of the <transfer> element."""
        volume_id = self._create_volume(size=5)
        transfer = self._create_transfer(volume_id)
        # A running volume service is needed so the accept call can be
        # dispatched to a backend host.
        svc = self.start_service('volume', host='fake_host')
        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
                                  transfer['id'])
        req.body = '<accept auth_key="%s"/>' % transfer['auth_key']
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        # The XML body carries the accepted transfer as a <transfer> element.
        dom = minidom.parseString(res.body)
        accept = dom.getElementsByTagName('transfer')
        self.assertEqual(accept.item(0).getAttribute('id'),
                         transfer['id'])
        self.assertEqual(accept.item(0).getAttribute('volume_id'), volume_id)
        db.volume_destroy(context.get_admin_context(), volume_id)
        # cleanup
        svc.stop()
    def test_accept_transfer_with_no_body(self):
        """POSTing a null JSON body to accept must fail with 400 badRequest."""
        volume_id = self._create_volume(size=5)
        transfer = self._create_transfer(volume_id)
        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
                                  transfer['id'])
        # json.dumps(None) serialises to the literal 'null', i.e. a body
        # that deserialises to nothing usable.
        req.body = json.dumps(None)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'The server could not comply with the request since'
                         ' it is either malformed or otherwise incorrect.')
        db.volume_destroy(context.get_admin_context(), volume_id)
def test_accept_transfer_with_body_KeyError(self):
volume_id = self._create_volume(size=5)
transfer = self._create_transfer(volume_id)
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
transfer['id'])
body = {"": {}}
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'The server could not comply with the request since'
' it is either malformed or otherwise incorrect.')
    def test_accept_transfer_invalid_id_auth_key(self):
        """Accepting a transfer with a wrong auth_key must fail with 400."""
        volume_id = self._create_volume()
        transfer = self._create_transfer(volume_id)
        # auth_key 1 does not match the key generated for the transfer.
        body = {"accept": {"id": transfer['id'],
                           "auth_key": 1}}
        req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
                                  transfer['id'])
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'Invalid auth key: Attempt to transfer %s with '
                         'invalid auth key.' % transfer['id'])
        # cleanup so later tests start from a clean database
        db.transfer_destroy(context.get_admin_context(), transfer['id'])
        db.volume_destroy(context.get_admin_context(), volume_id)
    def test_accept_transfer_with_invalid_transfer(self):
        """Accepting against a nonexistent transfer id must yield a 404."""
        volume_id = self._create_volume()
        transfer = self._create_transfer(volume_id)
        body = {"accept": {"id": transfer['id'],
                           "auth_key": 1}}
        # The URL targets transfer id 1, which does not exist.
        req = webob.Request.blank('/v2/fake/os-volume-transfer/1/accept')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 404)
        self.assertEqual(res_dict['itemNotFound']['code'], 404)
        self.assertEqual(res_dict['itemNotFound']['message'],
                         'TransferNotFound: Transfer 1 could not be found.')
        # cleanup so later tests start from a clean database
        db.transfer_destroy(context.get_admin_context(), transfer['id'])
        db.volume_destroy(context.get_admin_context(), volume_id)
def test_accept_transfer_with_VolumeSizeExceedsAvailableQuota(self):
def fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota(
cls, context, transfer, volume_id):
raise exception.VolumeSizeExceedsAvailableQuota(requested='2',
consumed='2',
quota='3')
self.stubs.Set(
cinder.transfer.API,
'accept',
fake_transfer_api_accept_throwing_VolumeSizeExceedsAvailableQuota)
volume_id = self._create_volume()
transfer = self._create_transfer(volume_id)
body = {"accept": {"id": transfer['id'],
"auth_key": transfer['auth_key']}}
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
transfer['id'])
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 413)
self.assertEqual(res_dict['overLimit']['code'], 413)
self.assertEqual(res_dict['overLimit']['message'],
'Requested volume or snapshot exceeds allowed '
'Gigabytes quota. Requested 2G, quota is 3G and '
'2G has been consumed.')
def test_accept_transfer_with_VolumeLimitExceeded(self):
def fake_transfer_api_accept_throwing_VolumeLimitExceeded(cls,
context,
transfer,
volume_id):
raise exception.VolumeLimitExceeded(allowed=1)
self.stubs.Set(cinder.transfer.API, 'accept',
fake_transfer_api_accept_throwing_VolumeLimitExceeded)
volume_id = self._create_volume()
transfer = self._create_transfer(volume_id)
body = {"accept": {"id": transfer['id'],
"auth_key": transfer['auth_key']}}
req = webob.Request.blank('/v2/fake/os-volume-transfer/%s/accept' %
transfer['id'])
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 413)
self.assertEqual(res_dict['overLimit']['code'], 413)
self.assertEqual(res_dict['overLimit']['message'],
'VolumeLimitExceeded: Maximum number of volumes '
'allowed (1) exceeded')
| apache-2.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/maskrcnn/implementations/implementation_closed/maskrcnn_benchmark/data/datasets/voc.py | 4 | 4121 | import os
import torch
import torch.utils.data
from PIL import Image
import sys
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
from maskrcnn_benchmark.structures.bounding_box import BoxList
class PascalVOCDataset(torch.utils.data.Dataset):
    """Pascal VOC detection dataset yielding (image, BoxList target, index).

    Expects the standard VOC layout under ``data_dir``: ``Annotations/`` for
    per-image XML, ``JPEGImages/`` for the pictures, and
    ``ImageSets/Main/<split>.txt`` listing the image ids of the split.
    """
    # Index 0 is the background class.  The trailing space in
    # "__background__ " is intentional in the upstream class list — do not
    # "fix" it, or class_to_ind lookups against upstream configs will break.
    CLASSES = (
        "__background__ ",
        "aeroplane",
        "bicycle",
        "bird",
        "boat",
        "bottle",
        "bus",
        "car",
        "cat",
        "chair",
        "cow",
        "diningtable",
        "dog",
        "horse",
        "motorbike",
        "person",
        "pottedplant",
        "sheep",
        "sofa",
        "train",
        "tvmonitor",
    )
    def __init__(self, data_dir, split, use_difficult=False, transforms=None):
        """
        Arguments:
            data_dir: VOC root directory (contains Annotations/, JPEGImages/,
                ImageSets/).
            split: image-set name, e.g. "train" or "val"; selects
                ImageSets/Main/<split>.txt.
            use_difficult: when True, keep objects flagged <difficult>.
            transforms: optional callable applied as (img, target) pair.
        """
        self.root = data_dir
        self.image_set = split
        self.keep_difficult = use_difficult
        self.transforms = transforms
        # %-templates: filled with an image id string at lookup time.
        self._annopath = os.path.join(self.root, "Annotations", "%s.xml")
        self._imgpath = os.path.join(self.root, "JPEGImages", "%s.jpg")
        self._imgsetpath = os.path.join(self.root, "ImageSets", "Main", "%s.txt")
        with open(self._imgsetpath % self.image_set) as f:
            self.ids = f.readlines()
        self.ids = [x.strip("\n") for x in self.ids]
        # dataset index -> image id (used by evaluation code)
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        cls = PascalVOCDataset.CLASSES
        # class name -> integer label, in CLASSES order (background = 0)
        self.class_to_ind = dict(zip(cls, range(len(cls))))
    def __getitem__(self, index):
        """Return (image, BoxList target, index) for the given dataset index."""
        img_id = self.ids[index]
        img = Image.open(self._imgpath % img_id).convert("RGB")
        target = self.get_groundtruth(index)
        # Drop boxes that fall entirely outside the image bounds.
        target = target.clip_to_image(remove_empty=True)
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target, index
    def __len__(self):
        return len(self.ids)
    def get_groundtruth(self, index):
        """Parse the annotation XML for ``index`` into a BoxList with
        "labels" and "difficult" fields attached."""
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)
        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])
        return target
    def _preprocess_annotation(self, target):
        """Convert a parsed VOC XML root into tensors of boxes/labels plus
        the image size; skips difficult objects unless keep_difficult."""
        boxes = []
        gt_classes = []
        difficult_boxes = []
        # VOC boxes are 1-based; subtract 1 to make pixel indexes 0-based.
        TO_REMOVE = 1
        for obj in target.iter("object"):
            difficult = int(obj.find("difficult").text) == 1
            if not self.keep_difficult and difficult:
                continue
            name = obj.find("name").text.lower().strip()
            bb = obj.find("bndbox")
            # Make pixel indexes 0-based
            # Refer to "https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211"
            box = [
                bb.find("xmin").text,
                bb.find("ymin").text,
                bb.find("xmax").text,
                bb.find("ymax").text,
            ]
            bndbox = tuple(
                map(lambda x: x - TO_REMOVE, list(map(int, box)))
            )
            boxes.append(bndbox)
            gt_classes.append(self.class_to_ind[name])
            difficult_boxes.append(difficult)
        size = target.find("size")
        im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
        res = {
            "boxes": torch.tensor(boxes, dtype=torch.float32),
            "labels": torch.tensor(gt_classes),
            "difficult": torch.tensor(difficult_boxes),
            "im_info": im_info,
        }
        return res
    def get_img_info(self, index):
        """Return {"height": h, "width": w} for ``index`` by reading only the
        <size> element of the annotation (no image decode)."""
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        size = anno.find("size")
        im_info = tuple(map(int, (size.find("height").text, size.find("width").text)))
        return {"height": im_info[0], "width": im_info[1]}
    def map_class_id_to_class_name(self, class_id):
        """Inverse of class_to_ind: integer label -> class name string."""
        return PascalVOCDataset.CLASSES[class_id]
| apache-2.0 |
wangpanjun/django-rest-framework | rest_framework/utils/field_mapping.py | 39 | 9402 | """
Helper functions for mapping model fields to a dictionary of default
keyword arguments that should be used for their equivelent serializer fields.
"""
import inspect
from django.core import validators
from django.db import models
from django.utils.text import capfirst
from rest_framework.compat import clean_manytomany_helptext
from rest_framework.validators import UniqueValidator
NUMERIC_FIELD_TYPES = (
models.IntegerField, models.FloatField, models.DecimalField
)
class ClassLookupDict(object):
    """
    A dict-like lookup keyed by classes.

    Indexing with an *instance* walks that instance's class MRO and returns
    the value registered for the first matching ancestor class, raising
    ``KeyError`` when no ancestor is registered.
    """
    def __init__(self, mapping):
        self.mapping = mapping

    def __getitem__(self, key):
        # Proxy objects (eg. BoundField behaving as a Field) advertise the
        # class they stand in for via `_proxy_class`; resolve against that.
        if hasattr(key, '_proxy_class'):
            lookup_class = key._proxy_class
        else:
            lookup_class = key.__class__
        for candidate in inspect.getmro(lookup_class):
            if candidate in self.mapping:
                return self.mapping[candidate]
        raise KeyError('Class %s not found in lookup.' % lookup_class.__name__)

    def __setitem__(self, key, value):
        self.mapping[key] = value
def needs_label(model_field, field_name):
    """
    Return ``True`` when the model field's verbose name differs from the
    label that would be derived automatically from ``field_name``, meaning
    an explicit ``label`` kwarg is required on the serializer field.
    """
    derived_label = field_name.replace('_', ' ').capitalize()
    verbose_label = capfirst(model_field.verbose_name)
    return verbose_label != derived_label
def get_detail_view_name(model):
    """
    Return the URL view name used for hyperlinks that refer to single
    instances of ``model`` (the conventional ``<model name>-detail`` route).
    """
    opts = model._meta
    format_kwargs = {
        'app_label': opts.app_label,
        'model_name': opts.object_name.lower()
    }
    return '%(model_name)s-detail' % format_kwargs
def get_field_kwargs(field_name, model_field):
    """
    Creates a default instance of a basic non-relational field.

    Maps a Django model field's metadata (label, help text, nullability,
    choices, length/value constraints, uniqueness) onto the keyword
    arguments needed to build the equivalent serializer field.  Length and
    value bounds are lifted out of the model field's validator list and
    passed as explicit kwargs instead.
    """
    kwargs = {}
    validator_kwarg = list(model_field.validators)
    # The following will only be used by ModelField classes.
    # Gets removed for everything else.
    kwargs['model_field'] = model_field
    if model_field.verbose_name and needs_label(model_field, field_name):
        kwargs['label'] = capfirst(model_field.verbose_name)
    if model_field.help_text:
        kwargs['help_text'] = model_field.help_text
    # max_digits/decimal_places only exist on DecimalField.
    max_digits = getattr(model_field, 'max_digits', None)
    if max_digits is not None:
        kwargs['max_digits'] = max_digits
    decimal_places = getattr(model_field, 'decimal_places', None)
    if decimal_places is not None:
        kwargs['decimal_places'] = decimal_places
    if isinstance(model_field, models.TextField):
        kwargs['style'] = {'base_template': 'textarea.html'}
    if isinstance(model_field, models.AutoField) or not model_field.editable:
        # If this field is read-only, then return early.
        # Further keyword arguments are not valid.
        kwargs['read_only'] = True
        return kwargs
    if model_field.has_default() or model_field.blank or model_field.null:
        kwargs['required'] = False
    # NullBooleanField handles null itself, so allow_null is not passed.
    if model_field.null and not isinstance(model_field, models.NullBooleanField):
        kwargs['allow_null'] = True
    if model_field.blank and (isinstance(model_field, models.CharField) or
                              isinstance(model_field, models.TextField)):
        kwargs['allow_blank'] = True
    if model_field.choices:
        # If this model field contains choices, then return early.
        # Further keyword arguments are not valid.
        kwargs['choices'] = model_field.choices
        return kwargs
    # Ensure that max_length is passed explicitly as a keyword arg,
    # rather than as a validator.
    max_length = getattr(model_field, 'max_length', None)
    if max_length is not None and isinstance(model_field, models.CharField):
        kwargs['max_length'] = max_length
        validator_kwarg = [
            validator for validator in validator_kwarg
            if not isinstance(validator, validators.MaxLengthValidator)
        ]
    # Ensure that min_length is passed explicitly as a keyword arg,
    # rather than as a validator.
    min_length = next((
        validator.limit_value for validator in validator_kwarg
        if isinstance(validator, validators.MinLengthValidator)
    ), None)
    if min_length is not None and isinstance(model_field, models.CharField):
        kwargs['min_length'] = min_length
        validator_kwarg = [
            validator for validator in validator_kwarg
            if not isinstance(validator, validators.MinLengthValidator)
        ]
    # Ensure that max_value is passed explicitly as a keyword arg,
    # rather than as a validator.
    max_value = next((
        validator.limit_value for validator in validator_kwarg
        if isinstance(validator, validators.MaxValueValidator)
    ), None)
    if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
        kwargs['max_value'] = max_value
        validator_kwarg = [
            validator for validator in validator_kwarg
            if not isinstance(validator, validators.MaxValueValidator)
        ]
    # Ensure that min_value is passed explicitly as a keyword arg,
    # rather than as a validator.
    min_value = next((
        validator.limit_value for validator in validator_kwarg
        if isinstance(validator, validators.MinValueValidator)
    ), None)
    if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
        kwargs['min_value'] = min_value
        validator_kwarg = [
            validator for validator in validator_kwarg
            if not isinstance(validator, validators.MinValueValidator)
        ]
    # URLField does not need to include the URLValidator argument,
    # as it is explicitly added in.
    if isinstance(model_field, models.URLField):
        validator_kwarg = [
            validator for validator in validator_kwarg
            if not isinstance(validator, validators.URLValidator)
        ]
    # EmailField does not need to include the validate_email argument,
    # as it is explicitly added in.
    if isinstance(model_field, models.EmailField):
        validator_kwarg = [
            validator for validator in validator_kwarg
            if validator is not validators.validate_email
        ]
    # SlugField does not need to include the 'validate_slug' argument.
    if isinstance(model_field, models.SlugField):
        validator_kwarg = [
            validator for validator in validator_kwarg
            if validator is not validators.validate_slug
        ]
    # IPAddressField does not need to include 'validate_ipv46_address'.
    if isinstance(model_field, models.GenericIPAddressField):
        validator_kwarg = [
            validator for validator in validator_kwarg
            if validator is not validators.validate_ipv46_address
        ]
    # Unique model fields get a queryset-backed uniqueness validator.
    if getattr(model_field, 'unique', False):
        validator = UniqueValidator(queryset=model_field.model._default_manager)
        validator_kwarg.append(validator)
    if validator_kwarg:
        kwargs['validators'] = validator_kwarg
    return kwargs
def get_relation_kwargs(field_name, relation_info):
    """
    Creates a default instance of a flat relational field.

    ``relation_info`` is a 4-tuple of
    (model_field, related_model, to_many, has_through_model).  Through-model
    and non-editable relations become read-only (their queryset kwarg is
    dropped, since read-only fields take no queryset).
    """
    model_field, related_model, to_many, has_through_model = relation_info
    kwargs = {
        'queryset': related_model._default_manager,
        'view_name': get_detail_view_name(related_model)
    }
    if to_many:
        kwargs['many'] = True
    if has_through_model:
        # Relations through an intermediary model cannot be written directly.
        kwargs['read_only'] = True
        kwargs.pop('queryset', None)
    if model_field:
        if model_field.verbose_name and needs_label(model_field, field_name):
            kwargs['label'] = capfirst(model_field.verbose_name)
        help_text = clean_manytomany_helptext(model_field.help_text)
        if help_text:
            kwargs['help_text'] = help_text
        if not model_field.editable:
            kwargs['read_only'] = True
            kwargs.pop('queryset', None)
        if kwargs.get('read_only', False):
            # If this field is read-only, then return early.
            # No further keyword arguments are valid.
            return kwargs
        if model_field.has_default() or model_field.blank or model_field.null:
            kwargs['required'] = False
        if model_field.null:
            kwargs['allow_null'] = True
        if model_field.validators:
            kwargs['validators'] = model_field.validators
        if getattr(model_field, 'unique', False):
            validator = UniqueValidator(queryset=model_field.model._default_manager)
            kwargs['validators'] = kwargs.get('validators', []) + [validator]
        if to_many and not model_field.blank:
            # A required to-many relation may not be submitted empty.
            kwargs['allow_empty'] = False
    return kwargs
def get_nested_relation_kwargs(relation_info):
    """
    Return the default kwargs for a nested relational serializer field:
    always read-only, plus ``many=True`` for to-many relationships.
    """
    if relation_info.to_many:
        return {'read_only': True, 'many': True}
    return {'read_only': True}
def get_url_kwargs(model_field):
    """
    Return the kwargs needed to construct the hyperlinked identity field
    for the given model: just the conventional ``<model>-detail`` view name.
    """
    view_name = get_detail_view_name(model_field)
    return {'view_name': view_name}
| bsd-2-clause |
HiroIshikawa/21playground | thumblelog/myproject/lib/python3.5/site-packages/pip/_vendor/cachecontrol/adapter.py | 469 | 4196 | import functools
from pip._vendor.requests.adapters import HTTPAdapter
from .controller import CacheController
from .cache import DictCache
from .filewrapper import CallbackFileWrapper
class CacheControlAdapter(HTTPAdapter):
    """A requests transport adapter that adds HTTP caching.

    GET requests are served from ``cache`` when fresh; responses are
    cached (or the cache invalidated) based on their status and the
    request method.
    """
    # Methods whose successful completion invalidates the cached entry
    # for the request URL.
    invalidating_methods = set(['PUT', 'DELETE'])

    def __init__(self, cache=None,
                 cache_etags=True,
                 controller_class=None,
                 serializer=None,
                 heuristic=None,
                 *args, **kw):
        """
        @param cache: cache backend; defaults to an in-memory DictCache.
        @param cache_etags: whether to store/reuse ETag validators.
        @param controller_class: alternative CacheController implementation.
        @param serializer: alternative response serializer.
        @param heuristic: optional freshness heuristic applied before caching.
        Remaining args are forwarded to HTTPAdapter.
        """
        super(CacheControlAdapter, self).__init__(*args, **kw)
        self.cache = cache or DictCache()
        self.heuristic = heuristic

        controller_factory = controller_class or CacheController
        self.controller = controller_factory(
            self.cache,
            cache_etags=cache_etags,
            serializer=serializer,
        )

    def send(self, request, **kw):
        """
        Send a request. Use the request information to see if it
        exists in the cache and cache the response if we need to and can.
        """
        if request.method == 'GET':
            # A fresh cached response short-circuits the network entirely.
            cached_response = self.controller.cached_request(request)
            if cached_response:
                return self.build_response(request, cached_response,
                                           from_cache=True)

            # check for etags and add headers if appropriate
            request.headers.update(
                self.controller.conditional_headers(request)
            )

        resp = super(CacheControlAdapter, self).send(request, **kw)

        return resp

    def build_response(self, request, response, from_cache=False):
        """
        Build a response by making a request or using the cache.
        This will end up calling send and returning a potentially
        cached response
        """
        if not from_cache and request.method == 'GET':

            # apply any expiration heuristics
            if response.status == 304:
                # We must have sent an ETag request. This could mean
                # that we've been expired already or that we simply
                # have an etag. In either case, we want to try and
                # update the cache if that is the case.
                cached_response = self.controller.update_cached_response(
                    request, response
                )

                if cached_response is not response:
                    from_cache = True

                # We are done with the server response, read a
                # possible response body (compliant servers will
                # not return one, but we cannot be 100% sure) and
                # release the connection back to the pool.
                response.read(decode_content=False)
                response.release_conn()

                response = cached_response

            # We always cache the 301 responses
            elif response.status == 301:
                self.controller.cache_response(request, response)
            else:
                # Check for any heuristics that might update headers
                # before trying to cache.
                if self.heuristic:
                    response = self.heuristic.apply(response)

                # Wrap the response file with a wrapper that will cache the
                # response when the stream has been consumed.
                response._fp = CallbackFileWrapper(
                    response._fp,
                    functools.partial(
                        self.controller.cache_response,
                        request,
                        response,
                    )
                )

        resp = super(CacheControlAdapter, self).build_response(
            request, response
        )

        # See if we should invalidate the cache.
        if request.method in self.invalidating_methods and resp.ok:
            cache_url = self.controller.cache_url(request.url)
            self.cache.delete(cache_url)

        # Give the request a from_cache attr to let people use it
        resp.from_cache = from_cache

        return resp

    def close(self):
        """Close the cache backend, then the underlying HTTPAdapter."""
        self.cache.close()
        super(CacheControlAdapter, self).close()
| mit |
odicraig/kodi2odi | addons/plugin.video.cartoons8/requests/packages/chardet/euckrprober.py | 2931 | 1675 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel
class EUCKRProber(MultiByteCharSetProber):
    """Multi-byte charset prober specialised for the EUC-KR encoding."""

    def __init__(self):
        super(EUCKRProber, self).__init__()
        # Pair the EUC-KR state machine with its character-distribution
        # analyser, then reset so probing starts from a clean state.
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "EUC-KR"
| gpl-3.0 |
RobertoMalatesta/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py | 124 | 3148 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.models.test_failures import *
class TestFailuresTest(unittest.TestCase):
    """Serialisation round-trip and equality semantics of TestFailure types."""

    def assert_loads(self, failure_class):
        original = failure_class()
        serialized = original.dumps()
        restored = TestFailure.loads(serialized)
        self.assertIsInstance(restored, failure_class)
        self.assertEqual(original, restored)
        # != must be implemented as well, not just ==.
        self.assertFalse(original != restored)

    def test_unknown_failure_type(self):
        class UnknownFailure(TestFailure):
            def message(self):
                return ''
        self.assertRaises(ValueError,
                          determine_result_type, [UnknownFailure()])

    def test_message_is_virtual(self):
        self.assertRaises(NotImplementedError, TestFailure().message)

    def test_loads(self):
        for failure_class in ALL_FAILURE_CLASSES:
            self.assert_loads(failure_class)

    def test_equals(self):
        self.assertEqual(FailureCrash(), FailureCrash())
        self.assertNotEqual(FailureCrash(), FailureTimeout())
        self.assertEqual(len(set([FailureCrash(), FailureCrash()])), 1)
        # The hash happens to be the name of the class, but a failure object
        # and the bare class-name string are still distinct set members:
        self.assertEqual(len(set([FailureCrash(), "FailureCrash"])), 2)

    def test_crashes(self):
        self.assertEqual(FailureCrash().message(), 'DumpRenderTree crashed')
        self.assertEqual(FailureCrash(process_name='foo', pid=1234).message(), 'foo crashed [pid=1234]')
| bsd-3-clause |
CS-SI/QGIS | python/plugins/processing/algs/grass7/ext/i_pca.py | 21 | 1257 | # -*- coding: utf-8 -*-
"""
***************************************************************************
i_pca.py
--------
Date : March 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'March 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .i import verifyRasterNum
def checkParameterValuesBeforeExecuting(alg, parameters, context):
    """Pre-flight validation hook run by Processing before i.pca executes.

    Principal component analysis needs more than one band, so delegate to
    verifyRasterNum requiring at least 2 rasters in the 'input' parameter.
    """
    return verifyRasterNum(alg, parameters, context, 'input', 2)
| gpl-2.0 |
Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/trial/reporter.py | 3 | 40087 | # -*- test-case-name: twisted.trial.test.test_reporter -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
"""
Defines classes that handle the results of tests.
"""
from __future__ import division, absolute_import
import sys
import os
import time
import warnings
import unittest as pyunit
from collections import OrderedDict
from zope.interface import implementer
from twisted.python import reflect, log
from twisted.python.components import proxyForInterface
from twisted.python.failure import Failure
from twisted.python.util import untilConcludes
from twisted.python.compat import _PY3, items
from twisted.trial import itrial, util
from twisted.trial.unittest import makeTodo
try:
from subunit import TestProtocolClient
except ImportError:
TestProtocolClient = None
class BrokenTestCaseWarning(Warning):
    """
    Emitted as a warning when an exception occurs in one of setUp or tearDown.
    """
class SafeStream(object):
    """
    A transparent proxy around a stream whose C{write} calls are retried
    through L{untilConcludes<twisted.python.util.untilConcludes>} so that
    interrupted system calls do not lose output.
    """
    def __init__(self, original):
        self.original = original

    def __getattr__(self, name):
        # Everything other than write() is delegated to the wrapped stream
        # untouched.
        return getattr(self.original, name)

    def write(self, *args, **kwargs):
        return untilConcludes(self.original.write, *args, **kwargs)
@implementer(itrial.IReporter)
class TestResult(pyunit.TestResult, object):
    """
    Accumulates the results of several L{twisted.trial.unittest.TestCase}s.
    @ivar successes: count the number of successes achieved by the test run.
    @type successes: C{int}
    """
    # Used when no todo provided to addExpectedFailure or addUnexpectedSuccess.
    _DEFAULT_TODO = 'Test expected to fail'

    def __init__(self):
        super(TestResult, self).__init__()
        # (test, reason) pairs for skipped tests.
        self.skips = []
        # (test, error, todo) triples for tests that failed as expected.
        self.expectedFailures = []
        # (test, todo) pairs for tests that passed despite a todo marker.
        self.unexpectedSuccesses = []
        self.successes = 0
        # NOTE(review): _timings is initialised but never read within this
        # class; per-test durations land in _lastTime instead — confirm
        # whether subclasses rely on it before removing.
        self._timings = []

    def __repr__(self):
        # NOTE(review): the 'dones' and 'skips' labels appear mismatched
        # with the values interpolated (len(self.skips) feeds 'dones' and
        # len(self.unexpectedSuccesses) feeds 'skips') — confirm intent
        # before relying on this repr for diagnostics.
        return ('<%s run=%d errors=%d failures=%d todos=%d dones=%d skips=%d>'
                % (reflect.qual(self.__class__), self.testsRun,
                   len(self.errors), len(self.failures),
                   len(self.expectedFailures), len(self.skips),
                   len(self.unexpectedSuccesses)))

    def _getTime(self):
        # Indirection over time.time(), presumably so subclasses or tests
        # can substitute a fake clock — confirm.
        return time.time()

    def _getFailure(self, error):
        """
        Convert a C{sys.exc_info()}-style tuple to a L{Failure}, if necessary.
        """
        if isinstance(error, tuple):
            return Failure(error[1], error[0], error[2])
        return error

    def startTest(self, test):
        """
        This must be called before the given test is commenced.
        @type test: L{pyunit.TestCase}
        """
        super(TestResult, self).startTest(test)
        # Record the wall-clock start so stopTest can compute the duration.
        self._testStarted = self._getTime()

    def stopTest(self, test):
        """
        This must be called after the given test is completed.
        @type test: L{pyunit.TestCase}
        """
        super(TestResult, self).stopTest(test)
        # Duration of the most recently finished test.
        self._lastTime = self._getTime() - self._testStarted

    def addFailure(self, test, fail):
        """
        Report a failed assertion for the given test.
        @type test: L{pyunit.TestCase}
        @type fail: L{Failure} or L{tuple}
        """
        self.failures.append((test, self._getFailure(fail)))

    def addError(self, test, error):
        """
        Report an error that occurred while running the given test.
        @type test: L{pyunit.TestCase}
        @type error: L{Failure} or L{tuple}
        """
        self.errors.append((test, self._getFailure(error)))

    def addSkip(self, test, reason):
        """
        Report that the given test was skipped.
        In Trial, tests can be 'skipped'. Tests are skipped mostly because
        there is some platform or configuration issue that prevents them from
        being run correctly.
        @type test: L{pyunit.TestCase}
        @type reason: L{str}
        """
        self.skips.append((test, reason))

    def addUnexpectedSuccess(self, test, todo=None):
        """
        Report that the given test succeeded against expectations.
        In Trial, tests can be marked 'todo'. That is, they are expected to
        fail. When a test that is expected to fail instead succeeds, it should
        call this method to report the unexpected success.
        @type test: L{pyunit.TestCase}
        @type todo: L{unittest.Todo}, or C{None}, in which case a default todo
            message is provided.
        """
        if todo is None:
            todo = makeTodo(self._DEFAULT_TODO)
        self.unexpectedSuccesses.append((test, todo))

    def addExpectedFailure(self, test, error, todo=None):
        """
        Report that the given test failed, and was expected to do so.
        In Trial, tests can be marked 'todo'. That is, they are expected to
        fail.
        @type test: L{pyunit.TestCase}
        @type error: L{Failure}
        @type todo: L{unittest.Todo}, or C{None}, in which case a default todo
            message is provided.
        """
        if todo is None:
            todo = makeTodo(self._DEFAULT_TODO)
        self.expectedFailures.append((test, error, todo))

    def addSuccess(self, test):
        """
        Report that the given test succeeded.
        @type test: L{pyunit.TestCase}
        """
        self.successes += 1

    def wasSuccessful(self):
        """
        Report whether or not this test suite was successful or not.
        The behaviour of this method changed in L{pyunit} in Python 3.4 to
        fail if there are any errors, failures, or unexpected successes.
        Previous to 3.4, it was only if there were errors or failures. This
        method implements the old behaviour for backwards compatibility reasons,
        checking just for errors and failures.
        @rtype: L{bool}
        """
        return len(self.failures) == len(self.errors) == 0

    def done(self):
        """
        The test suite has finished running.
        """
@implementer(itrial.IReporter)
class TestResultDecorator(proxyForInterface(itrial.IReporter,
                                            "_originalReporter")):
    """
    Base class for TestResult decorators.

    All L{itrial.IReporter} methods are proxied to the wrapped reporter via
    L{proxyForInterface}; subclasses override only the methods they change.
    @ivar _originalReporter: The wrapped instance of reporter.
    @type _originalReporter: A provider of L{itrial.IReporter}
    """
@implementer(itrial.IReporter)
class UncleanWarningsReporterWrapper(TestResultDecorator):
    """
    A reporter decorator that downgrades
    L{util.DirtyReactorAggregateError} failures from hard test errors to
    Python warnings, passing every other error through unchanged.
    """
    def addError(self, test, error):
        """
        Forward the error to the wrapped reporter, unless it is a
        dirty-reactor aggregate, in which case emit its message as a
        warning instead.
        """
        isDirtyReactor = (
            isinstance(error, Failure)
            and error.check(util.DirtyReactorAggregateError))
        if isDirtyReactor:
            warnings.warn(error.getErrorMessage())
        else:
            self._originalReporter.addError(test, error)
@implementer(itrial.IReporter)
class _ExitWrapper(TestResultDecorator):
    """
    A reporter decorator that halts the run at the first unsuccessful
    result: any error or failure sets C{shouldStop} before delegating to
    the wrapped reporter.
    """
    def _haltAndDelegate(self, delegate, *args, **kwargs):
        # Flag the stop first, then forward the result unchanged.
        self.shouldStop = True
        return delegate(*args, **kwargs)
    def addError(self, *args, **kwargs):
        return self._haltAndDelegate(
            self._originalReporter.addError, *args, **kwargs)
    def addFailure(self, *args, **kwargs):
        return self._haltAndDelegate(
            self._originalReporter.addFailure, *args, **kwargs)
class _AdaptedReporter(TestResultDecorator):
    """
    TestResult decorator that makes sure that addError only gets tests that
    have been adapted with a particular test adapter.
    """
    def __init__(self, original, testAdapter):
        """
        Construct an L{_AdaptedReporter}.
        @param original: An {itrial.IReporter}.
        @param testAdapter: A callable that returns an L{itrial.ITestCase}.
        """
        TestResultDecorator.__init__(self, original)
        self.testAdapter = testAdapter
    def addError(self, test, error):
        """
        See L{itrial.IReporter}.
        """
        # Adapt before delegating so the wrapped reporter only ever sees
        # adapted test objects.
        test = self.testAdapter(test)
        return self._originalReporter.addError(test, error)
    def addExpectedFailure(self, test, failure, todo=None):
        """
        See L{itrial.IReporter}.
        @type test: A L{pyunit.TestCase}.
        @type failure: A L{failure.Failure} or L{exceptions.AssertionError}
        @type todo: A L{unittest.Todo} or None
        When C{todo} is C{None} a generic C{unittest.Todo} is built.
        L{pyunit.TestCase}'s C{run()} calls this with 3 positional arguments
        (without C{todo}).
        """
        return self._originalReporter.addExpectedFailure(
            self.testAdapter(test), failure, todo)
    def addFailure(self, test, failure):
        """
        See L{itrial.IReporter}.
        """
        test = self.testAdapter(test)
        return self._originalReporter.addFailure(test, failure)
    def addSkip(self, test, skip):
        """
        See L{itrial.IReporter}.
        """
        test = self.testAdapter(test)
        return self._originalReporter.addSkip(test, skip)
    def addUnexpectedSuccess(self, test, todo=None):
        """
        See L{itrial.IReporter}.
        @type test: A L{pyunit.TestCase}.
        @type todo: A L{unittest.Todo} or None
        When C{todo} is C{None} a generic C{unittest.Todo} is built.
        L{pyunit.TestCase}'s C{run()} calls this with 2 positional arguments
        (without C{todo}).
        """
        test = self.testAdapter(test)
        return self._originalReporter.addUnexpectedSuccess(test, todo)
    def startTest(self, test):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.startTest(self.testAdapter(test))
    def stopTest(self, test):
        """
        See L{itrial.IReporter}.
        """
        return self._originalReporter.stopTest(self.testAdapter(test))
@implementer(itrial.IReporter)
class Reporter(TestResult):
    """
    A basic L{TestResult} with support for writing to a stream.
    @ivar _startTime: The time when the first test was started. It defaults to
        C{None}, which means that no test was actually launched.
    @type _startTime: C{float} or C{NoneType}
    @ivar _warningCache: A C{set} of tuples of warning message (file, line,
        text, category) which have already been written to the output stream
        during the currently executing test. This is used to avoid writing
        duplicates of the same warning to the output stream.
    @type _warningCache: C{set}
    @ivar _publisher: The log publisher which will be observed for warning
        events.
    @type _publisher: L{twisted.python.log.LogPublisher}
    """
    _separator = '-' * 79
    _doubleSeparator = '=' * 79
    def __init__(self, stream=sys.stdout, tbformat='default', realtime=False,
                 publisher=None):
        """
        @param stream: A file-like object results are written to.
        @param tbformat: Traceback detail level; passed as C{detail} to
            C{Failure.getTraceback} (see C{_formatFailureTraceback}).
        @param realtime: If true, tracebacks are written to the stream as
            soon as an error or failure is reported, not only at the end.
        @param publisher: A log publisher observed for warning events, or
            C{None} to skip warning reporting.
        """
        super(Reporter, self).__init__()
        self._stream = SafeStream(stream)
        self.tbformat = tbformat
        self.realtime = realtime
        self._startTime = None
        self._warningCache = set()
        # Start observing log events so as to be able to report warnings.
        self._publisher = publisher
        if publisher is not None:
            publisher.addObserver(self._observeWarnings)
    def _observeWarnings(self, event):
        """
        Observe warning events and write them to C{self._stream}.
        This method is a log observer which will be registered with
        C{self._publisher.addObserver}.
        @param event: A C{dict} from the logging system. If it has a
            C{'warning'} key, a logged warning will be extracted from it and
            possibly written to C{self.stream}.
        """
        if 'warning' in event:
            # The cache key mirrors the printed tuple so each distinct
            # warning is written at most once per test.
            key = (event['filename'], event['lineno'],
                   event['category'].split('.')[-1],
                   str(event['warning']))
            if key not in self._warningCache:
                self._warningCache.add(key)
                self._stream.write('%s:%s: %s: %s\n' % key)
    def startTest(self, test):
        """
        Called when a test begins to run. Records the time when it was first
        called and resets the warning cache.
        @param test: L{ITestCase}
        """
        super(Reporter, self).startTest(test)
        if self._startTime is None:
            self._startTime = self._getTime()
        self._warningCache = set()
    def addFailure(self, test, fail):
        """
        Called when a test fails. If C{realtime} is set, then it prints the
        error to the stream.
        @param test: L{ITestCase} that failed.
        @param fail: L{failure.Failure} containing the error.
        """
        super(Reporter, self).addFailure(test, fail)
        if self.realtime:
            fail = self.failures[-1][1] # guarantee it's a Failure
            self._write(self._formatFailureTraceback(fail))
    def addError(self, test, error):
        """
        Called when a test raises an error. If C{realtime} is set, then it
        prints the error to the stream.
        @param test: L{ITestCase} that raised the error.
        @param error: L{failure.Failure} containing the error.
        """
        error = self._getFailure(error)
        super(Reporter, self).addError(test, error)
        if self.realtime:
            error = self.errors[-1][1] # guarantee it's a Failure
            self._write(self._formatFailureTraceback(error))
    def _write(self, format, *args):
        """
        Safely write to the reporter's stream.
        @param format: A format string to write.
        @param *args: The arguments for the format string.
        """
        s = str(format)
        assert isinstance(s, type(''))
        if args:
            self._stream.write(s % args)
        else:
            # No args: write the string as-is so literal '%' survives.
            self._stream.write(s)
        untilConcludes(self._stream.flush)
    def _writeln(self, format, *args):
        """
        Safely write a line to the reporter's stream. Newline is appended to
        the format string.
        @param format: A format string to write.
        @param *args: The arguments for the format string.
        """
        self._write(format, *args)
        self._write('\n')
    def upDownError(self, method, error, warn, printStatus):
        """
        Report an error raised by a fixture method (C{method}); when C{warn}
        is true, log the traceback and emit a L{BrokenTestCaseWarning}
        saying the TestCase is broken.
        """
        super(Reporter, self).upDownError(method, error, warn, printStatus)
        if warn:
            tbStr = self._formatFailureTraceback(error)
            log.msg(tbStr)
            msg = ("caught exception in %s, your TestCase is broken\n\n%s"
                   % (method, tbStr))
            warnings.warn(msg, BrokenTestCaseWarning, stacklevel=2)
    def cleanupErrors(self, errs):
        """
        Report reactor-cleanup errors as a L{BrokenTestCaseWarning} with the
        formatted traceback(s) attached.
        """
        super(Reporter, self).cleanupErrors(errs)
        warnings.warn("%s\n%s" % ("REACTOR UNCLEAN! traceback(s) follow: ",
                                  self._formatFailureTraceback(errs)),
                      BrokenTestCaseWarning)
    def _trimFrames(self, frames):
        """
        Trim frames to remove internal paths.
        When a C{SynchronousTestCase} method fails synchronously, the stack
        looks like this:
         - [0]: C{SynchronousTestCase._run}
         - [1]: C{util.runWithWarningsSuppressed}
         - [2:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}
        When a C{TestCase} method fails synchronously, the stack looks like
        this:
         - [0]: C{defer.maybeDeferred}
         - [1]: C{utils.runWithWarningsSuppressed}
         - [2]: C{utils.runWithWarningsSuppressed}
         - [3:-2]: code in the test method which failed
         - [-1]: C{_synctest.fail}
        When a method fails inside a C{Deferred} (i.e., when the test method
        returns a C{Deferred}, and that C{Deferred}'s errback fires), the stack
        captured inside the resulting C{Failure} looks like this:
         - [0]: C{defer.Deferred._runCallbacks}
         - [1:-2]: code in the testmethod which failed
         - [-1]: C{_synctest.fail}
        As a result, we want to trim either [maybeDeferred, runWWS, runWWS] or
        [Deferred._runCallbacks] or [SynchronousTestCase._run, runWWS] from the
        front, and trim the [unittest.fail] from the end.
        There is also another case, when the test method is badly defined and
        contains extra arguments.
        If it doesn't recognize one of these cases, it just returns the
        original frames.
        @param frames: The C{list} of frames from the test failure.
        @return: The C{list} of frames to display.
        """
        newFrames = list(frames)
        if len(frames) < 2:
            return newFrames
        # Each frame tuple starts with (functionName, fileName, ...); the
        # (method, module-basename) pairs below identify framework frames.
        firstMethod = newFrames[0][0]
        firstFile = os.path.splitext(os.path.basename(newFrames[0][1]))[0]
        secondMethod = newFrames[1][0]
        secondFile = os.path.splitext(os.path.basename(newFrames[1][1]))[0]
        syncCase = (("_run", "_synctest"),
                    ("runWithWarningsSuppressed", "util"))
        asyncCase = (("maybeDeferred", "defer"),
                     ("runWithWarningsSuppressed", "utils"))
        twoFrames = ((firstMethod, firstFile), (secondMethod, secondFile))
        if _PY3:
            # On PY3, we have an extra frame which is reraising the exception
            for frame in newFrames:
                frameFile = os.path.splitext(os.path.basename(frame[1]))[0]
                if frameFile == "compat" and frame[0] == "reraise":
                    # If it's in the compat module and is reraise, BLAM IT
                    newFrames.pop(newFrames.index(frame))
        if twoFrames == syncCase:
            newFrames = newFrames[2:]
        elif twoFrames == asyncCase:
            newFrames = newFrames[3:]
        elif (firstMethod, firstFile) == ("_runCallbacks", "defer"):
            newFrames = newFrames[1:]
        if not newFrames:
            # The method fails before getting called, probably an argument
            # problem
            return newFrames
        last = newFrames[-1]
        if (last[0].startswith('fail')
            and os.path.splitext(os.path.basename(last[1]))[0] == '_synctest'):
            newFrames = newFrames[:-1]
        return newFrames
    def _formatFailureTraceback(self, fail):
        """
        Return a string traceback for C{fail}; C{fail} may already be a
        string, in which case it is normalized to end with one newline.
        """
        if isinstance(fail, str):
            return fail.rstrip() + '\n'
        # Temporarily swap in the trimmed frames so getTraceback renders
        # only user-relevant frames, then restore the originals.
        fail.frames, frames = self._trimFrames(fail.frames), fail.frames
        result = fail.getTraceback(detail=self.tbformat,
                                   elideFrameworkCode=True)
        fail.frames = frames
        return result
    def _groupResults(self, results, formatter):
        """
        Group tests together based on their results.
        @param results: An iterable of tuples of two or more elements. The
            first element of each tuple is a test case. The remaining
            elements describe the outcome of that test case.
        @param formatter: A callable which turns a test case result into a
            string. The elements after the first of the tuples in
            C{results} will be passed as positional arguments to
            C{formatter}.
        @return: A C{list} of two-tuples. The first element of each tuple
            is a unique string describing one result from at least one of
            the test cases in C{results}. The second element is a list of
            the test cases which had that result.
        """
        groups = OrderedDict()
        for content in results:
            case = content[0]
            outcome = content[1:]
            key = formatter(*outcome)
            groups.setdefault(key, []).append(case)
        return items(groups)
    def _printResults(self, flavor, errors, formatter):
        """
        Print a group of errors to the stream.
        @param flavor: A string indicating the kind of error (e.g. 'TODO').
        @param errors: A list of errors, often L{failure.Failure}s, but
            sometimes 'todo' errors.
        @param formatter: A callable that knows how to format the errors.
        """
        for reason, cases in self._groupResults(errors, formatter):
            self._writeln(self._doubleSeparator)
            self._writeln(flavor)
            self._write(reason)
            self._writeln('')
            for case in cases:
                self._writeln(case.id())
    def _printExpectedFailure(self, error, todo):
        # Formatter for _printResults: todo reason plus the traceback.
        return 'Reason: %r\n%s' % (todo.reason,
                                   self._formatFailureTraceback(error))
    def _printUnexpectedSuccess(self, todo):
        # Formatter for _printResults: todo reason plus any expected errors.
        ret = 'Reason: %r\n' % (todo.reason,)
        if todo.errors:
            ret += 'Expected errors: %s\n' % (', '.join(todo.errors),)
        return ret
    def _printErrors(self):
        """
        Print all of the non-success results to the stream in full.
        """
        self._write('\n')
        self._printResults('[SKIPPED]', self.skips, lambda x: '%s\n' % x)
        self._printResults('[TODO]', self.expectedFailures,
                           self._printExpectedFailure)
        self._printResults('[FAIL]', self.failures,
                           self._formatFailureTraceback)
        self._printResults('[ERROR]', self.errors,
                           self._formatFailureTraceback)
        self._printResults('[SUCCESS!?!]', self.unexpectedSuccesses,
                           self._printUnexpectedSuccess)
    def _getSummary(self):
        """
        Return a formatted count of tests status results.
        """
        summaries = []
        for stat in ("skips", "expectedFailures", "failures", "errors",
                     "unexpectedSuccesses"):
            num = len(getattr(self, stat))
            if num:
                summaries.append('%s=%d' % (stat, num))
        if self.successes:
            summaries.append('successes=%d' % (self.successes,))
        # Empty string when nothing was recorded at all.
        summary = (summaries and ' (' + ', '.join(summaries) + ')') or ''
        return summary
    def _printSummary(self):
        """
        Print a line summarising the test results to the stream.
        """
        summary = self._getSummary()
        if self.wasSuccessful():
            status = "PASSED"
        else:
            status = "FAILED"
        self._write("%s%s\n", status, summary)
    def done(self):
        """
        Summarize the result of the test run.
        The summary includes a report of all of the errors, todos, skips and
        so forth that occurred during the run. It also includes the number of
        tests that were run and how long it took to run them (not including
        load time).
        Expects that C{_printErrors}, C{_writeln}, C{_write}, C{_printSummary}
        and C{_separator} are all implemented.
        """
        if self._publisher is not None:
            self._publisher.removeObserver(self._observeWarnings)
        self._printErrors()
        self._writeln(self._separator)
        if self._startTime is not None:
            self._writeln('Ran %d tests in %.3fs', self.testsRun,
                          time.time() - self._startTime)
        self._write('\n')
        self._printSummary()
class MinimalReporter(Reporter):
    """
    A minimalist reporter that prints only a summary of the test result, in
    the form of (timeTaken, #tests, #tests, #errors, #failures, #skips).
    """
    def _printErrors(self):
        """
        Suppress the detailed error listing; only the counts matter here.
        """
    def _printSummary(self):
        """
        Print out a one-line summary of the form:
        '%(runtime) %(number_of_tests) %(number_of_tests) %(num_errors)
        %(num_failures) %(num_skips)'
        """
        elapsed = (self._getTime() - self._startTime
                   if self._startTime is not None else 0)
        counts = (elapsed, self.testsRun, self.testsRun,
                  len(self.errors), len(self.failures), len(self.skips))
        self._writeln(' '.join(str(value) for value in counts))
class TextReporter(Reporter):
    """
    Simple reporter that prints a single character for each test as it runs,
    along with the standard Trial summary text.
    """
    def _mark(self, glyph):
        # One character of progress output per reported outcome.
        self._write(glyph)
    def addSuccess(self, test):
        super(TextReporter, self).addSuccess(test)
        self._mark('.')
    def addError(self, *args):
        super(TextReporter, self).addError(*args)
        self._mark('E')
    def addFailure(self, *args):
        super(TextReporter, self).addFailure(*args)
        self._mark('F')
    def addSkip(self, *args):
        super(TextReporter, self).addSkip(*args)
        self._mark('S')
    def addExpectedFailure(self, *args):
        super(TextReporter, self).addExpectedFailure(*args)
        self._mark('T')
    def addUnexpectedSuccess(self, *args):
        super(TextReporter, self).addUnexpectedSuccess(*args)
        self._mark('!')
class VerboseTextReporter(Reporter):
    """
    A verbose reporter that prints the name of each test as it is running,
    followed by a bracketed word describing that test's result.
    """
    # This is actually the bwverbose option
    def _status(self, label):
        # Write the bracketed outcome for the test that just finished.
        self._write(label)
    def startTest(self, tm):
        self._write('%s ... ', tm.id())
        super(VerboseTextReporter, self).startTest(tm)
    def addSuccess(self, test):
        super(VerboseTextReporter, self).addSuccess(test)
        self._status('[OK]')
    def addError(self, *args):
        super(VerboseTextReporter, self).addError(*args)
        self._status('[ERROR]')
    def addFailure(self, *args):
        super(VerboseTextReporter, self).addFailure(*args)
        self._status('[FAILURE]')
    def addSkip(self, *args):
        super(VerboseTextReporter, self).addSkip(*args)
        self._status('[SKIPPED]')
    def addExpectedFailure(self, *args):
        super(VerboseTextReporter, self).addExpectedFailure(*args)
        self._status('[TODO]')
    def addUnexpectedSuccess(self, *args):
        super(VerboseTextReporter, self).addUnexpectedSuccess(*args)
        self._status('[SUCCESS!?!]')
    def stopTest(self, test):
        super(VerboseTextReporter, self).stopTest(test)
        self._write('\n')
class TimingTextReporter(VerboseTextReporter):
    """
    Prints out each test as it is running, followed by the time taken for each
    test to run.
    """
    def stopTest(self, method):
        """
        Mark the test as stopped, and write the time it took to run the test
        to the stream.
        """
        super(TimingTextReporter, self).stopTest(method)
        # NOTE(review): _lastTime is presumably set by the base class's
        # stopTest timing -- confirm against TestResult.  '%.03f' renders
        # identically to '%.3f' (three digits after the decimal point).
        self._write("(%.03f secs)\n" % self._lastTime)
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
# guess false in case of error
return False
supported = classmethod(supported)
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
    """
    See _AnsiColorizer docstring.
    """
    def __init__(self, stream):
        from win32console import GetStdHandle, STD_OUTPUT_HANDLE, \
             FOREGROUND_RED, FOREGROUND_BLUE, FOREGROUND_GREEN, \
             FOREGROUND_INTENSITY
        red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
                                  FOREGROUND_BLUE, FOREGROUND_INTENSITY)
        self.stream = stream
        self.screenBuffer = GetStdHandle(STD_OUTPUT_HANDLE)
        # Console attributes are bitmasks combining the per-channel
        # foreground flags; 'bold' is the intensity bit.
        self._colors = {
            'normal': red | green | blue,
            'red': red | bold,
            'green': green | bold,
            'blue': blue | bold,
            'yellow': red | green | bold,
            'magenta': red | blue | bold,
            'cyan': green | blue | bold,
            'white': red | green | blue | bold
            }
    def supported(cls, stream=sys.stdout):
        """
        Return True if win32console is importable and the console accepts
        text attributes; False otherwise.
        """
        try:
            import win32console
            screenBuffer = win32console.GetStdHandle(
                win32console.STD_OUTPUT_HANDLE)
        except ImportError:
            return False
        import pywintypes
        try:
            # Probe the console by actually setting an attribute.
            screenBuffer.SetConsoleTextAttribute(
                win32console.FOREGROUND_RED |
                win32console.FOREGROUND_GREEN |
                win32console.FOREGROUND_BLUE)
        except pywintypes.error:
            return False
        else:
            return True
    supported = classmethod(supported)
    def write(self, text, color):
        """
        Write C{text} in C{color} by toggling the console text attribute
        around the write, then restoring the 'normal' attribute.
        """
        color = self._colors[color]
        self.screenBuffer.SetConsoleTextAttribute(color)
        self.stream.write(text)
        self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
@implementer(itrial.IReporter)
class SubunitReporter(object):
    """
    Reports test output via Subunit.
    @ivar _subunit: The subunit protocol client that we are wrapping.
    @ivar _successful: An internal variable, used to track whether we have
        received only successful results.
    @since: 10.0
    """
    def __init__(self, stream=sys.stdout, tbformat='default',
                 realtime=False, publisher=None):
        """
        Construct a L{SubunitReporter}.
        @param stream: A file-like object representing the stream to print
            output to. Defaults to stdout.
        @param tbformat: The format for tracebacks. Ignored, since subunit
            always uses Python's standard format.
        @param realtime: Whether or not to print exceptions in the middle
            of the test results. Ignored, since subunit always does this.
        @param publisher: The log publisher which will be preserved for
            reporting events. Ignored, as it's not relevant to subunit.
        """
        if TestProtocolClient is None:
            # The module-level import of subunit's TestProtocolClient failed.
            raise Exception("Subunit not available")
        self._subunit = TestProtocolClient(stream)
        self._successful = True
    def done(self):
        """
        Record that the entire test suite run is finished.
        We do nothing, since a summary clause is irrelevant to the subunit
        protocol.
        """
        pass
    def shouldStop(self):
        """
        Whether or not the test runner should stop running tests.
        """
        return self._subunit.shouldStop
    # Exposed as a read-only property to match the IReporter attribute.
    shouldStop = property(shouldStop)
    def stop(self):
        """
        Signal that the test runner should stop running tests.
        """
        return self._subunit.stop()
    def wasSuccessful(self):
        """
        Has the test run been successful so far?
        @return: C{True} if we have received no reports of errors or failures,
            C{False} otherwise.
        """
        # Subunit has a bug in its implementation of wasSuccessful, see
        # https://bugs.edge.launchpad.net/subunit/+bug/491090, so we can't
        # simply forward it on.
        return self._successful
    def startTest(self, test):
        """
        Record that C{test} has started.
        """
        return self._subunit.startTest(test)
    def stopTest(self, test):
        """
        Record that C{test} has completed.
        """
        return self._subunit.stopTest(test)
    def addSuccess(self, test):
        """
        Record that C{test} was successful.
        """
        return self._subunit.addSuccess(test)
    def addSkip(self, test, reason):
        """
        Record that C{test} was skipped for C{reason}.
        Some versions of subunit don't have support for addSkip. In those
        cases, the skip is reported as a success.
        @param test: A unittest-compatible C{TestCase}.
        @param reason: The reason for it being skipped. The C{str()} of this
            object will be included in the subunit output stream.
        """
        addSkip = getattr(self._subunit, 'addSkip', None)
        if addSkip is None:
            self.addSuccess(test)
        else:
            self._subunit.addSkip(test, reason)
    def addError(self, test, err):
        """
        Record that C{test} failed with an unexpected error C{err}.
        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addError(
            test, util.excInfoOrFailureToExcInfo(err))
    def addFailure(self, test, err):
        """
        Record that C{test} failed an assertion with the error C{err}.
        Also marks the run as being unsuccessful, causing
        L{SubunitReporter.wasSuccessful} to return C{False}.
        """
        self._successful = False
        return self._subunit.addFailure(
            test, util.excInfoOrFailureToExcInfo(err))
    def addExpectedFailure(self, test, failure, todo):
        """
        Record an expected failure from a test.
        Some versions of subunit do not implement this. For those versions, we
        record a success.
        """
        failure = util.excInfoOrFailureToExcInfo(failure)
        addExpectedFailure = getattr(self._subunit, 'addExpectedFailure', None)
        if addExpectedFailure is None:
            self.addSuccess(test)
        else:
            addExpectedFailure(test, failure)
    def addUnexpectedSuccess(self, test, todo=None):
        """
        Record an unexpected success.
        Since subunit has no way of expressing this concept, we record a
        success on the subunit stream.
        """
        # Not represented in pyunit/subunit.
        self.addSuccess(test)
class TreeReporter(Reporter):
    """
    Print out the tests in the form a tree.
    Tests are indented according to which class and module they belong.
    Results are printed in ANSI color.
    """
    # currentLine holds the most recently written text so endLine can pad
    # the status word out to 'columns'.
    currentLine = ''
    indent = '  '
    columns = 79
    # Color names for each outcome; looked up in the active colorizer.
    FAILURE = 'red'
    ERROR = 'red'
    TODO = 'blue'
    SKIP = 'blue'
    TODONE = 'red'
    SUCCESS = 'green'
    def __init__(self, stream=sys.stdout, *args, **kwargs):
        super(TreeReporter, self).__init__(stream, *args, **kwargs)
        self._lastTest = []
        # Pick the first colorizer that works on this stream; _NullColorizer
        # always reports supported, so the loop cannot fall through.
        for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
            if colorizer.supported(stream):
                self._colorizer = colorizer(stream)
                break
    def getDescription(self, test):
        """
        Return the name of the method which 'test' represents. This is
        what gets displayed in the leaves of the tree.
        e.g. getDescription(TestCase('test_foo')) ==> test_foo
        """
        return test.id().split('.')[-1]
    def addSuccess(self, test):
        super(TreeReporter, self).addSuccess(test)
        self.endLine('[OK]', self.SUCCESS)
    def addError(self, *args):
        super(TreeReporter, self).addError(*args)
        self.endLine('[ERROR]', self.ERROR)
    def addFailure(self, *args):
        super(TreeReporter, self).addFailure(*args)
        self.endLine('[FAIL]', self.FAILURE)
    def addSkip(self, *args):
        super(TreeReporter, self).addSkip(*args)
        self.endLine('[SKIPPED]', self.SKIP)
    def addExpectedFailure(self, *args):
        super(TreeReporter, self).addExpectedFailure(*args)
        self.endLine('[TODO]', self.TODO)
    def addUnexpectedSuccess(self, *args):
        super(TreeReporter, self).addUnexpectedSuccess(*args)
        self.endLine('[SUCCESS!?!]', self.TODONE)
    def _write(self, format, *args):
        """
        Write to the stream, remembering the text written so endLine can
        compute how much padding the status word needs.
        """
        if args:
            format = format % args
        self.currentLine = format
        super(TreeReporter, self)._write(self.currentLine)
    def _getPreludeSegments(self, testID):
        """
        Return a list of all non-leaf segments to display in the tree.
        Normally this is the module and class name.
        """
        segments = testID.split('.')[:-1]
        if len(segments) == 0:
            return segments
        # Collapse everything above the class into one dotted segment.
        segments = [
            seg for seg in ('.'.join(segments[:-1]), segments[-1])
            if len(seg) > 0]
        return segments
    def _testPrelude(self, testID):
        """
        Write the name of the test to the stream, indenting it appropriately.
        If the test is the first test in a new 'branch' of the tree, also
        write all of the parents in that branch.
        """
        segments = self._getPreludeSegments(testID)
        indentLevel = 0
        for seg in segments:
            if indentLevel < len(self._lastTest):
                # Only print segments that differ from the previous test.
                if seg != self._lastTest[indentLevel]:
                    self._write('%s%s\n' % (self.indent * indentLevel, seg))
            else:
                self._write('%s%s\n' % (self.indent * indentLevel, seg))
            indentLevel += 1
        self._lastTest = segments
    def cleanupErrors(self, errs):
        self._colorizer.write('    cleanup errors', self.ERROR)
        self.endLine('[ERROR]', self.ERROR)
        super(TreeReporter, self).cleanupErrors(errs)
    def upDownError(self, method, error, warn, printStatus):
        self._colorizer.write("  %s" % method, self.ERROR)
        if printStatus:
            self.endLine('[ERROR]', self.ERROR)
        super(TreeReporter, self).upDownError(method, error, warn, printStatus)
    def startTest(self, test):
        """
        Called when C{test} starts. Writes the tests name to the stream using
        a tree format.
        """
        self._testPrelude(test.id())
        self._write('%s%s ... ' % (self.indent * (len(self._lastTest)),
                                   self.getDescription(test)))
        super(TreeReporter, self).startTest(test)
    def endLine(self, message, color):
        """
        Print 'message' in the given color.
        @param message: A string message, usually '[OK]' or something similar.
        @param color: A string color, 'red', 'green' and so forth.
        """
        spaces = ' ' * (self.columns - len(self.currentLine) - len(message))
        super(TreeReporter, self)._write(spaces)
        self._colorizer.write(message, color)
        super(TreeReporter, self)._write("\n")
    def _printSummary(self):
        """
        Print a line summarising the test results to the stream, and color the
        status result.
        """
        summary = self._getSummary()
        if self.wasSuccessful():
            status = "PASSED"
            color = self.SUCCESS
        else:
            status = "FAILED"
            color = self.FAILURE
        self._colorizer.write(status, color)
        self._write("%s\n", summary)
| gpl-3.0 |
openilabs/falconlab | env/lib/python2.7/site-packages/Cython/Build/Tests/TestStripLiterals.py | 130 | 1552 | from Cython.Build.Dependencies import strip_string_literals
from Cython.TestUtils import CythonTest
class TestStripLiterals(CythonTest):
    """
    Tests for Cython.Build.Dependencies.strip_string_literals.
    Each case checks both directions: literal/comment contents are replaced
    by numbered "_Ln_" placeholders, and substituting the placeholders back
    restores the original input.
    """
    def t(self, before, expected):
        """
        Assert stripping C{before} yields C{expected} and round-trips.
        """
        actual, literals = strip_string_literals(before, prefix="_L")
        # Fix: assertEquals is a long-deprecated alias of assertEqual and
        # was removed entirely in Python 3.12.
        self.assertEqual(expected, actual)
        for key, value in literals.items():
            actual = actual.replace(key, value)
        self.assertEqual(before, actual)
    def test_empty(self):
        self.t("", "")
    def test_single_quote(self):
        self.t("'x'", "'_L1_'")
    def test_double_quote(self):
        self.t('"x"', '"_L1_"')
    def test_nested_quotes(self):
        self.t(""" '"' "'" """, """ '_L1_' "_L2_" """)
    def test_triple_quote(self):
        self.t(" '''a\n''' ", " '''_L1_''' ")
    def test_backslash(self):
        self.t(r"'a\'b'", "'_L1_'")
        self.t(r"'a\\'", "'_L1_'")
        self.t(r"'a\\\'b'", "'_L1_'")
    def test_unicode(self):
        self.t("u'abc'", "u'_L1_'")
    def test_raw(self):
        self.t(r"r'abc\\'", "r'_L1_'")
    def test_raw_unicode(self):
        self.t(r"ru'abc\\'", "ru'_L1_'")
    def test_comment(self):
        self.t("abc # foo", "abc #_L1_")
    def test_comment_and_quote(self):
        self.t("abc # 'x'", "abc #_L1_")
        self.t("'abc#'", "'_L1_'")
    def test_include(self):
        self.t("include 'a.pxi' # something here",
               "include '_L1_' #_L2_")
    def test_extern(self):
        self.t("cdef extern from 'a.h': # comment",
               "cdef extern from '_L1_': #_L2_")
| mit |
LaoZhongGu/kbengine | kbe/src/lib/python/Lib/multiprocessing/__init__.py | 5 | 7740 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70a1'
# Public names re-exported by this package; per the header comment, the
# package is intended to mirror threading.py's API plus process-specific
# helpers.
__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
    'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
    'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
    ]
__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    # Root of this package's exception hierarchy; the exceptions below
    # all derive from it.
    pass
class BufferTooShort(ProcessError):
    # NOTE(review): presumably raised when a receive buffer is too small
    # for an incoming message -- confirm against the raisers elsewhere in
    # the package.
    pass
class TimeoutError(ProcessError):
    # NOTE(review): presumably raised when a blocking operation exceeds its
    # timeout -- confirm against the raisers elsewhere in the package.
    pass
class AuthenticationError(ProcessError):
    # NOTE(review): presumably raised on connection authentication failure
    # -- confirm against the raisers elsewhere in the package.
    pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
    '''
    Returns a manager associated with a running server process
    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    # Imported lazily; creating the manager starts a server process.
    from multiprocessing.managers import SyncManager
    manager = SyncManager()
    manager.start()
    return manager
def Pipe(duplex=True):
    '''
    Return a pair of connection objects joined by a pipe.
    With duplex=True both ends can send and receive.
    '''
    from multiprocessing import connection
    return connection.Pipe(duplex)
def cpu_count():
    '''
    Returns the number of CPUs in the system
    '''
    num = 0
    platform = sys.platform
    if platform == 'win32':
        # Windows exposes the count through an environment variable.
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            pass
    elif 'bsd' in platform or platform == 'darwin':
        # BSD-style systems report the count via sysctl; on darwin the
        # binary lives under /usr/sbin instead of /sbin.
        command = '/sbin/sysctl -n hw.ncpu'
        if platform == 'darwin':
            command = '/usr' + command
        try:
            with os.popen(command) as pipe:
                num = int(pipe.read())
        except ValueError:
            pass
    else:
        # Other POSIX systems generally support this sysconf name.
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            pass
    if num < 1:
        raise NotImplementedError('cannot determine number of cpus')
    return num
def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    frozen = getattr(sys, 'frozen', False)
    # Only relevant for frozen Windows executables; a no-op elsewhere.
    if frozen and sys.platform == 'win32':
        from multiprocessing.forking import freeze_support
        freeze_support()
def get_logger():
    '''
    Return package logger -- if it does not already exist then it is created
    '''
    from multiprocessing import util
    return util.get_logger()
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    from multiprocessing import util
    return util.log_to_stderr(level)
def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes
    '''
    # importing the module registers the necessary reducers as a side effect
    from multiprocessing import reduction
#
# Definitions depending on native semaphores
#
def Lock():
    '''
    Returns a non-recursive lock object
    '''
    # imported lazily: synchronize needs working native semaphore support
    from multiprocessing.synchronize import Lock
    return Lock()
def RLock():
    '''
    Returns a recursive lock object
    '''
    # imported lazily: synchronize needs working native semaphore support
    from multiprocessing.synchronize import RLock
    return RLock()
def Condition(lock=None):
    '''
    Returns a condition object
    '''
    # lock may be an existing (R)Lock to wrap; None creates a fresh one
    from multiprocessing.synchronize import Condition
    return Condition(lock)
def Semaphore(value=1):
    '''
    Returns a semaphore object
    '''
    # value is the initial counter of the semaphore
    from multiprocessing.synchronize import Semaphore
    return Semaphore(value)
def BoundedSemaphore(value=1):
    '''
    Returns a bounded semaphore object
    '''
    # like Semaphore, but release() cannot push the counter above value
    from multiprocessing.synchronize import BoundedSemaphore
    return BoundedSemaphore(value)
def Event():
    '''
    Returns an event object
    '''
    # imported lazily: synchronize needs working native semaphore support
    from multiprocessing.synchronize import Event
    return Event()
def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    # maxsize <= 0 means the queue size is unbounded
    from multiprocessing.queues import Queue
    return Queue(maxsize)
def JoinableQueue(maxsize=0):
    '''
    Returns a queue object
    '''
    # a Queue variant that additionally supports task_done()/join()
    from multiprocessing.queues import JoinableQueue
    return JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocessing import pool
    return pool.Pool(processes, initializer, initargs, maxtasksperchild)
def RawValue(typecode_or_type, *args):
    '''
    Returns a shared object
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
    '''
    Returns a synchronized shared object
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Returns a synchronized shared array
    '''
    from multiprocessing import sharedctypes
    return sharedctypes.Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
# Windows has no fork(); children are started by spawning a new interpreter,
# so embedders need a way to point at the correct executable.
if sys.platform == 'win32':
    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)
    # only exported on Windows, where it is defined
    __all__ += ['set_executable']
| lgpl-3.0 |
Griffon26/scrumboardtracker | scrumboardtracker/processioprotocol.py | 2 | 1682 | # Copyright 2016 Maurice van der Pot <griffon26@kfk4ever.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from twisted.internet import error, protocol
class ProcessIOProtocol(protocol.ProcessProtocol):
    """Twisted process protocol that writes a fixed stdin payload to a
    child process and buffers its stdout/stderr until it exits.

    Exactly one of the two callbacks is invoked with the captured
    (stdout, stderr) pair, depending on the child's exit status.
    """
    def __init__(self, stdin, process_success_cb, process_failure_cb, print_stderr=False):
        # stdin: payload written to the child, after which stdin is closed
        self.stdin = stdin
        # accumulated output (NOTE(review): '' + data assumes str chunks,
        # i.e. a Python 2 / bytes-as-str environment -- confirm under py3)
        self.stdout = ''
        self.stderr = ''
        self.status = None
        # when True, mirror the child's stderr live as well as buffering it
        self.print_stderr = print_stderr
        self.process_success_cb = process_success_cb
        self.process_failure_cb = process_failure_cb
    def connectionMade(self):
        # push the whole payload at once and signal EOF to the child
        self.transport.write(self.stdin)
        self.transport.closeStdin()
    def outReceived(self, data):
        self.stdout += data
    def errReceived(self, data):
        self.stderr += data
        if self.print_stderr:
            sys.stderr.write(data)
    def processEnded(self, reason):
        # ProcessDone means exit status 0; anything else counts as failure
        if isinstance(reason.value, error.ProcessDone):
            self.process_success_cb(self.stdout, self.stderr)
        else:
            self.process_failure_cb(self.stdout, self.stderr)
| gpl-3.0 |
adzanette/scf-extractor | scf-extractor/reader/NILCLexiconIterator.py | 1 | 1264 |
from modules.Configuration import config
from models.scf import Sentence
from CorpusIterator import *
from FileCorpusIterator import FileCorpusIterator
import re
## Reads a NILC lexicon file
# @author Adriano Zanette
# @version 0.1
class Iterator(CorpusIterator):

  ## Class constructor
  # @author Adriano Zanette
  # @version 0.1
  # @return Iterator
  def __init__(self):
    # Matches lexicon entries describing verbs, capturing the verb, its
    # subcategorization codes and the prepositions it licenses.
    # NOTE: the re.L (locale) flag was dropped: the pattern only uses
    # explicit character classes, so matching is unchanged, and re.L is
    # rejected for str patterns on Python 3.
    self.reVerbs = re.compile(r'^(?P<verb>.+)=<V\.\[(?P<subs>[A-Z.]+)\].+N\.\[(?P<preps>[^\]]*)\]')
    self.id = 1
    path = config.corpora.path
    self.corpus = open(path)

  ## Create a new sentence
  # @author Adriano Zanette
  # @version 0.1
  # @param raw original lexicon line
  # @param parsed parsed representation (same line for this corpus)
  # @return Sentence
  def makeSentence(self, raw, parsed):
    sentence = Sentence()
    sentence.id = self.id
    sentence.raw = raw
    sentence.parsed = parsed
    self.id = self.id + 1
    return sentence

  ## It gets the next sentence
  # @author Adriano Zanette
  # @version 0.1
  # @return Sentence
  def next(self):
    # Iterate instead of recursing: the previous self-recursive version
    # could exhaust the interpreter stack on long runs of non-verb lines.
    while True:
      line = self.corpus.readline()
      if not line:
        raise StopIteration
      if self.reVerbs.search(line):
        return self.makeSentence(line, line)
| mit |
vkosuri/pyang | pyang/types.py | 1 | 29954 | """YANG built-in types"""
from .error import err_add
from . import util
from . import syntax
import base64
from xml.sax.saxutils import quoteattr
from xml.sax.saxutils import escape
try:
# python 2
from StringIO import StringIO
except ImportError:
# python 3
from io import StringIO
class Abort(Exception):
    """Internal control-flow exception used to bail out of path parsing."""
    pass
class TypeSpec(object):
    """Base class for YANG type specifications.

    Provides the permissive defaults: values pass through unparsed,
    everything validates, and no restriction statements are allowed.
    """
    def __init__(self, name):
        self.definition = ""
        self.name = name
        self.base = None
    def str_to_val(self, errors, pos, str):
        # default: the string itself is the value
        return str
    def validate(self, errors, pos, val, errstr=''):
        # default: every value is valid
        return True
    def restrictions(self):
        # default: no restriction statements may be applied
        return []
class IntTypeSpec(TypeSpec):
    """Type spec for the builtin integer types, bounded by [min, max]."""
    def __init__(self, name, min, max):
        TypeSpec.__init__(self, name)
        self.min = min
        self.max = max
    def str_to_val(self, errors, pos, s):
        # symbolic range bounds are passed through untouched
        if s in ('min', 'max'):
            return s
        if syntax.re_integer.search(s) is not None:
            try:
                # base 0 accepts decimal, hex (0x..) and octal forms
                return int(s, 0)
            except ValueError:
                pass
        err_add(errors, pos, 'TYPE_VALUE',
                (s, self.definition, 'not an integer'))
        return None
    def validate(self, errors, pos, val, errstr=''):
        if self.min <= val <= self.max:
            return True
        err_add(errors, pos, 'TYPE_VALUE',
                (str(val), self.definition, 'range error' + errstr))
        return False
    def restrictions(self):
        return ['range']
class Decimal64Value(object):
    """A YANG decimal64 value stored as a scaled integer.

    `value` is the decimal shifted left by the number of fraction digits;
    `s` is the canonical string form.  Construct with either `s` directly
    or `fd` (fraction-digits, >= 1 per RFC 6020) so the string can be
    derived.  Equality and ordering compare only `value`.
    """
    def __init__(self, value, s=None, fd=None):
        # must set s (string repr) OR fd (fraction-digits)
        self.value = value
        self.s = s
        if s is None and fd is not None:
            sign = '-' if value < 0 else ''
            # Zero-pad the magnitude so there is always at least one
            # integral digit, and keep the sign out of the digit slicing.
            # Fixes malformed strings for small magnitudes: previously
            # value=5, fd=2 gave ".5" and value=-5, fd=2 gave ".-5";
            # now they render as "0.05" and "-0.05".  Values with enough
            # digits are rendered exactly as before.
            digits = str(abs(value)).rjust(fd + 1, '0')
            self.s = sign + digits[:-fd] + "." + digits[-fd:]
    def __str__(self):
        return self.s
    def __cmp__(self, other):
        # Python 2 fallback ordering; Python 3 uses the rich comparisons.
        if not isinstance(other, Decimal64Value):
            return -1
        if self.value < other.value:
            return -1
        elif self.value == other.value:
            return 0
        else:
            return 1
    def __eq__(self, other):
        if not isinstance(other, Decimal64Value):
            return False
        return self.value == other.value
    def __ne__(self, other):
        if not isinstance(other, Decimal64Value):
            return True
        return self.value != other.value
    def __lt__(self, other):
        # by convention non-Decimal64Value operands compare as "greater"
        if not isinstance(other, Decimal64Value):
            return True
        return self.value < other.value
    def __le__(self, other):
        if not isinstance(other, Decimal64Value):
            return True
        return self.value <= other.value
    def __gt__(self, other):
        if not isinstance(other, Decimal64Value):
            return False
        return self.value > other.value
    def __ge__(self, other):
        if not isinstance(other, Decimal64Value):
            return False
        return self.value >= other.value
class Decimal64TypeSpec(TypeSpec):
    """Type spec for 'decimal64'; values are scaled integers (Decimal64Value)."""
    def __init__(self, fraction_digits):
        TypeSpec.__init__(self, 'decimal64')
        # fraction_digits is the 'fraction-digits' substatement
        self.fraction_digits = int(fraction_digits.arg)
        # bounds of the underlying 64-bit integer, rendered with the
        # configured number of fraction digits
        self.min = Decimal64Value(-9223372036854775808, fd=self.fraction_digits)
        self.max = Decimal64Value(9223372036854775807, fd=self.fraction_digits)
    def str_to_val(self, errors, pos, s0):
        if s0 in ('min', 'max'):
            return s0
        # make sure it is syntactically correct
        if syntax.re_decimal.search(s0) is None:
            err_add(errors, pos, 'TYPE_VALUE',
                    (s0, self.definition, 'not a decimal'))
            return None
        if s0[0] == '-':
            is_negative = True
            s = s0[1:]
        else:
            is_negative = False
            s = s0
        p = s.find('.')
        if p == -1:
            # no fraction part given: scale up by fraction_digits
            v = int(s)
            i = self.fraction_digits
            while i > 0:
                v = v * 10
                i -= 1
        else:
            # start from the integral part, then fold in one fraction
            # digit per allowed fraction-digit position
            v = int(s[:p])
            i = self.fraction_digits
            j = p + 1
            # slen = len(s.rstrip('0')) # ignore trailing zeroes
            # No, do not ignore trailing zeroes!
            slen = len(s)
            while i > 0:
                v *= 10
                i -= 1
                if j < slen:
                    v += int(s[j])
                    j += 1
            if j < slen:
                # more fraction digits supplied than the type allows
                err_add(errors, pos, 'TYPE_VALUE',
                        (s, self.definition, 'too many fraction digits'))
                return None
        if is_negative:
            v = -v
        return Decimal64Value(v, s=s0)
    def validate(self, errors, pos, val, errstr = ''):
        if val < self.min or val > self.max:
            err_add(errors, pos, 'TYPE_VALUE',
                    (str(val), self.definition, 'range error' + errstr))
            return False
        else:
            return True
    def restrictions(self):
        return ['range']
class BooleanTypeSpec(TypeSpec):
    """Type spec for the YANG 'boolean' builtin ('true' / 'false')."""
    def __init__(self):
        TypeSpec.__init__(self, 'boolean')
    def str_to_val(self, errors, pos, str):
        # only the two lowercase YANG literals are accepted
        if str == 'true':
            return True
        if str == 'false':
            return False
        err_add(errors, pos, 'TYPE_VALUE',
                (str, self.definition, 'not a boolean'))
        return None
class StringTypeSpec(TypeSpec):
    """Type spec for the YANG 'string' builtin."""
    def __init__(self):
        TypeSpec.__init__(self, 'string')
    def restrictions(self):
        # strings may be restricted by 'pattern' and 'length' statements
        return ['pattern', 'length']
class BinaryTypeSpec(TypeSpec):
    """Type spec for the YANG 'binary' builtin (base64-encoded octets)."""
    def __init__(self):
        TypeSpec.__init__(self, 'binary')
    def str_to_val(self, errors, pos, s):
        """Decode a base64 string; report an error and return None if invalid."""
        try:
            return base64.b64decode(s)
        # Catch only real decode failures (binascii.Error / TypeError,
        # depending on the Python version).  The previous bare 'except:'
        # also swallowed KeyboardInterrupt and SystemExit.
        except Exception:
            err_add(errors, pos, 'TYPE_VALUE',
                    (s, '', 'bad base64 value'))
            return None
    def restrictions(self):
        return ['length']
class EmptyTypeSpec(TypeSpec):
    """Type spec for the YANG 'empty' builtin, which carries no value."""
    def __init__(self):
        TypeSpec.__init__(self, 'empty')
    def str_to_val(self, errors, pos, str):
        # an 'empty' leaf cannot have a default value at all
        err_add(errors, pos, 'BAD_DEFAULT_VALUE', 'empty')
        return None
class IdentityrefTypeSpec(TypeSpec):
    """Type spec for 'identityref'.

    A value is valid if it names an identity derived from every one of
    the given base identities (`idbases`, the type's 'base' statements).
    """
    def __init__(self, idbases):
        TypeSpec.__init__(self, 'identityref')
        self.idbases = idbases
    def str_to_val(self, errors, pos, s):
        # split an optional "prefix:" off the identity name
        if s.find(":") == -1:
            prefix = None
            name = s
        else:
            [prefix, name] = s.split(':', 1)
        if prefix is None or self.idbases[0].i_module.i_prefix == prefix:
            # check local identities
            pmodule = self.idbases[0].i_module
        else:
            # this is a prefixed name, check the imported modules
            pmodule = util.prefix_to_module(self.idbases[0].i_module, prefix,
                                            pos, errors)
            if pmodule is None:
                return None
        if name not in pmodule.i_identities:
            err_add(errors, pos, 'TYPE_VALUE',
                    (s, self.definition, 'identityref not found'))
            return None
        val = pmodule.i_identities[name]
        # the identity must be derived from every declared base
        for idbase in self.idbases:
            my_identity = idbase.i_identity
            if not is_derived_from(val, my_identity):
                err_add(errors, pos, 'TYPE_VALUE',
                        (s, self.definition,
                         'identityref not derived from %s' % \
                         my_identity.arg))
                return None
        else:
            # for-else: reached when every base matched
            return val
def is_derived_from(a, b):
    """Return True if identity `a` is (transitively) derived from `b`.

    An identity is never considered derived from itself.
    """
    if a == b:
        return False
    return is_derived_from_or_self(a, b, [])
def is_derived_from_or_self(a, b, visited):
    """Return True if identity `a` is `b` or transitively derived from it.

    `visited` accumulates identities already expanded, so cycles in the
    (possibly multiple) 'base' statements cannot cause infinite recursion.
    """
    if a == b:
        return True
    for base_stmt in a.search('base'):
        base_identity = base_stmt.i_identity
        if base_identity in visited:
            continue
        visited.append(base_identity)
        if is_derived_from_or_self(base_identity, b, visited):
            return True
    return False
## type restrictions
def validate_range_expr(errors, stmt, type_):
    """Parse a 'range' statement argument into (lo, hi) pairs and check them.

    Every bound is converted with the base type's str_to_val; a single
    value (e.g. the "4" in "4 | 5..6") yields (value, None).  Returns the
    result of validate_ranges(), i.e. (ranges, pos) or None on error.
    """
    def make_pair(lostr, histr):
        spec = type_.i_type_spec
        lo = spec.str_to_val(errors, stmt.pos, lostr)
        if histr == '':
            # a single number appeared in the range expression
            return (lo, None)
        return (lo, spec.str_to_val(errors, stmt.pos, histr))
    ranges = [make_pair(m[1], m[6])
              for m in syntax.re_range_part.findall(stmt.arg)]
    return validate_ranges(errors, stmt.pos, ranges, type_)
def validate_ranges(errors, pos, ranges, type_):
    """Check that range bounds fit the type and are strictly increasing.

    Returns (ranges, pos) on success, or None after reporting an error.
    """
    # make sure the range values are of correct type and increasing
    cur_lo = None
    for (lo, hi) in ranges:
        if lo != 'min' and lo != 'max' and lo != None:
            type_.i_type_spec.validate(errors, pos, lo)
        if hi != 'min' and hi != 'max' and hi != None:
            type_.i_type_spec.validate(errors, pos, hi)
        # check that cur_lo < lo < hi
        if not is_smaller(cur_lo, lo):
            err_add(errors, pos, 'RANGE_BOUNDS', (str(lo), cur_lo))
            return None
        if not is_smaller(lo, hi):
            err_add(errors, pos, 'RANGE_BOUNDS', (str(hi), str(lo)))
            return None
        if hi == None:
            # single-value part: the value itself becomes the new floor
            cur_lo = lo
        else:
            cur_lo = hi
    return (ranges, pos)
class RangeTypeSpec(TypeSpec):
    """A numeric type spec restricted by a 'range' statement."""
    def __init__(self, base, range_spec):
        TypeSpec.__init__(self, base.name)
        self.base = base
        (ranges, ranges_pos) = range_spec
        self.ranges = ranges
        self.ranges_pos = ranges_pos
        if ranges != []:
            # derive new min/max from the outermost bounds, resolving the
            # symbolic 'min'/'max' against the base type's bounds
            self.min = ranges[0][0]
            if self.min == 'min':
                self.min = base.min
            self.max = ranges[-1][1]
            if self.max == None: # single range
                self.max = ranges[-1][0]
            if self.max == 'max':
                self.max = base.max
        else:
            self.min = base.min
            self.max = base.max
        if hasattr(base, 'fraction_digits'):
            # propagate decimal64 precision from the base
            self.fraction_digits = base.fraction_digits
    def str_to_val(self, errors, pos, str):
        return self.base.str_to_val(errors, pos, str)
    def validate(self, errors, pos, val, errstr=''):
        if self.base.validate(errors, pos, val, errstr) == False:
            return False
        # the value is valid if it falls inside any of the sub-ranges
        for (lo, hi) in self.ranges:
            if ((lo == 'min' or lo == 'max' or val >= lo) and
                ((hi is None and val == lo) or hi == 'max' or \
                 (hi is not None and val <= hi))):
                return True
        err_add(errors, pos, 'TYPE_VALUE',
                (str(val), self.definition, 'range error' + errstr +
                 ' for range defined at ' + str(self.ranges_pos)))
        return False
    def restrictions(self):
        return self.base.restrictions()
def validate_length_expr(errors, stmt):
    """Parse and check a 'length' statement argument.

    Returns (lengths, pos) where lengths is a list of (lo, hi) pairs
    (hi is None for single values), or None if the expression is invalid.
    """
    def f(lostr, histr):
        # parse one "lo..hi" (or single "lo") part; report non-integers
        try:
            if lostr in ['min', 'max']:
                lo = lostr
            else:
                lo = int(lostr)
        except ValueError:
            err_add(errors, stmt.pos, 'TYPE_VALUE',
                    (lostr, '', 'not an integer'))
            return (None, None)
        try:
            if histr == '':
                # this means that a single number was in the length, e.g.
                # "4 | 5..6".
                return (lo, None)
            if histr in ['min', 'max']:
                hi = histr
            else:
                hi = int(histr)
        except ValueError:
            err_add(errors, stmt.pos, 'TYPE_VALUE',
                    (histr, '', 'not an integer'))
            # BUG FIX: previously returned bare None here, which crashed
            # the (lo, hi) unpacking loop below on a bad upper bound;
            # return a pair like the handler above so checking continues.
            return (None, None)
        return (lo, hi)
    lengths = [f(m[1], m[3]) for m in syntax.re_length_part.findall(stmt.arg)]
    # make sure the length values are of correct type and increasing
    cur_lo = None
    for (lo, hi) in lengths:
        # check that cur_lo < lo < hi
        if not is_smaller(cur_lo, lo):
            err_add(errors, stmt.pos, 'LENGTH_BOUNDS', (str(lo), cur_lo))
            return None
        if not is_smaller(lo, hi):
            err_add(errors, stmt.pos, 'LENGTH_BOUNDS', (str(hi), str(lo)))
            return None
        # FIXME: we should check that the lengths are just restrictions
        # of any base type's lengths. Have to figure out some way to do
        # that... currently we can't check just length values; we'd have
        # to pass just the length integer to typespec.validate(). Or
        # something...
        if hi is None:
            cur_lo = lo
        else:
            cur_lo = hi
        # lengths are uint64 values; reject anything above 2^64 - 1
        if isinstance(cur_lo, int) and cur_lo > 18446744073709551615:
            err_add(errors, stmt.pos, 'LENGTH_VALUE', str(cur_lo))
            return None
    return (lengths, stmt.pos)
class LengthTypeSpec(TypeSpec):
    """A string/binary type spec restricted by a 'length' statement."""
    def __init__(self, base, length_spec):
        TypeSpec.__init__(self, base.name)
        self.base = base
        (lengths, length_pos) = length_spec
        self.lengths = lengths
        self.length_pos = length_pos
    def str_to_val(self, errors, pos, str):
        return self.base.str_to_val(errors, pos, str)
    def validate(self, errors, pos, val, errstr=''):
        if self.base.validate(errors, pos, val, errstr) == False:
            return False
        # the restriction applies to the length of the value
        vallen = len(val)
        for (lo, hi) in self.lengths:
            if ((lo == 'min' or vallen >= lo) and
                ((hi is None and vallen == lo) or hi == 'max' or
                 (hi is not None and vallen <= hi))):
                return True
        err_add(errors, pos, 'TYPE_VALUE',
                (val, self.definition, 'length error' + errstr +
                 ' for length defined at ' + str(self.length_pos)))
        return False
    def restrictions(self):
        return self.base.restrictions()
def _validate_pattern_libxml2(errors, stmt, invert_match):
    """Try to compile the pattern with libxml2.

    Returns a ('libxml2', regexp, pos, invert_match) tuple on success,
    None if the pattern is invalid, or False if libxml2 is unavailable.
    """
    try:
        import libxml2
        try:
            re = libxml2.regexpCompile(stmt.arg)
            return ('libxml2', re, stmt.pos, invert_match)
        except libxml2.treeError as v:
            err_add(errors, stmt.pos, 'PATTERN_ERROR', str(v))
            return None
    except ImportError:
        ## Do not report a warning in this case. Maybe we should add some
        ## flag to turn on this warning...
        # err_add(errors, stmt.pos, 'PATTERN_FAILURE',
        #         "Could not import python module libxml2 "
        #         "(see http://xmlsoft.org for installation help)")
        return False
def _validate_pattern_lxml(errors, stmt, invert_match):
    """Try to compile the pattern with lxml.

    The pattern is embedded in a minimal XSD schema so lxml checks it as
    a W3C XML Schema regular expression.  Returns an ('lxml', schema,
    pos, invert_match) tuple on success, None if the pattern is invalid,
    or False if lxml is unavailable.
    """
    try:
        import lxml.etree
        doc = StringIO(
            '<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema">' \
            ' <xsd:element name="a" type="x"/>' \
            ' <xsd:simpleType name="x">' \
            ' <xsd:restriction base="xsd:string">' \
            ' <xsd:pattern value=%s/>' \
            ' </xsd:restriction>' \
            ' </xsd:simpleType>' \
            ' </xsd:schema>' % quoteattr(stmt.arg))
        try:
            sch = lxml.etree.XMLSchema(lxml.etree.parse(doc))
            return ('lxml', sch, stmt.pos, invert_match)
        except lxml.etree.XMLSchemaParseError as v:
            err_add(errors, stmt.pos, 'PATTERN_ERROR', str(v))
            return None
    except ImportError:
        return False
def validate_pattern_expr(errors, stmt):
    """Compile a 'pattern' statement with whichever XSD-regex engine is
    available (lxml first, then libxml2).

    Returns an engine-specific checker tuple for PatternTypeSpec, None if
    the pattern is invalid, or None as well when no engine is installed
    (in which case patterns simply go unchecked).
    """
    invert = stmt.search_one('modifier', arg='invert-match') is not None
    ## check that it's syntactically correct
    # a backend returning False means "not installed"; try the next one
    for compile_with in (_validate_pattern_lxml, _validate_pattern_libxml2):
        res = compile_with(errors, stmt, invert)
        if res is not False:
            return res
    # Otherwise we can't validate patterns :(
    return None
class PatternTypeSpec(TypeSpec):
    """A string type spec restricted by one or more 'pattern' statements."""
    def __init__(self, base, pattern_specs):
        TypeSpec.__init__(self, base.name)
        self.base = base
        # res: list of (engine, compiled, pos, invert_match) tuples as
        # produced by validate_pattern_expr()
        self.res = pattern_specs
    def str_to_val(self, errors, pos, str):
        return self.base.str_to_val(errors, pos, str)
    def validate(self, errors, pos, val, errstr=''):
        if self.base.validate(errors, pos, val, errstr) == False:
            return False
        for (type_, re, re_pos, invert_match) in self.res:
            if type_ == 'libxml2':
                is_valid = re.regexpExec(val) == 1
            elif type_ == 'lxml':
                # lxml validates the value wrapped in a tiny document
                # against the schema built around the pattern
                import lxml
                doc = StringIO('<a>%s</a>' % escape(val))
                is_valid = re.validate(lxml.etree.parse(doc))
            # NOTE(review): if type_ were anything else, is_valid would be
            # unbound below; only 'libxml2' and 'lxml' are ever produced.
            if ((not is_valid and not invert_match) or
                (is_valid and invert_match)):
                err_add(errors, pos, 'TYPE_VALUE',
                        (val, self.definition, 'pattern mismatch' + errstr +
                         ' for pattern defined at ' + str(re_pos)))
                return False
        return True
    def restrictions(self):
        return self.base.restrictions()
def validate_enums(errors, enums, stmt):
    """Check 'enum' statements: unique names/values, int32 range,
    auto-assignment of missing values.

    Sets e.i_value on every enum and returns the list.
    """
    # make sure all names and values given are unique
    names = {}
    values = {}
    next = 0
    for e in enums:
        # for derived enumerations, make sure the enum is defined
        # in the base
        stmt.i_type_spec.validate(errors, e.pos, e.arg)
        e.i_value = None
        value = e.search_one('value')
        if value is not None:
            try:
                x = int(value.arg)
                # for derived enumerations, make sure the value isn't changed
                oldval = stmt.i_type_spec.get_value(e.arg)
                if oldval is not None and oldval != x:
                    err_add(errors, value.pos, 'BAD_ENUM_VALUE',
                            (value.arg, oldval))
                e.i_value = x
                # enum values must fit in an int32
                if x < -2147483648 or x > 2147483647:
                    raise ValueError
                if x >= next:
                    next = x + 1
                if x in values:
                    err_add(errors, value.pos, 'DUPLICATE_ENUM_VALUE',
                            (x, values[x]))
                else:
                    values[x] = value.pos
            except ValueError:
                err_add(errors, value.pos, 'ENUM_VALUE', value.arg)
        else:
            # auto-assign a value
            values[next] = e.pos
            if next > 2147483647:
                err_add(errors, e.pos, 'ENUM_VALUE', str(next))
            e.i_value = next
            next = next + 1
        if e.arg in names:
            err_add(errors, e.pos, 'DUPLICATE_ENUM_NAME', (e.arg, names[e.arg]))
        else:
            names[e.arg] = e.pos
    # check status (here??)
    return enums
class EnumerationTypeSpec(TypeSpec):
    """Type spec for a bare 'enumeration' builtin (no enums attached yet)."""
    def __init__(self):
        TypeSpec.__init__(self, 'enumeration')
    def get_value(self, val):
        # no enums are known at this level; EnumTypeSpec overrides this
        return None
    def restrictions(self):
        return ['enum']
class EnumTypeSpec(TypeSpec):
    """Type spec for an enumeration with a concrete (name, value) list."""
    def __init__(self, base, enums):
        TypeSpec.__init__(self, base.name)
        self.base = base
        self.enums = [(e.arg, e.i_value) for e in enums]
    def validate(self, errors, pos, val, errstr = ''):
        if util.keysearch(val, 0, self.enums) is not None:
            return True
        err_add(errors, pos, 'TYPE_VALUE',
                (val, self.definition, 'enum not defined' + errstr))
        return False
    def get_value(self, val):
        # map an enum name to its numeric value, or None if unknown
        entry = util.keysearch(val, 0, self.enums)
        if entry is None:
            return None
        return entry[1]
    def restrictions(self):
        return self.base.restrictions()
def validate_bits(errors, bits, stmt):
    """Check 'bit' statements: unique names/positions, uint32 range,
    auto-assignment of missing positions.

    Sets b.i_position on every bit and returns the list.
    """
    # make sure all names and positions given are unique
    names = {}
    values = {}
    next = 0
    for b in bits:
        # for derived bits, make sure the bit is defined
        # in the base
        stmt.i_type_spec.validate(errors, b.pos, [b.arg])
        position = b.search_one('position')
        if position is not None:
            try:
                x = int(position.arg)
                # for derived bits, make sure the position isn't changed
                oldpos = stmt.i_type_spec.get_position(b.arg)
                if oldpos is not None and oldpos != x:
                    err_add(errors, position.pos, 'BAD_BIT_POSITION',
                            (position.arg, oldpos))
                b.i_position = x
                # bit positions must fit in a uint32
                if x < 0 or x > 4294967295:
                    raise ValueError
                if x >= next:
                    next = x + 1
                if x in values:
                    err_add(errors, position.pos, 'DUPLICATE_BIT_POSITION',
                            (x, values[x]))
                else:
                    values[x] = position.pos
            except ValueError:
                err_add(errors, position.pos, 'BIT_POSITION', position.arg)
        else:
            # auto-assign a value
            values[next] = b.pos
            b.i_position = next
            next = next + 1
        if b.arg in names:
            err_add(errors, b.pos, 'DUPLICATE_BIT_NAME', (b.arg, names[b.arg]))
        else:
            names[b.arg] = b.pos
    # check status (here??)
    return bits
class BitsTypeSpec(TypeSpec):
    """Type spec for a bare 'bits' builtin (no bits attached yet)."""
    def __init__(self):
        TypeSpec.__init__(self, 'bits')
    def get_position(self, bit):
        # no bits are known at this level; BitTypeSpec overrides this
        return None
    def restrictions(self):
        return ['bit']
class BitTypeSpec(TypeSpec):
    """Type spec for a 'bits' type with a concrete (name, position) list."""
    def __init__(self, base, bits):
        TypeSpec.__init__(self, base.name)
        self.base = base
        self.bits = [(b.arg, b.i_position) for b in bits]
    def str_to_val(self, errors, pos, str):
        # a bits value is a whitespace-separated list of bit names
        return str.split()
    def validate(self, errors, pos, val, errstr = ''):
        for name in val:
            if util.keysearch(name, 0, self.bits) is None:
                err_add(errors, pos, 'TYPE_VALUE',
                        (name, self.definition, 'bit not defined' + errstr))
                return False
        return True
    def get_position(self, bit):
        # map a bit name to its position, or None if unknown
        entry = util.keysearch(bit, 0, self.bits)
        if entry is None:
            return None
        return entry[1]
    def restrictions(self):
        return self.base.restrictions()
def validate_path_expr(errors, path):
    """Parse the argument of a leafref 'path' statement.

    Returns (up, dn, derefup, derefdn): `up` is the number of leading
    '../' steps (-1 marks an absolute path), `dn` the list of node
    identifiers (with embedded key-predicate tuples), and derefup/derefdn
    describe an optional leading deref(...) expression.  Returns None if
    parsing fails (unknown prefix or malformed identifier).
    """
    # FIXME: rewrite using the new xpath tokenizer
    # PRE: s matches syntax.path_arg
    # -type dn [identifier | ('predicate', identifier, up::int(), [identifier])]
    # Ret: (up::int(),
    #       dn::dn(),
    #       derefup::int(),
    #       derefdn::dn())
    def parse_keypath(s):
        def parse_dot_dot(s):
            # count leading '../' steps; a bare leading '/' means absolute
            up = 0
            i = 0
            while True:
                if s[i] == '.' and s[i+1] == '.':
                    up = up + 1
                    i = i + 3  # skip the '/'
                elif s[i] == '/':
                    i = i + 1  # skip the '/'
                    if up == 0: # absolute path
                        up = -1
                    break
                elif s[i].isspace():
                    i = i + 1
                else:
                    # s points to an identifier
                    break
            return (up, s[i:])
        def skip_space(s):
            if len(s) == 0:
                return s
            i = 0
            while s[i].isspace():
                i = i + 1
            return s[i:]
        def parse_identifier(s):
            # "[prefix:]identifier"; an unknown prefix aborts parsing
            m = syntax.re_keyword_start.match(s)
            if m is None:
                raise Abort
            s = s[m.end():]
            if m.group(2) is None:
                # no prefix
                return (m.group(3), s)
            else:
                prefix = m.group(2)
                mod = util.prefix_to_module(path.i_module, prefix,
                                            path.pos, errors)
                if mod is not None:
                    return ((m.group(2), m.group(3)), s)
                else:
                    raise Abort
        def parse_key_predicate(s):
            # "[key = current()/../x/y]" style predicate; returns a
            # ('predicate', key, up, dn) tuple plus the remaining input
            s = s[1:] # skip '['
            s = skip_space(s)
            (identifier, s) = parse_identifier(s)
            s = skip_space(s)
            s = s[1:] # skip '='
            s = skip_space(s)
            if s[:7] == 'current':
                s = s[7:] # skip 'current'
                s = skip_space(s)
                s = s[1:] # skip '('
                s = skip_space(s)
                s = s[1:] # skip ')'
                s = skip_space(s)
                s = s[1:] # skip '/'
                s = skip_space(s)
                (up, s) = parse_dot_dot(s)
                s = skip_space(s)
            else:
                # not a current() expression: skip to just past the ']'
                up = -1
                b = s.find(']') + 1
                s = s[b:]
                if len(s) > 0 and s[0] == '/':
                    s = s[1:] # skip '/'
            # collect the descendant path inside the predicate
            dn = []
            while len(s) > 0:
                (xidentifier, s) = parse_identifier(s)
                dn.append(xidentifier)
                s = skip_space(s)
                if len(s) == 0:
                    break
                if s[0] == '/':
                    s = s[1:] # skip '/'
                elif s[0] == ']':
                    s = s[1:] # skip ']'
                    break
            return (('predicate', identifier, up, dn), s)
        def parse_descendant(s):
            # identifiers separated by '/', each optionally followed by
            # one or more key predicates
            dn = []
            # all '..'s are now parsed
            while len(s) > 0 and (not s[0].isspace()) and s[0] != ')':
                (identifier, s) = parse_identifier(s)
                dn.append(identifier)
                s = skip_space(s)
                if len(s) == 0:
                    break
                while len(s) > 0 and s[0] == '[':
                    (pred, s) = parse_key_predicate(s)
                    dn.append(pred)
                    s = skip_space(s)
                if len(s) > 0 and s[0] == '/':
                    s = s[1:] # skip '/'
            return (dn, s)
        # optional leading "deref( <path> )/" wrapper
        derefup = 0
        derefdn = None
        if s.startswith('deref'):
            s = s[5:] # skip 'deref'
            s = skip_space(s)
            s = s[1:] # skip '('
            s = skip_space(s)
            (derefup, s) = parse_dot_dot(s)
            (derefdn, s) = parse_descendant(s)
            s = skip_space(s)
            s = s[1:] # skip ')'
            s = skip_space(s)
            s = s[1:] # skip '/'
        (up, s) = parse_dot_dot(s)
        (dn, s) = parse_descendant(s)
        return (up, dn, derefup, derefdn)
    try:
        return parse_keypath(path.arg)
    except Abort:
        return None
class LeafrefTypeSpec(TypeSpec):
    """Type spec for the 'leafref' builtin; the target is given by 'path'."""
    def __init__(self):
        TypeSpec.__init__(self, 'leafref')
        # by default the referenced instance must exist
        self.require_instance = True
    def restrictions(self):
        return ['path', 'require-instance']
class InstanceIdentifierTypeSpec(TypeSpec):
    """Type spec for the 'instance-identifier' builtin."""
    def __init__(self):
        TypeSpec.__init__(self, 'instance-identifier')
        # by default the referenced instance must exist
        self.require_instance = True
    def restrictions(self):
        return ['require-instance']
class PathTypeSpec(TypeSpec):
    """Type spec for a leafref together with its parsed 'path'.

    Once the target node has been resolved (the i_target_node attribute
    is set during validation), value handling is delegated to the target
    leaf's type; before that, values pass through unchecked.
    """
    def __init__(self, base, path_spec, path, pos):
        TypeSpec.__init__(self, base.name)
        self.require_instance = True
        self.base = base
        # parsed representation produced by validate_path_expr()
        self.path_spec = path_spec
        # the original 'path' statement and its source position
        self.path_ = path
        self.pos = pos
    def str_to_val(self, errors, pos, str_):
        if hasattr(self, 'i_target_node'):
            return self.i_target_node.search_one('type').\
                i_type_spec.str_to_val(errors, pos, str_)
        else:
            # if a default value is verified
            return str_
    def validate(self, errors, pos, val, errstr = ''):
        if hasattr(self, 'i_target_node'):
            return self.i_target_node.search_one('type').\
                i_type_spec.validate(errors, pos, val)
        else:
            # if a default value is verified
            return True
    def restrictions(self):
        return ['require-instance']
class UnionTypeSpec(TypeSpec):
    """Type spec for a 'union'; a value is valid if any member accepts it."""
    def __init__(self, types):
        TypeSpec.__init__(self, 'union')
        # no base - no restrictions allowed
        self.types = types
    def str_to_val(self, errors, pos, str):
        # conversion is deferred to the member types during validation
        return str
    def validate(self, errors, pos, str, errstr = ''):
        # accept as soon as one member type both parses and validates the
        # value; member-level errors are deliberately discarded
        for member in self.types:
            if member.i_type_spec is None:
                continue
            val = member.i_type_spec.str_to_val([], pos, str)
            if val is not None and member.i_type_spec.validate([], pos, val):
                return True
        err_add(errors, pos, 'TYPE_VALUE',
                (str, self.definition, 'no member type matched' + errstr))
        return False
# Registry of the YANG builtin types, keyed by type name.  decimal64,
# identityref and union get plain placeholder TypeSpecs here because their
# real spec depends on substatements and is built during validation.
yang_type_specs = \
  {'int8':IntTypeSpec('int8', -128, 127),
   'int16':IntTypeSpec('int16', -32768, 32767),
   'int32':IntTypeSpec('int32', -2147483648, 2147483647),
   'int64':IntTypeSpec('int64', -9223372036854775808, 9223372036854775807),
   'uint8':IntTypeSpec('uint8', 0, 255),
   'uint16':IntTypeSpec('uint16', 0, 65535),
   'uint32':IntTypeSpec('uint32', 0, 4294967295),
   'uint64':IntTypeSpec('uint64', 0, 18446744073709551615),
   'decimal64':TypeSpec('decimal64'),
   'string':StringTypeSpec(),
   'boolean':BooleanTypeSpec(),
   'enumeration':EnumerationTypeSpec(),
   'bits':BitsTypeSpec(),
   'binary':BinaryTypeSpec(),
   'leafref':LeafrefTypeSpec(),
   'identityref':TypeSpec('identityref'),
   'instance-identifier':InstanceIdentifierTypeSpec(),
   'empty':EmptyTypeSpec(),
   'union':TypeSpec('union'),
   }
def is_base_type(typename):
    """Return True if `typename` names one of the YANG builtin types."""
    return typename in yang_type_specs
def is_smaller(lo, hi):
    """Ordering helper for range/length bounds.

    Returns True if `lo` lies strictly below `hi` under the convention
    that None sorts below everything, 'min' below every concrete value
    and 'max' above every concrete value.  A None `hi` means "no upper
    bound given" and always passes.
    """
    if lo is None:
        return True
    if lo == 'min':
        return hi != 'min'
    if lo == 'max':
        return hi is None
    if hi == 'min':
        return False
    if hi is None:
        return True
    if hi == 'max':
        return True
    return lo < hi
| isc |
fiji-flo/servo | tests/wpt/web-platform-tests/tools/third_party/pytest/testing/test_pytester.py | 12 | 4256 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import pytest
import os
from _pytest.pytester import HookRecorder
from _pytest.config import PytestPluginManager
from _pytest.main import EXIT_OK, EXIT_TESTSFAILED
def test_make_hook_recorder(testdir):
    """Exercise HookRecorder bookkeeping of test and collect reports.

    Everything after the xfail call is known-broken and kept as
    documentation of the intended recorder behaviour.
    """
    item = testdir.getitem("def test_func(): pass")
    recorder = testdir.make_hook_recorder(item.config.pluginmanager)
    assert not recorder.getfailures()
    pytest.xfail("internal reportrecorder tests need refactoring")
    class rep(object):
        # fake "failed on call" test report
        excinfo = None
        passed = False
        failed = True
        skipped = False
        when = "call"
    recorder.hook.pytest_runtest_logreport(report=rep)
    failures = recorder.getfailures()
    assert failures == [rep]
    # getfailures() must not consume the recorded failures
    failures = recorder.getfailures()
    assert failures == [rep]
    class rep(object):
        # fake "skipped" test report
        excinfo = None
        passed = False
        failed = False
        skipped = True
        when = "call"
    rep.passed = False
    rep.skipped = True
    recorder.hook.pytest_runtest_logreport(report=rep)
    # fake a failed collection report
    modcol = testdir.getmodulecol("")
    rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
    rep.passed = False
    rep.failed = True
    rep.skipped = False
    recorder.hook.pytest_collectreport(report=rep)
    passed, skipped, failed = recorder.listoutcomes()
    assert not passed and skipped and failed
    numpassed, numskipped, numfailed = recorder.countoutcomes()
    assert numpassed == 0
    assert numskipped == 1
    assert numfailed == 1
    assert len(recorder.getfailedcollections()) == 1
    # after unregistering, querying the recorder must fail loudly
    recorder.unregister()
    recorder.clear()
    recorder.hook.pytest_runtest_logreport(report=rep)
    pytest.raises(ValueError, "recorder.getfailures()")
def test_parseconfig(testdir):
    """Each parseconfig() call must create a fresh config object."""
    config1 = testdir.parseconfig()
    config2 = testdir.parseconfig()
    assert config2 != config1
    assert config1 != pytest.config
def test_testdir_runs_with_plugin(testdir):
    """The pytester plugin is usable from within a generated test file."""
    testdir.makepyfile("""
        pytest_plugins = "pytester"
        def test_hello(testdir):
            assert 1
    """)
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
def make_holder():
    """Build two equivalent hook-spec holders: a class and a module.

    Both expose pytest_xyz(arg) and pytest_xyz_noarg() so tests can be
    parametrized over the two ways hook specifications may be declared.
    """
    class apiclass(object):
        def pytest_xyz(self, arg):
            "x"
        def pytest_xyz_noarg(self):
            "x"

    def pytest_xyz(arg):
        "x"

    def pytest_xyz_noarg():
        "x"

    # type(os) is the module type; build a throwaway module named 'api'
    apimod = type(os)('api')
    apimod.pytest_xyz = pytest_xyz
    apimod.pytest_xyz_noarg = pytest_xyz_noarg
    return apiclass, apimod
@pytest.mark.parametrize("holder", make_holder())
def test_hookrecorder_basic(holder):
    """HookRecorder records hook calls; popcall retrieves them by name."""
    pm = PytestPluginManager()
    pm.addhooks(holder)
    rec = HookRecorder(pm)
    pm.hook.pytest_xyz(arg=123)
    call = rec.popcall("pytest_xyz")
    assert call.arg == 123
    assert call._name == "pytest_xyz"
    # popping a call that was never made fails the test
    pytest.raises(pytest.fail.Exception, "rec.popcall('abc')")
    pm.hook.pytest_xyz_noarg()
    call = rec.popcall("pytest_xyz_noarg")
    assert call._name == "pytest_xyz_noarg"
def test_makepyfile_unicode(testdir):
    """makepyfile must cope with non-ASCII content (py2/py3)."""
    global unichr
    try:
        unichr(65)
    except NameError:
        # Python 3 has no unichr; fall back to chr
        unichr = chr
    testdir.makepyfile(unichr(0xfffd))
def test_makepyfile_utf8(testdir):
    """Ensure makepyfile accepts utf-8 bytes as input (#2738)"""
    utf8_contents = u"""
    def setup_function(function):
        mixed_encoding = u'São Paulo'
    """.encode('utf-8')
    p = testdir.makepyfile(utf8_contents)
    # the generated file must contain the utf-8 bytes unchanged
    assert u"mixed_encoding = u'São Paulo'".encode('utf-8') in p.read('rb')
def test_inline_run_clean_modules(testdir):
    """inline_run() must re-import rewritten test modules on each run."""
    test_mod = testdir.makepyfile("def test_foo(): assert True")
    result = testdir.inline_run(str(test_mod))
    assert result.ret == EXIT_OK
    # rewrite module, now test should fail if module was re-imported
    test_mod.write("def test_foo(): assert False")
    result2 = testdir.inline_run(str(test_mod))
    assert result2.ret == EXIT_TESTSFAILED
def test_assert_outcomes_after_pytest_erro(testdir):
    # NOTE(review): "erro" typo kept — renaming would change the collected
    # test id. When pytest aborts on an unknown CLI flag there is no
    # terminal report, so assert_outcomes() must raise rather than report
    # zero outcomes.
    testdir.makepyfile("def test_foo(): assert True")
    result = testdir.runpytest('--unexpected-argument')
    with pytest.raises(ValueError, message="Pytest terminal report not found"):
        result.assert_outcomes(passed=0)
| mpl-2.0 |
yohanko88/gem5-DC | src/python/m5/util/smartdict.py | 88 | 5995 | # Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
# The SmartDict class fixes a couple of issues with using the content
# of os.environ or similar dicts of strings as Python variables:
#
# 1) Undefined variables should return False rather than raising KeyError.
#
# 2) String values of 'False', '0', etc., should evaluate to False
# (not just the empty string).
#
# #1 is solved by overriding __getitem__, and #2 is solved by using a
# proxy class for values and overriding __nonzero__ on the proxy.
# Everything else is just to (a) make proxies behave like normal
# values otherwise, (b) make sure any dict operation returns a proxy
# rather than a normal value, and (c) coerce values written to the
# dict to be strings.
from convert import *
from attrdict import attrdict
class Variable(str):
    """Intelligent proxy class for SmartDict. Variable will use the
    various convert functions to attempt to convert values to useable
    types"""

    # NOTE: Python-2 specific — relies on __nonzero__, __div__ and the
    # long type, none of which exist on Python 3.

    def __int__(self):
        return toInteger(str(self))

    def __long__(self):
        return toLong(str(self))

    def __float__(self):
        return toFloat(str(self))

    def __nonzero__(self):
        # Truthiness follows toBool(), so 'False', '0', etc. test false.
        return toBool(str(self))

    def convert(self, other):
        # Coerce self to the exact type of `other`, so comparisons and
        # arithmetic behave as if the stored string had that type.
        # Falls through to the plain string when `other` is not numeric.
        t = type(other)
        if t == bool:
            return bool(self)
        if t == int:
            return int(self)
        if t == long:
            return long(self)
        if t == float:
            return float(self)
        return str(self)

    # Rich comparisons: convert to the peer's type first, then compare.
    def __lt__(self, other):
        return self.convert(other) < other

    def __le__(self, other):
        return self.convert(other) <= other

    def __eq__(self, other):
        return self.convert(other) == other

    def __ne__(self, other):
        return self.convert(other) != other

    def __gt__(self, other):
        return self.convert(other) > other

    def __ge__(self, other):
        return self.convert(other) >= other

    # Arithmetic: the operand's type drives the conversion in both the
    # forward and the reflected variants.
    def __add__(self, other):
        return self.convert(other) + other

    def __sub__(self, other):
        return self.convert(other) - other

    def __mul__(self, other):
        return self.convert(other) * other

    def __div__(self, other):
        return self.convert(other) / other

    def __truediv__(self, other):
        return self.convert(other) / other

    def __radd__(self, other):
        return other + self.convert(other)

    def __rsub__(self, other):
        return other - self.convert(other)

    def __rmul__(self, other):
        return other * self.convert(other)

    def __rdiv__(self, other):
        return other / self.convert(other)

    def __rtruediv__(self, other):
        return other / self.convert(other)
class UndefinedVariable(object):
    """Stand-in returned for keys missing from a SmartDict.

    Any real use of the value will generally raise, but boolean coercion
    (e.g. ``if env['missing']:``) quietly evaluates false.
    """
    def __nonzero__(self):
        # Python 2 truth-testing hook: always report "false".
        return False
class SmartDict(attrdict):
    """Dictionary class that holds strings, but intelligently converts
    those strings to other types depending on their usage"""

    def __getitem__(self, key):
        """returns a Variable proxy if the values exists in the database and
        returns an UndefinedVariable otherwise"""
        if key in self:
            return Variable(dict.get(self, key))
        else:
            # Note that this does *not* change the contents of the dict,
            # so that even after we call env['foo'] we still get a
            # meaningful answer from "'foo' in env" (which
            # calls dict.__contains__, which we do not override).
            return UndefinedVariable()

    def __setitem__(self, key, item):
        """intercept the setting of any variable so that we always
        store strings in the dict"""
        dict.__setitem__(self, key, str(item))

    # The accessors below deliberately call dict.* directly (bypassing any
    # attrdict overrides) so the raw stored strings are wrapped in Variable
    # proxies exactly once.
    def values(self):
        return [ Variable(v) for v in dict.values(self) ]

    def itervalues(self):
        # Python 2 lazy counterpart of values().
        for value in dict.itervalues(self):
            yield Variable(value)

    def items(self):
        return [ (k, Variable(v)) for k,v in dict.items(self) ]

    def iteritems(self):
        # Python 2 lazy counterpart of items().
        for key,value in dict.iteritems(self):
            yield key, Variable(value)

    def get(self, key, default='False'):
        # Default is the *string* 'False' so the returned Variable is
        # falsy under Variable.__nonzero__.
        return Variable(dict.get(self, key, str(default)))

    def setdefault(self, key, default='False'):
        return Variable(dict.setdefault(self, key, str(default)))
__all__ = [ 'SmartDict' ]
| bsd-3-clause |
medallia/aurora | src/main/python/apache/aurora/executor/common/health_checker.py | 5 | 16309 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import math
import os
import pwd
import threading
import time
import traceback
from mesos.interface.mesos_pb2 import TaskState
from pystachio import Environment, String
from twitter.common import log
from twitter.common.exceptions import ExceptionalThread
from twitter.common.metrics import LambdaGauge
from apache.aurora.common.health_check.http_signaler import HttpSignaler
from apache.aurora.common.health_check.shell import ShellHealthCheck
from apache.aurora.config.schema.base import MesosContext
from apache.thermos.common.process_util import wrap_with_mesos_containerizer
from apache.thermos.config.schema import ThermosContext
from .status_checker import StatusChecker, StatusCheckerProvider, StatusResult
from .task_info import mesos_task_instance_from_assigned_task, resolve_ports
HTTP_HEALTH_CHECK = 'http'
SHELL_HEALTH_CHECK = 'shell'
class ThreadedHealthChecker(ExceptionalThread):
  """Perform a health check to determine if a service is healthy or not

  health_checker should be a callable returning a tuple of (boolean, reason), indicating
  respectively the health of the service and the reason for its failure (or None if the service is
  still healthy).

  Health-check failures are ignored during the first `math.ceil(grace_period_secs/interval_secs)`
  attempts. Status becomes `TASK_RUNNING` if `min_consecutive_successes` consecutive health
  check successes are seen, within `math.ceil(grace_period_secs/interval_secs) +
  min_consecutive_successes` attempts. (Converting time to attempts, accounts for slight
  discrepancies in sleep intervals do not cost an attempt, and unceremoniously end performing
  health checks and marking as unhealthy.)
  """

  def __init__(self,
               health_checker,
               sandbox,
               interval_secs,
               grace_period_secs,
               max_consecutive_failures,
               min_consecutive_successes,
               clock):
    """
    :param health_checker: health checker to confirm service health
    :type health_checker: function that returns (boolean, <string>)
    :param sandbox: Sandbox of the task corresponding to this health check.
    :type sandbox: DirectorySandbox
    :param interval_secs: delay between checks
    :type interval_secs: int
    :param grace_period_secs: initial period during which failed health-checks are ignored
    :type grace_period_secs: int
    :param max_consecutive_failures: number of failures to allow before marking dead
    :type max_consecutive_failures: int
    :param min_consecutive_successes: number of successes needed before marking healthy
    :type min_consecutive_successes: int
    :param clock: time module available to be mocked for testing
    :type clock: time module
    """
    self.checker = health_checker
    self.sandbox = sandbox
    self.clock = clock
    self.current_consecutive_failures = 0
    self.current_consecutive_successes = 0
    self.dead = threading.Event()
    self.interval = interval_secs
    self.max_consecutive_failures = max_consecutive_failures
    self.min_consecutive_successes = min_consecutive_successes
    self.snooze_file = None
    self.snoozed = False
    # Presence of this file in the sandbox disables health checking.
    if self.sandbox and self.sandbox.exists():
      self.snooze_file = os.path.join(self.sandbox.root, '.healthchecksnooze')
    if grace_period_secs is not None:
      self.grace_period_secs = grace_period_secs
    else:
      # Default grace period: two check intervals.
      self.grace_period_secs = interval_secs * 2
    self.attempts = 0
    # Compute the number of attempts that can be fit into the grace_period_secs,
    # to guarantee the number of health checks during the grace period.
    # Relying on time might cause non-deterministic behavior since the
    # health checks can be spaced apart by interval_secs + epsilon.
    self.forgiving_attempts = math.ceil(self.grace_period_secs / float(self.interval))
    # In the older version (without min_consecutive_successes) it is possible for a task
    # to make limping progress where the health checks fail all the time but never breach
    # the max_consecutive_failures limit and end up updated successfully.
    # Also a task can survive failures during initial_interval_secs and an additional
    # max_consecutive_failures and still update successfully.
    # Although initial_interval_secs is supposed to count for the task warm up time, to be
    # backward compatible add max_consecutive_failures to the max_attempts_to_running.
    self.max_attempts_to_running = (self.forgiving_attempts
                                    + self.max_consecutive_failures
                                    + self.min_consecutive_successes)
    self.running = False
    self.healthy, self.reason = True, None
    super(ThreadedHealthChecker, self).__init__()
    self.daemon = True

  def _perform_check_if_not_disabled(self):
    """Run one health check, or report healthy if the snooze file exists."""
    if self.snooze_file and os.path.isfile(self.snooze_file):
      self.snoozed = True
      log.info("Health check snooze file found at %s. Health checks disabled.", self.snooze_file)
      return True, None

    self.snoozed = False
    log.debug("Health checks enabled. Performing health check.")

    try:
      return self.checker()
    except Exception as e:
      # A crashing checker counts as an unhealthy result, not a thread death.
      log.error('Internal error in health check:')
      log.error(traceback.format_exc())
      return False, 'Internal health check error: %s' % e

  def _maybe_update_health_check_count(self, is_healthy, reason):
    """Update the consecutive success/failure counters and derived state."""
    if not is_healthy:
      log.warning('Health check failure: %s' % reason)

      if self.current_consecutive_successes > 0:
        log.debug('Reset consecutive successes counter.')
        self.current_consecutive_successes = 0

      if self._should_ignore_failure():
        return

      if self._should_fail_fast():
        log.warning('Not enough attempts left prove health, failing fast.')
        self.healthy = False
        self.reason = reason

      self.current_consecutive_failures += 1
      # Note: strictly greater-than, so max_consecutive_failures failures
      # are tolerated before the breach.
      if self.current_consecutive_failures > self.max_consecutive_failures:
        log.warning('Reached consecutive failure limit.')
        self.healthy = False
        self.reason = reason
    else:
      self.current_consecutive_successes += 1

      if not self.running:
        if self.current_consecutive_successes >= self.min_consecutive_successes:
          log.info('Reached consecutive success limit.')
          self.running = True

      if self.current_consecutive_failures > 0:
        log.debug('Reset consecutive failures counter.')
        self.current_consecutive_failures = 0

  def _should_fail_fast(self):
    """True when too few attempts remain to ever reach the success quota."""
    if not self.running:
      attempts_remaining = self.max_attempts_to_running - self.attempts
      successes_needed = self.min_consecutive_successes - self.current_consecutive_successes
      if attempts_remaining > 1 and successes_needed > attempts_remaining:
        return True
    return False

  def _should_ignore_failure(self):
    """Failures within the grace-period attempt budget are forgiven."""
    if self.attempts <= self.forgiving_attempts:
      log.warning('Ignoring failure of attempt: %s' % self.attempts)
      return True
    return False

  def _should_enforce_deadline(self):
    """True once the attempt budget to reach RUNNING has been exhausted."""
    if not self.running:
      if self.attempts > self.max_attempts_to_running:
        return True
    return False

  def _do_health_check(self):
    """Run one full check cycle; returns (is_healthy, reason)."""
    if self._should_enforce_deadline():
      # This is needed otherwise it is possible to flap between
      # successful health-checks and failed health-checks, never
      # really satisfying the criteria for either healthy or unhealthy.
      log.warning('Exhausted attempts before satisfying liveness criteria.')
      self.healthy = False
      self.reason = 'Not enough successful health checks in time.'
      return self.healthy, self.reason

    is_healthy, reason = self._perform_check_if_not_disabled()
    # Stop counting attempts once the budget is reached to avoid overflow.
    if self.attempts <= self.max_attempts_to_running:
      self.attempts += 1
    self._maybe_update_health_check_count(is_healthy, reason)
    return is_healthy, reason

  def run(self):
    """Thread body: check, sleep, repeat until stop() is called."""
    log.debug('Health checker thread started.')
    while not self.dead.is_set():
      is_healthy, reason = self._do_health_check()
      self.clock.sleep(self.interval)

  def start(self):
    ExceptionalThread.start(self)

  def stop(self):
    # Signals the run() loop to exit after its current sleep.
    log.debug('Health checker thread stopped.')
    self.dead.set()
class HealthChecker(StatusChecker):
  """Generic StatusChecker-conforming class which uses a thread for arbitrary periodic health checks

  health_checker should be a callable returning a tuple of (boolean, reason), indicating
  respectively the health of the service and the reason for its failure (or None if the service is
  still healthy).

  Exported metrics:
    health_checker.consecutive_failures: Number of consecutive failures observed.  Resets
      to zero on successful health check.
    health_checker.snoozed: Returns 1 if the health checker is snoozed, 0 if not.
    health_checker.total_latency_secs: Total time waiting for the health checker to respond in
      seconds. To get average latency, use health_checker.total_latency / health_checker.checks.
    health_checker.checks: Total number of health checks performed.
  """

  def __init__(self,
               health_checker,
               sandbox=None,
               interval_secs=10,
               grace_period_secs=None,
               max_consecutive_failures=0,
               min_consecutive_successes=1,
               clock=time):
    self._health_checks = 0
    self._total_latency = 0
    # Guards the two counters above; they are updated from the checker thread
    # and read by the metric lambdas.
    self._stats_lock = threading.Lock()
    self._clock = clock
    self.threaded_health_checker = ThreadedHealthChecker(
        self._timing_wrapper(health_checker),
        sandbox,
        interval_secs,
        grace_period_secs,
        max_consecutive_failures,
        min_consecutive_successes,
        clock)
    self.metrics.register(LambdaGauge('consecutive_failures',
        lambda: self.threaded_health_checker.current_consecutive_failures))
    self.metrics.register(LambdaGauge('snoozed', lambda: int(self.threaded_health_checker.snoozed)))
    self.metrics.register(LambdaGauge('total_latency_secs', lambda: self._total_latency))
    self.metrics.register(LambdaGauge('checks', lambda: self._health_checks))

  def _timing_wrapper(self, closure):
    """A wrapper around the health check closure that times the health check duration."""
    def wrapper(*args, **kw):
      start = self._clock.time()
      success, failure_reason = closure(*args, **kw)
      stop = self._clock.time()
      with self._stats_lock:
        self._health_checks += 1
        self._total_latency += stop - start
      return (success, failure_reason)
    return wrapper

  @property
  def status(self):
    # Map the checker thread's (healthy, running) flags onto a task state:
    # healthy+running -> RUNNING, healthy but not yet proven -> STARTING,
    # unhealthy -> FAILED with the recorded reason.
    if self.threaded_health_checker.healthy:
      if self.threaded_health_checker.running:
        return StatusResult('Task is healthy.', TaskState.Value('TASK_RUNNING'))
      else:
        return StatusResult(None, TaskState.Value('TASK_STARTING'))
    return StatusResult('Failed health check! %s' % self.threaded_health_checker.reason,
        TaskState.Value('TASK_FAILED'))

  def name(self):
    return 'health_checker'

  def start(self):
    super(HealthChecker, self).start()
    self.threaded_health_checker.start()

  def stop(self):
    self.threaded_health_checker.stop()
class NoopHealthChecker(StatusChecker):
  """Fallback checker used when a task configures no health check.

  With no liveness requirement to satisfy, it always reports
  `TASK_RUNNING` — there is nothing that could fail.
  """

  def __init__(self):
    always_healthy = StatusResult('No health-check defined, task is assumed healthy.',
        TaskState.Value('TASK_RUNNING'))
    self._status = always_healthy

  @property
  def status(self):
    return self._status
class HealthCheckerProvider(StatusCheckerProvider):
  """Builds the appropriate HealthChecker (shell, HTTP or no-op) for an
  assigned task based on its health_check_config."""

  def __init__(self, nosetuid_health_checks=False, mesos_containerizer_path=None):
    # nosetuid_health_checks: run shell checks as the executor user rather
    # than the job's role user.
    self._nosetuid_health_checks = nosetuid_health_checks
    self._mesos_containerizer_path = mesos_containerizer_path

  @staticmethod
  def interpolate_cmd(task, cmd):
    """
    :param task: Assigned task passed from Mesos Agent
    :param cmd: Command defined inside shell_command inside config.
    :return: Interpolated cmd with filled in values, for example ports.
    """
    thermos_namespace = ThermosContext(
        task_id=task.taskId,
        ports=task.assignedPorts)
    mesos_namespace = MesosContext(instance=task.instanceId)
    command = String(cmd) % Environment(
        thermos=thermos_namespace,
        mesos=mesos_namespace
    )
    return command.get()

  def from_assigned_task(self, assigned_task, sandbox):
    """
    :param assigned_task:
    :param sandbox:
    :return: Instance of a HealthChecker.
    """
    mesos_task = mesos_task_instance_from_assigned_task(assigned_task)
    health_check_config = mesos_task.health_check_config().get()
    health_checker = health_check_config.get('health_checker', {})
    timeout_secs = health_check_config.get('timeout_secs')
    if SHELL_HEALTH_CHECK in health_checker:
      shell_command = health_checker.get(SHELL_HEALTH_CHECK, {}).get('shell_command')
      # Filling in variables e.g. thermos.ports[http] that could have been passed in as part of
      # shell_command.
      interpolated_command = HealthCheckerProvider.interpolate_cmd(
        task=assigned_task,
        cmd=shell_command
      )

      # If we do not want the health check to execute as the user from the job's role
      # --nosetuid-health-checks should be passed as an argument to the executor.
      demote_to_job_role_user = None
      if not self._nosetuid_health_checks and not sandbox.is_filesystem_image:
        pw_entry = pwd.getpwnam(assigned_task.task.job.role)
        def demote_to_job_role_user():
          # preexec_fn for the subprocess: drop to the role's gid/uid.
          os.setgid(pw_entry.pw_gid)
          os.setuid(pw_entry.pw_uid)

      # If the task is executing in an isolated filesystem we'll want to wrap the health check
      # command within a mesos-containerizer invocation so that it's executed within that
      # filesystem.
      if sandbox.is_filesystem_image:
        health_check_user = (getpass.getuser() if self._nosetuid_health_checks
            else assigned_task.task.job.role)
        wrapped_cmd = wrap_with_mesos_containerizer(
            interpolated_command,
            health_check_user,
            sandbox.container_root,
            self._mesos_containerizer_path)
      else:
        wrapped_cmd = ['/bin/bash', '-c', interpolated_command]

      shell_signaler = ShellHealthCheck(
        raw_cmd=interpolated_command,
        wrapped_cmd=wrapped_cmd,
        preexec_fn=demote_to_job_role_user,
        timeout_secs=timeout_secs)
      a_health_checker = lambda: shell_signaler()
    else:
      portmap = resolve_ports(mesos_task, assigned_task.assignedPorts)
      if 'health' not in portmap:
        # Without a 'health' port an HTTP check is impossible; degrade to no-op.
        log.warning('No health-checks defined, will use a no-op health-checker.')
        return NoopHealthChecker()
      http_config = health_checker.get(HTTP_HEALTH_CHECK, {})
      http_endpoint = http_config.get('endpoint')
      http_expected_response = http_config.get('expected_response')
      http_expected_response_code = http_config.get('expected_response_code')

      http_signaler = HttpSignaler(
        portmap['health'],
        timeout_secs=timeout_secs)
      a_health_checker = lambda: http_signaler(
        endpoint=http_endpoint,
        expected_response=http_expected_response,
        expected_response_code=http_expected_response_code
      )

    health_checker = HealthChecker(
      a_health_checker,
      sandbox,
      interval_secs=health_check_config.get('interval_secs'),
      grace_period_secs=health_check_config.get('initial_interval_secs'),
      max_consecutive_failures=health_check_config.get('max_consecutive_failures'),
      min_consecutive_successes=health_check_config.get('min_consecutive_successes'))
    return health_checker
| apache-2.0 |
umkay/zulip | zerver/tests/test_report.py | 5 | 4616 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function

# Dict and List added: they are referenced by type comments in fix_params
# and StatsMock but were previously missing from this import.
from typing import Any, Callable, Dict, Iterable, List, Tuple

from zerver.lib.test_helpers import (
    ZulipTestCase,
)
from zerver.lib.utils import statsd

import mock
import ujson
def fix_params(raw_params):
    # type: (Dict[str, Any]) -> Dict[str, str]
    """JSON-encode every value, as a handful of legacy endpoints expect
    their individual parameters serialized as JSON strings."""
    fixed = {}
    for field, value in raw_params.items():
        fixed[field] = ujson.dumps(value)
    return fixed
class StatsMock(object):
    """Records every statsd call made during a test.

    Each call is still forwarded to the real statsd client — but with
    STATSD_HOST blanked so nothing leaves the process — and then logged
    to ``func_calls`` for assertions.
    """

    def __init__(self, settings):
        # type: (Callable) -> None
        self.settings = settings
        self.real_impl = statsd
        self.func_calls = []  # type: List[Tuple[str, Iterable[Any]]]

    def __getattr__(self, name):
        # type: (str) -> Callable
        def recorder(*args):
            # type: (*Any) -> None
            # Delegate with statsd disabled, then remember the call.
            with self.settings(STATSD_HOST=''):
                getattr(self.real_impl, name)(*args)
            self.func_calls.append((name, args))
        return recorder
class TestReport(ZulipTestCase):
    """End-to-end tests for the /json/report_* endpoints, asserting the
    exact statsd timings/counters each one emits."""

    def test_send_time(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.login(email)

        params = dict(
            time=5,
            received=6,
            displayed=7,
            locally_echoed='true',
            rendered_content_disparity='true',
        )

        stats_mock = StatsMock(self.settings)
        with mock.patch('zerver.views.report.statsd', wraps=stats_mock):
            result = self.client_post("/json/report_send_time", params)
        self.assert_json_success(result)

        # One timing per latency field, plus counters for the two flags.
        expected_calls = [
            ('timing', ('endtoend.send_time.zulip_com', 5)),
            ('timing', ('endtoend.receive_time.zulip_com', 6)),
            ('timing', ('endtoend.displayed_time.zulip_com', 7)),
            ('incr', ('locally_echoed',)),
            ('incr', ('render_disparity',)),
        ]
        self.assertEqual(stats_mock.func_calls, expected_calls)

    def test_narrow_time(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.login(email)

        params = dict(
            initial_core=5,
            initial_free=6,
            network=7,
        )

        stats_mock = StatsMock(self.settings)
        with mock.patch('zerver.views.report.statsd', wraps=stats_mock):
            result = self.client_post("/json/report_narrow_time", params)
        self.assert_json_success(result)

        expected_calls = [
            ('timing', ('narrow.initial_core.zulip_com', 5)),
            ('timing', ('narrow.initial_free.zulip_com', 6)),
            ('timing', ('narrow.network.zulip_com', 7)),
        ]
        self.assertEqual(stats_mock.func_calls, expected_calls)

    def test_unnarrow_time(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.login(email)

        params = dict(
            initial_core=5,
            initial_free=6,
        )

        stats_mock = StatsMock(self.settings)
        with mock.patch('zerver.views.report.statsd', wraps=stats_mock):
            result = self.client_post("/json/report_unnarrow_time", params)
        self.assert_json_success(result)

        expected_calls = [
            ('timing', ('unnarrow.initial_core.zulip_com', 5)),
            ('timing', ('unnarrow.initial_free.zulip_com', 6)),
        ]
        self.assertEqual(stats_mock.func_calls, expected_calls)

    def test_report_error(self):
        # type: () -> None
        email = 'hamlet@zulip.com'
        self.login(email)

        # This endpoint expects JSON-serialized individual parameters.
        params = fix_params(dict(
            message='hello',
            stacktrace='trace',
            ui_message=True,
            user_agent='agent',
            href='href',
            log='log',
            more_info=dict(foo='bar'),
        ))
        publish_mock = mock.patch('zerver.views.report.queue_json_publish')
        # Simulate `git describe` (version lookup) blowing up; the report
        # must still be queued successfully.
        subprocess_mock = mock.patch(
            'zerver.views.report.subprocess.check_output',
            side_effect=KeyError('foo')
        )
        with publish_mock as m, subprocess_mock:
            result = self.client_post("/json/report_error", params)
        self.assert_json_success(result)

        # Inspect the queued report payload.
        report = m.call_args[0][1]['report']
        for k in set(params) - set(['ui_message', 'more_info']):
            self.assertEqual(report[k], params[k])
        self.assertEqual(report['more_info'], dict(foo='bar'))
        self.assertEqual(report['user_email'], email)

        # With error reporting disabled the request still succeeds (no-op).
        with self.settings(ERROR_REPORTING=False):
            result = self.client_post("/json/report_error", params)
        self.assert_json_success(result)
| apache-2.0 |
willingc/oh-mainline | vendor/packages/celery/celery/tests/test_events/test_events_state.py | 18 | 10779 | from __future__ import absolute_import
from time import time
from itertools import count
from celery import states
from celery.events import Event
from celery.events.state import State, Worker, Task, HEARTBEAT_EXPIRE
from celery.utils import uuid
from celery.tests.utils import unittest
class replay(object):
    """Feed a scripted list of events into a State, one per next() call.

    Subclasses override setup() to populate self.events.
    """

    def __init__(self, state):
        self.state = state
        self.rewind()
        self.setup()

    def setup(self):
        # Hook for subclasses to populate self.events.
        pass

    def __iter__(self):
        return self

    def __next__(self):
        # Push the next scripted event into the state; stop when exhausted.
        try:
            self.state.event(self.events[self.position()])
        except IndexError:
            raise StopIteration()
    next = __next__  # Python 2 iterator-protocol alias.

    def rewind(self):
        # NOTE: count(0).next is a Python-2-only bound method; this module
        # predates Python 3 support.
        self.position = count(0).next
        return self

    def play(self):
        # Drain every remaining event into the state.
        for _ in self:
            pass
class ev_worker_online_offline(replay):
    """Script: one worker comes online, then immediately goes offline."""

    def setup(self):
        online = Event("worker-online", hostname="utest1")
        offline = Event("worker-offline", hostname="utest1")
        self.events = [online, offline]
class ev_worker_heartbeats(replay):
    """Script: an already-expired heartbeat followed by a fresh one."""

    def setup(self):
        stale = time() - HEARTBEAT_EXPIRE * 2
        self.events = [
            Event("worker-heartbeat", hostname="utest1", timestamp=stale),
            Event("worker-heartbeat", hostname="utest1"),
        ]
class ev_task_states(replay):
    # Full lifecycle of a single task uuid on worker utest1:
    # received -> started -> revoked -> retried -> failed -> succeeded.
    def setup(self):
        tid = self.tid = uuid()
        self.events = [
            Event("task-received", uuid=tid, name="task1",
                  args="(2, 2)", kwargs="{'foo': 'bar'}",
                  retries=0, eta=None, hostname="utest1"),
            Event("task-started", uuid=tid, hostname="utest1"),
            Event("task-revoked", uuid=tid, hostname="utest1"),
            Event("task-retried", uuid=tid, exception="KeyError('bar')",
                  traceback="line 2 at main", hostname="utest1"),
            Event("task-failed", uuid=tid, exception="KeyError('foo')",
                  traceback="line 1 at main", hostname="utest1"),
            Event("task-succeeded", uuid=tid, result="4",
                  runtime=0.1234, hostname="utest1"),
        ]
class ev_snapshot(replay):
    """Script: three workers come online, then 20 task-received events
    alternating between (task2, utest2) for even i and (task1, utest1)
    for odd i."""

    def setup(self):
        self.events = [
            Event("worker-online", hostname="utest1"),
            Event("worker-online", hostname="utest2"),
            Event("worker-online", hostname="utest3"),
        ]
        for i in range(20):
            # Replaced the fragile `cond and a or b` idiom with a proper
            # conditional expression (same values for every i), and renamed
            # `type` so it no longer shadows the builtin.
            worker = "utest2" if i % 2 == 0 else "utest1"
            task_type = "task2" if i % 2 == 0 else "task1"
            self.events.append(Event("task-received", name=task_type,
                                     uuid=uuid(), hostname=worker))
class test_Worker(unittest.TestCase):

    def test_survives_missing_timestamp(self):
        # A heartbeat without a timestamp must be ignored, not crash.
        worker = Worker(hostname="foo")
        worker.on_heartbeat(timestamp=None)
        self.assertEqual(worker.heartbeats, [])

    def test_repr(self):
        # repr() must never raise, whatever the worker state.
        self.assertTrue(repr(Worker(hostname="foo")))
class test_Task(unittest.TestCase):

    def test_info(self):
        # info() with no arguments must expose exactly the _info_fields;
        # `extra` appends fields, and a positional list selects a subset.
        task = Task(uuid="abcdefg",
                    name="tasks.add",
                    args="(2, 2)",
                    kwargs="{}",
                    retries=2,
                    result=42,
                    eta=1,
                    runtime=0.0001,
                    expires=1,
                    exception=1,
                    received=time() - 10,
                    started=time() - 8,
                    succeeded=time())
        self.assertEqual(sorted(list(task._info_fields)),
                         sorted(task.info().keys()))

        self.assertEqual(sorted(list(task._info_fields + ("received", ))),
                         sorted(task.info(extra=("received", ))))

        self.assertEqual(sorted(["args", "kwargs"]),
                         sorted(task.info(["args", "kwargs"]).keys()))

    def test_ready(self):
        # A task is not ready after reception, only after a terminal event.
        task = Task(uuid="abcdefg",
                    name="tasks.add")
        task.on_received(timestamp=time())
        self.assertFalse(task.ready)
        task.on_succeeded(timestamp=time())
        self.assertTrue(task.ready)

    def test_sent(self):
        # A merely-sent task remains PENDING.
        task = Task(uuid="abcdefg",
                    name="tasks.add")
        task.on_sent(timestamp=time())
        self.assertEqual(task.state, states.PENDING)

    def test_merge(self):
        # A late "received" event must not clobber the later FAILURE state,
        # but must still merge in name/args metadata.
        task = Task()
        task.on_failed(timestamp=time())
        task.on_started(timestamp=time())
        task.on_received(timestamp=time(), name="tasks.add", args=(2, 2))
        self.assertEqual(task.state, states.FAILURE)
        self.assertEqual(task.name, "tasks.add")
        self.assertTupleEqual(task.args, (2, 2))
        task.on_retried(timestamp=time())
        self.assertEqual(task.state, states.RETRY)

    def test_repr(self):
        self.assertTrue(repr(Task(uuid="xxx", name="tasks.add")))
class test_State(unittest.TestCase):

    def test_repr(self):
        self.assertTrue(repr(State()))

    def test_worker_online_offline(self):
        # After only the "online" event the worker is alive; after the full
        # script (online + offline) it is not.
        r = ev_worker_online_offline(State())
        r.next()
        self.assertTrue(r.state.alive_workers())
        self.assertTrue(r.state.workers["utest1"].alive)
        r.play()
        self.assertFalse(r.state.alive_workers())
        self.assertFalse(r.state.workers["utest1"].alive)

    def test_worker_heartbeat_expire(self):
        # A stale heartbeat leaves the worker dead; a fresh one revives it.
        r = ev_worker_heartbeats(State())
        r.next()
        self.assertFalse(r.state.alive_workers())
        self.assertFalse(r.state.workers["utest1"].alive)
        r.play()
        self.assertTrue(r.state.alive_workers())
        self.assertTrue(r.state.workers["utest1"].alive)

    def test_task_states(self):
        # Walk the full lifecycle script one event at a time, checking the
        # state, matching timestamp field and worker attribution after each.
        r = ev_task_states(State())

        # RECEIVED
        r.next()
        self.assertTrue(r.tid in r.state.tasks)
        task = r.state.tasks[r.tid]
        self.assertEqual(task.state, states.RECEIVED)
        self.assertTrue(task.received)
        self.assertEqual(task.timestamp, task.received)
        self.assertEqual(task.worker.hostname, "utest1")

        # STARTED
        r.next()
        self.assertTrue(r.state.workers["utest1"].alive,
                "any task event adds worker heartbeat")
        self.assertEqual(task.state, states.STARTED)
        self.assertTrue(task.started)
        self.assertEqual(task.timestamp, task.started)
        self.assertEqual(task.worker.hostname, "utest1")

        # REVOKED
        r.next()
        self.assertEqual(task.state, states.REVOKED)
        self.assertTrue(task.revoked)
        self.assertEqual(task.timestamp, task.revoked)
        self.assertEqual(task.worker.hostname, "utest1")

        # RETRY
        r.next()
        self.assertEqual(task.state, states.RETRY)
        self.assertTrue(task.retried)
        self.assertEqual(task.timestamp, task.retried)
        self.assertEqual(task.worker.hostname, "utest1")
        self.assertEqual(task.exception, "KeyError('bar')")
        self.assertEqual(task.traceback, "line 2 at main")

        # FAILURE
        r.next()
        self.assertEqual(task.state, states.FAILURE)
        self.assertTrue(task.failed)
        self.assertEqual(task.timestamp, task.failed)
        self.assertEqual(task.worker.hostname, "utest1")
        self.assertEqual(task.exception, "KeyError('foo')")
        self.assertEqual(task.traceback, "line 1 at main")

        # SUCCESS
        r.next()
        self.assertEqual(task.state, states.SUCCESS)
        self.assertTrue(task.succeeded)
        self.assertEqual(task.timestamp, task.succeeded)
        self.assertEqual(task.worker.hostname, "utest1")
        self.assertEqual(task.result, "4")
        self.assertEqual(task.runtime, 0.1234)

    # Helper assertions shared by the snapshot tests below.
    def assertStateEmpty(self, state):
        self.assertFalse(state.tasks)
        self.assertFalse(state.workers)
        self.assertFalse(state.event_count)
        self.assertFalse(state.task_count)

    def assertState(self, state):
        self.assertTrue(state.tasks)
        self.assertTrue(state.workers)
        self.assertTrue(state.event_count)
        self.assertTrue(state.task_count)

    def test_freeze_while(self):
        # clear_after=True empties the state after the callback ran;
        # clear_after=False keeps it.
        s = State()
        r = ev_snapshot(s)
        r.play()

        def work():
            pass

        s.freeze_while(work, clear_after=True)
        self.assertFalse(s.event_count)

        s2 = State()
        r = ev_snapshot(s2)
        r.play()
        s2.freeze_while(work, clear_after=False)
        self.assertTrue(s2.event_count)

    def test_clear_tasks(self):
        s = State()
        r = ev_snapshot(s)
        r.play()
        self.assertTrue(s.tasks)
        s.clear_tasks(ready=False)
        self.assertFalse(s.tasks)

    def test_clear(self):
        # clear() keeps the tasks by default; clear(False) drops them too.
        r = ev_snapshot(State())
        r.play()
        self.assertTrue(r.state.event_count)
        self.assertTrue(r.state.workers)
        self.assertTrue(r.state.tasks)
        self.assertTrue(r.state.task_count)

        r.state.clear()
        self.assertFalse(r.state.event_count)
        self.assertFalse(r.state.workers)
        self.assertTrue(r.state.tasks)
        self.assertFalse(r.state.task_count)

        r.state.clear(False)
        self.assertFalse(r.state.tasks)

    def test_task_types(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(sorted(r.state.task_types()), ["task1", "task2"])

    def test_tasks_by_timestamp(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.tasks_by_timestamp()), 20)

    def test_tasks_by_type(self):
        # The snapshot script distributes 20 tasks evenly over two types.
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.tasks_by_type("task1")), 10)
        self.assertEqual(len(r.state.tasks_by_type("task2")), 10)

    def test_alive_workers(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.alive_workers()), 3)

    def test_tasks_by_worker(self):
        r = ev_snapshot(State())
        r.play()
        self.assertEqual(len(r.state.tasks_by_worker("utest1")), 10)
        self.assertEqual(len(r.state.tasks_by_worker("utest2")), 10)

    def test_survives_unknown_worker_event(self):
        # Unknown event types, with or without a hostname, must not raise.
        s = State()
        s.worker_event("worker-unknown-event-xxx", {"foo": "bar"})
        s.worker_event("worker-unknown-event-xxx", {"hostname": "xxx",
                                                    "foo": "bar"})

    def test_survives_unknown_task_event(self):
        s = State()
        s.task_event("task-unknown-event-xxx", {"foo": "bar",
                                                "uuid": "x",
                                                "hostname": "y"})

    def test_callback(self):
        # The optional callback must fire for every event received.
        scratch = {}

        def callback(state, event):
            scratch["recv"] = True

        s = State(callback=callback)
        s.event({"type": "worker-online"})
        self.assertTrue(scratch.get("recv"))
| agpl-3.0 |
andmos/ansible | lib/ansible/modules/cloud/cloudstack/cs_host.py | 13 | 18375 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
# Module documentation. Two fixes against the original:
#  - the `hypervisor` option was described as "Name of the cluster." (copy-paste slip);
#  - `allocation_state` choices omitted 'maintenance', which the argument spec accepts.
DOCUMENTATION = '''
---
module: cs_host
short_description: Manages hosts on Apache CloudStack based clouds.
description:
    - Create, update and remove hosts.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
  name:
    description:
      - Name of the host.
    required: true
    aliases: [ 'ip_address' ]
  url:
    description:
      - Url of the host used to create a host.
      - If not provided, C(http://) and param C(name) is used as url.
      - Only considered if C(state=present) and host does not yet exist.
  username:
    description:
      - Username for the host.
      - Required if C(state=present) and host does not yet exist.
  password:
    description:
      - Password for the host.
      - Required if C(state=present) and host does not yet exist.
  pod:
    description:
      - Name of the pod.
      - Required if C(state=present) and host does not yet exist.
  cluster:
    description:
      - Name of the cluster.
  hypervisor:
    description:
      - Name of the hypervisor.
      - Required if C(state=present) and host does not yet exist.
    choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM', 'Simulator' ]
  allocation_state:
    description:
      - Allocation state of the host.
    choices: [ 'enabled', 'disabled', 'maintenance' ]
  host_tags:
    description:
      - Tags of the host.
    aliases: [ host_tag ]
  state:
    description:
      - State of the host.
    default: 'present'
    choices: [ 'present', 'absent' ]
  zone:
    description:
      - Name of the zone in which the host should be deployed.
      - If not set, default zone is used.
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Ensure a host is present but disabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
cluster: vcenter.example.com/ch-zrh-ix/pod01-cluster01
pod: pod01
zone: ch-zrh-ix-01
hypervisor: VMware
allocation_state: disabled
host_tags:
- perf
- gpu
- name: Ensure an existing host is disabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: disabled
- name: Ensure an existing host is enabled
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
allocation_state: enabled
- name: Ensure a host is absent
local_action:
module: cs_host
name: ix-pod01-esx01.example.com
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
capabilities:
description: Capabilities of the host.
returned: success
type: str
sample: hvm
cluster:
description: Cluster of the host.
returned: success
type: str
sample: vcenter.example.com/zone/cluster01
cluster_type:
description: Type of the cluster of the host.
returned: success
type: str
sample: ExternalManaged
cpu_allocated:
description: Amount in percent of the host's CPU currently allocated.
returned: success
type: str
sample: 166.25%
cpu_number:
description: Number of CPUs of the host.
returned: success
type: str
sample: 24
cpu_sockets:
description: Number of CPU sockets of the host.
returned: success
type: int
sample: 2
cpu_speed:
description: CPU speed in Mhz
returned: success
type: int
sample: 1999
cpu_used:
description: Amount of the host's CPU currently used.
returned: success
type: str
sample: 33.6%
cpu_with_overprovisioning:
description: Amount of the host's CPU after applying the cpu.overprovisioning.factor.
returned: success
type: str
sample: 959520.0
created:
description: Date when the host was created.
returned: success
type: str
sample: 2015-05-03T15:05:51+0200
disconnected:
description: Date when the host was disconnected.
returned: success
type: str
sample: 2015-05-03T15:05:51+0200
disk_size_allocated:
description: Host's currently allocated disk size.
returned: success
type: int
sample: 2593
disk_size_total:
description: Total disk size of the host
returned: success
type: int
sample: 259300
events:
description: Events available for the host
returned: success
type: str
sample: "Ping; HostDown; AgentConnected; AgentDisconnected; PingTimeout; ShutdownRequested; Remove; StartAgentRebalance; ManagementServerDown"
ha_host:
description: Whether the host is a HA host.
returned: success
type: bool
sample: false
has_enough_capacity:
description: Whether the host has enough CPU and RAM capacity to migrate a VM to it.
returned: success
type: bool
sample: true
host_tags:
description: Comma-separated list of tags for the host.
returned: success
type: str
sample: "perf"
hypervisor:
description: Host's hypervisor.
returned: success
type: str
sample: VMware
hypervisor_version:
description: Hypervisor version.
returned: success
type: str
sample: 5.1
ip_address:
description: IP address of the host
returned: success
type: str
sample: 10.10.10.1
is_local_storage_active:
description: Whether the local storage is available or not.
returned: success
type: bool
sample: false
last_pinged:
description: Date and time the host was last pinged.
returned: success
type: str
sample: "1970-01-17T17:27:32+0100"
management_server_id:
description: Management server ID of the host.
returned: success
type: int
sample: 345050593418
memory_allocated:
description: Amount of the host's memory currently allocated.
returned: success
type: int
sample: 69793218560
memory_total:
description: Total of memory of the host.
returned: success
type: int
sample: 206085263360
memory_used:
description: Amount of the host's memory currently used.
returned: success
type: int
sample: 65504776192
name:
description: Name of the host.
returned: success
type: str
sample: esx32.example.com
network_kbs_read:
description: Incoming network traffic on the host.
returned: success
type: int
sample: 0
network_kbs_write:
description: Outgoing network traffic on the host.
returned: success
type: int
sample: 0
os_category:
description: OS category name of the host.
returned: success
type: str
sample: ...
out_of_band_management:
description: Host out-of-band management information.
returned: success
type: str
sample: ...
pod:
description: Pod name of the host.
returned: success
type: str
sample: Pod01
removed:
description: Date and time the host was removed.
returned: success
type: str
sample: "1970-01-17T17:27:32+0100"
resource_state:
description: Resource state of the host.
returned: success
type: str
sample: Enabled
allocation_state:
description: Allocation state of the host.
returned: success
type: str
sample: enabled
state:
description: State of the host.
returned: success
type: str
sample: Up
suitable_for_migration:
description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM
to it or not.
returned: success
type: str
sample: true
host_type:
description: Type of the host.
returned: success
type: str
sample: Routing
host_version:
description: Version of the host.
returned: success
type: str
sample: 4.5.2
gpu_group:
description: GPU cards present in the host.
returned: success
type: list
sample: []
zone:
description: Zone of the host.
returned: success
type: str
sample: zone01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
CS_HYPERVISORS
)
import time
class AnsibleCloudStackHost(AnsibleCloudStack):
    """Manage CloudStack hosts: add, update, change allocation state and remove."""

    def __init__(self, module):
        super(AnsibleCloudStackHost, self).__init__(module)
        # Maps CloudStack API result keys to the snake_case keys of the module result.
        self.returns = {
            'averageload': 'average_load',
            'capabilities': 'capabilities',
            'clustername': 'cluster',
            'clustertype': 'cluster_type',
            'cpuallocated': 'cpu_allocated',
            'cpunumber': 'cpu_number',
            'cpusockets': 'cpu_sockets',
            'cpuspeed': 'cpu_speed',
            'cpuused': 'cpu_used',
            'cpuwithoverprovisioning': 'cpu_with_overprovisioning',
            'disconnected': 'disconnected',
            'details': 'details',
            'disksizeallocated': 'disk_size_allocated',
            'disksizetotal': 'disk_size_total',
            'events': 'events',
            'hahost': 'ha_host',
            'hasenoughcapacity': 'has_enough_capacity',
            'hypervisor': 'hypervisor',
            'hypervisorversion': 'hypervisor_version',
            'ipaddress': 'ip_address',
            'islocalstorageactive': 'is_local_storage_active',
            'lastpinged': 'last_pinged',
            'managementserverid': 'management_server_id',
            'memoryallocated': 'memory_allocated',
            'memorytotal': 'memory_total',
            'memoryused': 'memory_used',
            'networkkbsread': 'network_kbs_read',
            'networkkbswrite': 'network_kbs_write',
            'oscategoryname': 'os_category',
            'outofbandmanagement': 'out_of_band_management',
            'podname': 'pod',
            'removed': 'removed',
            'resourcestate': 'resource_state',
            'suitableformigration': 'suitable_for_migration',
            'type': 'host_type',
            'version': 'host_version',
            'gpugroup': 'gpu_group',
        }
        # States only usable by the updateHost API
        self.allocation_states_for_update = {
            'enabled': 'Enable',
            'disabled': 'Disable',
        }
        self.host = None

    def get_pod(self, key=None):
        """Return the configured pod (or its *key* field); fail if it does not exist."""
        pod_name = self.module.params.get('pod')
        if not pod_name:
            return None
        args = {
            'name': pod_name,
            'zoneid': self.get_zone(key='id'),
        }
        pods = self.query_api('listPods', **args)
        if pods:
            return self._get_by_key(key, pods['pod'][0])
        self.module.fail_json(msg="Pod %s not found" % pod_name)

    def get_cluster(self, key=None):
        """Return the configured cluster (or its *key* field); fail if it does not exist."""
        cluster_name = self.module.params.get('cluster')
        if not cluster_name:
            return None
        args = {
            'name': cluster_name,
            'zoneid': self.get_zone(key='id'),
        }
        clusters = self.query_api('listClusters', **args)
        if clusters:
            return self._get_by_key(key, clusters['cluster'][0])
        self.module.fail_json(msg="Cluster %s not found" % cluster_name)

    def get_host_tags(self):
        """Return host tags joined as the comma-separated string the API expects."""
        host_tags = self.module.params.get('host_tags')
        if host_tags is None:
            return None
        return ','.join(host_tags)

    def get_host(self, refresh=False):
        """Return the host matched by name or IP address, cached unless *refresh*."""
        if self.host is not None and not refresh:
            return self.host

        name = self.module.params.get('name')
        args = {
            'zoneid': self.get_zone(key='id'),
            'fetch_list': True,
        }
        res = self.query_api('listHosts', **args)
        if res:
            for h in res:
                # The name param doubles as the IP address (see its alias).
                if name in [h['ipaddress'], h['name']]:
                    self.host = h
        return self.host

    def _handle_allocation_state(self, host):
        """Transition *host* towards the requested allocation state, if any."""
        allocation_state = self.module.params.get('allocation_state')
        if not allocation_state:
            return host

        host = self._set_host_allocation_state(host)

        # In case host in maintenance and target is maintenance
        if host['allocationstate'].lower() == allocation_state and allocation_state == 'maintenance':
            return host

        # Cancel maintenance if target state is enabled/disabled
        elif allocation_state in list(self.allocation_states_for_update.keys()):
            host = self.disable_maintenance(host)
            host = self._update_host(host, self.allocation_states_for_update[allocation_state])

        # Only an enabled host can put in maintenance
        elif allocation_state == 'maintenance':
            host = self._update_host(host, 'Enable')
            host = self.enable_maintenance(host)

        return host

    def _set_host_allocation_state(self, host):
        """Normalize the host's resource state into an updateHost allocation state."""
        if host is None:
            # Fixed: the original did host['allocationstate'] = 'Enable' here,
            # which raises TypeError because host is None.
            host = {'allocationstate': 'Enable'}

        # Set host allocationstate to be disabled/enabled
        elif host['resourcestate'].lower() in list(self.allocation_states_for_update.keys()):
            host['allocationstate'] = self.allocation_states_for_update[host['resourcestate'].lower()]

        else:
            host['allocationstate'] = host['resourcestate']

        return host

    def present_host(self):
        """Ensure the host exists and matches the requested configuration."""
        host = self.get_host()
        if not host:
            host = self._create_host(host)
        else:
            host = self._update_host(host)

        if host:
            host = self._handle_allocation_state(host)

        return host

    def _get_url(self):
        """Return the configured url, defaulting to http://<name>."""
        url = self.module.params.get('url')
        if url:
            return url
        else:
            return "http://%s" % self.module.params.get('name')

    def _create_host(self, host):
        """Add a new host; requires credentials, hypervisor and pod params."""
        required_params = [
            'password',
            'username',
            'hypervisor',
            'pod',
        ]
        self.module.fail_on_missing_params(required_params=required_params)
        self.result['changed'] = True
        args = {
            'hypervisor': self.module.params.get('hypervisor'),
            'url': self._get_url(),
            'username': self.module.params.get('username'),
            'password': self.module.params.get('password'),
            'podid': self.get_pod(key='id'),
            'zoneid': self.get_zone(key='id'),
            'clusterid': self.get_cluster(key='id'),
            'hosttags': self.get_host_tags(),
        }
        if not self.module.check_mode:
            host = self.query_api('addHost', **args)
            host = host['host'][0]
        return host

    def _update_host(self, host, allocation_state=None):
        """Update host tags and, optionally, its allocation state."""
        args = {
            'id': host['id'],
            'hosttags': self.get_host_tags(),
            'allocationstate': allocation_state,
        }

        if allocation_state is not None:
            host = self._set_host_allocation_state(host)

        if self.has_changed(args, host):
            self.result['changed'] = True
            if not self.module.check_mode:
                host = self.query_api('updateHost', **args)
                host = host['host']
        return host

    def absent_host(self):
        """Remove the host, putting it into maintenance mode first."""
        host = self.get_host()
        if host:
            self.result['changed'] = True
            args = {
                'id': host['id'],
            }
            if not self.module.check_mode:
                res = self.enable_maintenance(host)
                if res:
                    res = self.query_api('deleteHost', **args)
        return host

    def enable_maintenance(self, host):
        """Put the host into maintenance mode and wait for the transition."""
        if host['resourcestate'] not in ['PrepareForMaintenance', 'Maintenance']:
            self.result['changed'] = True
            args = {
                'id': host['id'],
            }
            if not self.module.check_mode:
                res = self.query_api('prepareHostForMaintenance', **args)
                self.poll_job(res, 'host')
                host = self._poll_for_maintenance()
        return host

    def disable_maintenance(self, host):
        """Cancel maintenance mode if the host is in (or entering) it."""
        if host['resourcestate'] in ['PrepareForMaintenance', 'Maintenance']:
            self.result['changed'] = True
            args = {
                'id': host['id'],
            }
            if not self.module.check_mode:
                res = self.query_api('cancelHostMaintenance', **args)
                host = self.poll_job(res, 'host')
        return host

    def _poll_for_maintenance(self):
        """Poll (up to 300 * 2s) until the host leaves PrepareForMaintenance."""
        for i in range(0, 300):
            time.sleep(2)
            host = self.get_host(refresh=True)
            if not host:
                return None
            elif host['resourcestate'] != 'PrepareForMaintenance':
                return host
        # Fixed: the original called self.fail_json(), which is not defined on
        # this class; fail_json lives on the AnsibleModule instance.
        self.module.fail_json(msg="Polling for maintenance timed out")

    def get_result(self, host):
        """Extend the generic result with allocation state and the host tag list."""
        super(AnsibleCloudStackHost, self).get_result(host)
        if host:
            self.result['allocation_state'] = host['resourcestate'].lower()
            self.result['host_tags'] = host['hosttags'].split(',') if host.get('hosttags') else []
        return self.result
def main():
    """Entry point: parse module arguments and apply the requested host state."""
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, aliases=['ip_address']),
        url=dict(),
        password=dict(no_log=True),
        username=dict(),
        hypervisor=dict(choices=CS_HYPERVISORS),
        allocation_state=dict(choices=['enabled', 'disabled', 'maintenance']),
        pod=dict(),
        cluster=dict(),
        host_tags=dict(type='list', aliases=['host_tag']),
        zone=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_host = AnsibleCloudStackHost(module)

    if module.params.get('state') == 'absent':
        host = acs_host.absent_host()
    else:
        host = acs_host.present_host()

    module.exit_json(**acs_host.get_result(host))


if __name__ == '__main__':
    main()
| gpl-3.0 |
loftytopping/UManSysProp_public | umansysprop/examples/Pure_component_properties_example_petroc.py | 1 | 11211 | ##########################################################################################
# #
# Example file that loads in SMILES strings and then calculates pure component #
# properties for a given temperature #
# #
# #
# Copyright (C) 2016 David Topping : david.topping@manchester.ac.uk #
# : davetopp80@gmail.com #
# Personal website: davetoppingsci.com #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published #
# by the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# #
##########################################################################################
# This file demonstrates how to call existing routines in UManSysProp to extract pure
# component properties. Please note this file is not designed to be run directly from
# this sub-directory of the main distribution. It is deisgned to illustrate how one might
# call the seperate methods according to your UManSysProp location.
#
# The conversion from uploaded SMILES to a Pybel object is also different from the mechanism
# used in the files in the 'tools' directory. Those files are designed to work with
# forms on the current website. The principle is the same, we simply call Pybel directly.
#
# Current properties calculated:
# - Pure component vapour pressure
# - Boiling point
# - Subcooled liquid density
# - Molecular weight and chemical formula
#
# Last modification 21/12/16
import glob
import pybel
import collections
import sys
# Here you will need to put the relevant path in for your UManSysProp distribution. Mine is
# given as an example - change this!
sys.path.append('/Users/mbdxkps3/Dropbox/Python/UManSysProp/UManSysProp_public/')
from umansysprop import boiling_points
from umansysprop import vapour_pressures
from umansysprop import critical_properties
from umansysprop import liquid_densities
import pdb
##########################################################################################
# 1. Read in the property data, separating only .prop files from the rest.
# I have defined the .prop files myself, just for ease of use for this project.
# It is just a textfile with compound name/reference as first column, SMILES as second.
onlyfiles = [f for f in glob.glob('*.prop')]
#1) extract the data from each file and start generating a dictionary of Pybel objects
step=0
filenames=[]
Compound_reference=[]
smiles_array=[]
property_array=[]
Pybel_object_dict=dict()
for filename in onlyfiles: # If you have more than one file, for whatever reason
    SMILES_flag=0
    filenames.append(filename[:])
    # NOTE(review): mode 'rU' is deprecated and removed in Python 3.11; plain 'r'
    # gives the same universal-newline behaviour on Python 3 -- confirm target version.
    text=open(filename[:],'rU')
    for line in text:
        input = line.split()
        # Keep a list of the information
        Compound_reference.append(input[0])
        smiles_array.append(input[1])
        # Now create Pybel objects which are used in all property predictive techniques
        Pybel_object=pybel.readstring('smi',input[1])
        Pybel_object_dict[input[1]]=Pybel_object
##########################################################################################
# 2) Create a dictionary of properties based on these Pybel objects
# NOTE: For some of the vapour pressure values, you need to perform a boiling point estimation first
# It is therefore wise to do this initially
# 2a) Boiling points [(K)]
boiling_point_dict=collections.defaultdict(lambda: collections.defaultdict())
for smiles in smiles_array:
    boiling_point_dict[smiles]['joback_and_reid']=boiling_points.joback_and_reid(Pybel_object_dict[smiles])
    boiling_point_dict[smiles]['stein_and_brown']=boiling_points.stein_and_brown(Pybel_object_dict[smiles])
    boiling_point_dict[smiles]['nannoolal']=boiling_points.nannoolal(Pybel_object_dict[smiles])
# 2b) Vapour pressures [log10 (atm) at a specific temperature]
# For those vapour pressure methods that require a boiling point, we have 3D dictionaries
vapour_pressure_dict_BP=collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict()))
vapour_pressure_dict=collections.defaultdict(lambda: collections.defaultdict())
temperature=298.15
for smiles in smiles_array:
    vapour_pressure_dict_BP[smiles]['VP_Nannoolal']['BP_Nannoolal']=vapour_pressures.nannoolal(Pybel_object_dict[smiles], temperature, boiling_point_dict[smiles]['nannoolal'])
    vapour_pressure_dict_BP[smiles]['VP_Nannoolal']['BP_Stein_Brown']=vapour_pressures.nannoolal(Pybel_object_dict[smiles], temperature, boiling_point_dict[smiles]['stein_and_brown'])
    vapour_pressure_dict_BP[smiles]['VP_Nannoolal']['BP_Joback_Reid']=vapour_pressures.nannoolal(Pybel_object_dict[smiles], temperature, boiling_point_dict[smiles]['joback_and_reid'])
    vapour_pressure_dict_BP[smiles]['VP_Myrdal_Yalkowsky']['BP_Nannoolal']=vapour_pressures.myrdal_and_yalkowsky(Pybel_object_dict[smiles], temperature, boiling_point_dict[smiles]['nannoolal'])
    vapour_pressure_dict_BP[smiles]['VP_Myrdal_Yalkowsky']['BP_Stein_Brown']=vapour_pressures.myrdal_and_yalkowsky(Pybel_object_dict[smiles], temperature, boiling_point_dict[smiles]['stein_and_brown'])
    vapour_pressure_dict_BP[smiles]['VP_Myrdal_Yalkowsky']['BP_Joback_Reid']=vapour_pressures.myrdal_and_yalkowsky(Pybel_object_dict[smiles], temperature, boiling_point_dict[smiles]['joback_and_reid'])
    #pdb.set_trace()
    vapour_pressure_dict[smiles]['EVAP']=vapour_pressures.evaporation(Pybel_object_dict[smiles], temperature)
    vapour_pressure_dict[smiles]['EVAP2']=vapour_pressures.evaporation2(Pybel_object_dict[smiles], temperature)
    vapour_pressure_dict[smiles]['SIMPOL']=vapour_pressures.simpol(Pybel_object_dict[smiles], temperature)
# 2c) Subcooled liquid density [(g/cc) at a specific temperature]
# Some require pre-calculated critical properties and thus boiling points which are fed into the function directly
density_CP=collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict()))
density=collections.defaultdict(lambda: collections.defaultdict())
for smiles in smiles_array:
    compound=Pybel_object_dict[smiles] #Using this simply to try and keep line extensions short as possible
    boiling_point=boiling_point_dict[smiles]['nannoolal'] #Selecting this as an example
    density[smiles]['Girolami']=liquid_densities.girolami(compound)
    # NOTE(review): each '..._CP_Joback_Reid' entry below is computed with
    # critical_properties.nannoolal(...), identical to its '..._CP_Nannoolal'
    # sibling -- this looks like a copy/paste slip. Confirm whether a Joback & Reid
    # critical-property routine was intended here before relying on these values.
    density_CP[smiles]['Schroeder']['CP_Nannoolal']=liquid_densities.schroeder(compound, temperature, critical_properties.nannoolal(compound, boiling_point))
    density_CP[smiles]['Schroeder']['CP_Joback_Reid']=liquid_densities.schroeder(compound, temperature, critical_properties.nannoolal(compound, boiling_point))
    density_CP[smiles]['Le_Bas']['CP_Nannoolal']=liquid_densities.le_bas(compound, temperature, critical_properties.nannoolal(compound, boiling_point))
    density_CP[smiles]['Le_Bas']['CP_Joback_Reid']=liquid_densities.le_bas(compound, temperature, critical_properties.nannoolal(compound, boiling_point))
    density_CP[smiles]['Tyn_Calus']['CP_Nannoolal']=liquid_densities.tyn_and_calus(compound, temperature, critical_properties.nannoolal(compound, boiling_point))
    density_CP[smiles]['Tyn_Calus']['CP_Joback_Reid']=liquid_densities.tyn_and_calus(compound, temperature, critical_properties.nannoolal(compound, boiling_point))
# 2d) Pybel object properties. By creating a pybel object you can extract a range of markers/features of the molecule
# In Python, these features of the Pybel object class can be found by typing dir(inset name of your pybel object here)
# For example:
general_dict=collections.defaultdict(lambda: collections.defaultdict())
for smiles in smiles_array:
    general_dict[smiles]['Mw']=Pybel_object_dict[smiles].molwt
    general_dict[smiles]['Formula']=Pybel_object_dict[smiles].formula
##########################################################################################
# 3) Example output
# The debugging stop point below allows you to interrogate some derived values.
# For example, in the command line type:
# density_CP[smiles_array[4]]['Le_Bas']['CP_Nannoolal']
# vapour_pressure_dict_BP[smiles_array[4]]['VP_Myrdal_Yalkowsky']['BP_Nannoolal']
# To extract the density of the 5th compound in the smiles_array according to:
# - The method of Le_Bas using critical properties by Nannoolal
# and to extract the vapour pressure of the 5th compound in the smiles_array according to:
# - The method of Myrdal and Yalkowsky, using the boiling point estimation by Nannoolal
#
# To finish the program, simply type 'c' to continue. The following code then saves some
# of the vapour pressure estimations to a file.
file_name = open('testing.txt','w')
file_name.write(
    str('SMILES')+'\t'+str('VP_Nannoolal_BP_Nannoolal')+'\t'+str('VP_Nannoolal_BP_Stein_Brown')+'\t'+str('VP_Nannoolal_BP_Joback_Reid')+'\t'+str('VP_Myrdal_Yalkowsky_BP_Nannoolal')+
    '\t'+str('VP_Myrdal_Yalkowsky_BP_Stein_Brown')+'\t'+str('VP_Myrdal_Yalkowsky_BP_Joback_Reid')+'\t'+str('EVAP')+'\t'+str('EVAP2')+'\t'+str('SIMPOL'))
for smiles in smiles_array:
    file_name.write('\n')
    file_name.write(
        str(smiles)+'\t'+str(vapour_pressure_dict_BP[smiles]['VP_Nannoolal']['BP_Nannoolal'])+'\t'+str(vapour_pressure_dict_BP[smiles]['VP_Nannoolal']['BP_Stein_Brown'])+
        '\t'+str(vapour_pressure_dict_BP[smiles]['VP_Nannoolal']['BP_Joback_Reid'])+'\t'+str(vapour_pressure_dict_BP[smiles]['VP_Myrdal_Yalkowsky']['BP_Nannoolal'])+
        '\t'+str(vapour_pressure_dict_BP[smiles]['VP_Myrdal_Yalkowsky']['BP_Stein_Brown'])+'\t'+str(vapour_pressure_dict_BP[smiles]['VP_Myrdal_Yalkowsky']['BP_Joback_Reid'])+
        '\t'+str(vapour_pressure_dict[smiles]['EVAP'])+'\t'+str(vapour_pressure_dict[smiles]['EVAP2'])+'\t'+str(vapour_pressure_dict[smiles]['SIMPOL'])
        )
file_name.close()
| gpl-3.0 |
cheral/orange3 | Orange/tests/test_manifold.py | 4 | 5219 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
from Orange.projection import (MDS, Isomap, LocallyLinearEmbedding,
SpectralEmbedding, TSNE)
from Orange.distance import Euclidean
from Orange.data import Table
class TestManifold(unittest.TestCase):
    """Smoke tests for the manifold-projection wrappers in Orange.projection."""

    @classmethod
    def setUpClass(cls):
        cls.ionosphere = Table('ionosphere')
        cls.iris = Table('iris')

    def test_mds(self):
        data = self.ionosphere[:50]
        for n_com in (1, 2, 3):
            self.__mds_test_helper(data, n_com=n_com)

    def __mds_test_helper(self, data, n_com):
        # The same embedding must come out regardless of how the
        # dissimilarities are supplied: as a callable, precomputed, or by name.
        fitted = MDS(
            n_components=n_com, dissimilarity=Euclidean, random_state=0)(data)
        precomputed = MDS(
            n_components=n_com, dissimilarity='precomputed',
            random_state=0)(Euclidean(data))
        by_name = MDS(
            n_components=n_com, dissimilarity='euclidean', random_state=0)(data)

        expected_shape = (data.X.shape[0], n_com)
        for projection in (fitted, precomputed, by_name):
            self.assertTrue(
                np.allclose(fitted.embedding_, projection.embedding_))
            self.assertEqual(expected_shape, projection.embedding_.shape)

    def test_mds_pca_init(self):
        expected = np.array([-2.6928912, 0.32603512])
        # PCA initialization must be deterministic across dissimilarity inputs.
        for dissimilarity, data in ((Euclidean, self.iris),
                                    ('precomputed', Euclidean(self.iris)),
                                    ('euclidean', self.iris)):
            projector = MDS(n_components=2, dissimilarity=dissimilarity,
                            init_type='PCA', n_init=1)
            np.testing.assert_array_almost_equal(
                projector(data).embedding_[0], expected)

        projector = MDS(n_components=6, dissimilarity='euclidean',
                        init_type='PCA', n_init=1)
        embedding = projector(self.iris[:5]).embedding_
        expected = np.array([-0.31871, -0.064644, 0.015653, -1.5e-08, -4.3e-11, 0])
        np.testing.assert_array_almost_equal(embedding[0], expected)

    def test_isomap(self):
        for n_com in (1, 2, 3):
            self.__isomap_test_helper(self.ionosphere, n_com=n_com)

    def __isomap_test_helper(self, data, n_com):
        model = Isomap(n_neighbors=5, n_components=n_com)(data)
        self.assertEqual((data.X.shape[0], n_com), model.embedding_.shape)

    def test_lle(self):
        for n_com in (1, 2, 3):
            self.__lle_test_helper(self.ionosphere, n_com=n_com)

    def __lle_test_helper(self, data, n_com):
        # One variant per supported LLE method; the hessian variant needs
        # more neighbours to be well-posed.
        variants = (
            LocallyLinearEmbedding(n_neighbors=5, n_components=n_com),
            LocallyLinearEmbedding(n_neighbors=5, n_components=n_com,
                                   method="ltsa", eigen_solver="dense"),
            LocallyLinearEmbedding(n_neighbors=15, n_components=n_com,
                                   method="hessian", eigen_solver="dense"),
            LocallyLinearEmbedding(n_neighbors=5, n_components=n_com,
                                   method="modified", eigen_solver="dense"),
        )
        for projector in variants:
            model = projector(data)
            self.assertEqual((data.X.shape[0], n_com), model.embedding_.shape)

    def test_se(self):
        for n_com in (1, 2, 3):
            self.__se_test_helper(self.ionosphere, n_com=n_com)

    def __se_test_helper(self, data, n_com):
        model = SpectralEmbedding(n_components=n_com, n_neighbors=5)(data)
        self.assertEqual((data.X.shape[0], n_com), model.embedding_.shape)

    def test_tsne(self):
        data = self.ionosphere[:50]
        for n_com in (1, 2, 3):
            self.__tsne_test_helper(data, n_com=n_com)

    def __tsne_test_helper(self, data, n_com):
        # Metric given by name, as a callable, and as a precomputed matrix.
        for metric, table in (('euclidean', data),
                              (Euclidean, data),
                              ('precomputed', Euclidean(data))):
            model = TSNE(n_components=n_com, metric=metric)(table)
            self.assertEqual((data.X.shape[0], n_com), model.embedding_.shape)
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.