# (dataset-export residue, commented out so the module stays parseable)
# gt stringclasses 1
# value | context stringlengths 2.49k 119k
# |---|---|
"""A more or less complete user-defined wrapper around dictionary objects."""
class UserDict(object):
    """A wrapper around a real dictionary, stored in the ``data`` attribute.

    Exists so applications can subclass a mapping type and override
    individual methods; every operation delegates to ``self.data``.
    """

    def __init__(*args, **kwargs):
        # ``self`` is extracted from *args by hand so that keyword
        # arguments literally named 'self' or 'dict' can still be passed
        # through **kwargs as dictionary keys.
        if not args:
            raise TypeError("descriptor '__init__' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            # Backwards-compatible (deprecated) UserDict(dict=mapping) form.
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is "
                          "deprecated", PendingDeprecationWarning,
                          stacklevel=2)
        else:
            dict = None
        self.data = {}
        if dict is not None:
            self.update(dict)
        if len(kwargs):
            self.update(kwargs)

    def __repr__(self): return repr(self.data)

    def __cmp__(self, dict):
        # Compare against another UserDict's backing dict, or any mapping.
        if isinstance(dict, UserDict):
            return cmp(self.data, dict.data)
        else:
            return cmp(self.data, dict)

    __hash__ = None # Avoid Py3k warning

    def __len__(self): return len(self.data)

    def __getitem__(self, key):
        if key in self.data:
            return self.data[key]
        # Defer to a subclass-provided __missing__ hook, mirroring dict.
        if hasattr(self.__class__, "__missing__"):
            return self.__class__.__missing__(self, key)
        raise KeyError(key)

    def __setitem__(self, key, item): self.data[key] = item
    def __delitem__(self, key): del self.data[key]
    def clear(self): self.data.clear()

    def copy(self):
        if self.__class__ is UserDict:
            # Fast path: a plain UserDict can be copied via the backing dict.
            return UserDict(self.data.copy())
        import copy
        data = self.data
        try:
            # Temporarily detach ``data`` so copy.copy() does not also
            # shallow-copy the (possibly large) backing dictionary.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(self)
        return c

    def keys(self): return self.data.keys()
    def items(self): return self.data.items()
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()
    def values(self): return self.data.values()
    def has_key(self, key): return key in self.data

    def update(*args, **kwargs):
        # ``self`` extracted manually for the same reason as in __init__:
        # 'self' and 'dict' remain usable as dictionary keys via **kwargs.
        if not args:
            raise TypeError("descriptor 'update' of 'UserDict' object "
                            "needs an argument")
        self = args[0]
        args = args[1:]
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        if args:
            dict = args[0]
        elif 'dict' in kwargs:
            dict = kwargs.pop('dict')
            import warnings
            warnings.warn("Passing 'dict' as keyword argument is deprecated",
                          PendingDeprecationWarning, stacklevel=2)
        else:
            dict = None
        if dict is None:
            pass
        elif isinstance(dict, UserDict):
            self.data.update(dict.data)
        elif isinstance(dict, type({})) or not hasattr(dict, 'items'):
            self.data.update(dict)
        else:
            # Generic mapping: copy item by item through __setitem__ so
            # subclass overrides are honored.
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            self.data.update(kwargs)

    def get(self, key, failobj=None):
        if key not in self:
            return failobj
        return self[key]

    def setdefault(self, key, failobj=None):
        if key not in self:
            self[key] = failobj
        return self[key]

    def pop(self, key, *args):
        return self.data.pop(key, *args)

    def popitem(self):
        return self.data.popitem()

    def __contains__(self, key):
        return key in self.data

    def fromkeys(cls, iterable, value=None):
        # Alternate constructor: new mapping with every key set to ``value``.
        d = cls()
        for key in iterable:
            d[key] = value
        return d
    # TODO: Make this a decorator once they're implemented.
    fromkeys = classmethod(fromkeys)
class IterableUserDict(UserDict):
    """UserDict variant whose instances support iteration over their keys."""

    def __iter__(self):
        # Delegate straight to the backing dictionary's own key iterator.
        return self.data.__iter__()
# Register IterableUserDict as a virtual subclass of MutableMapping so
# isinstance()/issubclass() checks treat it as a mapping.
import _abcoll
_abcoll.MutableMapping.register(IterableUserDict)
class DictMixin(object):
    """Mixin supplying the complete dict interface for classes that already
    implement a minimal one: __getitem__, __setitem__, __delitem__, keys().

    The mixin does not define __init__() or copy(), since it cannot know
    the subclass constructor.  Subclasses gain progressively more
    efficiency by also defining __contains__(), __iter__(), and
    iteritems().
    """

    # second level definitions support higher levels
    def __iter__(self):
        for k in self.keys():
            yield k

    def has_key(self, key):
        # Membership is defined by whether __getitem__ succeeds.
        try:
            self[key]
        except KeyError:
            return False
        return True

    def __contains__(self, key):
        return self.has_key(key)

    # third level takes advantage of second level definitions
    def iteritems(self):
        for k in self:
            yield (k, self[k])

    def iterkeys(self):
        return self.__iter__()

    # fourth level uses definitions from lower levels
    def itervalues(self):
        for _, v in self.iteritems():
            yield v

    def values(self):
        return [v for _, v in self.iteritems()]

    def items(self):
        return list(self.iteritems())

    def clear(self):
        for key in self.keys():
            del self[key]

    def setdefault(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default

    def pop(self, key, *args):
        # The single optional positional argument is the default value.
        if len(args) > 1:
            raise TypeError, "pop expected at most 2 arguments, got "\
                + repr(1 + len(args))
        try:
            value = self[key]
        except KeyError:
            if args:
                return args[0]
            raise
        del self[key]
        return value

    def popitem(self):
        try:
            k, v = self.iteritems().next()
        except StopIteration:
            raise KeyError, 'container is empty'
        del self[k]
        return (k, v)

    def update(self, other=None, **kwargs):
        # Make progressively weaker assumptions about "other"
        if other is None:
            pass
        elif hasattr(other, 'iteritems'):  # iteritems saves memory and lookups
            for k, v in other.iteritems():
                self[k] = v
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self[k] = other[k]
        else:
            # Fall back to an iterable of (key, value) pairs.
            for k, v in other:
                self[k] = v
        if kwargs:
            self.update(kwargs)

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __repr__(self):
        return repr(dict(self.iteritems()))

    def __cmp__(self, other):
        # None compares as smaller than any mapping; DictMixin peers are
        # compared through plain dict snapshots.
        if other is None:
            return 1
        if isinstance(other, DictMixin):
            other = dict(other.iteritems())
        return cmp(dict(self.iteritems()), other)

    def __len__(self):
        return len(self.keys())
# | |  (dataset-export separator, commented out)
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import get_storage_class
from .utils.loader import load_object
from .utils.recursive_dictionary import RecursiveDictionaryWithExcludes
logger = logging.getLogger(__name__)
# FILER_IMAGE_MODEL setting is used to swap Image model.
# If such global setting does not exist, it will be created at this point (with default model name).
# This is needed especially when using this setting in migrations.
if not hasattr(settings, 'FILER_IMAGE_MODEL'):
    setattr(settings, 'FILER_IMAGE_MODEL', 'filer.Image')
FILER_IMAGE_MODEL = settings.FILER_IMAGE_MODEL

# NOTE(review): original comment was truncated ("When True makes ...");
# presumably enables extra debug behaviour — confirm against the filer docs.
FILER_DEBUG = getattr(settings, 'FILER_DEBUG', False)  # When True makes
FILER_SUBJECT_LOCATION_IMAGE_DEBUG = getattr(settings, 'FILER_SUBJECT_LOCATION_IMAGE_DEBUG', False)
FILER_WHITESPACE_COLOR = getattr(settings, 'FILER_WHITESPACE_COLOR', '#FFFFFF')
FILER_0_8_COMPATIBILITY_MODE = getattr(settings, 'FILER_0_8_COMPATIBILITY_MODE', False)

FILER_ENABLE_LOGGING = getattr(settings, 'FILER_ENABLE_LOGGING', False)
if FILER_ENABLE_LOGGING:
    # Keep logging enabled only when the project actually configured a
    # root ('') or 'filer' logger in settings.LOGGING.
    FILER_ENABLE_LOGGING = (
        FILER_ENABLE_LOGGING and (getattr(settings, 'LOGGING') and
                                  ('' in settings.LOGGING['loggers'] or
                                   'filer' in settings.LOGGING['loggers'])))

FILER_ENABLE_PERMISSIONS = getattr(settings, 'FILER_ENABLE_PERMISSIONS', False)
FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS = getattr(settings, 'FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS', False)
FILER_IS_PUBLIC_DEFAULT = getattr(settings, 'FILER_IS_PUBLIC_DEFAULT', True)
FILER_PAGINATE_BY = getattr(settings, 'FILER_PAGINATE_BY', 20)
# Admin thumbnail icon sizes. Values are kept as *strings* because code in
# the wild depends on that, but they are sorted numerically.
_ICON_SIZES = getattr(settings, 'FILER_ADMIN_ICON_SIZES', ('16', '32', '48', '64'))
if not _ICON_SIZES:
    raise ImproperlyConfigured('Please, configure FILER_ADMIN_ICON_SIZES')

# Reliably sort by integer value, but keep icon size as string.
# (There is some code in the wild that depends on this being strings.)
FILER_ADMIN_ICON_SIZES = [str(i) for i in sorted([int(s) for s in _ICON_SIZES])]

# Filer admin templates have specific icon sizes hardcoded: 32 and 48.
_ESSENTIAL_ICON_SIZES = ('32', '48')
if not all(x in FILER_ADMIN_ICON_SIZES for x in _ESSENTIAL_ICON_SIZES):
    # Logger.warn() is a deprecated alias; warning() is the supported name.
    logger.warning(
        "FILER_ADMIN_ICON_SIZES has not all of the essential icon sizes "
        "listed: {}. Some icons might be missing in admin templates.".format(
            _ESSENTIAL_ICON_SIZES))
# This is an ordered iterable that describes a list of
# classes that I should check for when adding files
# (most specific model first; 'filer.File' is the catch-all).
FILER_FILE_MODELS = getattr(
    settings, 'FILER_FILE_MODELS',
    (FILER_IMAGE_MODEL, 'filer.File'))

# Engine used for public storage when the project does not override it.
DEFAULT_FILE_STORAGE = getattr(settings, 'DEFAULT_FILE_STORAGE', 'django.core.files.storage.FileSystemStorage')
# Skeleton storage configuration. ENGINE left as None means "fall back to
# the matching entry of DEFAULT_FILER_STORAGES" (filled in further below).
MINIMAL_FILER_STORAGES = {
    'public': {
        'main': {
            'ENGINE': None,
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': None,
            'OPTIONS': {},
        }
    },
    'private': {
        'main': {
            'ENGINE': None,
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': None,
            'OPTIONS': {},
        },
    },
}

# Default values for every storage section. Private files are placed under
# '../smedia' (a sibling of MEDIA_ROOT) so they are not directly reachable
# through the public media URL.
DEFAULT_FILER_STORAGES = {
    'public': {
        'main': {
            'ENGINE': DEFAULT_FILE_STORAGE,
            'OPTIONS': {},
            'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
            'UPLOAD_TO_PREFIX': 'filer_public',
        },
        'thumbnails': {
            'ENGINE': DEFAULT_FILE_STORAGE,
            'OPTIONS': {},
            'THUMBNAIL_OPTIONS': {
                'base_dir': 'filer_public_thumbnails',
            },
        },
    },
    'private': {
        'main': {
            'ENGINE': 'filer.storage.PrivateFileSystemStorage',
            'OPTIONS': {
                'location': os.path.abspath(os.path.join(settings.MEDIA_ROOT, '../smedia/filer_private')),
                'base_url': '/smedia/filer_private/',
            },
            'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
            'UPLOAD_TO_PREFIX': '',
        },
        'thumbnails': {
            'ENGINE': 'filer.storage.PrivateFileSystemStorage',
            'OPTIONS': {
                'location': os.path.abspath(os.path.join(settings.MEDIA_ROOT, '../smedia/filer_private_thumbnails')),
                'base_url': '/smedia/filer_private_thumbnails/',
            },
            'THUMBNAIL_OPTIONS': {},
        },
    },
}

# Same skeleton/default split for the backends that serve private files.
MINIMAL_FILER_SERVERS = {
    'private': {
        'main': {
            'ENGINE': None,
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': None,
            'OPTIONS': {},
        },
    },
}

DEFAULT_FILER_SERVERS = {
    'private': {
        'main': {
            'ENGINE': 'filer.server.backends.default.DefaultServer',
            'OPTIONS': {},
        },
        'thumbnails': {
            'ENGINE': 'filer.server.backends.default.DefaultServer',
            'OPTIONS': {},
        },
    },
}
# Start from the minimal skeleton and deep-merge the user configuration on
# top of it. OPTIONS / THUMBNAIL_OPTIONS are excluded from the recursive
# merge — presumably replaced wholesale; see RecursiveDictionaryWithExcludes.
FILER_STORAGES = RecursiveDictionaryWithExcludes(MINIMAL_FILER_STORAGES, rec_excluded_keys=('OPTIONS', 'THUMBNAIL_OPTIONS'))

if FILER_0_8_COMPATIBILITY_MODE:
    # Reconstruct the storage configuration from the legacy (pre-0.9)
    # FILER_PUBLICMEDIA_PREFIX / FILER_PRIVATEMEDIA_PREFIX settings.
    user_filer_storages = {
        'public': {
            'main': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
                'UPLOAD_TO_PREFIX': getattr(settings, 'FILER_PUBLICMEDIA_PREFIX', 'filer_public'),
            },
            'thumbnails': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'OPTIONS': {},
                'THUMBNAIL_OPTIONS': {
                    'base_dir': 'filer_public_thumbnails',
                },
            },
        },
        'private': {
            'main': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'UPLOAD_TO': 'filer.utils.generate_filename.randomized',
                'UPLOAD_TO_PREFIX': getattr(settings, 'FILER_PRIVATEMEDIA_PREFIX', 'filer_private'),
            },
            'thumbnails': {
                'ENGINE': DEFAULT_FILE_STORAGE,
                'OPTIONS': {},
                'THUMBNAIL_OPTIONS': {
                    'base_dir': 'filer_private_thumbnails',
                },
            },
        },
    }
else:
    user_filer_storages = getattr(settings, 'FILER_STORAGES', {})

FILER_STORAGES.rec_update(user_filer_storages)
def update_storage_settings(user_settings, defaults, s, t):
    """Fill missing pieces of one storage section with the default values.

    ``s`` selects the scope ('public'/'private'), ``t`` the target
    ('main'/'thumbnails').  When ENGINE is unset, both ENGINE and OPTIONS
    are taken from ``defaults``; section-specific keys (UPLOAD_TO /
    UPLOAD_TO_PREFIX for 'main', THUMBNAIL_OPTIONS for 'thumbnails') are
    only filled in when absent.  Mutates and returns ``user_settings``.
    """
    section = user_settings[s][t]
    fallback = defaults[s][t]
    if not section['ENGINE']:
        section['ENGINE'] = fallback['ENGINE']
        section['OPTIONS'] = fallback['OPTIONS']
    if t == 'main':
        for key in ('UPLOAD_TO', 'UPLOAD_TO_PREFIX'):
            if key not in section:
                section[key] = fallback[key]
    if t == 'thumbnails' and 'THUMBNAIL_OPTIONS' not in section:
        section['THUMBNAIL_OPTIONS'] = fallback['THUMBNAIL_OPTIONS']
    return user_settings
# Apply the defaults to every storage section the user config left unset.
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'public', 'main')
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'public', 'thumbnails')
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'private', 'main')
update_storage_settings(FILER_STORAGES, DEFAULT_FILER_STORAGES, 'private', 'thumbnails')

# Same minimal-skeleton + user-override merge for the private file servers.
FILER_SERVERS = RecursiveDictionaryWithExcludes(MINIMAL_FILER_SERVERS, rec_excluded_keys=('OPTIONS',))
FILER_SERVERS.rec_update(getattr(settings, 'FILER_SERVERS', {}))
def update_server_settings(settings, defaults, s, t):
    """Substitute default ENGINE/OPTIONS into server section ``s``/``t``
    when ENGINE is unset.  Mutates and returns ``settings``."""
    target = settings[s][t]
    if not target['ENGINE']:
        fallback = defaults[s][t]
        target['ENGINE'] = fallback['ENGINE']
        target['OPTIONS'] = fallback['OPTIONS']
    return settings
update_server_settings(FILER_SERVERS, DEFAULT_FILER_SERVERS, 'private', 'main')
update_server_settings(FILER_SERVERS, DEFAULT_FILER_SERVERS, 'private', 'thumbnails')

# Public media (media accessible without any permission checks)
FILER_PUBLICMEDIA_STORAGE = get_storage_class(FILER_STORAGES['public']['main']['ENGINE'])(**FILER_STORAGES['public']['main']['OPTIONS'])
FILER_PUBLICMEDIA_UPLOAD_TO = load_object(FILER_STORAGES['public']['main']['UPLOAD_TO'])
if 'UPLOAD_TO_PREFIX' in FILER_STORAGES['public']['main']:
    # Wrap the upload-to callable so generated paths carry the configured prefix.
    FILER_PUBLICMEDIA_UPLOAD_TO = load_object('filer.utils.generate_filename.prefixed_factory')(FILER_PUBLICMEDIA_UPLOAD_TO, FILER_STORAGES['public']['main']['UPLOAD_TO_PREFIX'])
FILER_PUBLICMEDIA_THUMBNAIL_STORAGE = get_storage_class(FILER_STORAGES['public']['thumbnails']['ENGINE'])(**FILER_STORAGES['public']['thumbnails']['OPTIONS'])
FILER_PUBLICMEDIA_THUMBNAIL_OPTIONS = FILER_STORAGES['public']['thumbnails']['THUMBNAIL_OPTIONS']

# Private media (media accessible through permissions checks)
FILER_PRIVATEMEDIA_STORAGE = get_storage_class(FILER_STORAGES['private']['main']['ENGINE'])(**FILER_STORAGES['private']['main']['OPTIONS'])
FILER_PRIVATEMEDIA_UPLOAD_TO = load_object(FILER_STORAGES['private']['main']['UPLOAD_TO'])
if 'UPLOAD_TO_PREFIX' in FILER_STORAGES['private']['main']:
    FILER_PRIVATEMEDIA_UPLOAD_TO = load_object('filer.utils.generate_filename.prefixed_factory')(FILER_PRIVATEMEDIA_UPLOAD_TO, FILER_STORAGES['private']['main']['UPLOAD_TO_PREFIX'])
FILER_PRIVATEMEDIA_THUMBNAIL_STORAGE = get_storage_class(FILER_STORAGES['private']['thumbnails']['ENGINE'])(**FILER_STORAGES['private']['thumbnails']['OPTIONS'])
FILER_PRIVATEMEDIA_THUMBNAIL_OPTIONS = FILER_STORAGES['private']['thumbnails']['THUMBNAIL_OPTIONS']
FILER_PRIVATEMEDIA_SERVER = load_object(FILER_SERVERS['private']['main']['ENGINE'])(**FILER_SERVERS['private']['main']['OPTIONS'])
FILER_PRIVATEMEDIA_THUMBNAIL_SERVER = load_object(FILER_SERVERS['private']['thumbnails']['ENGINE'])(**FILER_SERVERS['private']['thumbnails']['OPTIONS'])

# By default limit number of simultaneous uploads if we are using SQLite
# (concurrent writers would contend for the single database file).
if settings.DATABASES['default']['ENGINE'].endswith('sqlite3'):
    _uploader_connections = 1
else:
    _uploader_connections = 3
FILER_UPLOADER_CONNECTIONS = getattr(
    settings, 'FILER_UPLOADER_CONNECTIONS', _uploader_connections)

FILER_DUMP_PAYLOAD = getattr(settings, 'FILER_DUMP_PAYLOAD', False)  # Whether the filer shall dump the files payload
FILER_CANONICAL_URL = getattr(settings, 'FILER_CANONICAL_URL', 'canonical/')
# | |  (dataset-export separator, commented out)
# import_export_vote_smart/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import retrieve_and_match_candidate_from_vote_smart, \
retrieve_vote_smart_candidates_into_local_db, \
retrieve_vote_smart_candidate_bio_into_local_db, \
retrieve_vote_smart_position_categories_into_local_db, \
retrieve_vote_smart_officials_into_local_db, retrieve_and_save_vote_smart_states, \
retrieve_vote_smart_ratings_for_candidate_into_local_db, retrieve_vote_smart_ratings_by_group_into_local_db, \
retrieve_vote_smart_special_interest_group_into_local_db, \
retrieve_vote_smart_special_interest_groups_into_local_db, \
transfer_vote_smart_special_interest_groups_to_we_vote_organizations, \
transfer_vote_smart_ratings_to_positions_for_candidate, transfer_vote_smart_ratings_to_positions_for_politician
from .models import VoteSmartCandidate, VoteSmartCategory, VoteSmartRating, VoteSmartRatingOneCandidate, \
VoteSmartSpecialInterestGroup, VoteSmartState
from .votesmart_local import VotesmartApiError
from admin_tools.views import redirect_to_sign_in_page
from candidate.models import CandidateCampaignManager, CandidateCampaign
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.messages import get_messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from exception.models import print_to_log
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists, STATE_CODE_MAP
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def import_one_candidate_ratings_view(request, vote_smart_candidate_id):
    """Pull Vote Smart ratings for one candidate into the local DB, then
    redirect to that candidate's edit page (or the candidate list when no
    matching We Vote candidate is found)."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # retrieve_vote_smart_ratings_for_candidate_into_local_db can be used for both We Vote candidate or politician
    one_group_results = retrieve_vote_smart_ratings_for_candidate_into_local_db(vote_smart_candidate_id)
    if one_group_results['success']:
        messages.add_message(request, messages.INFO, "Ratings for one candidate retrieved. ")
    else:
        messages.add_message(request, messages.ERROR, "Ratings for one candidate NOT retrieved. "
                                                      "(error: {error_message})"
                                                      "".format(error_message=one_group_results['status']))

    # Map the Vote Smart id back to a We Vote candidate so we can land on its edit page.
    candidate_manager = CandidateCampaignManager()
    results = candidate_manager.retrieve_candidate_campaign_from_vote_smart_id(vote_smart_candidate_id)
    if results['candidate_campaign_found']:
        candidate = results['candidate_campaign']
        candidate_campaign_id = candidate.id
        return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_campaign_id,)))
    else:
        return HttpResponseRedirect(reverse('candidate:candidate_list', args=()))
@login_required
def import_one_politician_ratings_view(request, vote_smart_candidate_id):  # TODO DALE update to politician
    """Politician variant of import_one_candidate_ratings_view.

    NOTE(review): currently byte-for-byte the same logic as the candidate
    view except it requires 'admin' authority — the TODO above tracks the
    pending politician-specific rework.
    """
    authority_required = {'admin'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # retrieve_vote_smart_ratings_for_candidate_into_local_db can be used for both We Vote candidate or politician
    one_group_results = retrieve_vote_smart_ratings_for_candidate_into_local_db(vote_smart_candidate_id)
    if one_group_results['success']:
        messages.add_message(request, messages.INFO, "Ratings for one candidate retrieved. ")
    else:
        messages.add_message(request, messages.ERROR, "Ratings for one candidate NOT retrieved. "
                                                      "(error: {error_message})"
                                                      "".format(error_message=one_group_results['status']))

    candidate_manager = CandidateCampaignManager()
    results = candidate_manager.retrieve_candidate_campaign_from_vote_smart_id(vote_smart_candidate_id)
    if results['candidate_campaign_found']:
        candidate = results['candidate_campaign']
        candidate_campaign_id = candidate.id
        return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_campaign_id,)))
    else:
        return HttpResponseRedirect(reverse('candidate:candidate_list', args=()))
@login_required
def import_group_ratings_view(request):
    """Fetch Vote Smart ratings for every locally known special interest
    group, then redirect to the rating list with a success/error tally."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # state_code = request.GET.get('state_code', 'NA')  # Default to national
    # category_id = request.GET.get('category_id', 0)

    # Retrieve each group so we can request the ratings for each group
    get_sig_group_count = 0
    get_sig_error_message_count = 0
    special_interest_group_list = VoteSmartSpecialInterestGroup.objects.order_by('name')
    for one_group in special_interest_group_list:
        special_interest_group_id = one_group.sigId
        one_group_results = retrieve_vote_smart_ratings_by_group_into_local_db(special_interest_group_id)
        if not one_group_results['success']:
            # Per-group failures are logged and tallied, not shown individually.
            print_to_log(logger=logger, exception_message_optional=one_group_results['status'])
            get_sig_error_message_count += 1
        else:
            get_sig_group_count += 1
    messages.add_message(request, messages.INFO, "Ratings from {get_sig_group_count} "
                                                 "Special Interest Groups retrieved. "
                                                 "(errors: {get_sig_error_message_count})"
                                                 "".format(get_sig_group_count=get_sig_group_count,
                                                           get_sig_error_message_count=get_sig_error_message_count))
    return HttpResponseRedirect(reverse('import_export_vote_smart:vote_smart_rating_list', args=()))
@login_required
def import_one_group_ratings_view(request, special_interest_group_id):
    """Fetch Vote Smart ratings for one special interest group, then
    redirect to that group's rating list."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    one_group_results = retrieve_vote_smart_ratings_by_group_into_local_db(special_interest_group_id)
    if one_group_results['success']:
        messages.add_message(request, messages.INFO, "Ratings from Special Interest Group retrieved. ")
    else:
        messages.add_message(request, messages.ERROR, "Ratings from Special Interest Group NOT retrieved. "
                                                      "(error: {error_message})"
                                                      "".format(error_message=one_group_results['status']))
    return HttpResponseRedirect(reverse('import_export_vote_smart:special_interest_group_rating_list',
                                        args=(special_interest_group_id,)))
@login_required
def import_states_view(request):
    """Refresh the list of states from Vote Smart, then render the import
    page with the stored states ordered by name."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    retrieve_and_save_vote_smart_states()

    template_values = {
        'state_list': VoteSmartState.objects.order_by('name'),
    }
    return render(request, 'import_export_vote_smart/vote_smart_import.html', template_values)
@login_required
def import_photo_view(request):
    """Smoke-test view for the Vote Smart candidate/photo/bio retrieval
    pipeline, using hard-coded sample inputs."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # NOTE: This view is for testing purposes. For the operational "Import Vote Smart Images" view, see:
    # "candidate_retrieve_photos_view" in candidate/views_admin.py
    last_name = "Trump"
    results = retrieve_vote_smart_candidates_into_local_db(last_name)
    if not results['success']:
        messages.add_message(request, messages.INFO, results['status'])
    else:
        messages.add_message(request, messages.INFO, "Photo retrieved.")
    # Now we can go on to make sure we have the right VoteSmartCandidate
    vote_smart_candidate_id = 15723  # hard-coded sample candidate id
    # ...and then retrieve the photo
    # NOTE(review): the results of the next two calls are overwritten and
    # never inspected — apparently intentional for this test-only view.
    results = retrieve_vote_smart_candidate_bio_into_local_db(vote_smart_candidate_id)
    last_name = "Pelosi"
    results = retrieve_vote_smart_officials_into_local_db(last_name)

    messages_on_stage = get_messages(request)
    template_values = {
        'messages_on_stage': messages_on_stage,
    }
    return render(request, 'import_export_vote_smart/vote_smart_import.html', template_values)
@login_required
def import_special_interest_groups_view(request):
    """Two-pass import of Vote Smart special interest groups: first build
    the index across every category/state combination, then augment each
    stored group with its detail record.  Redirects to the group list."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    # state_code = request.GET.get('state_code', 'NA')  # Default to national
    # category_id = request.GET.get('category_id', 0)

    # First retrieve an index of all groups for each state and category
    group_count = 0
    error_message_count = 0
    position_category_list = VoteSmartCategory.objects.order_by('name')
    for position_category in position_category_list:
        category_id = position_category.categoryId
        for state_code, state_name in STATE_CODE_MAP.items():
            results = retrieve_vote_smart_special_interest_groups_into_local_db(category_id, state_code)
            if not results['success']:
                # messages.add_message(request, messages.INFO, results['status'])
                print_to_log(logger=logger, exception_message_optional=results['status'])
                error_message_count += 1
            else:
                group_count += 1
    messages.add_message(request, messages.INFO, "{group_count} Special Interest Groups retrieved. "
                                                 "(errors: {error_message_count})"
                                                 "".format(group_count=group_count,
                                                           error_message_count=error_message_count))

    # Then retrieve the details about each group
    get_sig_group_count = 0
    get_sig_error_message_count = 0
    special_interest_group_list = VoteSmartSpecialInterestGroup.objects.order_by('name')
    for one_group in special_interest_group_list:
        special_interest_group_id = one_group.sigId
        one_group_results = retrieve_vote_smart_special_interest_group_into_local_db(special_interest_group_id)
        if not one_group_results['success']:
            print_to_log(logger=logger, exception_message_optional=one_group_results['status'])
            get_sig_error_message_count += 1
        else:
            get_sig_group_count += 1
    messages.add_message(request, messages.INFO, "{get_sig_group_count} Special Interest Groups augmented. "
                                                 "(errors: {get_sig_error_message_count})"
                                                 "".format(get_sig_group_count=get_sig_group_count,
                                                           get_sig_error_message_count=get_sig_error_message_count))
    return HttpResponseRedirect(reverse('import_export_vote_smart:vote_smart_special_interest_group_list', args=()))
@login_required
def vote_smart_candidate_list_view(request):
    """Render up to 1000 locally stored Vote Smart candidates, ordered by
    last name."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    candidate_list = []
    candidate_list_found = False
    try:
        candidate_list = VoteSmartCandidate.objects.order_by('lastName')[:1000]  # Descending order, and limited to 1000
        if len(candidate_list):
            candidate_list_found = True
    except VotesmartApiError as error_instance:
        # Catch the error message coming back from Vote Smart and pass it in the status
        error_message = error_instance.args
        status = "EXCEPTION_RAISED: {error_message}".format(error_message=error_message)
        print_to_log(logger=logger, exception_message_optional=status)

    # election_list = Election.objects.order_by('-election_day_text')
    if candidate_list_found:
        template_values = {
            'messages_on_stage': messages_on_stage,
            'candidate_list': candidate_list,
        }
    else:
        template_values = {
            'messages_on_stage': messages_on_stage,
        }
    return render(request, 'import_export_vote_smart/candidate_list.html', template_values)
@login_required
def vote_smart_rating_list_view(request):
    """Render up to 1000 locally stored Vote Smart ratings, newest time
    span first."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    rating_list = []
    rating_list_found = False
    try:
        rating_list = VoteSmartRating.objects.order_by('-timeSpan')[:1000]  # Descending order, and limited to 1000
        if len(rating_list):
            rating_list_found = True
    except VotesmartApiError as error_instance:
        # Catch the error message coming back from Vote Smart and pass it in the status
        error_message = error_instance.args
        status = "EXCEPTION_RAISED: {error_message}".format(error_message=error_message)
        print_to_log(logger=logger, exception_message_optional=status)

    # election_list = Election.objects.order_by('-election_day_text')
    if rating_list_found:
        template_values = {
            'messages_on_stage': messages_on_stage,
            'rating_list': rating_list,
            # 'election_list': election_list,
            # 'google_civic_election_id': google_civic_election_id,
        }
    else:
        template_values = {
            'messages_on_stage': messages_on_stage,
            # 'election_list': election_list,
            # 'google_civic_election_id': google_civic_election_id,
        }
    return render(request, 'import_export_vote_smart/rating_list.html', template_values)
@login_required
def special_interest_group_rating_list_view(request, special_interest_group_id):
    """Render the per-candidate ratings of one special interest group.

    Redirects back to the group list (with an error message) when the
    group id cannot be matched locally."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    messages_on_stage = get_messages(request)
    special_interest_group_id = convert_to_int(special_interest_group_id)
    # google_civic_election_id = request.GET.get('google_civic_election_id', 0)

    # Look up the group itself first, so the template can show its details.
    special_interest_group = VoteSmartSpecialInterestGroup()
    special_interest_group_found = False
    try:
        special_interest_group_query = VoteSmartSpecialInterestGroup.objects.filter(sigId=special_interest_group_id)
        if special_interest_group_query.count():
            special_interest_group = special_interest_group_query[0]
            special_interest_group_found = True
    except VotesmartApiError as error_instance:
        # Catch the error message coming back from Vote Smart and pass it in the status
        error_message = error_instance.args
        status = "EXCEPTION_RAISED: {error_message}".format(error_message=error_message)
        print_to_log(logger=logger, exception_message_optional=status)
        special_interest_group_found = False

    if not special_interest_group_found:
        messages.add_message(request, messages.ERROR,
                             'Could not find special_interest_group when trying to retrieve ratings.')
        return HttpResponseRedirect(reverse('import_export_vote_smart:vote_smart_special_interest_group_list', args=()))
    else:
        rating_list = []
        rating_list_found = False
        try:
            rating_list = VoteSmartRatingOneCandidate.objects.order_by('-timeSpan')
            rating_list = rating_list.filter(sigId=special_interest_group_id)
            if len(rating_list):
                rating_list_found = True
        except VotesmartApiError as error_instance:
            # Catch the error message coming back from Vote Smart and pass it in the status
            error_message = error_instance.args
            status = "EXCEPTION_RAISED: {error_message}".format(error_message=error_message)
            print_to_log(logger=logger, exception_message_optional=status)

        # election_list = Election.objects.order_by('-election_day_text')
        if rating_list_found:
            template_values = {
                'messages_on_stage': messages_on_stage,
                'special_interest_group': special_interest_group,
                'rating_list': rating_list,
                # 'election_list': election_list,
                # 'google_civic_election_id': google_civic_election_id,
            }
        else:
            template_values = {
                'messages_on_stage': messages_on_stage,
                'special_interest_group': special_interest_group,
                # 'election_list': election_list,
                # 'google_civic_election_id': google_civic_election_id,
            }
        return render(request, 'import_export_vote_smart/group_rating_list.html', template_values)
@login_required
def vote_smart_special_interest_group_list_view(request):
    """Render every stored Vote Smart special interest group, ordered by name."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    template_values = {
        'messages_on_stage': get_messages(request),
        'special_interest_group_list':
            VoteSmartSpecialInterestGroup.objects.order_by('name'),
    }
    return render(request, 'import_export_vote_smart/special_interest_group_list.html', template_values)
@login_required
def import_vote_smart_position_categories_view(request):
    """Fetch Vote Smart position categories into the local DB and redirect
    to the category list, reporting the outcome via a message."""
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    results = retrieve_vote_smart_position_categories_into_local_db()
    # On failure surface the status string; on success a fixed confirmation.
    feedback = "Categories retrieved." if results['success'] else results['status']
    messages.add_message(request, messages.INFO, feedback)
    return HttpResponseRedirect(reverse('import_export_vote_smart:vote_smart_position_category_list', args=()))
def retrieve_positions_from_vote_smart_for_election_view(request):
    """Pull Vote Smart ratings for every candidate in an election, convert them
    to We Vote positions, and redirect to the position list with a summary.

    NOTE(review): unlike the sibling views in this module, this view is not
    wrapped in @login_required -- confirm whether that is intentional (the
    voter_has_authority check below still gates access).
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
    try:
        candidate_list = CandidateCampaign.objects.all()
        if positive_value_exists(google_civic_election_id):
            candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
        # Cap the batch at 500 candidates per request.
        candidate_list = candidate_list.order_by('candidate_name')[:500]
    except CandidateCampaign.DoesNotExist:
        messages.add_message(request, messages.INFO, "Could not find any candidates for google_civic_election_id: "
                                                     "{google_civic_election_id}."
                                                     "".format(google_civic_election_id=google_civic_election_id))
        return HttpResponseRedirect(reverse('position:position_list', args=()))
    message = "Entering retrieve_positions_from_vote_smart_for_election."
    print_to_log(logger, exception_message_optional=message)
    # Counters rolled up into the final summary message.
    vote_smart_candidates_that_exist = 0
    vote_smart_candidates_created = 0
    vote_smart_candidates_not_found = 0
    we_vote_organizations_created = 0
    organization_positions_that_exist = 0
    organization_positions_created = 0
    # Do a first pass through where we get positions for candidates for whom we already have an id
    for we_vote_candidate in candidate_list:
        if we_vote_candidate.vote_smart_id:
            retrieve_results = retrieve_vote_smart_ratings_for_candidate_into_local_db(we_vote_candidate.vote_smart_id)
            transfer_results = transfer_vote_smart_ratings_to_positions_for_candidate(we_vote_candidate.id)
            if retrieve_results['rating_one_candidate_exists']:
                vote_smart_candidates_that_exist += 1
            if retrieve_results['rating_one_candidate_created']:
                vote_smart_candidates_created += 1
            if transfer_results['we_vote_organizations_created']:
                we_vote_organizations_created += 1
            if transfer_results['organization_positions_that_exist']:
                organization_positions_that_exist += 1
            if transfer_results['organization_positions_created']:
                organization_positions_created += 1
    message = "About to cycle through candidates for whom we don't have Vote Smart IDs for."
    print_to_log(logger, exception_message_optional=message)
    # Then we cycle through again, reach out to Vote Smart to match the candidate if we did not have a vote_smart_id,
    # and if we find a new Vote Smart id, we get positions for that candidate
    for we_vote_candidate in candidate_list:
        if not we_vote_candidate.vote_smart_id:
            force_retrieve = False
            results = retrieve_and_match_candidate_from_vote_smart(we_vote_candidate, force_retrieve)
            if results['success'] and results['we_vote_candidate_id']:
                # Re-fetch the candidate, which may now carry a vote_smart_id.
                we_vote_candidate = results['we_vote_candidate']
            if we_vote_candidate.vote_smart_id:
                retrieve_results = retrieve_vote_smart_ratings_for_candidate_into_local_db(
                    we_vote_candidate.vote_smart_id)
                transfer_results = transfer_vote_smart_ratings_to_positions_for_candidate(we_vote_candidate.id)
                if retrieve_results['rating_one_candidate_exists']:
                    vote_smart_candidates_that_exist += 1
                if retrieve_results['rating_one_candidate_created']:
                    vote_smart_candidates_created += 1
                if transfer_results['we_vote_organizations_created']:
                    we_vote_organizations_created += 1
                if transfer_results['organization_positions_that_exist']:
                    organization_positions_that_exist += 1
                if transfer_results['organization_positions_created']:
                    organization_positions_created += 1
            else:
                vote_smart_candidates_not_found += 1
    message = "Google Civic Election ID: {election_id}, " \
              "{vote_smart_candidates_that_exist} candidates from Vote Smart looked at, " \
              "{vote_smart_candidates_created} new candidates cached from Vote Smart, " \
              "{vote_smart_candidates_not_found} candidates not found in Vote Smart, " \
              "{we_vote_organizations_created} organizations created in We Vote, " \
              "{organization_positions_that_exist} positions from Vote Smart already exist locally, and " \
              "{organization_positions_created} positions from Vote Smart just created locally.".\
        format(election_id=google_civic_election_id,
               vote_smart_candidates_that_exist=vote_smart_candidates_that_exist,
               vote_smart_candidates_created=vote_smart_candidates_created,
               vote_smart_candidates_not_found=vote_smart_candidates_not_found,
               we_vote_organizations_created=we_vote_organizations_created,
               organization_positions_that_exist=organization_positions_that_exist,
               organization_positions_created=organization_positions_created)
    print_to_log(logger, exception_message_optional=message)
    messages.add_message(request, messages.INFO, message)
    return HttpResponseRedirect(reverse('position:position_list', args=()) +
                                "?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def transfer_vote_smart_ratings_to_positions_for_candidate_view(request, candidate_campaign_id):
    """Convert cached Vote Smart ratings for one candidate into We Vote positions."""
    required_authority = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, required_authority):
        return redirect_to_sign_in_page(request, required_authority)

    results = transfer_vote_smart_ratings_to_positions_for_candidate(candidate_campaign_id)
    # Flash the outcome: INFO on success, ERROR on failure.
    level = messages.INFO if results['success'] else messages.ERROR
    messages.add_message(request, level, results['status'])
    return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_campaign_id,)))
@login_required
def transfer_vote_smart_ratings_to_positions_for_politician_view(request, politician_id):
    """Convert cached Vote Smart ratings for one politician into We Vote positions.

    NOTE(review): the redirect below goes to 'candidate:candidate_edit' with a
    politician_id -- this looks like a copy/paste from the candidate view;
    confirm whether a politician edit route was intended.
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)
    results = transfer_vote_smart_ratings_to_positions_for_politician(politician_id)
    if results['success']:
        messages.add_message(request, messages.INFO, results['status'])
    else:
        messages.add_message(request, messages.ERROR, results['status'])
    return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(politician_id,)))
@login_required
def transfer_vote_smart_sigs_to_we_vote_orgs_view(request):
    """Create We Vote organizations from cached Vote Smart special interest groups."""
    required_authority = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, required_authority):
        return redirect_to_sign_in_page(request, required_authority)

    results = transfer_vote_smart_special_interest_groups_to_we_vote_organizations()
    # Flash the outcome: INFO on success, ERROR on failure.
    level = messages.INFO if results['success'] else messages.ERROR
    messages.add_message(request, level, results['status'])
    return HttpResponseRedirect(reverse('import_export_vote_smart:vote_smart_special_interest_group_list', args=()))
@login_required
def state_detail_view(request, pk):
    """Render the detail page for a single Vote Smart state record."""
    required_authority = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, required_authority):
        return redirect_to_sign_in_page(request, required_authority)

    state = VoteSmartState.objects.get(stateId=pk)
    return render(request, 'import_export_vote_smart/state_detail.html', {'state': state})
@login_required
def vote_smart_index_view(request):
    """Render the Vote Smart import/export landing page."""
    required_authority = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, required_authority):
        return redirect_to_sign_in_page(request, required_authority)
    # No dynamic context is needed for the index template.
    return render(request, 'import_export_vote_smart/index.html', {})
@login_required
def vote_smart_position_category_list_view(request):
    """Render the list of Vote Smart position categories, ordered by name."""
    required_authority = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, required_authority):
        return redirect_to_sign_in_page(request, required_authority)

    template_values = {
        'messages_on_stage': get_messages(request),
        'position_category_list': VoteSmartCategory.objects.order_by('name'),
    }
    return render(request, 'import_export_vote_smart/position_category_list.html', template_values)
| |
"""Future-returning APIs for coroutines."""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
from collections import namedtuple
from zmq import POLLOUT, POLLIN
try:
from tornado.concurrent import Future
except ImportError:
from .minitornado.concurrent import Future
class CancelledError(Exception):
    """Set on a Future to signal that the operation was cancelled."""
class _TornadoFuture(Future):
    """Tornado Future subclass that restores cancel()/cancelled() support.

    Cancellation is modelled by resolving the Future with a CancelledError
    exception; a Future that already finished cannot be cancelled.
    """
    def cancel(self):
        if not self.done():
            self.set_exception(CancelledError())
            return True
        return False
    def cancelled(self):
        if not self.done():
            return False
        return isinstance(self.exception(), CancelledError)
import zmq as _zmq
from zmq.eventloop.ioloop import IOLoop
# Bookkeeping record for one queued send/recv/poll operation:
# the Future to resolve, the operation kind, its kwargs, and (sends) the msg.
_FutureEvent = namedtuple('_FutureEvent', ('future', 'kind', 'kwargs', 'msg'))
# mixins for tornado/asyncio compatibility
class _AsyncTornado(object):
    """Mixin supplying the tornado Future class and IOLoop event constants."""
    _Future = _TornadoFuture  # Future implementation used by sockets/pollers
    _READ = IOLoop.READ  # IO-event bitmask values, in tornado's encoding
    _WRITE = IOLoop.WRITE
    def _default_loop(self):
        """Return the current tornado IOLoop."""
        return IOLoop.current()
class _AsyncPoller(_zmq.Poller):
    """Poller that returns a Future on poll, instead of blocking."""
    def poll(self, timeout=-1):
        """Return a Future for a poll event"""
        future = self._Future()
        if timeout == 0:
            # Non-blocking poll: resolve immediately with the result or error.
            try:
                result = super(_AsyncPoller, self).poll(0)
            except Exception as e:
                future.set_exception(e)
            else:
                future.set_result(result)
            return future
        loop = self._default_loop()
        # register Future to be called as soon as any event is available on any socket
        # only support polling on zmq sockets, for now
        watcher = self._Future()
        for socket, mask in self.sockets:
            if mask & _zmq.POLLIN:
                socket._add_recv_event('poll', future=watcher)
            if mask & _zmq.POLLOUT:
                socket._add_send_event('poll', future=watcher)
        def on_poll_ready(f):
            # Once any registered socket signals readiness, do a non-blocking
            # poll to collect everything that is ready right now.
            if future.done():
                return
            if watcher.exception():
                future.set_exception(watcher.exception())
            else:
                try:
                    result = super(_AsyncPoller, self).poll(0)
                except Exception as e:
                    future.set_exception(e)
                else:
                    future.set_result(result)
        watcher.add_done_callback(on_poll_ready)
        if timeout is not None and timeout > 0:
            # schedule cancel to fire on poll timeout, if any
            def trigger_timeout():
                # Resolving the watcher with None makes on_poll_ready fire and
                # report whatever (possibly nothing) is ready at that moment.
                if not watcher.done():
                    watcher.set_result(None)
            timeout_handle = loop.call_later(
                1e-3 * timeout,  # zmq timeouts are in ms; call_later takes seconds
                trigger_timeout
            )
            def cancel_timeout(f):
                # asyncio handles have .cancel(); tornado uses remove_timeout.
                if hasattr(timeout_handle, 'cancel'):
                    timeout_handle.cancel()
                else:
                    loop.remove_timeout(timeout_handle)
            future.add_done_callback(cancel_timeout)
        def cancel_watcher(f):
            # If the caller cancels the poll Future, cancel the watcher too.
            if not watcher.done():
                watcher.cancel()
        future.add_done_callback(cancel_watcher)
        return future
class Poller(_AsyncTornado, _AsyncPoller):
    """Tornado-flavored Poller whose poll() returns a Future."""
    pass
class _AsyncSocket(_zmq.Socket):
    """zmq Socket whose send/recv/poll methods return Futures.

    All real I/O is performed non-blockingly on a shadow socket; this class
    queues _FutureEvent records and resolves their Futures as the event loop
    reports readiness.
    """

    _recv_futures = None  # FIFO list of pending recv _FutureEvents
    _send_futures = None  # FIFO list of pending send _FutureEvents
    _state = 0  # IO-event bitmask currently registered with the loop
    _shadow_sock = None  # shadow Socket used for the actual send/recv calls
    _poller_class = Poller
    io_loop = None

    def __init__(self, context, socket_type, io_loop=None):
        super(_AsyncSocket, self).__init__(context, socket_type)
        self.io_loop = io_loop or self._default_loop()
        self._recv_futures = []
        self._send_futures = []
        self._state = 0
        # Shadow the underlying socket so real I/O never goes through this
        # (Future-returning) class.
        self._shadow_sock = _zmq.Socket.shadow(self.underlying)
        self._init_io_state()

    def recv_multipart(self, flags=0, copy=True, track=False):
        """Receive a complete multipart zmq message.

        Returns a Future whose result will be a multipart message.
        """
        return self._add_recv_event('recv_multipart',
            dict(flags=flags, copy=copy, track=track)
        )

    def recv(self, flags=0, copy=True, track=False):
        """Receive a single zmq frame.

        Returns a Future, whose result will be the received frame.
        Recommend using recv_multipart instead.
        """
        return self._add_recv_event('recv',
            dict(flags=flags, copy=copy, track=track)
        )

    def send_multipart(self, msg, flags=0, copy=True, track=False):
        """Send a complete multipart zmq message.

        Returns a Future that resolves when sending is complete.
        """
        return self._add_send_event('send_multipart', msg=msg,
            kwargs=dict(flags=flags, copy=copy, track=track),
        )

    def send(self, msg, flags=0, copy=True, track=False):
        """Send a single zmq frame.

        Returns a Future that resolves when sending is complete.
        Recommend using send_multipart instead.
        """
        return self._add_send_event('send', msg=msg,
            kwargs=dict(flags=flags, copy=copy, track=track),
        )

    def _deserialize(self, recvd, load):
        """Deserialize with Futures.

        Returns a Future resolving to load(recvd.result()), propagating any
        exception from the recv or from the deserializer.
        """
        f = self._Future()
        def _chain(_):
            if recvd.exception():
                f.set_exception(recvd.exception())
            else:
                buf = recvd.result()
                try:
                    loaded = load(buf)
                except Exception as e:
                    f.set_exception(e)
                else:
                    f.set_result(loaded)
        recvd.add_done_callback(_chain)
        return f

    def poll(self, timeout=None, flags=_zmq.POLLIN):
        """poll the socket for events

        returns a Future for the poll results.
        """
        if self.closed:
            raise _zmq.ZMQError(_zmq.ENOTSUP)
        p = self._poller_class()
        p.register(self, flags)
        f = p.poll(timeout)
        future = self._Future()
        def unwrap_result(f):
            # Reduce the poller's [(socket, events)] list to this socket's
            # event mask (0 if nothing fired for us).
            if future.done():
                return
            if f.exception():
                future.set_exception(f.exception())
            else:
                evts = dict(f.result())
                future.set_result(evts.get(self, 0))
        f.add_done_callback(unwrap_result)
        return future

    def _add_timeout(self, future, timeout):
        """Add a timeout for a send or recv Future"""
        def future_timeout():
            # BUGFIX: removed stray debug print left in the timeout callback.
            if future.done():
                # future already resolved, do nothing
                return
            # raise EAGAIN
            future.set_exception(_zmq.Again())
        self._call_later(timeout, future_timeout)

    def _call_later(self, delay, callback):
        """Schedule a function to be called later

        Override for different IOLoop implementations

        Tornado and asyncio happen to both have ioloop.call_later
        with the same signature.
        """
        self.io_loop.call_later(delay, callback)

    def _add_recv_event(self, kind, kwargs=None, future=None):
        """Add a recv event, returning the corresponding Future"""
        f = future or self._Future()
        # 'poll' events pass kwargs=None; only real recv kinds reach the
        # short-circuit below, and those always supply a kwargs dict.
        if kind.startswith('recv') and kwargs.get('flags', 0) & _zmq.DONTWAIT:
            # short-circuit non-blocking calls
            recv = getattr(self._shadow_sock, kind)
            try:
                r = recv(**kwargs)
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            return f
        if hasattr(_zmq, 'RCVTIMEO'):
            # Honor the socket's RCVTIMEO by failing the Future with EAGAIN.
            timeout_ms = self._shadow_sock.rcvtimeo
            if timeout_ms >= 0:
                self._add_timeout(f, timeout_ms * 1e-3)
        self._recv_futures.append(
            _FutureEvent(f, kind, kwargs, msg=None)
        )
        if self.events & POLLIN:
            # recv immediately, if we can
            self._handle_recv()
        if self._recv_futures:
            self._add_io_state(self._READ)
        return f

    def _add_send_event(self, kind, msg=None, kwargs=None, future=None):
        """Add a send event, returning the corresponding Future"""
        f = future or self._Future()
        if kind.startswith('send') and kwargs.get('flags', 0) & _zmq.DONTWAIT:
            if kind == 'send_multipart':
                kwargs['msg_parts'] = msg
            # short-circuit non-blocking calls
            send = getattr(self._shadow_sock, kind)
            try:
                r = send(**kwargs)
            except Exception as e:
                f.set_exception(e)
            else:
                f.set_result(r)
            return f
        if hasattr(_zmq, 'SNDTIMEO'):
            # Honor the socket's SNDTIMEO by failing the Future with EAGAIN.
            timeout_ms = self._shadow_sock.sndtimeo
            if timeout_ms >= 0:
                self._add_timeout(f, timeout_ms * 1e-3)
        self._send_futures.append(
            _FutureEvent(f, kind, kwargs=kwargs, msg=msg)
        )
        if self.events & POLLOUT:
            # send immediately if we can
            self._handle_send()
        if self._send_futures:
            self._add_io_state(self._WRITE)
        return f

    def _handle_recv(self):
        """Handle recv events"""
        f = None
        while self._recv_futures:
            f, kind, kwargs, _ = self._recv_futures.pop(0)
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break
        if not self._recv_futures:
            self._drop_io_state(self._READ)
        if f is None:
            return
        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'recv_multipart':
            recv = self._shadow_sock.recv_multipart
        elif kind == 'recv':
            recv = self._shadow_sock.recv
        else:
            raise ValueError("Unhandled recv event type: %r" % kind)
        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = recv(**kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)

    def _handle_send(self):
        """Handle send events: resolve the oldest live pending send Future."""
        f = None
        while self._send_futures:
            f, kind, kwargs, msg = self._send_futures.pop(0)
            # skip any cancelled futures
            if f.done():
                f = None
            else:
                break
        if not self._send_futures:
            self._drop_io_state(self._WRITE)
        if f is None:
            return
        if kind == 'poll':
            # on poll event, just signal ready, nothing else.
            f.set_result(None)
            return
        elif kind == 'send_multipart':
            send = self._shadow_sock.send_multipart
        elif kind == 'send':
            send = self._shadow_sock.send
        else:
            raise ValueError("Unhandled send event type: %r" % kind)
        kwargs['flags'] |= _zmq.DONTWAIT
        try:
            result = send(msg, **kwargs)
        except Exception as e:
            f.set_exception(e)
        else:
            f.set_result(result)

    # event masking from ZMQStream
    def _handle_events(self, fd, events):
        """Dispatch IO events to _handle_recv, etc."""
        if events & self._READ:
            self._handle_recv()
        if events & self._WRITE:
            self._handle_send()

    def _add_io_state(self, state):
        """Add io_state to poller."""
        if not self._state & state:
            self._state = self._state | state
            self._update_handler(self._state)

    def _drop_io_state(self, state):
        """Stop poller from watching an io_state."""
        if self._state & state:
            self._state = self._state & (~state)
            self._update_handler(self._state)

    def _update_handler(self, state):
        """Update IOLoop handler with state."""
        self._state = state
        self.io_loop.update_handler(self, state)

    def _init_io_state(self):
        """initialize the ioloop event handler"""
        self.io_loop.add_handler(self, self._handle_events, self._state)
class Socket(_AsyncTornado, _AsyncSocket):
    """Tornado-flavored Socket whose send/recv/poll return Futures."""
    pass
class Context(_zmq.Context):
    """zmq Context whose socket() produces Future-returning Sockets."""
    io_loop = None  # tornado IOLoop shared by sockets created from this context
    @staticmethod
    def _socket_class(self, socket_type):
        # NOTE(review): declared @staticmethod yet takes `self` explicitly --
        # presumably because zmq.Context invokes self._socket_class(self, type)
        # as if it were a class; confirm against zmq.Context's socket().
        return Socket(self, socket_type, io_loop=self.io_loop)
    def __init__(self, *args, **kwargs):
        # Pop the io_loop kwarg so it is not forwarded to zmq.Context.
        io_loop = kwargs.pop('io_loop', None)
        super(Context, self).__init__(*args, **kwargs)
        self.io_loop = io_loop or IOLoop.current()
| |
"""
Compute Engine definitions for the Pipeline API.
"""
from abc import (
ABCMeta,
abstractmethod,
)
from uuid import uuid4
from six import (
iteritems,
with_metaclass,
)
from six.moves import zip_longest
from numpy import array
from pandas import (
DataFrame,
date_range,
MultiIndex,
)
from toolz import groupby, juxt
from toolz.curried.operator import getitem
from zipline.lib.adjusted_array import ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.utils.numpy_utils import repeat_first_axis, repeat_last_axis
from zipline.utils.pandas_utils import explode
from .term import AssetExists
class PipelineEngine(with_metaclass(ABCMeta)):
    """Abstract interface for engines that compute a Pipeline over a date range."""
    @abstractmethod
    def run_pipeline(self, pipeline, start_date, end_date):
        """
        Compute values for `pipeline` between `start_date` and `end_date`.
        Returns a DataFrame with a MultiIndex of (date, asset) pairs
        Parameters
        ----------
        pipeline : zipline.pipeline.Pipeline
            The pipeline to run.
        start_date : pd.Timestamp
            Start date of the computed matrix.
        end_date : pd.Timestamp
            End date of the computed matrix.
        Returns
        -------
        result : pd.DataFrame
            A frame of computed results.
            The columns of `result` will be the computed results of
            `pipeline.columns`, which should be a dictionary mapping strings to
            instances of `zipline.pipeline.term.Term`.
            For each date between `start_date` and `end_date`, `result` will
            contain a row for each asset that passed `pipeline.screen`. A
            screen of None indicates that a row should be returned for each
            asset that existed each day.
        """
        raise NotImplementedError("run_pipeline")
class NoOpPipelineEngine(PipelineEngine):
    """
    A PipelineEngine that doesn't do anything.
    """
    def run_pipeline(self, pipeline, start_date, end_date):
        # The cartesian product of the date range with an empty asset tuple
        # yields a MultiIndex with zero rows, so the frame has the expected
        # columns but no data.
        empty_index = MultiIndex.from_product(
            [date_range(start=start_date, end=end_date, freq='D'), ()],
        )
        column_names = sorted(pipeline.columns.keys())
        return DataFrame(index=empty_index, columns=column_names)
class SimplePipelineEngine(object):
    """
    PipelineEngine class that computes each term independently.
    Parameters
    ----------
    get_loader : callable
        A function that is given an atomic term and returns a PipelineLoader
        to use to retrieve raw data for that term.
    calendar : DatetimeIndex
        Array of dates to consider as trading days when computing a range
        between a fixed start and end.
    asset_finder : zipline.assets.AssetFinder
        An AssetFinder instance.  We depend on the AssetFinder to determine
        which assets are in the top-level universe at any point in time.
    """
    __slots__ = [
        '_get_loader',
        '_calendar',
        '_finder',
        '_root_mask_term',
        '__weakref__',
    ]
    def __init__(self, get_loader, calendar, asset_finder):
        self._get_loader = get_loader
        self._calendar = calendar
        self._finder = asset_finder
        # Synthetic term whose computed values form the root existence mask.
        self._root_mask_term = AssetExists()
    def run_pipeline(self, pipeline, start_date, end_date):
        """
        Compute a pipeline.
        Parameters
        ----------
        pipeline : zipline.pipeline.Pipeline
            The pipeline to run.
        start_date : pd.Timestamp
            Start date of the computed matrix.
        end_date : pd.Timestamp
            End date of the computed matrix.
        The algorithm implemented here can be broken down into the following
        stages:
        0. Build a dependency graph of all terms in `terms`. Topologically
        sort the graph to determine an order in which we can compute the terms.
        1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
        for each date between start_date and end_date, a boolean value for each
        known asset indicating whether the asset existed on that date.
        2. Compute each term in the dependency order determined in (0), caching
        the results in a dictionary so that they can be fed into future
        terms.
        3. For each date, determine the number of assets passing **all**
        filters. The sum, N, of all these values is the total number of rows in
        our output frame, so we pre-allocate an output array of length N for
        each factor in `terms`.
        4. Fill in the arrays allocated in (3) by copying computed values from
        our output cache into the corresponding rows.
        5. Stick the values computed in (4) into a DataFrame and return it.
        Step 0 is performed by `zipline.pipeline.graph.TermGraph`.
        Step 1 is performed in `self._compute_root_mask`.
        Step 2 is performed in `self.compute_chunk`.
        Steps 3, 4, and 5 are performed in self._format_factor_matrix.
        See Also
        --------
        PipelineEngine.run_pipeline
        """
        if end_date < start_date:
            raise ValueError(
                "start_date must be before or equal to end_date \n"
                "start_date=%s, end_date=%s" % (start_date, end_date)
            )
        # Use a throwaway unique name for the screen so it cannot collide
        # with a user-supplied column name.
        screen_name = uuid4().hex
        graph = pipeline.to_graph(screen_name, self._root_mask_term)
        extra_rows = graph.extra_rows[self._root_mask_term]
        root_mask = self._compute_root_mask(start_date, end_date, extra_rows)
        dates, assets, root_mask_values = explode(root_mask)
        outputs = self.compute_chunk(
            graph,
            dates,
            assets,
            initial_workspace={self._root_mask_term: root_mask_values},
        )
        # Drop the warm-up rows that were only needed for trailing windows.
        out_dates = dates[extra_rows:]
        screen_values = outputs.pop(screen_name)
        return self._to_narrow(outputs, screen_values, out_dates, assets)
    def _compute_root_mask(self, start_date, end_date, extra_rows):
        """
        Compute a lifetimes matrix from our AssetFinder, then drop columns that
        didn't exist at all during the query dates.
        Parameters
        ----------
        start_date : pd.Timestamp
            Base start date for the matrix.
        end_date : pd.Timestamp
            End date for the matrix.
        extra_rows : int
            Number of extra rows to compute before `start_date`.
            Extra rows are needed by terms like moving averages that require a
            trailing window of data.
        Returns
        -------
        lifetimes : pd.DataFrame
            Frame of dtype `bool` containing dates from `extra_rows` days
            before `start_date`, continuing through to `end_date`.  The
            returned frame contains as columns all assets in our AssetFinder
            that existed for at least one day between `start_date` and
            `end_date`.
        """
        calendar = self._calendar
        finder = self._finder
        start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
        if start_idx < extra_rows:
            raise NoFurtherDataError(
                msg="Insufficient data to compute Pipeline mask: "
                    "start date was %s, "
                    "earliest known date was %s, "
                    "and %d extra rows were requested." % (
                        start_date, calendar[0], extra_rows,
                    ),
            )
        # Build lifetimes matrix reaching back to `extra_rows` days before
        # `start_date.`
        lifetimes = finder.lifetimes(
            calendar[start_idx - extra_rows:end_idx],
            include_start_date=False
        )
        assert lifetimes.index[extra_rows] == start_date
        assert lifetimes.index[-1] == end_date
        # BUGFIX: `columns.unique` is a *method*; without calling it the
        # expression was always truthy and this duplicate check never fired.
        # `is_unique` is the boolean property we actually want.
        if not lifetimes.columns.is_unique:
            columns = lifetimes.columns
            duplicated = columns[columns.duplicated()].unique()
            # BUGFIX: %d cannot format an array of sids; use %s.
            raise AssertionError("Duplicated sids: %s" % duplicated)
        # Filter out columns that didn't exist between the requested start and
        # end dates.
        existed = lifetimes.iloc[extra_rows:].any()
        ret = lifetimes.loc[:, existed]
        shape = ret.shape
        assert shape[0] * shape[1] != 0, 'root mask cannot be empty'
        return ret
    def _mask_and_dates_for_term(self, term, workspace, graph, dates):
        """
        Load mask and mask row labels for term.
        """
        mask = term.mask
        # Align the mask with this term by trimming the extra warm-up rows
        # the mask carries beyond what this term needs.
        offset = graph.extra_rows[mask] - graph.extra_rows[term]
        return workspace[mask][offset:], dates[offset:]
    @staticmethod
    def _inputs_for_term(term, workspace, graph):
        """
        Compute inputs for the given term.
        This is mostly complicated by the fact that for each input we store as
        many rows as will be necessary to serve **any** computation requiring
        that input.
        """
        offsets = graph.offset
        if term.windowed:
            # If term is windowed, then all input data should be instances of
            # AdjustedArray.
            return [
                workspace[input_].traverse(
                    window_length=term.window_length,
                    offset=offsets[term, input_]
                )
                for input_ in term.inputs
            ]
        # If term is not windowed, input_data may be an AdjustedArray or
        # np.ndarray.  Coerce the former to the latter.
        out = []
        for input_ in term.inputs:
            input_data = ensure_ndarray(workspace[input_])
            offset = offsets[term, input_]
            # OPTIMIZATION: Don't make a copy by doing input_data[0:] if
            # offset is zero.
            if offset:
                input_data = input_data[offset:]
            out.append(input_data)
        return out
    def get_loader(self, term):
        """Return the PipelineLoader for `term`, or None for AssetExists."""
        # AssetExists is one of the atomic terms in the graph, so we look up
        # a loader here when grouping by loader, but since it's already in the
        # workspace, we don't actually use that group.
        if term is AssetExists():
            return None
        return self._get_loader(term)
    def compute_chunk(self, graph, dates, assets, initial_workspace):
        """
        Compute the Pipeline terms in the graph for the requested start and end
        dates.
        Parameters
        ----------
        graph : zipline.pipeline.graph.TermGraph
        dates : pd.DatetimeIndex
            Row labels for our root mask.
        assets : pd.Int64Index
            Column labels for our root mask.
        initial_workspace : dict
            Map from term -> output.
            Must contain at least entry for `self._root_mask_term` whose shape
            is `(len(dates), len(assets))`, but may contain additional
            pre-computed terms for testing or optimization purposes.
        Returns
        -------
        results : dict
            Dictionary mapping requested results to outputs.
        """
        self._validate_compute_chunk_params(dates, assets, initial_workspace)
        get_loader = self.get_loader
        # Copy the supplied initial workspace so we don't mutate it in place.
        workspace = initial_workspace.copy()
        # If atomic terms share the same loader and extra_rows, load them all
        # together.
        atomic_group_key = juxt(get_loader, getitem(graph.extra_rows))
        atomic_groups = groupby(atomic_group_key, graph.atomic_terms)
        for term in graph.ordered():
            # `term` may have been supplied in `initial_workspace`, and in the
            # future we may pre-compute atomic terms coming from the same
            # dataset.  In either case, we will already have an entry for this
            # term, which we shouldn't re-compute.
            if term in workspace:
                continue
            # Asset labels are always the same, but date labels vary by how
            # many extra rows are needed.
            mask, mask_dates = self._mask_and_dates_for_term(
                term, workspace, graph, dates
            )
            if term.atomic:
                to_load = sorted(
                    atomic_groups[atomic_group_key(term)],
                    key=lambda t: t.dataset
                )
                loader = get_loader(term)
                loaded = tuple(loader.load_adjusted_array(
                    to_load, mask_dates, assets, mask,
                ))
                assert len(to_load) == len(loaded)
                for loaded_term, adj_array in zip_longest(to_load, loaded):
                    workspace[loaded_term] = adj_array
            else:
                workspace[term] = term._compute(
                    self._inputs_for_term(term, workspace, graph),
                    mask_dates,
                    assets,
                    mask,
                )
                assert(workspace[term].shape == mask.shape)
        out = {}
        graph_extra_rows = graph.extra_rows
        for name, term in iteritems(graph.outputs):
            # Truncate off extra rows from outputs.
            out[name] = workspace[term][graph_extra_rows[term]:]
        return out
    def _to_narrow(self, data, mask, dates, assets):
        """
        Convert raw computed pipeline results into a DataFrame for public APIs.
        Parameters
        ----------
        data : dict[str -> ndarray[ndim=2]]
            Dict mapping column names to computed results.
        mask : ndarray[bool, ndim=2]
            Mask array of values to keep.
        dates : ndarray[datetime64, ndim=1]
            Row index for arrays `data` and `mask`
        assets : ndarray[int64, ndim=2]
            Column index for arrays `data` and `mask`
        Returns
        -------
        results : pd.DataFrame
            The indices of `results` are as follows:
            index : two-tiered MultiIndex of (date, asset).
                Contains an entry for each (date, asset) pair corresponding to
                a `True` value in `mask`.
            columns : Index of str
                One column per entry in `data`.
        If mask[date, asset] is True, then result.loc[(date, asset), colname]
        will contain the value of data[colname][date, asset].
        """
        resolved_assets = array(self._finder.retrieve_all(assets))
        # Broadcast dates across columns and assets across rows, then keep
        # only the (date, asset) cells where the mask is True.
        dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
        assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
        return DataFrame(
            data={name: arr[mask] for name, arr in iteritems(data)},
            index=MultiIndex.from_arrays([dates_kept, assets_kept]),
        ).tz_localize('UTC', level=0)
    def _validate_compute_chunk_params(self, dates, assets, initial_workspace):
        """
        Verify that the values passed to compute_chunk are well-formed.
        """
        root = self._root_mask_term
        clsname = type(self).__name__
        # Writing this out explicitly so this errors in testing if we change
        # the name without updating this line.
        compute_chunk_name = self.compute_chunk.__name__
        if root not in initial_workspace:
            raise AssertionError(
                "root_mask values not supplied to {cls}.{method}".format(
                    cls=clsname,
                    method=compute_chunk_name,
                )
            )
        shape = initial_workspace[root].shape
        implied_shape = len(dates), len(assets)
        if shape != implied_shape:
            raise AssertionError(
                "root_mask shape is {shape}, but received dates/assets "
                "imply that shape should be {implied}".format(
                    shape=shape,
                    implied=implied_shape,
                )
            )
| |
import datetime
import decimal
import re
import time
import math
from itertools import tee
import django.utils.copycompat as copy
from django.db import connection
from django.db.models.fields.subclassing import LegacyConnection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
    """Sentinel type marking that no explicit default was given for a Field."""
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
# Variant using the literal label "None" instead of dashes.
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
    """Raised when a field lookup on a model does not match any field."""
# A guide to Field parameters:
#
# * name: The name of the field specifed in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
    """Base class for all field types"""
    __metaclass__ = LegacyConnection
    # Designates whether empty strings fundamentally are allowed at the
    # database level.
    empty_strings_allowed = True
    # These track each time a Field instance is created. Used to retain order.
    # The auto_creation_counter is used for fields that Django implicitly
    # creates, creation_counter is used for all user-specified fields.
    creation_counter = 0
    auto_creation_counter = -1
    default_validators = [] # Default set of validators
    default_error_messages = {
        'invalid_choice': _(u'Value %r is not a valid choice.'),
        'null': _(u'This field cannot be null.'),
        'blank': _(u'This field cannot be blank.'),
    }
    # Generic field type description, usually overridden by subclasses
    def _description(self):
        return _(u'Field of type: %(field_type)s') % {
            'field_type': self.__class__.__name__
        }
    description = property(_description)
    def __init__(self, verbose_name=None, name=None, primary_key=False,
            max_length=None, unique=False, blank=False, null=False,
            db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
            serialize=True, unique_for_date=None, unique_for_month=None,
            unique_for_year=None, choices=None, help_text='', db_column=None,
            db_tablespace=None, auto_created=False, validators=[],
            error_messages=None):
        """
        Record all field options, assign a creation-order counter, and merge
        validators and error messages from the class hierarchy.
        """
        self.name = name
        self.verbose_name = verbose_name
        self.primary_key = primary_key
        self.max_length, self._unique = max_length, unique
        self.blank, self.null = blank, null
        # Oracle treats the empty string ('') as null, so coerce the null
        # option whenever '' is a possible value.
        if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
            self.null = True
        self.rel = rel
        self.default = default
        self.editable = editable
        self.serialize = serialize
        self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
        self.unique_for_year = unique_for_year
        self._choices = choices or []
        self.help_text = help_text
        self.db_column = db_column
        self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
        self.auto_created = auto_created
        # Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
        self.db_index = db_index
        # Adjust the appropriate creation counter, and save our local copy.
        # Auto-created fields count downward from -1; explicit fields count
        # upward from 0, so autos always sort before user-declared fields.
        if auto_created:
            self.creation_counter = Field.auto_creation_counter
            Field.auto_creation_counter -= 1
        else:
            self.creation_counter = Field.creation_counter
            Field.creation_counter += 1
        # Concatenation builds a fresh list, so the mutable default argument
        # ``validators=[]`` is never mutated.
        self.validators = self.default_validators + validators
        # Walk the MRO base-first so subclasses' default_error_messages win,
        # then apply any per-instance overrides.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
    def __cmp__(self, other):
        # This is needed because bisect does not take a comparison function.
        # Fields sort by declaration order via their creation counters.
        return cmp(self.creation_counter, other.creation_counter)
    def __deepcopy__(self, memodict):
        # We don't have to deepcopy very much here, since most things are not
        # intended to be altered after initial creation.
        obj = copy.copy(self)
        if self.rel:
            obj.rel = copy.copy(self.rel)
        memodict[id(self)] = obj
        return obj
    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        return value
    def run_validators(self, value):
        """
        Run every registered validator on ``value``, collecting all failures
        into a single ValidationError. Empty values are skipped entirely.
        """
        if value in validators.EMPTY_VALUES:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except exceptions.ValidationError, e:
                # Prefer this field's own message for the validator's error
                # code, when one is registered.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    message = self.error_messages[e.code]
                    if e.params:
                        message = message % e.params
                    errors.append(message)
                else:
                    errors.extend(e.messages)
        if errors:
            raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)
        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'])
        if not self.blank and value in validators.EMPTY_VALUES:
            raise exceptions.ValidationError(self.error_messages['blank'])
    def clean(self, value, model_instance):
        """
        Convert the value's type and run validation. Validation errors from to_python
        and validate are propagated. The correct value is returned if no error is
        raised.
        """
        value = self.to_python(value)
        self.validate(value, model_instance)
        self.run_validators(value)
        return value
    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection.
        """
        # The default implementation of this method looks at the
        # backend-specific DATA_TYPES dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # a custom field might be represented by a TEXT column type, which is the
        # same as the TextField Django field type, which means the custom field's
        # get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
        try:
            return connection.creation.data_types[self.get_internal_type()] % data
        except KeyError:
            # Unknown internal type: the field provides no column definition.
            return None
    # A field is unique if declared unique=True or if it is the primary key.
    def unique(self):
        return self._unique or self.primary_key
    unique = property(unique)
    def set_attributes_from_name(self, name):
        """Derive attname/column and a default verbose_name from ``name``."""
        self.name = name
        self.attname, self.column = self.get_attname_column()
        if self.verbose_name is None and name:
            self.verbose_name = name.replace('_', ' ')
    def contribute_to_class(self, cls, name):
        """Register this field on model class ``cls`` under ``name``."""
        self.set_attributes_from_name(name)
        self.model = cls
        cls._meta.add_field(self)
        if self.choices:
            # Expose a get_FOO_display() helper for fields with choices.
            setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))
    def get_attname(self):
        # Attribute name on the model instance; ForeignKey overrides this.
        return self.name
    def get_attname_column(self):
        attname = self.get_attname()
        # db_column takes precedence over the attribute name.
        column = self.db_column or attname
        return attname, column
    def get_cache_name(self):
        # Name of the instance attribute used to cache related lookups.
        return '_%s_cache' % self.name
    def get_internal_type(self):
        return self.__class__.__name__
    def pre_save(self, model_instance, add):
        "Returns field's value just before saving."
        return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        "Perform preliminary non-db specific value checks and conversions."
        return value
    def get_db_prep_value(self, value, connection, prepared=False):
        """
        Returns field's value prepared for interacting with the database
        backend.

        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
        """
        if not prepared:
            value = self.get_prep_value(value)
        return value
    def get_db_prep_save(self, value, connection):
        "Returns field's value prepared for saving into a database."
        return self.get_db_prep_value(value, connection=connection, prepared=False)
    def get_prep_lookup(self, lookup_type, value):
        "Perform preliminary non-db specific lookup checks and conversions"
        # Objects that know how to prepare themselves (e.g. query expressions)
        # take precedence over per-lookup handling.
        if hasattr(value, 'prepare'):
            return value.prepare()
        if hasattr(value, '_prepare'):
            return value._prepare()
        if lookup_type in (
                'regex', 'iregex', 'month', 'day', 'week_day', 'search',
                'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
                'endswith', 'iendswith', 'isnull'
            ):
            return value
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return self.get_prep_value(value)
        elif lookup_type in ('range', 'in'):
            return [self.get_prep_value(v) for v in value]
        elif lookup_type == 'year':
            try:
                return int(value)
            except ValueError:
                raise ValueError("The __year lookup type requires an integer argument")
        raise TypeError("Field has invalid lookup: %s" % lookup_type)
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        "Returns field's value prepared for database lookup."
        if not prepared:
            value = self.get_prep_lookup(lookup_type, value)
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
            # If the value has a relabel_aliases method, it will need to
            # be invoked before the final SQL is evaluated
            if hasattr(value, 'relabel_aliases'):
                return value
            if hasattr(value, 'as_sql'):
                sql, params = value.as_sql()
            else:
                sql, params = value._as_sql(connection=connection)
            return QueryWrapper(('(%s)' % sql), params)
        # Each branch returns the list of SQL parameters for this lookup.
        if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
            return [value]
        elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
            return [self.get_db_prep_value(value, connection=connection, prepared=prepared)]
        elif lookup_type in ('range', 'in'):
            return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value]
        elif lookup_type in ('contains', 'icontains'):
            return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'iexact':
            return [connection.ops.prep_for_iexact_query(value)]
        elif lookup_type in ('startswith', 'istartswith'):
            return ["%s%%" % connection.ops.prep_for_like_query(value)]
        elif lookup_type in ('endswith', 'iendswith'):
            return ["%%%s" % connection.ops.prep_for_like_query(value)]
        elif lookup_type == 'isnull':
            # isnull is rendered purely in SQL; no parameters needed.
            return []
        elif lookup_type == 'year':
            if self.get_internal_type() == 'DateField':
                return connection.ops.year_lookup_bounds_for_date_field(value)
            else:
                return connection.ops.year_lookup_bounds(value)
    def has_default(self):
        "Returns a boolean of whether this field has a default value."
        return self.default is not NOT_PROVIDED
    def get_default(self):
        "Returns the default value for this field."
        if self.has_default():
            if callable(self.default):
                return self.default()
            return force_unicode(self.default, strings_only=True)
        if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
            return None
        return ""
    def get_validator_unique_lookup_type(self):
        return '%s__exact' % self.name
    def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        """Returns choices with a default blank choices included, for use
        as SelectField choices for this field."""
        first_choice = include_blank and blank_choice or []
        if self.choices:
            return first_choice + list(self.choices)
        # No explicit choices: build them from the related model's instances.
        rel_model = self.rel.to
        if hasattr(self.rel, 'get_related_field'):
            lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        else:
            lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
        return first_choice + lst
    def get_choices_default(self):
        return self.get_choices()
    def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
        "Returns flattened choices with a default blank choice included."
        first_choice = include_blank and blank_choice or []
        return first_choice + list(self.flatchoices)
    def _get_val_from_obj(self, obj):
        # Value of this field on ``obj``, or the field default when obj is None.
        if obj is not None:
            return getattr(obj, self.attname)
        else:
            return self.get_default()
    def value_to_string(self, obj):
        """
        Returns a string value of this field from the passed obj.
        This is used by the serialization framework.
        """
        return smart_unicode(self._get_val_from_obj(obj))
    def bind(self, fieldmapping, original, bound_field_class):
        # Wrap this field in the given bound-field class (legacy form API).
        return bound_field_class(self, fieldmapping, original)
    def _get_choices(self):
        # ``next`` attribute means _choices is a generator (Python 2): tee it
        # so it can be consumed more than once.
        if hasattr(self._choices, 'next'):
            choices, self._choices = tee(self._choices)
            return choices
        else:
            return self._choices
    choices = property(_get_choices)
    def _get_flatchoices(self):
        """Flattened version of choices tuple."""
        flat = []
        for choice, value in self.choices:
            if isinstance(value, (list, tuple)):
                # Optgroup: splice its members into the flat list.
                flat.extend(value)
            else:
                flat.append((choice,value))
        return flat
    flatchoices = property(_get_flatchoices)
    def save_form_data(self, instance, data):
        # Store a form-cleaned value onto the model instance.
        setattr(instance, self.name, data)
    def formfield(self, form_class=forms.CharField, **kwargs):
        "Returns a django.forms.Field instance for this database Field."
        defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
        if self.has_default():
            if callable(self.default):
                defaults['initial'] = self.default
                defaults['show_hidden_initial'] = True
            else:
                defaults['initial'] = self.get_default()
        if self.choices:
            # Fields with choices get special treatment.
            include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
            defaults['choices'] = self.get_choices(include_blank=include_blank)
            defaults['coerce'] = self.to_python
            if self.null:
                defaults['empty_value'] = None
            form_class = forms.TypedChoiceField
            # Many of the subclass-specific formfield arguments (min_value,
            # max_value) don't apply for choice fields, so be sure to only pass
            # the values that TypedChoiceField will understand.
            for k in kwargs.keys():
                if k not in ('coerce', 'empty_value', 'choices', 'required',
                             'widget', 'label', 'initial', 'help_text',
                             'error_messages', 'show_hidden_initial'):
                    del kwargs[k]
        defaults.update(kwargs)
        return form_class(**defaults)
    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname)
class AutoField(Field):
    """Integer primary key populated automatically by the database."""
    empty_strings_allowed = False
    description = _("Automatic key")

    def __init__(self, *args, **kwargs):
        assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
        # Auto fields are never user-entered, so blank is always allowed.
        kwargs['blank'] = True
        super(AutoField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "AutoField"

    def validate(self, value, model_instance):
        # The database supplies the value; there is nothing to validate.
        pass

    def get_db_prep_value(self, value, connection, prepared=False):
        return connection.ops.value_to_db_auto(value)

    def contribute_to_class(self, cls, name):
        assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
        super(AutoField, self).contribute_to_class(cls, name)
        cls._meta.has_auto_field = True
        cls._meta.auto_field = self

    def formfield(self, **kwargs):
        # Never exposed on forms.
        return None
class BooleanField(Field):
    """True/False column; blank is always permitted."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be either True or False.'),
    }
    description = _("Boolean (Either True or False)")

    def __init__(self, *args, **kwargs):
        kwargs['blank'] = True
        # Non-nullable booleans default to False unless told otherwise.
        if 'default' not in kwargs and not kwargs.get('null'):
            kwargs['default'] = False
        super(BooleanField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "BooleanField"

    def to_python(self, value):
        """Coerce common boolean representations to a real bool."""
        if value in (True, False):
            # 1/0 compare equal to True/False; normalise to a genuine bool.
            result = bool(value)
        elif value in ('t', 'True', '1'):
            result = True
        elif value in ('f', 'False', '0'):
            result = False
        else:
            raise exceptions.ValidationError(self.error_messages['invalid'])
        return result

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(BooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        # Unlike most fields, BooleanField figures out include_blank from
        # self.null instead of self.blank (blank is forced True in __init__).
        if self.choices:
            include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
            field_kwargs = {'choices': self.get_choices(include_blank=include_blank)}
        else:
            field_kwargs = {'form_class': forms.BooleanField}
        field_kwargs.update(kwargs)
        return super(BooleanField, self).formfield(**field_kwargs)
class CharField(Field):
    """Variable-length string column, bounded by ``max_length``."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        super(CharField, self).__init__(*args, **kwargs)
        # Enforce max_length at the model-validation level too.
        self.validators.append(validators.MaxLengthValidator(self.max_length))

    def get_internal_type(self):
        return "CharField"

    def to_python(self, value):
        """Pass strings and None through; coerce everything else to unicode."""
        if value is None or isinstance(value, basestring):
            return value
        return smart_unicode(value)

    def get_prep_value(self, value):
        return self.to_python(value)

    def formfield(self, **kwargs):
        # Passing max_length to forms.CharField means that the value's length
        # will be validated twice. This is considered acceptable since we want
        # the value in the form field (to pass into widget for example).
        field_kwargs = {'max_length': self.max_length}
        field_kwargs.update(kwargs)
        return super(CharField, self).formfield(**field_kwargs)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
    """CharField restricted to comma-separated lists of integers."""
    default_validators = [validators.validate_comma_separated_integer_list]
    description = _("Comma-separated integers")

    def formfield(self, **kwargs):
        # Substitute a more specific "invalid" message on the form field.
        field_kwargs = {
            'error_messages': {
                'invalid': _(u'Enter only digits separated by commas.'),
            }
        }
        field_kwargs.update(kwargs)
        return super(CommaSeparatedIntegerField, self).formfield(**field_kwargs)
# Matches ANSI/ISO-style dates: 4-digit year, 1-2 digit month and day
# (e.g. "2010-1-1" or "2010-01-01"). Component ranges are NOT checked here;
# DateField.to_python relies on datetime.date to reject impossible dates.
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class DateField(Field):
description = _("Date (without time)")
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
'invalid_date': _('Invalid date: %s'),
}
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
#HACKs : auto_now_add/auto_now should be done as a default or a pre_save.
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
if not ansi_date_re.search(value):
raise exceptions.ValidationError(self.error_messages['invalid'])
# Now that we have the date string in YYYY-MM-DD format, check to make
# sure it's a valid date.
# We could use time.strptime here and catch errors, but datetime.date
# produces much friendlier error messages.
year, month, day = map(int, value.split('-'))
try:
return datetime.date(year, month, day)
except ValueError, e:
msg = self.error_messages['invalid_date'] % _(str(e))
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.date.today()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
    """Date column with a time component."""
    default_error_messages = {
        'invalid': _(u'Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
    }
    description = _("Date (with time)")

    def get_internal_type(self):
        return "DateTimeField"

    def to_python(self, value):
        """Convert the input to a ``datetime.datetime`` or raise ValidationError."""
        if value is None:
            return value
        if isinstance(value, datetime.datetime):
            return value
        if isinstance(value, datetime.date):
            return datetime.datetime(value.year, value.month, value.day)
        # Attempt to parse a datetime string.
        value = smart_str(value)
        # strptime has no directive for fractional seconds, so peel them off.
        usecs = 0
        if '.' in value:
            try:
                value, frac = value.split('.')
                usecs = int(frac)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        # Seconds, then the whole time part, are optional: try the most
        # specific format first.
        for fmt, n_fields in (('%Y-%m-%d %H:%M:%S', 6),
                              ('%Y-%m-%d %H:%M', 5),
                              ('%Y-%m-%d', 3)):
            try:
                return datetime.datetime(*time.strptime(value, fmt)[:n_fields],
                                         **{'microsecond': usecs})
            except ValueError:
                continue
        raise exceptions.ValidationError(self.error_messages['invalid'])

    def pre_save(self, model_instance, add):
        if not (self.auto_now or (self.auto_now_add and add)):
            return super(DateTimeField, self).pre_save(model_instance, add)
        # auto_now / auto_now_add: stamp the current moment onto the instance.
        value = datetime.datetime.now()
        setattr(model_instance, self.attname, value)
        return value

    def get_prep_value(self, value):
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts datetimes into the format expected by the backend.
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_datetime(value)

    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        if val is None:
            return ''
        return datetime_safe.new_datetime(val).strftime('%Y-%m-%d %H:%M:%S')

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.DateTimeField}
        field_kwargs.update(kwargs)
        return super(DateTimeField, self).formfield(**field_kwargs)
class DecimalField(Field):
    """Fixed-precision decimal column (``max_digits`` / ``decimal_places``)."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _(u'This value must be a decimal number.'),
    }
    description = _("Decimal number")

    def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "DecimalField"

    def to_python(self, value):
        """Coerce to ``decimal.Decimal`` or raise ValidationError."""
        if value is None:
            return value
        try:
            return decimal.Decimal(value)
        except decimal.InvalidOperation:
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def _format(self, value):
        # Strings and None pass straight through; numbers get fixed-point
        # formatting.
        if value is None or isinstance(value, basestring):
            return value
        return self.format_number(value)

    def format_number(self, value):
        """
        Formats a number into a string with the requisite number of digits and
        decimal places.
        """
        # The implementation now lives in django.db.backends.util; this
        # wrapper survives for the Oracle backend
        # (django.db.backends.oracle.query) and for backwards compatibility
        # with external callers.
        from django.db.backends import util
        return util.format_number(value, self.max_digits, self.decimal_places)

    def get_db_prep_save(self, value, connection):
        return connection.ops.value_to_db_decimal(self.to_python(value),
                self.max_digits, self.decimal_places)

    def get_prep_value(self, value):
        return self.to_python(value)

    def formfield(self, **kwargs):
        field_kwargs = {
            'max_digits': self.max_digits,
            'decimal_places': self.decimal_places,
            'form_class': forms.DecimalField,
        }
        field_kwargs.update(kwargs)
        return super(DecimalField, self).formfield(**field_kwargs)
class EmailField(CharField):
    """CharField validated as an e-mail address (default max_length 75)."""
    default_validators = [validators.validate_email]
    description = _("E-mail address")

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('max_length', 75)
        super(EmailField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        # As with CharField, email validation ends up running twice
        # (model validator + form field); accepted.
        field_kwargs = {'form_class': forms.EmailField}
        field_kwargs.update(kwargs)
        return super(EmailField, self).formfield(**field_kwargs)
class FilePathField(Field):
    """Stores a filesystem path chosen from the contents of ``path``."""
    description = _("File path")

    def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        kwargs.setdefault('max_length', 100)
        super(FilePathField, self).__init__(verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "FilePathField"

    def formfield(self, **kwargs):
        field_kwargs = {
            'path': self.path,
            'match': self.match,
            'recursive': self.recursive,
            'form_class': forms.FilePathField,
        }
        field_kwargs.update(kwargs)
        return super(FilePathField, self).formfield(**field_kwargs)
class FloatField(Field):
    """Floating point column."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be a float."),
    }
    description = _("Floating point number")

    def get_internal_type(self):
        return "FloatField"

    def to_python(self, value):
        """Coerce to float or raise ValidationError."""
        if value is None:
            return value
        try:
            return float(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_prep_value(self, value):
        if value is None:
            return None
        return float(value)

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.FloatField}
        field_kwargs.update(kwargs)
        return super(FloatField, self).formfield(**field_kwargs)
class IntegerField(Field):
    """Integer column."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be an integer."),
    }
    description = _("Integer")

    def get_internal_type(self):
        return "IntegerField"

    def to_python(self, value):
        """Coerce to int or raise ValidationError."""
        if value is None:
            return value
        try:
            return int(value)
        except (TypeError, ValueError):
            raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_prep_value(self, value):
        if value is None:
            return None
        return int(value)

    def get_prep_lookup(self, lookup_type, value):
        # Round float bounds up for gte/lt so integer SQL comparison keeps
        # the same semantics (e.g. x__gte=2.5 behaves like x >= 3).
        if lookup_type in ('gte', 'lt') and isinstance(value, float):
            value = math.ceil(value)
        return super(IntegerField, self).get_prep_lookup(lookup_type, value)

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.IntegerField}
        field_kwargs.update(kwargs)
        return super(IntegerField, self).formfield(**field_kwargs)
class BigIntegerField(IntegerField):
    """8-byte integer column."""
    empty_strings_allowed = False
    description = _("Big (8 byte) integer")
    MAX_BIGINT = 9223372036854775807  # 2**63 - 1

    def get_internal_type(self):
        return "BigIntegerField"

    def formfield(self, **kwargs):
        field_kwargs = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
                        'max_value': BigIntegerField.MAX_BIGINT}
        field_kwargs.update(kwargs)
        return super(BigIntegerField, self).formfield(**field_kwargs)
class IPAddressField(Field):
    """Dotted-quad IP address column."""
    empty_strings_allowed = False
    description = _("IP address")

    def __init__(self, *args, **kwargs):
        # "255.255.255.255" is 15 characters.
        kwargs['max_length'] = 15
        super(IPAddressField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "IPAddressField"

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.IPAddressField}
        field_kwargs.update(kwargs)
        return super(IPAddressField, self).formfield(**field_kwargs)
class NullBooleanField(Field):
    """Boolean column that also admits NULL (None)."""
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _("This value must be either None, True or False."),
    }
    description = _("Boolean (Either True, False or None)")

    def __init__(self, *args, **kwargs):
        # null/blank are forced on: None is a first-class value here.
        kwargs['null'] = True
        kwargs['blank'] = True
        super(NullBooleanField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "NullBooleanField"

    def to_python(self, value):
        """Coerce common representations to True, False or None."""
        if value is None:
            return None
        if value in (True, False):
            return bool(value)
        if value == 'None':
            return None
        if value in ('t', 'True', '1'):
            return True
        if value in ('f', 'False', '0'):
            return False
        raise exceptions.ValidationError(self.error_messages['invalid'])

    def get_prep_lookup(self, lookup_type, value):
        # Special-case handling for filters coming from a Web request (e.g. the
        # admin interface). Only works for scalar values (not lists). If you're
        # passing in a list, you might as well make things the right type when
        # constructing the list.
        if value in ('1', '0'):
            value = bool(int(value))
        return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)

    def get_prep_value(self, value):
        if value is None:
            return None
        return bool(value)

    def formfield(self, **kwargs):
        field_kwargs = {
            'form_class': forms.NullBooleanField,
            'required': not self.blank,
            'label': capfirst(self.verbose_name),
            'help_text': self.help_text,
        }
        field_kwargs.update(kwargs)
        return super(NullBooleanField, self).formfield(**field_kwargs)
class PositiveIntegerField(IntegerField):
    """IntegerField whose form field only accepts values >= 0."""
    description = _("Integer")

    def get_internal_type(self):
        return "PositiveIntegerField"

    def formfield(self, **kwargs):
        field_kwargs = {'min_value': 0}
        field_kwargs.update(kwargs)
        return super(PositiveIntegerField, self).formfield(**field_kwargs)
class PositiveSmallIntegerField(IntegerField):
    """Small-range IntegerField whose form field only accepts values >= 0."""
    description = _("Integer")

    def get_internal_type(self):
        return "PositiveSmallIntegerField"

    def formfield(self, **kwargs):
        field_kwargs = {'min_value': 0}
        field_kwargs.update(kwargs)
        return super(PositiveSmallIntegerField, self).formfield(**field_kwargs)
class SlugField(CharField):
    """CharField intended for URL slugs; indexed by default."""
    description = _("String (up to %(max_length)s)")

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('max_length', 50)
        # Slugs are normally used for lookups, so index unless told not to.
        kwargs.setdefault('db_index', True)
        super(SlugField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        return "SlugField"

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.SlugField}
        field_kwargs.update(kwargs)
        return super(SlugField, self).formfield(**field_kwargs)
class SmallIntegerField(IntegerField):
    """IntegerField mapped to the backend's small-integer column type."""
    description = _("Integer")

    def get_internal_type(self):
        return "SmallIntegerField"
class TextField(Field):
    """Unbounded text column, rendered with a Textarea widget."""
    description = _("Text")

    def get_internal_type(self):
        return "TextField"

    def get_prep_value(self, value):
        # Strings and None pass through; everything else becomes unicode.
        if value is None or isinstance(value, basestring):
            return value
        return smart_unicode(value)

    def formfield(self, **kwargs):
        field_kwargs = {'widget': forms.Textarea}
        field_kwargs.update(kwargs)
        return super(TextField, self).formfield(**field_kwargs)
class TimeField(Field):
    """Time-of-day column (no date component)."""
    description = _("Time")
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'),
    }

    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            kwargs['editable'] = False
        super(TimeField, self).__init__(verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "TimeField"

    def to_python(self, value):
        """Convert the input to a ``datetime.time`` or raise ValidationError."""
        if value is None:
            return None
        if isinstance(value, datetime.time):
            return value
        if isinstance(value, datetime.datetime):
            # Not usually a good idea to pass in a datetime here (it loses
            # information), but this can be a side-effect of interacting with a
            # database backend (e.g. Oracle), so we'll be accommodating.
            return value.time()
        # Attempt to parse a time string.
        value = smart_str(value)
        # strptime has no directive for fractional seconds, so peel them off.
        usecs = 0
        if '.' in value:
            try:
                value, frac = value.split('.')
                usecs = int(frac)
            except ValueError:
                raise exceptions.ValidationError(self.error_messages['invalid'])
        # Seconds are optional: try the more specific format first.
        for fmt, fields in (('%H:%M:%S', slice(3, 6)), ('%H:%M', slice(3, 5))):
            try:
                return datetime.time(*time.strptime(value, fmt)[fields],
                                     **{'microsecond': usecs})
            except ValueError:
                continue
        raise exceptions.ValidationError(self.error_messages['invalid'])

    def pre_save(self, model_instance, add):
        if not (self.auto_now or (self.auto_now_add and add)):
            return super(TimeField, self).pre_save(model_instance, add)
        # auto_now / auto_now_add: stamp the current time onto the instance.
        value = datetime.datetime.now().time()
        setattr(model_instance, self.attname, value)
        return value

    def get_prep_value(self, value):
        return self.to_python(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        # Casts times into the format expected by the backend.
        if not prepared:
            value = self.get_prep_value(value)
        return connection.ops.value_to_db_time(value)

    def value_to_string(self, obj):
        val = self._get_val_from_obj(obj)
        if val is None:
            return ''
        return val.strftime("%H:%M:%S")

    def formfield(self, **kwargs):
        field_kwargs = {'form_class': forms.TimeField}
        field_kwargs.update(kwargs)
        return super(TimeField, self).formfield(**field_kwargs)
class URLField(CharField):
    """A CharField whose content is validated as a URL."""
    description = _("URL")

    def __init__(self, verbose_name=None, name=None, verify_exists=False, **kwargs):
        # Default to a 200-character column unless the caller overrides it.
        kwargs.setdefault('max_length', 200)
        CharField.__init__(self, verbose_name, name, **kwargs)
        self.validators.append(validators.URLValidator(verify_exists=verify_exists))

    def formfield(self, **kwargs):
        # As with CharField, this will cause URL validation to be performed
        # twice (once at the model level, once at the form level).
        merged = dict({'form_class': forms.URLField}, **kwargs)
        return super(URLField, self).formfield(**merged)
class XMLField(TextField):
    """Deprecated TextField variant; schema_path is stored but unused here."""
    description = _("XML text")

    def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
        # Emit the deprecation warning at construction time so existing
        # models keep working while flagging the migration to TextField.
        import warnings
        warnings.warn("Use of XMLField has been deprecated; please use TextField instead.",
                      DeprecationWarning)
        self.schema_path = schema_path
        Field.__init__(self, verbose_name, name, **kwargs)
| |
#!/usr/bin/python
#
# Copyright (c) 2019 Yunge Zhu, (@yungezz)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation consumed by ansible-doc.
# FIX: 'Mutual exclusive' -> 'Mutually exclusive' (consistent with the
# wording under the assignee option).
DOCUMENTATION = '''
---
module: azure_rm_roleassignment_info
version_added: "2.9"
short_description: Gets Azure Role Assignment facts
description:
    - Gets facts of Azure Role Assignment.
options:
    scope:
        description:
            - The scope that the role assignment applies to.
            - For example, use /subscriptions/{subscription-id}/ for a subscription.
            - /subscriptions/{subscription-id}/resourceGroups/{resourcegroup-name} for a resource group.
            - /subscriptions/{subscription-id}/resourceGroups/{resourcegroup-name}/providers/{resource-provider}/{resource-type}/{resource-name} for a resource.
    name:
        description:
            - Name of role assignment.
            - Mutually exclusive with I(assignee).
    assignee:
        description:
            - Object id of a user, group or service principal.
            - Mutually exclusive with I(name).
    role_definition_id:
        description:
            - Resource id of role definition.
extends_documentation_fragment:
    - azure
author:
    - Yunge Zhu(@yungezz)
'''
EXAMPLES = '''
- name: Get role assignments for specific service principal
azure_rm_roleassignment_info:
assignee: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- name: Get role assignments for specific scope
azure_rm_roleassignment_info:
scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
'''
# Return-value documentation consumed by ansible-doc.
# FIX: the 'type' entry used the misspelled key 'descripition', which is
# invalid in the RETURN schema.
RETURN = '''
roleassignments:
    description:
        - List of role assignments.
    returned: always
    type: complex
    contains:
        id:
            description:
                - Id of role assignment.
            type: str
            returned: always
            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
        name:
            description:
                - Name of role assignment.
            type: str
            returned: always
            sample: myRoleAssignment
        type:
            description:
                - Type of role assignment.
            type: str
            returned: always
            sample: custom
        principal_id:
            description:
                - Principal Id of the role assigned to.
            type: str
            returned: always
            sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
        role_definition_id:
            description:
                - Role definition id that was assigned to principal_id.
            type: str
            returned: always
            sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
        scope:
            description:
                - The role assignment scope.
            type: str
            returned: always
            sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from msrest.serialization import Model
from azure.mgmt.authorization import AuthorizationManagementClient
except ImportError:
# This is handled in azure_rm_common
pass
def roleassignment_to_dict(assignment):
    """Flatten an Azure RoleAssignment object into a plain dict of facts."""
    fields = ('id', 'name', 'type', 'principal_id', 'role_definition_id', 'scope')
    return dict((attr, getattr(assignment, attr)) for attr in fields)
class AzureRMRoleAssignmentInfo(AzureRMModuleBase):
    """Ansible info module that gathers Azure role assignment facts.

    Exactly one lookup strategy is used, chosen in this order: by
    assignment I(name) (within I(scope)), by I(assignee) object id, or by
    listing everything at I(scope). In every case the result can be
    narrowed further with I(role_definition_id).
    """

    def __init__(self):
        self.module_arg_spec = dict(
            name=dict(
                type='str'
            ),
            scope=dict(
                type='str'
            ),
            assignee=dict(
                type='str'
            ),
            role_definition_id=dict(
                type='str'
            )
        )

        # Module parameters; populated from kwargs in exec_module().
        self.name = None
        self.scope = None
        self.assignee = None
        self.role_definition_id = None

        # Info modules never change state.
        self.results = dict(
            changed=False
        )

        self._client = None

        mutually_exclusive = [['name', 'assignee']]

        super(AzureRMRoleAssignmentInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                        supports_tags=False,
                                                        mutually_exclusive=mutually_exclusive)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        is_old_facts = self.module._name == 'azure_rm_roleassignment_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_roleassignment_facts' module has been renamed to 'azure_rm_roleassignment_info'", version='2.13')

        for key in list(self.module_arg_spec.keys()):
            if hasattr(self, key):
                setattr(self, key, kwargs[key])

        # Get the management client; the preview api version provides the
        # filtering used by the lookups below.
        self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
                                                base_url=self._cloud_environment.endpoints.resource_manager,
                                                api_version="2018-01-01-preview")

        if self.name:
            self.results['roleassignments'] = self.get_by_name()
        elif self.assignee:
            self.results['roleassignments'] = self.get_by_assignee()
        elif self.scope:
            self.results['roleassignments'] = self.list_by_scope()
        else:
            # BUG FIX: the original message omitted 'scope', which is a
            # valid third way of querying.
            self.fail("Please specify name, assignee or scope")

        return self.results

    def _filter_by_role_definition(self, assignments):
        """Return only the assignment dicts whose role_definition_id matches
        self.role_definition_id; all of them when the filter is unset.
        """
        if not self.role_definition_id:
            return assignments
        return [a for a in assignments if a['role_definition_id'] == self.role_definition_id]

    def get_by_name(self):
        '''
        Gets the properties of the specified role assignment by name.

        :return: list containing the matching deserialized role assignment dict, or []
        '''
        self.log("Gets role assignment {0} by name".format(self.name))

        results = []

        try:
            response = self._client.role_assignments.get(scope=self.scope, role_assignment_name=self.name)

            if response:
                results = self._filter_by_role_definition([roleassignment_to_dict(response)])

        except CloudError as ex:
            self.log("Didn't find role assignment {0} in scope {1}".format(self.name, self.scope))

        return results

    def get_by_assignee(self):
        '''
        Gets the role assignments by assignee.

        :return: list of deserialized role assignment dicts
        '''
        # BUG FIX: the original logged "by name"/self.name here, although
        # this lookup is driven by the assignee.
        self.log("Gets role assignments for assignee {0}".format(self.assignee))

        results = []
        filter = "principalId eq '{0}'".format(self.assignee)
        try:
            response = list(self._client.role_assignments.list(filter=filter))

            if response:
                results = self._filter_by_role_definition([roleassignment_to_dict(a) for a in response])

        except CloudError as ex:
            self.log("Didn't find role assignments to assignee {0}".format(self.assignee))

        return results

    def list_by_scope(self):
        '''
        Lists the role assignments by specific scope.

        :return: list of deserialized role assignment dicts
        '''
        self.log("Lists role assignment by scope {0}".format(self.scope))

        results = []
        try:
            response = list(self._client.role_assignments.list_for_scope(scope=self.scope, filter='atScope()'))

            if response:
                results = self._filter_by_role_definition([roleassignment_to_dict(a) for a in response])

        except CloudError as ex:
            self.log("Didn't find role assignments to scope {0}".format(self.scope))

        return results
def main():
    """Module entry point -- constructing the class runs the module."""
    AzureRMRoleAssignmentInfo()


if __name__ == '__main__':
    main()
| |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
""" This file contains widgets that can be used for dropping Constant class
variables. It will construct an input form for the value.
QVariableDropBox
QVerticalWidget
QVariableInputWidget
QVariableInputForm
QDragVariableLabel
QHoverVariableLabel
"""
from PyQt4 import QtCore, QtGui
from core import debug
from core.vistrail.module_function import ModuleFunction
from core.vistrail.module_param import ModuleParam
from core.modules import module_registry
from core.modules.basic_modules import Constant
from gui.common_widgets import QPromptWidget
from gui.modules import get_widget_class
from gui.modules.constant_configuration import StandardConstantWidget, \
FileChooserToolButton
from gui.module_palette import QModuleTreeWidget
from gui.theme import CurrentTheme
from gui.utils import show_question, YES_BUTTON, NO_BUTTON
import uuid
################################################################################
class QVariableDropBox(QtGui.QScrollArea):
    """
    QVariableDropBox is just a widget such that items that subclass
    Constant from the module palette can be dropped into its client rect.
    It then constructs an input form based on the type of handling widget
    """
    def __init__(self, parent=None):
        """ QVariableDropBox(parent: QWidget) -> QVariableDropBox
        Initialize widget constraints
        """
        QtGui.QScrollArea.__init__(self, parent)
        self.setAcceptDrops(True)
        self.setWidgetResizable(True)
        self.vWidget = QVerticalWidget()
        self.setWidget(self.vWidget)
        # updateLocked suppresses updateController() while this widget is
        # itself writing variables back to the controller (lockUpdate()).
        self.updateLocked = False
        self.controller = None
    def dragEnterEvent(self, event):
        """ dragEnterEvent(event: QDragEnterEvent) -> None
        Set to accept drops from the module palette
        """
        if type(event.source())==QModuleTreeWidget:
            data = event.mimeData()
            # Only palette drags carry an 'items' attribute on the mime data
            if hasattr(data, 'items'):
                event.accept()
        else:
            event.ignore()
    def dragMoveEvent(self, event):
        """ dragMoveEvent(event: QDragMoveEvent) -> None
        Set to accept drag move event from the module palette
        """
        if type(event.source())==QModuleTreeWidget:
            data = event.mimeData()
            if hasattr(data, 'items'):
                event.accept()
    def dropEvent(self, event):
        """ dropEvent(event: QDragMoveEvent) -> None
        Accept drop event to add a new variable
        """
        if type(event.source())==QModuleTreeWidget:
            data = event.mimeData()
            if hasattr(data, 'items'):
                event.accept()
                assert len(data.items) == 1
                item = data.items[0]
                # Only Constant subclasses may become vistrail variables
                if issubclass(item.descriptor.module, Constant):
                    if item.descriptor and self.controller:
                        self.lockUpdate()
                        (text, ok) = QtGui.QInputDialog.getText(self,
                                                                'Set Variable Name',
                                                                'Enter the variable name',
                                                                QtGui.QLineEdit.Normal,
                                                                '')
                        var_name = str(text).strip()
                        # Re-prompt until the name is unique or the user cancels
                        while ok and self.controller.check_vistrail_variable(var_name):
                            msg =" This variable name is already being used.\
 Please enter a different variable name "
                            (text, ok) = QtGui.QInputDialog.getText(self,
                                                                    'Set Variable Name',
                                                                    msg,
                                                                    QtGui.QLineEdit.Normal,
                                                                    text)
                            var_name = str(text).strip()
                        if ok:
                            # Every new variable gets a fresh uuid as identity
                            self.vWidget.addVariable(str(uuid.uuid1()), var_name, item.descriptor)
                            self.scrollContentsBy(0, self.viewport().height())
                        self.unlockUpdate()
        #self.emit(QtCore.SIGNAL("paramsAreaChanged"))
    def updateController(self, controller):
        """ updateController(controller: VistrailController) -> None
        Construct input forms for a controller's variables
        """
        self.controller = controller
        if self.updateLocked: return
        self.vWidget.clear()
        if controller:
            reg = module_registry.get_module_registry()
            # var_info layout: (uuid, (identifier, name, namespace), strValue)
            for var_name, var_info in controller.get_vistrail_variables().iteritems():
                var_uuid = var_info[0]
                identifier, name, namespace = var_info[1]
                var_strValue = var_info[2]
                try:
                    descriptor = reg.get_descriptor_by_name(identifier, name, namespace)
                except module_registry.ModuleRegistryException:
                    # Package providing the type is missing; skip the variable
                    debug.critical("Missing Module Descriptor for VisTrail Variable %s\nPackage: %s\nType: %s\nNamespace: %s"%(var_name,identifier,name,namespace))
                    continue
                self.vWidget.addVariable(var_uuid, var_name, descriptor, var_strValue)
            self.vWidget.showPromptByChildren()
        else:
            self.vWidget.showPrompt(False)
    def lockUpdate(self):
        """ lockUpdate() -> None
        Do not allow updateModule()
        """
        self.updateLocked = True
    def unlockUpdate(self):
        """ unlockUpdate() -> None
        Allow updateModule()
        """
        self.updateLocked = False
class QVerticalWidget(QPromptWidget):
    """
    QVerticalWidget is a widget holding other variable widgets
    vertically
    """
    def __init__(self, parent=None):
        """ QVerticalWidget(parent: QWidget) -> QVerticalWidget
        Initialize with a vertical layout
        """
        QPromptWidget.__init__(self, parent)
        self.setPromptText("Drag a constant from the Modules panel to create a variable")
        self.setLayout(QtGui.QVBoxLayout())
        self.layout().setMargin(0)
        self.layout().setSpacing(5)
        self.layout().setAlignment(QtCore.Qt.AlignTop)
        self.setSizePolicy(QtGui.QSizePolicy.Expanding,
                           QtGui.QSizePolicy.Expanding)
        self.setMinimumHeight(20)
        self._variable_widgets = []
    def addVariable(self, var_uuid, var_name, descriptor, var_strValue=""):
        """ addVariable(var_uuid:str, var_name: str, descriptor: ModuleDescriptor, var_strValue: str) -> None
        Add an input form for the variable
        """
        inputForm = QVariableInputWidget(var_uuid, var_name, descriptor, var_strValue, self)
        # The form emits deleted(QWidget*) when the user closes it
        self.connect(inputForm, QtCore.SIGNAL('deleted(QWidget*)'),
                     self.delete_form)
        self.layout().addWidget(inputForm)
        inputForm.show()
        self.setMinimumHeight(self.layout().minimumSize().height())
        self.showPrompt(False)
        self._variable_widgets.append(inputForm)
    def clear(self):
        """ clear() -> None
        Clear and delete all widgets in the layout
        """
        # Disable the widget while tearing down so no signals fire mid-delete
        self.setEnabled(False)
        for v in self._variable_widgets:
            self.disconnect(v, QtCore.SIGNAL('deleted(QWidget*)'),
                            self.delete_form)
            self.layout().removeWidget(v)
            v.deleteLater()
        self._variable_widgets = []
        self.setEnabled(True)
    def delete_form(self, input_form):
        # Slot for deleted(QWidget*): remove the form and unset the variable
        # on the controller (set_vistrail_variable(name, None) deletes it).
        self.disconnect(input_form, QtCore.SIGNAL('deleted(QWidget*)'),
                        self.delete_form)
        var_name = input_form.var_name
        # parent().parent() climbs out of the scroll area to QVariableDropBox
        variableBox = self.parent().parent()
        self.layout().removeWidget(input_form)
        self._variable_widgets.remove(input_form)
        input_form.deleteLater()
        self.showPromptByChildren()
        if variableBox.controller:
            variableBox.lockUpdate()
            variableBox.controller.set_vistrail_variable(var_name, None)
            variableBox.unlockUpdate()
        self.setMinimumHeight(self.layout().minimumSize().height())
class QVariableInputWidget(QtGui.QDockWidget):
    # Dock widget wrapping a single variable: a custom title bar (rename
    # label + close button) above a QVariableInputForm value editor.
    def __init__(self, var_uuid, var_name, descriptor, var_strValue="", parent=None):
        QtGui.QDockWidget.__init__(self, parent)
        self.var_uuid = var_uuid
        self.var_name = var_name
        self.descriptor = descriptor
        self.setFeatures(QtGui.QDockWidget.DockWidgetClosable)
        # Create group and titlebar widgets for input widget
        self.group_box = QVariableInputForm(descriptor, var_strValue, self)
        self.setWidget(self.group_box)
        title_widget = QtGui.QWidget()
        title_layout = QtGui.QHBoxLayout()
        self.closeButton = QtGui.QToolButton()
        self.closeButton.setAutoRaise(True)
        self.closeButton.setIcon(QtGui.QIcon(self.style().standardPixmap(QtGui.QStyle.SP_TitleBarCloseButton)))
        self.closeButton.setIconSize(QtCore.QSize(13, 13))
        self.closeButton.setFixedWidth(16)
        self.label = QHoverVariableLabel(var_name)
        title_layout.addWidget(self.label)
        title_layout.addWidget(self.closeButton)
        title_widget.setLayout(title_layout)
        self.setTitleBarWidget(title_widget)
        self.connect(self.closeButton, QtCore.SIGNAL('clicked()'), self.close)
    def renameVariable(self, var_name):
        # First delete old var entry
        variableBox = self.parent().parent().parent()
        if variableBox.controller:
            variableBox.lockUpdate()
            variableBox.controller.set_vistrail_variable(self.var_name, None)
            variableBox.unlockUpdate()
        # Create var entry with new name, but keeping the same uuid
        # (updateMethod() writes the renamed variable back to the controller)
        self.var_name = var_name
        self.label.setText(var_name)
        self.group_box.updateMethod()
    def closeEvent(self, event):
        # Confirm before deleting; deletion is irreversible for workflows
        # that reference this variable.
        choice = show_question('Delete %s?'%self.var_name,
                               'Are you sure you want to permanently delete the VisTrail variable "%s"?\n\nNote: Any workflows using this variable will be left in an invalid state.'%self.var_name,
                               [NO_BUTTON,YES_BUTTON],
                               NO_BUTTON)
        if choice == NO_BUTTON:
            event.ignore()
            return
        # QVerticalWidget.delete_form() reacts to this signal
        self.emit(QtCore.SIGNAL('deleted(QWidget*)'), self)
    def keyPressEvent(self, e):
        # Delete/Backspace closes (and thus offers to delete) the variable
        if e.key() in [QtCore.Qt.Key_Delete, QtCore.Qt.Key_Backspace]:
            self.close()
        else:
            QtGui.QDockWidget.keyPressEvent(self, e)
    def check_variable(self, name):
        """ check_variable(name: str) -> Boolean
        Returns True if the vistrail already has the variable name
        """
        variableBox = self.parent().parent().parent()
        if variableBox.controller:
            return variableBox.controller.check_vistrail_variable(name)
        return False
class QVariableInputForm(QtGui.QGroupBox):
    """
    QVariableInputForm is a widget with multiple input lines depends on
    the method signature
    """
    def __init__(self, descriptor, var_strValue="", parent=None):
        """ QVariableInputForm(descriptor: ModuleDescriptor, var_strValue: str, parent: QWidget) -> QVariableInputForm
        Initialize with a vertical layout
        """
        QtGui.QGroupBox.__init__(self, parent)
        self.setLayout(QtGui.QGridLayout())
        self.layout().setMargin(5)
        self.layout().setSpacing(5)
        self.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.setSizePolicy(QtGui.QSizePolicy.Preferred,
                           QtGui.QSizePolicy.Fixed)
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)
        # Create widget for editing variable
        p = ModuleParam(type=descriptor.name, identifier=descriptor.identifier, namespace=descriptor.namespace)
        p.strValue = var_strValue
        # The editing widget class depends on the constant's type
        widget_type = get_widget_class(descriptor.module)
        self.widget = widget_type(p, self)
        self.label = QDragVariableLabel(p.type)
        self.layout().addWidget(self.label, 0, 0)
        self.layout().addWidget(self.widget, 0, 1)
        # Push the initial value straight to the controller
        self.updateMethod()
    def focusInEvent(self, event):
        """ gotFocus() -> None
        Make sure the form painted as selected
        """
        self.setAutoFillBackground(True)
    def focusOutEvent(self, event):
        """ lostFocus() -> None
        Make sure the form painted as non-selected and then
        perform a parameter changes
        """
        self.setAutoFillBackground(False)
    def updateMethod(self):
        """ updateMethod() -> None
        Update the variable values in vistrail controller
        """
        # Climb QVariableInputWidget -> QVerticalWidget -> ... ->
        # QVariableDropBox (scroll-area internals are in between)
        inputWidget = self.parent()
        variableBox = inputWidget.parent().parent().parent()
        if variableBox.controller:
            variableBox.lockUpdate()
            descriptor = inputWidget.descriptor
            descriptor_info = (descriptor.identifier, descriptor.name, descriptor.namespace)
            # Stored variable layout: (uuid, descriptor_info, string value)
            value = (inputWidget.var_uuid, descriptor_info, str(self.widget.contents()))
            variableBox.controller.set_vistrail_variable(inputWidget.var_name, value)
            variableBox.unlockUpdate()
class QDragVariableLabel(QtGui.QLabel):
    """
    QDragVariableLabel is a QLabel that can be dragged to connect
    to an input port
    """
    def __init__(self, var_type='', parent=None):
        """ QDragVariableLabel(var_type:str,
                               parent: QWidget) -> QDragVariableLabel
        Initialize the label with a variable type
        """
        QtGui.QLabel.__init__(self, parent)
        self.var_type = var_type
        self.setText(var_type)
        self.setAttribute(QtCore.Qt.WA_Hover)
        self.setCursor(CurrentTheme.OPEN_HAND_CURSOR)
        self.setToolTip('Drag to an input port')
        self.palette().setColor(QtGui.QPalette.WindowText,
                                CurrentTheme.HOVER_DEFAULT_COLOR)
    def event(self, event):
        """ event(event: QEvent) -> Event Result
        Override to handle hover enter and leave events for hot links
        """
        if event.type()==QtCore.QEvent.HoverEnter:
            self.palette().setColor(QtGui.QPalette.WindowText,
                                    CurrentTheme.HOVER_SELECT_COLOR)
        if event.type()==QtCore.QEvent.HoverLeave:
            self.palette().setColor(QtGui.QPalette.WindowText,
                                    CurrentTheme.HOVER_DEFAULT_COLOR)
        return QtGui.QLabel.event(self, event)
    def mousePressEvent(self, event):
        """ mousePressEvent(event: QMouseEvent) -> None
        On a left-click, start a drag operation carrying this variable's
        output port spec so it can be dropped onto a matching input port
        """
        if event.button()==QtCore.Qt.LeftButton:
            inputWidget = self.parent().parent()
            var_name = inputWidget.var_name
            var_uuid = inputWidget.var_uuid
            # Create pixmap from variable name and type
            drag_str = var_name + ' : ' + self.var_type
            drag_label = QDragVariableLabel(drag_str)
            drag_label.adjustSize()
            painter = QtGui.QPainter()
            font = QtGui.QFont()
            size = drag_label.size()
            image = QtGui.QImage(size.width()+4, size.height()+4, QtGui.QImage.Format_ARGB32_Premultiplied)
            image.fill(0)
            painter.begin(image)
            painter.setPen(QtCore.Qt.NoPen)
            painter.setBrush(self.palette().highlight())
            painter.drawRect(QtCore.QRectF(0, 0, image.width(), image.height()))
            painter.setFont(font)
            painter.setPen(QtCore.Qt.black)
            painter.drawText(QtCore.QRect(QtCore.QPoint(2,2), size), QtCore.Qt.AlignLeft | QtCore.Qt.TextSingleLine, drag_str)
            painter.end()
            pixmap = QtGui.QPixmap.fromImage(image)
            # Create drag action; the drop target reads mimeData.variableData
            mimeData = QtCore.QMimeData()
            portspec = inputWidget.descriptor.get_port_spec('value', 'output')
            mimeData.variableData = (portspec, var_uuid, var_name)
            drag = QtGui.QDrag(self)
            drag.setMimeData(mimeData)
            drag.setHotSpot(pixmap.rect().bottomRight())
            drag.setPixmap(pixmap)
            drag.start(QtCore.Qt.MoveAction)
class QHoverVariableLabel(QtGui.QLabel):
    """
    QHoverVariableLabel is a QLabel that supports hover actions similar
    to a hot link
    """
    def __init__(self, var_name='', parent=None):
        """ QHoverVariableLabel(var_name:str,
                                parent: QWidget) -> QHoverVariableLabel
        Initialize the label with a variable name
        """
        QtGui.QLabel.__init__(self, parent)
        self.var_name = var_name
        self.setText(var_name)
        self.setAttribute(QtCore.Qt.WA_Hover)
        self.setCursor(QtCore.Qt.PointingHandCursor)
        self.setToolTip('Click to rename')
        self.palette().setColor(QtGui.QPalette.WindowText,
                                CurrentTheme.HOVER_DEFAULT_COLOR)
    def event(self, event):
        """ event(event: QEvent) -> Event Result
        Override to handle hover enter and leave events for hot links
        """
        if event.type()==QtCore.QEvent.HoverEnter:
            self.palette().setColor(QtGui.QPalette.WindowText,
                                    CurrentTheme.HOVER_SELECT_COLOR)
        if event.type()==QtCore.QEvent.HoverLeave:
            self.palette().setColor(QtGui.QPalette.WindowText,
                                    CurrentTheme.HOVER_DEFAULT_COLOR)
        return QtGui.QLabel.event(self, event)
    def mousePressEvent(self, event):
        """ mousePressEvent(event: QMouseEvent) -> None
        If mouse click on the label, show up a dialog to change/add
        the variable name
        """
        if event.button()==QtCore.Qt.LeftButton:
            inputWidget = self.parent().parent()
            orig_var_name = inputWidget.var_name
            (text, ok) = QtGui.QInputDialog.getText(self,
                                                    'Set New Variable Name',
                                                    'Enter the new variable name',
                                                    QtGui.QLineEdit.Normal,
                                                    orig_var_name)
            var_name = str(text).strip()
            # Re-prompt until the new name is unique or the user cancels
            while ok and self.parent().parent().check_variable(var_name):
                msg =" This variable name is already being used.\
 Please enter a different variable name "
                (text, ok) = QtGui.QInputDialog.getText(self,
                                                        'Set New Variable Name',
                                                        msg,
                                                        QtGui.QLineEdit.Normal,
                                                        text)
                var_name = str(text).strip()
            if ok and var_name != orig_var_name:
                self.setText(var_name)
                inputWidget.renameVariable(var_name)
| |
#!/usr/bin/python
import sys, itertools, os, re, glob
# use glob.glob. it uses os.listdir() and fnmatch.fnmatch() ..so it's unix style pattern match
def check_sandbox_for_errors(LOG_DIR=None, python_test_name='',
        cloudShutdownIsError=False, sandboxIgnoreErrors=False, pattern=None):
    """Scan h2o sandbox log files for error/assert lines and report them.

    Walks either the files matching *pattern* or the default h2o
    stdout/stderr/log files in LOG_DIR, resuming after the last line
    recorded in a per-file 'doneToLine.<name>' marker. Suspicious lines
    are printed and collected; if any remain after filtering the known
    benign matches, an Exception is raised (unless *pattern* is given or
    *sandboxIgnoreErrors* is true, in which case the message is returned).

    Returns None when nothing suspicious was found.
    """
    # show the parameters
    ### print "check_sandbox_for_errors:", locals()
    # gets set below on error (returned)
    errorFound = False
    if not LOG_DIR:
        LOG_DIR = './sandbox'
    if not os.path.exists(LOG_DIR):
        return
    # FIX! wait for h2o to flush to files? how?
    # Dump any assertion or error line to the screen
    # Both "passing" and failing tests??? I guess that's good.
    # if you find a problem, just keep printing till the end, in that file.
    # The stdout/stderr is shared for the entire cloud session?
    # so don't want to dump it multiple times?
    # glob gives full path, so we have to strip to match os.listdir()
    fileList = []
    # if we're using a pattern, ignore the "done" files
    if pattern:
        # search whatever the pattern says
        # need to exclude directories (syn_datasets)
        fileList1 = glob.glob(LOG_DIR + "/" + pattern)
        # have to remove all the line count temp files
        # ignore the json file we copy there also (anything ending in json)
        for filename in fileList1:
            if os.path.isfile(filename) and not re.search('doneToLine', filename) and not re.search('\.json$', filename):
                fileList.append(os.path.basename(filename))
        if len(fileList)==0:
            raise Exception("Unexpected: h2o_sandbox found 0 files in %s that matched the pattern: %s" % (LOG_DIR, pattern) )
    else:
        fileList1 = os.listdir(LOG_DIR)
        # don't search the R stdout/stderr
        # this matches the python h2o captured stdout/stderr, and also any downloaded h2o logs
        # not the commands.log
        for filename in fileList1:
            # for h2o on hadoop, in the common unit test stuff, we download zipped logs from h2o
            # at the end and expand them. They will be in sandbox like this, because of the names h2o creates
            # in the zip (I flatten it in sandbox): h2o_192.168.1.178_54321.log
            # So look for that pattern too!
            if re.search('h2o.*stdout|h2o.*stderr|h2o\..*\.log', filename) and not re.search('doneToLine', filename):
                fileList.append(filename)
        if len(fileList)==0:
            # let this go...sh2junit.py apparently calls h2o_sandbox() looking for h2o logs?
            emsg = "Unexpected: h2o_sandbox found 0 files in %s that matched the stdout/stderr pattern" % LOG_DIR
            if sandboxIgnoreErrors:
                print emsg
                return
            else:
                # FIX! have to figure out what to do about when there are logs available to check for h2o on hadoop
                # and when to not care if they're not there
                pass
                # raise Exception(emsg)
    # print "h2o_sandbox: checking", len(fileList), "files"
    errLines = []
    for filename in fileList:
        sandFile = open(LOG_DIR + "/" + filename, "r")
        # if we've already walked it, there will be a matching file
        # with the last line number we checked
        try:
            with open(LOG_DIR + "/doneToLine." + filename) as f:
                # if multiple processes are checking, this file isn't locked
                # if it's empty, treat it as zero
                r = f.readline().rstrip()
                if not r or r=="":
                    doneToLine = 0
                else:
                    try:
                        doneToLine = int(r)
                    except:
                        raise Exception("%s/doneToLine.%s is corrupted (multiprocess issue?): %s" % (LOG_DIR, filename, r))
        except IOError:
            # no file
            doneToLine = 0
        # if we're using a pattern, ignore the doneToLine stuff (always start at 0)
        if pattern:
            doneToLine = 0
        # just in case error/assert is lower or upper case
        # FIX! aren't we going to get the cloud building info failure messages
        # oh well...if so ..it's a bug! "killing" is temp to detect jar mismatch error
        regex1String = 'found multiple|exception|error|ERRR|assert|killing|killed|required ports'
        if cloudShutdownIsError:
            regex1String += '|shutdown command'
        regex1 = re.compile(regex1String, re.IGNORECASE)
        regex2 = re.compile('Caused',re.IGNORECASE)
        # regex3 = re.compile('warn|info|TCP', re.IGNORECASE)
        # FIX! temp to avoid the INFO in jan's latest logging. don't print any info?
        regex3 = re.compile('warn|TCP', re.IGNORECASE)
        # many hdfs/apache messages have 'error' in the text. treat as warning if they have '[WARN]'
        # i.e. they start with:
        # [WARN]
        # if we started due to "warning" ...then if we hit exception, we don't want to stop
        # we want that to act like a new beginning. Maybe just treat "warning" and "info" as
        # single line events? that's better
        # printing state machine: 0 = idle, 1 = dumping an error region,
        # 2 = stopped dumping (hit a new error past the 10-line window)
        printing = 0 # "printing" is per file.
        lines = 0 # count per file! errLines accumulates for multiple files.
        currentLine = 0
        log_python_test_name = None
        for line in sandFile:
            currentLine += 1
            m = re.search('(python_test_name:) (.*)', line)
            if m:
                log_python_test_name = m.group(2)
                # if log_python_test_name == python_test_name):
                #     print "Found log_python_test_name:", log_python_test_name
            # don't check if we've already checked
            if currentLine <= doneToLine:
                continue
            # if log_python_test_name and (log_python_test_name != python_test_name):
            #     print "h2o_sandbox.py: ignoring because wrong test name:", currentLine
            # JIT reporting looks like this..don't detect that as an error
            printSingleWarning = False
            foundBad = False
            if not ' bytes)' in line:
                # no multiline FSM on this
                printSingleWarning = regex3.search(line)
                #  13190  280      ###        sun.nio.ch.DatagramChannelImpl::ensureOpen (16 bytes)
                # don't detect these class loader info messags as errors
                #[Loaded java.lang.Error from /usr/lib/jvm/java-7-oracle/jre/lib/rt.jar]
                # the 'not (...)' clause whitelists known-benign lines that
                # happen to contain the error keywords (scoring output, R
                # confusion matrices, JVM class loading, hdfs warnings, ...)
                foundBad = regex1.search(line) and not (
                    ('Prediction error' in line) or
                    (('Act/Prd' in line) and ('Error' in line)) or
                    (('AUC' in line) and ('Gini' in line) and ('Precision' in line)) or
                    ('Error on training data' in line) or
                    ('Error on validation data' in line) or
                    ('water.DException' in line) or
                    # the manyfiles data has eRRr in a warning about test/train data
                    ('WARN SCORM' in line) or
                    # ignore the long, long lines that the JStack prints as INFO
                    ('stack_traces' in line) or
                    # shows up as param to url for h2o
                    ('out_of_bag_error_estimate' in line) or
                    # R stdout confusion matrix. Probably need to figure out how to exclude R logs
                    ('Training Error' in line) or
                    # now from GBM
                    ('Mean Squared Error' in line) or
                    ('Error' in line and 'Actual' in line) or
                    # fvec
                    ('prediction error' in line) or
                    ('errors on' in line) or
                    # R
                    ('class.error' in line) or
                    # original RF
                    ('error rate' in line) or
                    ('[Loaded ' in line) or
                    ('[WARN]' in line) or
                    ('CalcSquareErrorsTasks' in line))
            if (printing==0 and foundBad):
                printing = 1
                lines = 1
            elif (printing==1):
                lines += 1
                # if we've been printing, stop when you get to another error
                # keep printing if the pattern match for the condition
                # is on a line with "Caused" in it ("Caused by")
                # only use caused for overriding an end condition
                foundCaused = regex2.search(line)
                # since the "at ..." lines may have the "bad words" in them, we also don't want
                # to stop if a line has " *at " at the beginning.
                # Update: Assertion can be followed by Exception.
                # Make sure we keep printing for a min of 4 lines
                foundAt = re.match(r'[\t ]+at ',line)
                if foundBad and (lines>10) and not (foundCaused or foundAt):
                    printing = 2
            if (printing==1):
                # to avoid extra newline from print. line already has one
                errLines.append(line)
                sys.stdout.write(line)
            if (printSingleWarning):
                # don't print these lines
                if not (
                    ('Unable to load native-hadoop library' in line) or
                    ('stack_traces' in line) or
                    ('Multiple local IPs detected' in line) or
                    ('[Loaded ' in line) or
                    ('RestS3Service' in line) ):
                    sys.stdout.write(line)
        sandFile.close()
        # remember what you've checked so far, with a file that matches, plus a suffix
        # this is for the case of multiple tests sharing the same log files
        # only want the test that caused the error to report it. (not flag the subsequent ones as fail)
        # overwrite if exists
        with open(LOG_DIR + "/" + "doneToLine." + filename, "w") as f:
            f.write(str(currentLine) + "\n")
    sys.stdout.flush()
    # already has \n in each line
    # doing this kludge to put multiple line message in the python traceback,
    # so it will be reported by jenkins. The problem with printing it to stdout
    # is that we're in the tearDown class, and jenkins won't have this captured until
    # after it thinks the test is done (tearDown is separate from the test)
    # we probably could have a tearDown with the test rather than the class, but we
    # would have to update all tests.
    if len(errLines)!=0:
        # check if the lines all start with INFO: or have "apache" in them
        justInfo = 0
        for e in errLines:
            # very hacky. try to ignore the captured broken pipe exceptions.
            # if any line has this, ignore the whole group (may miss something)
            if "Broken pipe" in e:
                justInfo = 1
            # if every line has this (beginning of line match)
            elif justInfo==0 and not re.match("INFO:", e):
                justInfo = 2
        if justInfo==2:
            emsg1 = " check_sandbox_for_errors: Errors in sandbox stdout or stderr (or R stdout/stderr).\n" + \
                     "Could have occurred at any prior time\n\n"
            emsg2 = "".join(errLines)
            errorFound = True
            errorMessage = python_test_name + emsg1 + emsg2
            # just print if using the pattern match
            if pattern:
                print "####################################################################"
                print errorMessage
                print "####################################################################"
            if not pattern and not sandboxIgnoreErrors:
                raise Exception(errorMessage)
    if errorFound:
        return errorMessage
    else:
        ## print "h2o_sandbox: h2o logs seem okay"
        return
if __name__ == "__main__":
    # if you call from the command line, we'll just pass the first two positionally.
    # here's a low budget argsparse :) (args are optional!)
    # NOTE(review): argv values are strings, so cloudShutdownIsError /
    # sandboxIgnoreErrors become truthy for ANY non-empty value (even
    # "False") -- confirm whether that is intended.
    arg_names = ['me', 'LOG_DIR', 'python_test_name', 'cloudShutdownIsError', 'sandboxIgnoreErrors']
    args = dict(itertools.izip_longest(arg_names, sys.argv))
    errorMessage = check_sandbox_for_errors(
        LOG_DIR=args['LOG_DIR'],
        python_test_name=args['python_test_name'],
        cloudShutdownIsError=args['cloudShutdownIsError'],
        sandboxIgnoreErrors=args['sandboxIgnoreErrors'])
    # it shouldn't normally return here, because check_sandbox_for_errors
    # raises the exception itself when errors are fatal
    if errorMessage:
        raise Exception('Error found in the logs that we want to consider fatal')
| |
import os
import socket
import threading
import unittest
from collections import OrderedDict
from mock import MagicMock
from parameterized import parameterized
from hazelcast.config import _Config
from hazelcast.core import Address
from hazelcast.reactor import (
AsyncoreReactor,
_WakeableLoop,
_SocketedWaker,
_PipedWaker,
_BasicLoop,
AsyncoreConnection,
)
from hazelcast.util import AtomicInteger
from tests.base import HazelcastTestCase
from tests.util import get_current_timestamp
class ReactorTest(unittest.TestCase):
    """Smoke tests for the AsyncoreReactor lifecycle."""

    def test_default_loop_is_wakeable(self):
        # A freshly constructed reactor should select the wakeable loop
        # implementation by default.
        self.assertIsInstance(AsyncoreReactor()._loop, _WakeableLoop)

    def test_reactor_lifetime(self):
        threads_before = threading.active_count()
        reactor = AsyncoreReactor()
        reactor.start()
        try:
            # Starting the reactor spawns exactly one extra thread.
            self.assertEqual(threads_before + 1, threading.active_count())
        finally:
            reactor.shutdown()
        # Once shut down, the reactor thread must be gone again.
        self.assertEqual(threads_before, threading.active_count())
# (test-name suffix, loop implementation) pairs consumed by
# @parameterized.expand in LoopTest below.
LOOP_CLASSES = [
    ("wakeable", _WakeableLoop),
    ("basic", _BasicLoop),
]
class LoopTest(HazelcastTestCase):
    """Exercises both loop implementations listed in LOOP_CLASSES."""

    def test_wakeable_loop_default_waker(self):
        loop = _WakeableLoop({})
        try:
            # Windows cannot select() on pipes, so a socket pair is used
            # there; a plain pipe everywhere else.
            if os.name == "nt":
                self.assertIsInstance(loop.waker, _SocketedWaker)
            else:
                self.assertIsInstance(loop.waker, _PipedWaker)
        finally:
            loop.waker.close()

    def test_wakeable_loop_waker_closes_last(self):
        dispatchers = OrderedDict()
        loop = _WakeableLoop(dispatchers)  # Waker comes first in the dict
        # Fix: the original passed the misspelled kwarg "writeable"; the
        # asyncore hook is spelled writable(), so the mock guard never
        # actually took effect.
        mock_dispatcher = MagicMock(readable=lambda: False, writable=lambda: False)
        dispatchers[loop.waker._fileno + 1] = mock_dispatcher
        original_close = loop.waker.close

        def assertion():
            # By the time the waker itself is closed, every other
            # dispatcher must already have been closed.
            mock_dispatcher.close.assert_called()
            original_close()

        loop.waker.close = assertion
        loop.shutdown()

    @parameterized.expand(LOOP_CLASSES)
    def test_check_loop(self, _, cls):
        loop = cls({})
        # For the WakeableLoop, we are checking that
        # the loop can be waken up, and once the reactor
        # handles the written bytes, it is not awake
        # anymore. Assertions are in the method
        # implementation. For, the BasicLoop, this should
        # be no-op, just checking it is not raising any
        # error.
        loop.check_loop()

    @parameterized.expand(LOOP_CLASSES)
    def test_add_timer(self, _, cls):
        call_count = AtomicInteger()

        def callback():
            call_count.add(1)

        loop = cls({})
        loop.start()
        loop.add_timer(0, callback)  # already expired, should be run immediately

        def assertion():
            self.assertEqual(1, call_count.get())

        try:
            self.assertTrueEventually(assertion)
        finally:
            loop.shutdown()

    @parameterized.expand(LOOP_CLASSES)
    def test_timer_cleanup(self, _, cls):
        call_count = AtomicInteger()

        def callback():
            call_count.add(1)

        loop = cls({})
        loop.start()
        loop.add_timer(float("inf"), callback)  # never expired, must be cleaned up
        try:
            self.assertEqual(0, call_count.get())
        finally:
            loop.shutdown()

        def assertion():
            # Shutdown must run (hence count) the never-expiring timer.
            self.assertEqual(1, call_count.get())

        self.assertTrueEventually(assertion)

    @parameterized.expand(LOOP_CLASSES)
    def test_timer_that_adds_another_timer(self, _, cls):
        loop = cls({})
        loop.start()
        call_count = AtomicInteger()

        def callback():
            if call_count.get() == 0:
                loop.add_timer(0, callback)
            call_count.add(1)

        loop.add_timer(float("inf"), callback)
        loop.shutdown()

        def assertion():
            self.assertEqual(2, call_count.get())  # newly added timer must also be cleaned up

        self.assertTrueEventually(assertion)

    @parameterized.expand(LOOP_CLASSES)
    def test_timer_that_shuts_down_loop(self, _, cls):
        # It may be the case that, we want to shutdown the client(hence, the loop) in timers
        loop = cls({})
        loop.start()
        loop.add_timer(0, lambda: loop.shutdown())

        def assertion():
            self.assertFalse(loop._is_live)

        try:
            self.assertTrueEventually(assertion)
        finally:
            loop.shutdown()  # Should be no op
class SocketedWakerTest(unittest.TestCase):
    """Tests the socket-pair based waker (the Windows implementation)."""

    def setUp(self):
        self.waker = _SocketedWaker({})

    def tearDown(self):
        try:
            self.waker.close()
        except Exception:
            # The waker may have been closed already by the test body.
            # Fix: narrowed from a bare `except:`, which would also have
            # swallowed KeyboardInterrupt/SystemExit.
            pass

    def test_wake(self):
        waker = self.waker
        self.assertFalse(waker.awake)
        waker.wake()
        self.assertTrue(waker.awake)
        # wake() writes a single byte into the writer end of the pair.
        self.assertEqual(b"x", waker._reader.recv(1))

    def test_wake_while_awake(self):
        waker = self.waker
        waker.wake()
        waker.wake()
        self.assertTrue(waker.awake)
        self.assertEqual(b"x", waker._reader.recv(2))  # only the first one should write

    def test_handle_read(self):
        waker = self.waker
        waker.wake()
        self.assertTrue(waker.awake)
        waker.handle_read()
        self.assertFalse(waker.awake)
        # BlockingIOError on Py3, socket.error on Py2
        with self.assertRaises((IOError, socket.error)):
            # handle_read should consume the socket, there should be nothing
            waker._reader.recv(1)

    def test_close(self):
        waker = self.waker
        writer = waker._writer
        reader = waker._reader
        self.assertNotEqual(-1, writer.fileno())
        self.assertNotEqual(-1, reader.fileno())
        waker.close()
        # Closed sockets report fileno() == -1.
        self.assertEqual(-1, writer.fileno())
        self.assertEqual(-1, reader.fileno())
class PipedWakerTest(unittest.TestCase):
    """Tests the os.pipe based waker (the non-Windows implementation)."""

    def setUp(self):
        self.waker = _PipedWaker({})

    def tearDown(self):
        try:
            self.waker.close()
        except Exception:
            # The waker may have been closed already by the test body.
            # Fix: narrowed from a bare `except:`, which would also have
            # swallowed KeyboardInterrupt/SystemExit.
            pass

    def test_wake(self):
        waker = self.waker
        self.assertFalse(waker.awake)
        waker.wake()
        self.assertTrue(waker.awake)
        # wake() writes a single byte into the write end of the pipe.
        self.assertEqual(b"x", os.read(waker._read_fd, 1))

    def test_wake_while_awake(self):
        waker = self.waker
        waker.wake()
        waker.wake()
        self.assertTrue(waker.awake)
        self.assertEqual(b"x", os.read(waker._read_fd, 2))  # only the first one should write

    def test_handle_read(self):
        waker = self.waker
        waker.wake()
        self.assertTrue(waker.awake)
        waker.handle_read()
        self.assertFalse(waker.awake)
        if os.name == "nt":
            # pipes are not non-blocking on Windows, assertion below blocks forever on Windows
            return
        # BlockingIOError on Py3, OSError on Py2
        with self.assertRaises((IOError, OSError)):
            # handle_read should consume the pipe, there should be nothing
            os.read(waker._read_fd, 1)

    def test_close(self):
        waker = self.waker
        w_fd = waker._write_fd
        r_fd = waker._read_fd
        # Both ends must be usable before close...
        self.assertEqual(1, os.write(w_fd, b"x"))
        self.assertEqual(b"x", os.read(r_fd, 1))
        waker.close()
        # ...and raise once the descriptors have been closed.
        with self.assertRaises(OSError):
            os.write(w_fd, b"x")
        with self.assertRaises(OSError):
            os.read(r_fd, 1)
class MockServer:
    """One-shot TCP server: accepts a single connection, closes it, exits.

    Used to give AsyncoreConnection something real to connect to.
    """

    def __init__(self):
        self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Bound timeout so the accept thread cannot hang a test forever.
        self._s.settimeout(3.0)
        self._s.bind(("localhost", 0))  # port 0: let the OS pick a free port
        self._s.listen(1)
        self._t = threading.Thread(target=self._handler)
        self._t.start()

    def _handler(self):
        try:
            conn, _ = self._s.accept()
            conn.close()
        except Exception:
            # accept() may time out or fail when the listening socket is
            # closed under it; either way the thread should just exit.
            # Fix: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            pass

    def get_address(self):
        """Returns the bound (host, port) as a hazelcast Address."""
        host, port = self._s.getsockname()
        return Address(host, port)

    def close(self):
        """Closes the listening socket and joins the accept thread."""
        self._s.close()
        self._t.join()
class AsyncoreConnectionTest(unittest.TestCase):
    """Tests AsyncoreConnection construction against a throwaway local server."""

    def setUp(self):
        self.server = None

    def tearDown(self):
        if self.server:
            self.server.close()

    def _connect_with_options(self, socket_options):
        # Spin up a one-shot server and open a connection whose config
        # carries the given socket options.
        self.server = MockServer()
        config = _Config()
        config.socket_options = socket_options
        return AsyncoreConnection(
            MagicMock(map=dict()), None, None, self.server.get_address(), config, None
        )

    def test_socket_options(self):
        conn = self._connect_with_options([(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)])
        try:
            # By default this is set to 0
            self.assertEqual(1, conn.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR))
        finally:
            conn._inner_close()

    def test_receive_buffer_size(self):
        # When the SO_RCVBUF option is set, we should try
        # to use that value while trying to read something.
        buf_size = 64 * 1024
        conn = self._connect_with_options([(socket.SOL_SOCKET, socket.SO_RCVBUF, buf_size)])
        try:
            # By default this is set to 128000
            self.assertEqual(buf_size, conn.receive_buffer_size)
        finally:
            conn._inner_close()

    def test_send_buffer_size(self):
        # When the SO_SNDBUF option is set, we should try
        # to use that value while trying to write something.
        buf_size = 64 * 1024
        conn = self._connect_with_options([(socket.SOL_SOCKET, socket.SO_SNDBUF, buf_size)])
        try:
            # By default this is set to 128000
            self.assertEqual(buf_size, conn.send_buffer_size)
        finally:
            conn._inner_close()

    def test_constructor_with_unreachable_addresses(self):
        unreachable = Address("192.168.0.1", 5701)
        config = _Config()
        started_at = get_current_timestamp()
        conn = AsyncoreConnection(MagicMock(map=dict()), MagicMock(), None, unreachable, config, None)
        try:
            # Even though the server is unreachable, the constructor must
            # return well before the configured connection timeout.
            self.assertLess(get_current_timestamp() - started_at, config.connection_timeout)
        finally:
            conn.close(None, None)

    def test_resources_cleaned_up_after_immediate_failure(self):
        bad_address = Address("invalid-address", 5701)
        config = _Config()
        mock_reactor = MagicMock(map={})
        try:
            conn = AsyncoreConnection(mock_reactor, MagicMock(), None, bad_address, config, None)
            conn.close(None, None)
            self.fail("Connection attempt to an invalid address should fail immediately")
        except socket.error:
            # Constructor of the connection should remove itself from the
            # dispatchers map of the reactor.
            self.assertEqual(0, len(mock_reactor.map))
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This is a modified version of 'service.py' (version 1.1.1), part of the 'atom' module
# from the gdata-python-client project (http://code.google.com/p/gdata-python-client/) by Google Inc.
# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# It has been modified to support json formatted data instead of atom.
# Copyright (C) 2012 rambla.eu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeffrey Scudder)'
"""RawsService provides CRUD ops. in line with the Atom Publishing Protocol.
RawsService: Encapsulates the ability to perform insert, update and delete
operations with the Atom Publishing Protocol and json formatted data.
An instance can perform query, insertion, deletion, and
update.
"""
import re
import httplib
import urllib
import raws_json
import json
# Module level variable specifies which module should be used by RawsService
# objects to make HttpRequests. This setting can be overridden on each
# instance of RawsService.
# (See the `handler` argument of RawsService.__init__; the module must
# expose an HttpRequest() function.)
http_request_handler = raws_json
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class BadAuthentication(Error):
    """Raised when supplied credentials are rejected (inherited from the
    gdata service module this file derives from; not raised here)."""
    pass
class NotAuthenticated(Error):
    """Raised when an operation requires authentication but none is set
    (inherited from the gdata service module; not raised in this file)."""
    pass
class NonAuthSubToken(Error):
    """Token-type mismatch error carried over from the gdata AuthSub flow;
    not raised in this file."""
    pass
class RequestError(Error):
    """Raised when the server answers with an unexpected HTTP status.

    Raised with a dict argument holding 'status', 'reason' and 'body'
    (see Get/PostOrPut/Delete/PostTxtFile).
    """
    pass
class UnexpectedReturnType(Error):
    """Raised when a response is not of the expected type; only used by the
    commented-out GetEntry/GetFeed helpers below."""
    pass
class BadAuthenticationServiceURL(Error):
    """Raised for malformed authentication-service URLs (inherited from the
    gdata service module; not raised in this file)."""
    pass
class Feed(object):
    """Collects the entries of a parsed json feed document.

    Args:
        feed: dict (optional) Parsed json feed shaped like
            {"feed": {"entry": [...]}}. When omitted, or when the feed has
            no "entry" key, self.entries stays empty.
    """

    def __init__(self, feed=None):
        self.entries = []
        # Fix: the declared default `feed=None` previously crashed with a
        # TypeError because None was indexed unconditionally; guard it.
        if feed is not None and "entry" in feed["feed"]:
            for e in feed["feed"]["entry"]:
                self.entries.append({"entry": e,})
class RawsService(raws_json.JsonService):
"""Contains elements needed for Raws login and CRUD request headers.
Maintains additional headers (tokens for example) needed for the Raws
services to allow a user to perform inserts, updates, and deletes.
"""
def __init__(self, username=None, password=None, source=None, server=None, port = None,
additional_headers=None, handler=None, ssl = False):
"""Creates an object of type RawsService.
Args:
username: string (optional) The username for authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection will be opened. (eg 'rass.cdn01.rambla.be').
additional_headers: dictionary (optional) Any additional headers which should be included with CRUD operations.
handler: module (optional) The module whose HttpRequest function should be used when making requests to the server. The default value is atom.service.
ssl: bool (optional) Use SSL encryption.
"""
self.username = username
self.password = password
self.server = server
self.additional_headers = additional_headers or {}
self.handler = handler or http_request_handler
self.ssl = ssl
if port:
self.port = port
elif ssl:
self.port = 443
else:
self.port = 80 # default
self.__SetSource(source)
# Authentication operations
if self.username and self.password:
self.UseBasicAuth(self.username, self.password)
    def set_credentials(self, username, password, server = None, port = None):
        """ Sets the authentication credentials and server name on the object.

        Args:
            username: string Username for HTTP basic authentication.
            password: string The user's password.
            server: string (optional) New server host; kept unchanged if falsy.
            port: int (optional) New server port; kept unchanged if falsy.
        """
        self.username = username
        self.password = password
        if server:
            self.server = server
        if port:
            self.port = port
        # Authentication operations
        # Mirrors __init__: (re)registers basic auth once both parts are set.
        if self.username and self.password:
            self.UseBasicAuth(self.username, self.password)
def get_service_uri(self):
base_uri = "http://" + self.server
if self.port:
base_uri += ":" + self.port
return base_uri
    # Private methods to create the source property.
    def __GetSource(self):
        return self.__source

    def __SetSource(self, new_source):
        self.__source = new_source
        # Update the UserAgent header to include the new application name.
        # NOTE(review): this runs even when new_source is None, yielding a
        # 'None Raws-Python/1.1.1' User-Agent — confirm that is intended.
        self.additional_headers['User-Agent'] = '%s Raws-Python/1.1.1' % (self.__source)

    # Read/write property wrapping the name-mangled accessors above.
    source = property(__GetSource, __SetSource, doc="""The source is the name of the application making the request. It should be in the form company_id-app_name-app_version""")
    # CRUD operations
    def Get(self, uri, extra_headers=None, redirects_remaining=4, encoding='UTF-8', converter=None):
        """Query the Raws API with the given URI

        The uri is the portion of the URI after the server value
        To perform a query against RAMS, set the server to
        'rams.mon01.rambla.be' and set the uri to '/traffic/...', where ... is
        your query. For example, to get recursive file traffic: '/traffic/?kind=recursive'

        Args:
            uri: string The query in the form of a URI. Example:
                '/traffic/?kind=recursive'.
            extra_headers: dictionary (optional) Extra HTTP headers to be included
                in the GET request. These headers are in addition to
                those stored in the client's additional_headers property.
                The client automatically sets the Content-Type and
                Authorization headers.
            redirects_remaining: int (optional) Tracks the number of additional
                redirects this method will allow. If the service object receives
                a redirect and remaining is 0, it will not follow the redirect.
                This was added to avoid infinite redirect loops.
            encoding: string (optional) The character encoding for the server's
                response. Default is UTF-8
            converter: func (optional) A function which will transform
                the server's results before it is returned. Example: use
                RawsFeedFromString to parse the server response as if it
                were a RawsFeed.

        Returns:
            If there is no ResultsTransformer specified in the call, a RawsFeed
            or RawsEntry depending on which is sent from the server. If the
            response is neither a feed or entry and there is no ResultsTransformer,
            return a string. If there is a ResultsTransformer, the returned value
            will be that of the ResultsTransformer function.

        NOTE(review): redirects_remaining, encoding and converter are accepted
        but never used by this json-based implementation — confirm whether
        they can be retired.
        """
        # Always ask for json; merge with (rather than clobber) any
        # caller-supplied headers.
        if extra_headers is None:
            extra_headers = {"Accept":"application/json"}
        else:
            extra_headers.update({"Accept":"application/json"})
        server_response = self.handler.HttpRequest(self, 'GET', None, uri, extra_headers=extra_headers)
        result_body = server_response.read()
        if server_response.status == 200:
            return json.loads(s = result_body)
        else:
            # Python 2 raise syntax; the dict gives callers status/reason/body.
            raise RequestError, {'status': server_response.status,
                    'reason': server_response.reason, 'body': result_body}
# def GetMedia(self, uri, extra_headers=None, file_path = None):
# """Returns a MediaSource containing media and its metadata from the given
# URI string, storing it into the local file_path.
# """
# response_handle = self.handler.HttpRequest(self, 'GET', None, uri, extra_headers=extra_headers)
# if not response_handle:
# raise rawsc.RawscException('Failed to retrieve response handle from URI = %s.' % str(uri))
# media_source = rawsc.MediaSource(file_handle = response_handle, content_type = response_handle.getheader('Content-Type'), content_length = response_handle.getheader('Content-Length'))
# if not media_source:
# raise rawsc.RawscException('Failed to create media_source object after retrieving URI = %s.' % str(uri))
# if file_path is not None:
# if not media_source.writeFile(file_path):
# raise rawsc.RawscException('Failed writing response (URI = %s) to path = %s.' % (str(uri), str(file_path)))
# return media_source
#
#
# def GetEntry(self, uri, extra_headers=None):
# """Query the Raws API with the given URI and receive an Entry.
#
# See also documentation for rawsc.service.Get
#
# Args:
# uri: string The query in the form of a URI. Example:
# '/item/mysubdir/myfile.mp4'.
# extra_headers: dictionary (optional) Extra HTTP headers to be included
# in the GET request. These headers are in addition to
# those stored in the client's additional_headers property.
# The client automatically sets the Content-Type and
# Authorization headers.
#
# Returns:
# A RawsEntry built from the XML in the server's response.
# """
#
# result = self.Get(uri, extra_headers, converter=atom.EntryFromString)
# if isinstance(result, atom.Entry):
# return result
# else:
# raise UnexpectedReturnType, 'Server did not send an entry'
#
# def GetFeed(self, uri, extra_headers=None,
# converter=rawsc.RawsFeedFromString):
# """Query the Raws API with the given URI and receive a Feed.
#
# See also documentation for rawsc.service.Get
#
# Args:
# uri: string The query in the form of a URI. Example:
# '/dir/mysubdir/?kind=file'.
# extra_headers: dictionary (optional) Extra HTTP headers to be included
# in the GET request. These headers are in addition to
# those stored in the client's additional_headers property.
# The client automatically sets the Content-Type and
# Authorization headers.
#
# Returns:
# A RawsFeed built from the XML in the server's response.
# """
#
# result = self.Get(uri, extra_headers, converter=converter)
# if isinstance(result, atom.Feed):
# return result
# else:
# raise UnexpectedReturnType, 'Server did not send a feed'
#
# def GetNext(self, feed):
# """Requests the next 'page' of results in the feed.
#
# This method uses the feed's next link to request an additional feed
# and uses the class of the feed to convert the results of the GET request.
#
# Args:
# feed: atom.Feed or a subclass. The feed should contain a next link and
# the type of the feed will be applied to the results from the
# server. The new feed which is returned will be of the same class
# as this feed which was passed in.
#
# Returns:
# A new feed representing the next set of results in the server's feed.
# The type of this feed will match that of the feed argument.
# """
# next_link = feed.GetNextLink()
# # Create a closure which will convert an XML string to the class of
# # the feed object passed in.
# def ConvertToFeedClass(xml_string):
# return atom.CreateClassFromXMLString(feed.__class__, xml_string)
# # Make a GET request on the next link and use the above closure for the
# # converted which processes the XML string from the server.
# if next_link and next_link.href:
# return self.Get(next_link.href, converter=ConvertToFeedClass)
# else:
# return None
#
    def Post(self, data, uri, extra_headers=None, url_params=None,
             escape_params=True, redirects_remaining=4, media_source=None,
             converter=None):
        """Insert or update data into a Raws service at the given URI.

        Args:
            data: string, ElementTree._Element, atom.Entry, or rawsc.RawsEntry The
                XML to be sent to the uri.
            uri: string The location (feed) to which the data should be inserted.
                Example: '/job/'.
            extra_headers: dict (optional) HTTP headers which are to be included.
                The client automatically sets the Content-Type,
                Authorization, and Content-Length headers.
            url_params: dict (optional) Additional URL parameters to be included
                in the URI. These are translated into query arguments
                in the form '&dict_key=value&...'.
                Example: {'paginate_by': '50'} becomes &paginate_by=50
            escape_params: boolean (optional) If false, the calling code has already
                ensured that the query will form a valid URL (all
                reserved characters have been escaped). If true, this
                method will escape the query and any URL parameters
                provided.
            media_source: MediaSource (optional) Container for the media to be sent
                along with the entry, if provided.
            converter: func (optional) A function which will be executed on the
                server's response. Often this is a function like
                RawsEntryFromString which will parse the body of the server's
                response and return a RawsEntry.

        Returns:
            If the post succeeded, this method will return a RawsFeed, RawsEntry,
            or the results of running converter on the server's result body (if
            converter was specified).
        """
        # Thin wrapper: all of the work happens in PostOrPut with verb='POST'.
        return self.PostOrPut('POST', data, uri, extra_headers=extra_headers,
                              url_params=url_params, escape_params=escape_params,
                              redirects_remaining=redirects_remaining,
                              media_source=media_source, converter=converter)
def PostOrPut(self, verb, data, uri, extra_headers=None, url_params=None,
escape_params=True, redirects_remaining=4, media_source=None,
converter=None):
"""Insert data into a Raws service at the given URI.
Args:
verb: string, either 'POST' or 'PUT'
data: string, ElementTree._Element, atom.Entry, or rawsc.RawsEntry The
XML to be sent to the uri.
uri: string The location (feed) to which the data should be inserted.
Example: '/job/'.
extra_headers: dict (optional) HTTP headers which are to be included.
The client automatically sets the Content-Type,
Authorization, and Content-Length headers.
url_params: dict (optional) Additional URL parameters to be included
in the URI. These are translated into query arguments
in the form '&paginate_by=50&...'.
Example: {'paginate_by': '50'} becomes &paginate_by=50
escape_params: boolean (optional) If false, the calling code has already
ensured that the query will form a valid URL (all
reserved characters have been escaped). If true, this
method will escape the query and any URL parameters
provided.
media_source: MediaSource (optional) Container for the media to be sent
along with the entry, if provided.
converter: func (optional) A function which will be executed on the
server's response. Often this is a function like
RawsEntryFromString which will parse the body of the server's
response and return a RawsEntry.
Returns:
If the post succeeded, this method will return a RawsFeed, RawsEntry,
or the results of running converter on the server's result body (if
converter was specified).
"""
if extra_headers is None:
extra_headers = {"Accept":"application/json"}
else:
extra_headers.update({"Accept":"application/json"})
if data and media_source:
if ElementTree.iselement(data):
data_str = ElementTree.tostring(data)
else:
data_str = str(data)
multipart = []
multipart.append('Media multipart posting\r\n--END_OF_PART\r\n' + \
'Content-Type: application/atom+xml\r\n\r\n')
multipart.append('\r\n--END_OF_PART\r\nContent-Type: ' + \
media_source.content_type+'\r\n\r\n')
multipart.append('\r\n--END_OF_PART--\r\n')
extra_headers['MIME-version'] = '1.0'
extra_headers['Content-Length'] = str(len(multipart[0]) +
len(multipart[1]) + len(multipart[2]) +
len(data_str) + media_source.content_length)
server_response = self.handler.HttpRequest(self, verb,
[multipart[0], data_str, multipart[1], media_source.file_handle,
multipart[2]], uri,
extra_headers=extra_headers, url_params=url_params,
escape_params=escape_params,
content_type='multipart/related; boundary=END_OF_PART')
result_body = server_response.read()
elif media_source or isinstance(data, raws_json.MediaSource):
if isinstance(data, raws_json.MediaSource):
media_source = data
extra_headers['Content-Length'] = str(media_source.content_length)
extra_headers['Slug'] = str(media_source.svr_filename)
server_response = self.handler.HttpRequest(self, verb,
media_source.file_handle, uri, extra_headers=extra_headers,
url_params=url_params, escape_params=escape_params,
content_type=media_source.content_type)
result_body = server_response.read()
else:
http_data = json.dumps(data)
content_type = 'application/json'
server_response = self.handler.HttpRequest(self, verb,
http_data, uri, extra_headers=extra_headers,
url_params=url_params, escape_params=escape_params,
content_type=content_type)
result_body = server_response.read()
# Server returns 201 for most post requests, but when performing a batch
# request the server responds with a 200 on success.
if server_response.status == 201 or server_response.status == 200:
return json.loads(result_body)
else:
raise RequestError, {'status': server_response.status, 'reason': server_response.reason, 'body': result_body}
    def Put(self, data, uri, extra_headers=None, url_params=None,
            escape_params=True, redirects_remaining=3, media_source=None,
            converter=None):
        """Updates an entry at the given URI.

        Args:
            data: string, ElementTree._Element, or xml_wrapper.ElementWrapper The
                XML containing the updated data.
            uri: string A URI indicating entry to which the update will be applied.
                Example: '/dir/my_new_subdir/'
            extra_headers: dict (optional) HTTP headers which are to be included.
                The client automatically sets the Content-Type,
                Authorization, and Content-Length headers.
            url_params: dict (optional) Additional URL parameters to be included
                in the URI. These are translated into query arguments
                in the form '&paginate_by=50&...'.
                Example: {'paginate_by': '50'} becomes &paginate_by=50
            escape_params: boolean (optional) If false, the calling code has already
                ensured that the query will form a valid URL (all
                reserved characters have been escaped). If true, this
                method will escape the query and any URL parameters
                provided.
            redirects_remaining: int (optional) Remaining redirects to follow.
            media_source: MediaSource (optional) Media to send with the entry.
            converter: func (optional) A function which will be executed on the
                server's response. Often this is a function like
                RawsEntryFromString which will parse the body of the server's
                response and return a RawsEntry.

        Returns:
            If the put succeeded, this method will return a RawsFeed, RawsEntry,
            or the results of running converter on the server's result body (if
            converter was specified).
        """
        # Thin wrapper: all of the work happens in PostOrPut with verb='PUT'.
        return self.PostOrPut('PUT', data, uri, extra_headers=extra_headers,
                              url_params=url_params, escape_params=escape_params,
                              redirects_remaining=redirects_remaining,
                              media_source=media_source, converter=converter)
    def Delete(self, uri, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4):
        """Deletes the entry at the given URI.

        Args:
            uri: string The URI of the entry to be deleted. Example:
                '/item/mysubdir/myfile.mp4'
            extra_headers: dict (optional) HTTP headers which are to be included.
                The client automatically sets the Content-Type and
                Authorization headers.
            url_params: dict (optional) Additional URL parameters to be included
                in the URI. These are translated into query arguments
                in the form '&paginate_by=50&...'.
                Example: {'paginate_by': '50'} becomes &paginate_by=50
            escape_params: boolean (optional) If false, the calling code has already
                ensured that the query will form a valid URL (all
                reserved characters have been escaped). If true, this
                method will escape the query and any URL parameters
                provided.
            redirects_remaining: int (optional) Accepted but unused here.

        Returns:
            True if the entry was deleted.

        Raises:
            RequestError: if the server answers with anything but 204.
        """
        if extra_headers is None:
            extra_headers = {}
        server_response = self.handler.HttpRequest(self, 'DELETE', None, uri,
            extra_headers=extra_headers, url_params=url_params,
            escape_params=escape_params)
        result_body = server_response.read()
        # 204 No Content is the only success status for DELETE.
        if server_response.status == 204:
            return True
        else:
            # Python 2 raise syntax; the dict gives callers status/reason/body.
            raise RequestError, {'status': server_response.status,
                    'reason': server_response.reason, 'body': result_body}
    def PostTxtFile(self, uri, data, filename, extra_headers=None, url_params=None, escape_params=True, redirects_remaining=4):
        """ POST data content to URI and set filename as SLUG

        Args:
            uri: string Target location, e.g. '/item/mysubdir/'.
            data: string File content; its length becomes Content-Length.
            filename: string Sent in the Slug header (server-side filename).
            extra_headers / url_params / escape_params: as in PostOrPut.
            redirects_remaining: int (optional) Accepted but unused here.

        Returns:
            The decoded json response on HTTP 200/201.

        Raises:
            RequestError: for any other status code.
        """
        if extra_headers is None:
            extra_headers = {"Accept":"application/json"}
        else:
            extra_headers.update({"Accept":"application/json"})
        extra_headers['Content-Length'] = str(len(data))
        extra_headers['Slug'] = str(filename)
        # NOTE(review): 'application/data' is not a registered MIME type;
        # 'application/octet-stream' was probably intended — confirm with
        # the server before changing it.
        content_type = 'application/data'
        server_response = self.handler.HttpRequest(self, "POST", data, uri, extra_headers=extra_headers, url_params=url_params, escape_params=escape_params,
            content_type=content_type)
        result_body = server_response.read()
        # Server returns 201 for most post requests, but when performing a batch
        # request the server responds with a 200 on success.
        if server_response.status == 201 or server_response.status == 200:
            return json.loads(result_body)
        else:
            raise RequestError, {'status': server_response.status, 'reason': server_response.reason, 'body': result_body}
def get_enclosure_link(self, entry):
url = None
for link in entry["entry"]["link"]:
if link.has_key("rel"):
if "enclosure" == link["rel"]:
url = link["href"]
break
return url
class Query(dict):
    """Constructs a query URL to be used in GET requests

    Url parameters are created by adding key-value pairs to this object as a
    dict. For example, to add &paginate_by=50 to the URL do
    my_query['paginate_by'] = 50

    Category queries are created by adding category strings to the categories
    member. All items in the categories list will be concatenated with the /
    symbol (symbolizing a category x AND y restriction). If you would like to OR
    2 categories, append them as one string with a | between the categories.
    For example, do query.categories.append('Fritz|Laurie') to create a query
    like this feed/-/Fritz%7CLaurie . This query will look for results in both
    categories.
    """

    def __init__(self, feed=None, text_query=None, params=None,
                 categories=None):
        """Constructor for Query

        Args:
            feed: str (optional) The path for the feed (Examples:
                '/dir/mysubdir/' or 'customer/used/'
            text_query: str (optional) The contents of the q query parameter. The
                contents of the text_query are URL escaped upon conversion to a URI.
            params: dict (optional) Parameter value string pairs which become URL
                params when translated to a URI. These parameters are added to the
                query's items (key-value pairs).
            categories: list (optional) List of category strings which should be
                included as query categories. Currently not supported by RAWS.
                If you want to get results from category A or B (both
                categories), specify a single list item 'A|B'.
        """
        self.feed = feed
        self.categories = []
        # NOTE(review): text_query is only assigned when truthy, so
        # self.text_query does not exist on other instances — confirm
        # whether raws_json.BuildUri relies on hasattr() for this.
        if text_query:
            self.text_query = text_query
        if isinstance(params, dict):
            for param in params:
                self[param] = params[param]
        if isinstance(categories, list):
            for category in categories:
                self.categories.append(category)

    def ToUri(self):
        """Renders the feed path, categories and url params into a URI string."""
        q_feed = self.feed or ''
        category_string = '/'.join([urllib.quote_plus(c) for c in self.categories])
        # Add categories to the feed if there are any.
        if len(self.categories) > 0:
            q_feed = q_feed + '/-/' + category_string
        return raws_json.BuildUri(q_feed, self)

    def __str__(self):
        return self.ToUri()
| |
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""An implementation of a data store based on mongo."""
import hashlib
import threading
import time
from bson import binary
from bson import objectid
import pymongo
from pymongo import errors
import logging
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import utils
class MongoDataStore(data_store.DataStore):
  """A Mongo based data store.

  Data lives in two collections: "latest" keeps only the most recent version
  of each (subject, predicate) pair, while "versioned" keeps every
  timestamped version ever written.
  """

  def __init__(self):
    """Connect to mongo and ensure collections and indexes exist."""
    # Support various versions on the pymongo connection object.
    try:
      connector = pymongo.MongoClient
    except AttributeError:
      connector = pymongo.Connection

    if config_lib.CONFIG["Mongo.server"]:
      mongo_client = connector(
          host=config_lib.CONFIG["Mongo.server"],
          port=int(config_lib.CONFIG["Mongo.port"]))
    else:
      mongo_client = connector()

    # For now use a single "data" collection
    self.db_handle = mongo_client[config_lib.CONFIG["Mongo.db_name"]]

    # We have two collections - the latest collection maintains the latest data
    # and the versioned collection maintains versioned data.
    self.latest_collection = self.db_handle.latest
    self.versioned_collection = self.db_handle.versioned

    # Ensure we have the correct indexes.
    for idx in ["subject", "predicate", "timestamp"]:
      self.latest_collection.ensure_index(idx)
      self.versioned_collection.ensure_index(idx)

    super(MongoDataStore, self).__init__()

  def _GetCursor(self, spec, timestamp, limit):
    """Create a mongo cursor based on the timestamp restriction.

    Args:
      spec: A mongo query spec dict.
      timestamp: NEWEST_TIMESTAMP or None (latest collection),
        ALL_TIMESTAMPS, or a (start, end) tuple of microsecond timestamps
        (both against the versioned collection).
      limit: Maximum number of documents, falsy for unlimited.

    Returns:
      A pymongo cursor sorted newest first.

    Raises:
      data_store.Error: If the timestamp specification is not understood.
    """
    if timestamp == self.NEWEST_TIMESTAMP or timestamp is None:
      collection = self.latest_collection
    elif timestamp == self.ALL_TIMESTAMPS:
      collection = self.versioned_collection
    elif isinstance(timestamp, tuple):
      collection = self.versioned_collection
      start, end = timestamp
      spec = {"$and": [dict(timestamp={"$gte": int(start)}),
                       dict(timestamp={"$lte": int(end)}),
                       spec]}
    else:
      raise data_store.Error("Undefined timestamp specification.")

    cursor = collection.find(spec).sort("timestamp", pymongo.DESCENDING)
    if limit:
      cursor = cursor.limit(limit)

    return cursor

  def ResolveMulti(self, subject, attributes, timestamp=None, limit=None,
                   token=None):
    """Resolves multiple attributes at once for one subject.

    Yields:
      (predicate, value, timestamp) tuples, newest first.
    """
    self.security_manager.CheckDataStoreAccess(
        token, [subject], self.GetRequiredResolveAccess(attributes))

    # Build a query spec.
    spec = {"$and": [
        # Subject matches any of the requested subjects.
        dict(subject=utils.SmartUnicode(subject)),
        {"$or": [dict(predicate=utils.SmartUnicode(x)) for x in attributes]},
    ]}

    # The limit is applied manually here so we can stop the generator early.
    results_returned = 0
    for document in self._GetCursor(spec, timestamp, 0):
      subject = document["subject"]
      value = Decode(document)
      if limit:
        if results_returned >= limit:
          return
        results_returned += 1

      yield (document["predicate"], value, document["timestamp"])

  def DeleteSubject(self, subject, sync=False, token=None):
    """Completely deletes all information about the subject."""
    _ = sync  # Mongo removals are always synchronous.
    subject = utils.SmartUnicode(subject)
    self.security_manager.CheckDataStoreAccess(token, [subject], "w")
    self.latest_collection.remove(dict(subject=subject))
    self.versioned_collection.remove(dict(subject=subject))

  def MultiSet(self, subject, values, timestamp=None, replace=True,
               sync=True, to_delete=None, token=None):
    """Set multiple attributes' values for this subject in one operation.

    Args:
      subject: The subject row to write to.
      values: A dict mapping attribute -> sequence of values, where each
        value may be a (value, timestamp) tuple.
      timestamp: Default timestamp (microseconds) for values without one.
      replace: If True, delete all prior versions of written attributes.
      sync: If True, write with write-concern w=1.
      to_delete: Additional attributes to delete first.
      token: Access token.
    """
    self.security_manager.CheckDataStoreAccess(token, [subject], "w")
    if timestamp is None:
      timestamp = time.time() * 1e6

    # Prepare a mongo bulk insert for all the values.
    documents = []
    subject = utils.SmartUnicode(subject)
    to_delete = set(to_delete or [])

    latest = {}

    # Build a document for each unique timestamp.
    for attribute, sequence in values.items():
      for value in sequence:
        if isinstance(value, tuple):
          value, entry_timestamp = value
        else:
          entry_timestamp = timestamp

        if entry_timestamp is None:
          entry_timestamp = timestamp

        attribute = utils.SmartUnicode(attribute)
        prefix = attribute.split(":", 1)[0]

        document = dict(subject=subject, timestamp=int(entry_timestamp),
                        predicate=attribute, prefix=prefix)
        _Encode(document, value)
        documents.append(document)
        latest[attribute] = document

        # Replacing means to delete all versions of the attribute first.
        if replace:
          to_delete.add(attribute)

    if to_delete:
      self.DeleteAttributes(subject, to_delete, token=token)

    # Just write using bulk insert mode.
    if documents:
      try:
        self.versioned_collection.insert(documents, w=1 if sync else 0)
      except errors.PyMongoError as e:
        logging.error("Mongo Error %s", e)
        raise data_store.Error(utils.SmartUnicode(e))

      # Maintain the latest documents in the latest collection.
      for attribute, document in latest.items():
        document.pop("_id", None)
        # Bug fix: query on the prefix recorded in this document. The old
        # code reused the "prefix" loop variable from the build loop above,
        # which always held the prefix of the *last* attribute written.
        self.latest_collection.update(
            dict(subject=subject, predicate=attribute,
                 prefix=document["prefix"]),
            document, upsert=True, w=1 if sync else 0)

  def DeleteAttributes(self, subject, attributes, start=None, end=None,
                       sync=True, token=None):
    """Remove all the attributes from this subject.

    If start/end are given, only versions in that (inclusive) timestamp
    range are removed and the latest collection is rebuilt from the newest
    surviving version of each attribute.
    """
    subject = utils.SmartUnicode(subject)
    self.security_manager.CheckDataStoreAccess(token, [subject], "w")
    if not attributes:
      # Nothing to delete.
      return

    # Build a spec to select the subject and any of the attributes.
    spec = {"$and": [
        dict(subject=subject),
        {"$or": [dict(predicate=utils.SmartUnicode(x)) for x in attributes]},
    ]}

    if not start and not end:
      # Just delete all the versions.
      self.versioned_collection.remove(spec)
      self.latest_collection.remove(spec)
      return

    # Same spec without the timestamp restriction, used to rebuild "latest".
    unversioned_spec = {"$and": [
        dict(subject=subject),
        {"$or": [dict(predicate=utils.SmartUnicode(x)) for x in attributes]},
    ]}

    if start:
      spec["$and"].append(dict(timestamp={"$gte": int(start)}))

    if not end:
      # We can optimize this case since the latest version will always
      # be unchanged or deleted.
      self.versioned_collection.remove(spec)
      self.latest_collection.remove(spec)
      return

    spec["$and"].append(dict(timestamp={"$lte": int(end)}))
    self.versioned_collection.remove(spec)

    to_delete = set(attributes)
    to_set = {}
    # Ascending timestamp order means the last document seen per attribute
    # is its newest surviving version.
    cursor = self.versioned_collection.find(unversioned_spec).sort("timestamp")
    for document in cursor:
      value = Decode(document)
      attribute = document["predicate"]
      to_delete.discard(attribute)
      timestamp = document["timestamp"]
      prefix = attribute.split(":", 1)[0]

      document = dict(subject=subject, timestamp=timestamp,
                      predicate=attribute, prefix=prefix)
      _Encode(document, value)
      to_set[attribute] = document

    if to_delete:
      delete_spec = {"$and": [
          dict(subject=subject),
          {"$or": [dict(predicate=utils.SmartUnicode(x)) for x in attributes]},
      ]}
      self.latest_collection.remove(delete_spec)

    # Bug fix: iterate (attribute, document) pairs. The old code looped over
    # to_set.itervalues() while reusing the stale "attribute" and "prefix"
    # variables from the cursor loop above, so every upsert queried the last
    # scanned attribute.
    for attribute, document in to_set.items():
      self.latest_collection.update(
          dict(subject=subject, predicate=attribute,
               prefix=document["prefix"]),
          document, upsert=True, w=1 if sync else 0)

  def MultiResolveRegex(self, subjects, attribute_regex, timestamp=None,
                        limit=None, token=None):
    """Retrieves a bunch of subjects in one round trip.

    Returns:
      An iterator of (subject, [(attribute, value, timestamp), ...]) pairs.
    """
    self.security_manager.CheckDataStoreAccess(
        token, subjects, self.GetRequiredResolveAccess(attribute_regex))

    if not subjects:
      return {}

    result = {}
    dedup_set = set()

    # Build a query spec.
    # Subject matches any of the requested subjects.
    spec = dict(subject={"$in": [utils.SmartUnicode(x) for x in subjects]})

    # For a wildcard we just select all attributes by not applying a condition
    # at all.
    if isinstance(attribute_regex, basestring):
      attribute_regex = [attribute_regex]

    if attribute_regex != [".*"]:
      spec = {"$and": [
          spec,
          {"$or": [dict(predicate={"$regex": x}) for x in attribute_regex]},
      ]}

    for document in self._GetCursor(spec, timestamp, limit):
      subject = document["subject"]
      value = Decode(document)
      attribute = document.get("predicate")
      if attribute is None:
        # This might not be a normal aff4 attribute - transactions are one
        # example for this.
        continue

      # Sometimes due to race conditions in mongodb itself (upsert operation is
      # not atomic), the latest_collection can contain multiple versions of the
      # same attribute.
      if ((timestamp == self.NEWEST_TIMESTAMP or timestamp is None) and
          (subject, attribute) in dedup_set):
        continue

      dedup_set.add((subject, attribute))
      result.setdefault(subject, []).append(
          (attribute, value, document["timestamp"]))

    return result.iteritems()

  def Size(self):
    """Return the storage size of the database in bytes."""
    info = self.db_handle.command("dbStats")
    return info["storageSize"]

  def Transaction(self, subject, lease_time=None, token=None):
    """Return a transaction object holding a lock on subject."""
    return MongoTransaction(self, subject, lease_time=lease_time, token=token)
class MongoTransaction(data_store.CommonTransaction):
  """The Mongo data store transaction object.

  This object does not aim to ensure ACID like consistently. We only ensure that
  two simultaneous locks can not be held on the same RDF subject.

  This means that the first thread which grabs the lock is considered the owner
  of the transaction. Any subsequent transactions on the same subject will fail
  immediately with data_store.TransactionError.

  A lock is considered expired after a certain time.
  """

  # Serializes lock-row creation within this process only; cross-process
  # races are merely reduced, not eliminated (see __init__).
  lock_creation_lock = threading.Lock()

  # True once this instance actually holds the lock on the subject.
  locked = False

  def __init__(self, store, subject, lease_time=None, token=None):
    """Ensure we can take a lock on this subject."""
    super(MongoTransaction, self).__init__(store, subject,
                                           lease_time=lease_time, token=token)
    # Stable 12-byte document id derived from the subject's sha256 hash, so
    # every process computes the same lock-row id for a given subject.
    self.object_id = objectid.ObjectId(
        hashlib.sha256(utils.SmartStr(self.subject)).digest()[:12])
    if lease_time is None:
      lease_time = config_lib.CONFIG["Datastore.transaction_timeout"]
    self.expires = time.time() + lease_time
    # Atomically take over the existing lock row, but only if its lease has
    # already expired (expires < now).
    self.document = self.store.latest_collection.find_and_modify(
        query={"_id": self.object_id, "expires": {"$lt": time.time()}},
        update=dict(_id=self.object_id, expires=self.expires),
        upsert=False, new=True)

    if self.document:
      # Old transaction expired and we hold a lock now:
      self.locked = True
      return

    # Maybe the lock did not exist yet. To create it, we use a lock to reduce
    # the chance of deleting some other lock created at the same time. Note that
    # there still exists a very small race if this happens in multiple processes
    # at the same time.
    with self.lock_creation_lock:
      document = self.store.latest_collection.find({"_id": self.object_id})
      if not document.count():
        # No lock row at all: create one with our lease.
        self.UpdateLease(lease_time)

        cursor = self.store.latest_collection.find({"_id": self.object_id})
        if cursor.count() != 1:
          # Another process created a lock row concurrently - back off.
          self._DeleteLock()
          logging.warn("Multiple lock rows for %s", subject)
          raise data_store.TransactionError("Error while locking %s." % subject)

        self.document = cursor.next()

        # If the stored lease is not ours, someone else won the race.
        if self.document["expires"] != self.expires:
          raise data_store.TransactionError("Subject %s is locked" % subject)

        # We hold a lock now:
        self.locked = True
        return

    # A live (unexpired) lock row exists and is owned by someone else.
    raise data_store.TransactionError("Subject %s is locked" % subject)

  def UpdateLease(self, duration):
    """Extend our lease on the lock by duration seconds from now."""
    self.expires = time.time() + duration
    self.store.latest_collection.save(
        dict(_id=self.object_id, expires=self.expires))
    if self.document:
      self.document["expires"] = self.expires

  def Abort(self):
    # Abandon the transaction, releasing the lock without committing.
    if self.locked:
      self._RemoveLock()

  def Commit(self):
    # Flush buffered changes (done by the base class), then release the lock.
    if self.locked:
      super(MongoTransaction, self).Commit()
      self._RemoveLock()

  def _RemoveLock(self):
    # Remove the lock on the document.
    # Matching on the full document means this fails if our lease was
    # overridden by someone else in the meantime.
    if not self.store.latest_collection.find_and_modify(
        query=self.document, update=dict(_id=self.object_id, expires=0)):
      raise data_store.TransactionError("Lock was overridden for %s." %
                                        self.subject)

    self.locked = False

  def _DeleteLock(self):
    # Deletes the lock entirely from the document.
    document = dict(_id=self.object_id, expires=self.expires)
    if not self.store.latest_collection.remove(query=document):
      raise data_store.TransactionError(
          "Could not remove lock for %s." % self.subject)

    self.locked = False
def Decode(document):
  """Decode the stored value from a mongo document.

  Checks the typed fields in priority order: an integer value, then a
  unicode string value, then the raw binary "value" field coerced to str.
  """
  for field in ("int_value", "str_value"):
    stored = document.get(field)
    if stored is not None:
      return stored
  return str(document.get("value"))
def _Encode(document, value):
  """Encodes the value into the document.

  Protobuf-like objects are serialized first; the (possibly serialized)
  value is then stored in a type-specific field: "int_value" for integers,
  "value" (BSON binary) for byte strings, "str_value" (unicode) otherwise.

  Args:
    document: The mongo document which will receive this new value.
    value: A value to be encoded in the database.

  Returns:
    The modified document.
  """
  if hasattr(value, "SerializeToDataStore"):
    payload = value.SerializeToDataStore()
  elif hasattr(value, "SerializeToString"):
    payload = value.SerializeToString()
  else:
    payload = value

  if isinstance(payload, (long, int)):
    document["int_value"] = payload
  elif isinstance(payload, str):
    document["value"] = binary.Binary(payload)
  else:
    document["str_value"] = utils.SmartUnicode(payload)

  return document
| |
import re
from zope.interface import implementer
from pyramid.interfaces import (
IRoutesMapper,
IRoute,
)
from pyramid.compat import (
PY3,
native_,
text_,
text_type,
string_types,
binary_type,
is_nonstr_iter,
decode_path_info,
)
from pyramid.exceptions import URLDecodeError
from pyramid.traversal import (
quote_path_segment,
split_path_info,
)
_marker = object()
@implementer(IRoute)
class Route(object):
    """A single named route: its pattern, compiled matcher/generator pair,
    root factory, predicates and pregenerator."""

    def __init__(self, name, pattern, factory=None, predicates=(),
                 pregenerator=None):
        self.name = name
        self.pattern = pattern
        self.path = pattern # indefinite b/w compat, not in interface
        self.factory = factory
        self.predicates = predicates
        self.pregenerator = pregenerator
        # Compile the pattern once; match/generate are plain callables.
        self.match, self.generate = _compile_route(pattern)
@implementer(IRoutesMapper)
class RoutesMapper(object):
    """Maps request paths to connected routes, in registration order."""

    def __init__(self):
        self.routelist = []
        self.routes = {}

    def has_routes(self):
        return len(self.routelist) > 0

    def get_routes(self):
        return self.routelist

    def get_route(self, name):
        return self.routes.get(name)

    def connect(self, name, pattern, factory=None, predicates=(),
                pregenerator=None, static=False):
        """Register a route; re-connecting an existing name replaces it."""
        previous = self.routes.get(name)
        if previous is not None and previous in self.routelist:
            self.routelist.remove(previous)
        route = Route(name, pattern, factory, predicates, pregenerator)
        # Static routes are usable for URL generation only, never matching.
        if not static:
            self.routelist.append(route)
        self.routes[name] = route
        return route

    def generate(self, name, kw):
        return self.routes[name].generate(kw)

    def __call__(self, request):
        environ = request.environ
        try:
            # empty if mounted under a path in mod_wsgi, for example
            path = decode_path_info(environ['PATH_INFO'] or '/')
        except KeyError:
            path = '/'
        except UnicodeDecodeError as e:
            raise URLDecodeError(e.encoding, e.object, e.start, e.end, e.reason)
        # First route whose pattern matches and whose predicates all pass wins.
        for route in self.routelist:
            match = route.match(path)
            if match is None:
                continue
            info = {'match': match, 'route': route}
            predicates = route.predicates
            if predicates and not all(p(info, request) for p in predicates):
                continue
            return info
        return {'route': None, 'match': None}
# stolen from bobo and modified
# Matches old-style ":name" placeholders, e.g. "/archives/:action/:article".
old_route_re = re.compile(r'(\:[_a-zA-Z]\w*)')
# Matches a "*remainder" stararg at the very end of a pattern.
star_at_end = re.compile(r'\*(\w*)$')
# The tortuous nature of the regex named ``route_re`` below is due to the
# fact that we need to support at least one level of "inner" squigglies
# inside the expr of a {name:expr} pattern. This regex used to be just
# (\{[a-zA-Z][^\}]*\}) but that choked when supplied with e.g. {foo:\d{4}}.
route_re = re.compile(r'(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})')
def update_pattern(matchobj):
    """Rewrite an old-style ``:name`` placeholder match as ``{name}``."""
    return '{%s}' % matchobj.group(0)[1:]
def _compile_route(route):
    """Compile a route pattern into a ``(matcher, generator)`` pair.

    ``matcher(path)`` returns a dict of placeholder values (with the stararg
    remainder, if any, split into path segments) or ``None`` if the path does
    not match.  ``generator(values)`` builds a URL-quoted path string from a
    dict of placeholder values.
    """
    # This function really wants to consume Unicode patterns natively, but if
    # someone passes us a bytestring, we allow it by converting it to Unicode
    # using the ASCII decoding. We decode it using ASCII because we don't
    # want to accept bytestrings with high-order characters in them here as
    # we have no idea what the encoding represents.
    if route.__class__ is not text_type:
        try:
            route = text_(route, 'ascii')
        except UnicodeDecodeError:
            raise ValueError(
                'The pattern value passed to add_route must be '
                'either a Unicode string or a plain string without '
                'any non-ASCII characters (you provided %r).' % route)
    # Upgrade old-style ":name" placeholders to "{name}" unless the pattern
    # already uses new-style markers.
    if old_route_re.search(route) and not route_re.search(route):
        route = old_route_re.sub(update_pattern, route)
    if not route.startswith('/'):
        route = '/' + route
    remainder = None
    if star_at_end.search(route):
        route, remainder = route.rsplit('*', 1)
    # Split into alternating literal / "{name[:expr]}" pieces.
    pat = route_re.split(route)
    # every element in "pat" will be Unicode (regardless of whether the
    # route_re regex pattern is itself Unicode or str)
    pat.reverse()
    rpat = []
    gen = []
    prefix = pat.pop() # invar: always at least one element (route='/'+route)
    # We want to generate URL-encoded URLs, so we url-quote the prefix, being
    # careful not to quote any embedded slashes. We have to replace '%' with
    # '%%' afterwards, as the strings that go into "gen" are used as string
    # replacement targets.
    gen.append(quote_path_segment(prefix, safe='/').replace('%', '%%')) # native
    rpat.append(re.escape(prefix)) # unicode
    while pat:
        name = pat.pop() # unicode
        name = name[1:-1]
        if ':' in name:
            # reg may contain colons as well,
            # so we must strictly split name into two parts
            name, reg = name.split(':', 1)
        else:
            reg = '[^/]+'
        gen.append('%%(%s)s' % native_(name)) # native
        name = '(?P<%s>%s)' % (name, reg) # unicode
        rpat.append(name)
        s = pat.pop() # unicode
        if s:
            rpat.append(re.escape(s)) # unicode
            # We want to generate URL-encoded URLs, so we url-quote this
            # literal in the pattern, being careful not to quote the embedded
            # slashes. We have to replace '%' with '%%' afterwards, as the
            # strings that go into "gen" are used as string replacement
            # targets. What is appended to gen is a native string.
            gen.append(quote_path_segment(s, safe='/').replace('%', '%%'))
    if remainder:
        rpat.append('(?P<%s>.*?)' % remainder) # unicode
        gen.append('%%(%s)s' % native_(remainder)) # native
    pattern = ''.join(rpat) + '$' # unicode
    match = re.compile(pattern).match
    def matcher(path):
        """Match ``path`` against the compiled pattern; return a dict of
        placeholder values or ``None``."""
        # This function really wants to consume Unicode patterns natively,
        # but if someone passes us a bytestring, we allow it by converting it
        # to Unicode using the ASCII decoding. We decode it using ASCII
        # because we don't want to accept bytestrings with high-order
        # characters in them here as we have no idea what the encoding
        # represents.
        if path.__class__ is not text_type:
            path = text_(path, 'ascii')
        m = match(path)
        if m is None:
            return None
        d = {}
        for k, v in m.groupdict().items():
            # k and v will be Unicode 2.6.4 and lower doesnt accept unicode
            # kwargs as **kw, so we explicitly cast the keys to native
            # strings in case someone wants to pass the result as **kw
            nk = native_(k, 'ascii')
            if k == remainder:
                d[nk] = split_path_info(v)
            else:
                d[nk] = v
        return d
    gen = ''.join(gen)
    def generator(dict):
        """Build a URL-quoted path from a dict of placeholder values."""
        newdict = {}
        for k, v in dict.items():
            if PY3: # pragma: no cover
                if v.__class__ is binary_type:
                    # url_quote below needs a native string, not bytes on Py3
                    v = v.decode('utf-8')
            else:
                if v.__class__ is text_type:
                    # url_quote below needs bytes, not unicode on Py2
                    v = v.encode('utf-8')
            if k == remainder:
                # a stararg argument
                if is_nonstr_iter(v):
                    v = '/'.join(
                        [quote_path_segment(x, safe='/') for x in v]
                    ) # native
                else:
                    if v.__class__ not in string_types:
                        v = str(v)
                    v = quote_path_segment(v, safe='/')
            else:
                if v.__class__ not in string_types:
                    v = str(v)
                # v may be bytes (py2) or native string (py3)
                v = quote_path_segment(v, safe='/')
            # at this point, the value will be a native string
            newdict[k] = v
        result = gen % newdict # native string result
        return result
    return matcher, generator
| |
import re
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
field_size_re = re.compile(r'^\s*(?:var)?char\s*\(\s*(\d+)\s*\)\s*$')


def get_field_size(name):
    """Return the size from a "varchar(11)"-style type name, or None."""
    match = field_size_re.search(name)
    if match is None:
        return None
    return int(match.group(1))
# This light wrapper "fakes" a dictionary interface, because some SQLite data
# types include variables in them -- e.g. "varchar(30)" -- and can't be matched
# as a simple dictionary lookup.
class FlexibleFieldLookupDict(object):
    """Read-only, case-insensitive mapping from SQLite column types to
    Django field descriptions."""

    # Maps SQL types to Django Field types. Some of the SQL types have multiple
    # entries here because SQLite allows for anything and doesn't normalize the
    # field type; it uses whatever was given.
    base_data_types_reverse = {
        'bool': 'BooleanField',
        'boolean': 'BooleanField',
        'smallint': 'SmallIntegerField',
        'smallint unsigned': 'PositiveSmallIntegerField',
        'smallinteger': 'SmallIntegerField',
        'int': 'IntegerField',
        'integer': 'IntegerField',
        'bigint': 'BigIntegerField',
        'integer unsigned': 'PositiveIntegerField',
        'decimal': 'DecimalField',
        'real': 'FloatField',
        'text': 'TextField',
        'char': 'CharField',
        'blob': 'BinaryField',
        'date': 'DateField',
        'datetime': 'DateTimeField',
        'time': 'TimeField',
    }

    def __getitem__(self, key):
        normalized = key.lower()
        if normalized in self.base_data_types_reverse:
            return self.base_data_types_reverse[normalized]
        # Sized character types such as "varchar(30)" carry their max_length.
        size = get_field_size(normalized)
        if size is not None:
            return ('CharField', {'max_length': size})
        raise KeyError
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Introspection for the SQLite backend.

    SQLite exposes little structured metadata, so several methods below parse
    the raw CREATE TABLE SQL stored in the sqlite_master table.
    """

    data_types_reverse = FlexibleFieldLookupDict()

    def get_table_list(self, cursor):
        """
        Returns a list of table and view names in the current database.
        """
        # Skip the sqlite_sequence system table used for autoincrement key
        # generation.
        cursor.execute("""
            SELECT name, type FROM sqlite_master
            WHERE type in ('table', 'view') AND NOT name='sqlite_sequence'
            ORDER BY name""")
        # row[1][0] is 't' for tables and 'v' for views.
        return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        return [FieldInfo(info['name'], info['type'], None, info['size'], None, None,
                          info['null_ok']) for info in self._table_info(cursor, table_name)]

    def column_name_converter(self, name):
        """
        SQLite will in some cases, e.g. when returning columns from views and
        subselects, return column names in 'alias."column"' format instead of
        simply 'column'.

        Affects SQLite < 3.7.15, fixed by http://www.sqlite.org/src/info/5526e0aa3c
        """
        # TODO: remove when SQLite < 3.7.15 is sufficiently old.
        # 3.7.13 ships in Debian stable as of 2014-03-21.
        if self.connection.Database.sqlite_version_info < (3, 7, 15):
            return name.split('.')[-1].strip('"')
        else:
            return name

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: (other_field_name, other_table)}
        representing all relationships to the given table.
        """
        # Dictionary of relations to return
        relations = {}

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        try:
            results = cursor.fetchone()[0].strip()
        except TypeError:
            # It might be a view, then no results will be returned
            return relations
        # Keep only the column definitions between the outermost parentheses.
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            # Raw string: '\S' and '\(' are regex escapes, not string escapes
            # (a non-raw literal here raises a deprecation/syntax warning on
            # modern Python).
            m = re.search(r'references (\S*) ?\(["|]?(.*)["|]?\)', field_desc, re.I)
            if not m:
                continue
            table, column = [s.strip('"') for s in m.groups()]

            if field_desc.startswith("FOREIGN KEY"):
                # Find name of the target FK field
                m = re.match(r'FOREIGN KEY\(([^\)]*)\).*', field_desc, re.I)
                field_name = m.groups()[0].strip('"')
            else:
                field_name = field_desc.split()[0].strip('"')

            cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s", [table])
            result = cursor.fetchall()[0]
            other_table_results = result[0].strip()
            li, ri = other_table_results.index('('), other_table_results.rindex(')')
            other_table_results = other_table_results[li + 1:ri]

            for other_desc in other_table_results.split(','):
                other_desc = other_desc.strip()
                if other_desc.startswith('UNIQUE'):
                    continue

                other_name = other_desc.split(' ', 1)[0].strip('"')
                if other_name == column:
                    relations[field_name] = (other_name, table)
                    break

        return relations

    def get_key_columns(self, cursor, table_name):
        """
        Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
        key columns in given table.
        """
        key_columns = []

        # Schema for this table
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        results = cursor.fetchone()[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]

        # Walk through and look for references to other tables. SQLite doesn't
        # really have enforced references, but since it echoes out the SQL used
        # to create the table we can look for REFERENCES statements used there.
        for field_index, field_desc in enumerate(results.split(',')):
            field_desc = field_desc.strip()
            if field_desc.startswith("UNIQUE"):
                continue

            # Raw string so the regex escapes survive unmangled.
            m = re.search(r'"(.*)".*references (.*) \(["|](.*)["|]\)', field_desc, re.I)
            if not m:
                continue

            # This will append (column_name, referenced_table_name, referenced_column_name) to key_columns
            key_columns.append(tuple(s.strip('"') for s in m.groups()))

        return key_columns

    def get_indexes(self, cursor, table_name):
        """Return {column_name: {'primary_key': bool, 'unique': bool}} for
        single-column indexes on the given table."""
        indexes = {}
        for info in self._table_info(cursor, table_name):
            if info['pk'] != 0:
                indexes[info['name']] = {'primary_key': True,
                                         'unique': False}
        cursor.execute('PRAGMA index_list(%s)' % self.connection.ops.quote_name(table_name))
        # seq, name, unique
        for index, unique in [(field[1], field[2]) for field in cursor.fetchall()]:
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            info = cursor.fetchall()
            # Skip indexes across multiple fields
            if len(info) != 1:
                continue
            name = info[0][2]  # seqno, cid, name
            indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
                             'unique': unique}
        return indexes

    def get_primary_key_column(self, cursor, table_name):
        """
        Get the column name of the primary key for the given table.
        """
        # Don't use PRAGMA because that causes issues with some transactions
        cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
        row = cursor.fetchone()
        if row is None:
            raise ValueError("Table %s does not exist" % table_name)
        results = row[0].strip()
        results = results[results.index('(') + 1:results.rindex(')')]
        for field_desc in results.split(','):
            field_desc = field_desc.strip()
            m = re.search('"(.*)".*PRIMARY KEY( AUTOINCREMENT)?$', field_desc)
            if m:
                return m.groups()[0]
        return None

    def _table_info(self, cursor, name):
        """Return PRAGMA table_info rows as a list of dicts."""
        cursor.execute('PRAGMA table_info(%s)' % self.connection.ops.quote_name(name))
        # cid, name, type, notnull, dflt_value, pk
        return [{'name': field[1],
                 'type': field[2],
                 'size': get_field_size(field[2]),
                 'null_ok': not field[3],
                 'pk': field[5]  # undocumented
                 } for field in cursor.fetchall()]

    def get_constraints(self, cursor, table_name):
        """
        Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
        """
        constraints = {}
        # Get the index info
        cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
        for row in cursor.fetchall():
            # Sqlite3 3.8.9+ has 5 columns, however older versions only give 3
            # columns. Discard last 2 columns if there.
            number, index, unique = row[:3]
            # Get the index info for that index
            cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
            for index_rank, column_rank, column in cursor.fetchall():
                if index not in constraints:
                    constraints[index] = {
                        "columns": [],
                        "primary_key": False,
                        "unique": bool(unique),
                        "foreign_key": False,
                        "check": False,
                        "index": True,
                    }
                constraints[index]['columns'].append(column)
        # Get the PK
        pk_column = self.get_primary_key_column(cursor, table_name)
        if pk_column:
            # SQLite doesn't actually give a name to the PK constraint,
            # so we invent one. This is fine, as the SQLite backend never
            # deletes PK constraints by name, as you can't delete constraints
            # in SQLite; we remake the table with a new PK instead.
            constraints["__primary__"] = {
                "columns": [pk_column],
                "primary_key": True,
                "unique": False,  # It's not actually a unique constraint.
                "foreign_key": False,
                "check": False,
                "index": False,
            }
        return constraints
| |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import ugettext_lazy as _
class AbstractHuman(models.Model):
    """Simple Abstract Human model

    Note that this model may be linked to django registered users
    """

    first_name = models.CharField(_("First name"), max_length=100)
    last_name = models.CharField(_("Last name"), max_length=100)
    # Filled automatically from first_name in save() unless already set.
    first_initial = models.CharField(_("First Initial(s)"), max_length=10, blank=True)

    # This is a django user
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, blank=True, null=True, on_delete=models.CASCADE
    )
    # Optional pointer to another record of the same model that this one is
    # an alternate name for.
    alias = models.ForeignKey(
        "self",
        on_delete=models.CASCADE,
        related_name="aliases",
        related_query_name="alias_human",
        blank=True,
        null=True,
    )

    class Meta:
        abstract = True

    def __str__(self):
        return self.get_formatted_name()

    def save(self, *args, **kwargs):
        """Set initials and try to set django user before saving"""
        self._set_first_initial()
        self._set_user()
        super(AbstractHuman, self).save(*args, **kwargs)

    def _set_first_initial(self, force=False):
        """Set author first name initial

        Joins the first letter of each word in first_name (e.g.
        "Jean Claude" -> "J C"). Does nothing if first_initial is already
        set, unless force is True.
        """
        if self.first_initial and not force:
            return
        self.first_initial = " ".join([c[0] for c in self.first_name.split()])

    def get_formatted_name(self):
        """Return author formated full name, e.g. Maupetit J"""
        return "%s %s" % (self.last_name, self.first_initial)

    def _set_user(self):
        """Look for local django user based on human name"""
        # Both name parts are required to attempt a match.
        if "" in (self.last_name, self.first_name):
            return
        self._set_first_initial()

        User = get_user_model()
        try:
            # Match: exact last name AND (exact first name OR a first name
            # starting with the first initial letter).
            self.user = User.objects.get(
                models.Q(last_name__iexact=self.last_name),
                models.Q(first_name__iexact=self.first_name)
                | models.Q(first_name__istartswith=self.first_initial[0]),
            )
        except User.DoesNotExist:
            pass
        except User.MultipleObjectsReturned:
            # Ambiguous match: leave self.user unset rather than guess.
            pass
class Author(AbstractHuman):
    """Entry author"""

    class Meta:
        # Alphabetical ordering for querysets and admin listings.
        ordering = ("last_name", "first_name")
        verbose_name = _("Author")
        verbose_name_plural = _("Authors")
class Editor(AbstractHuman):
    """Journal or book editor"""

    class Meta:
        # Alphabetical ordering for querysets and admin listings.
        ordering = ("last_name", "first_name")
        verbose_name = _("Editor")
        verbose_name_plural = _("Editors")
class AbstractEntity(models.Model):
    """Simple abstract entity

    Base for named entities (journals, publishers) with an optional
    abbreviated name.
    """

    name = models.CharField(_("Name"), max_length=150)
    abbreviation = models.CharField(
        _("Entity abbreviation"),
        max_length=100,
        blank=True,
        help_text=_("e.g. Proc Natl Acad Sci U S A"),
    )

    class Meta:
        abstract = True

    def __str__(self):
        return self.name
class Journal(AbstractEntity):
    """Peer reviewed journal"""

    # Inherits name/abbreviation from AbstractEntity; only metadata here.
    class Meta:
        verbose_name = _("Journal")
        verbose_name_plural = _("Journals")
class Publisher(AbstractEntity):
    """Journal or book publisher"""

    # Inherits name/abbreviation from AbstractEntity; only metadata here.
    class Meta:
        verbose_name = _("Publisher")
        verbose_name_plural = _("Publishers")
class Entry(models.Model):
    """The core model for references
    Largely guided by the BibTeX file format (see
    http://en.wikipedia.org/wiki/BibTeX).
    Unsupported fields (for now):
    * eprint: A specification of an electronic publication, often a preprint
      or a technical report
    * howpublished: How it was published, if the publishing method is
      nonstandard
    * institution: The institution that was involved in the publishing, but not
      necessarily the publisher
    * key: A hidden field used for specifying or overriding the alphabetical
      order of entries (when the "author" and "editor" fields are missing).
      Note that this is very different from the key (mentioned just after this
      list) that is used to cite or cross-reference the entry.
    * series: The series of books the book was published in (e.g. "The Hardy
      Boys" or "Lecture Notes in Computer Science")
    * type: The field overriding the default type of publication (e.g.
      "Research Note" for techreport, "{PhD} dissertation" for phdthesis,
      "Section" for inbook/incollection)
    """
    # BibTeX entry-type identifiers (stored values for the 'type' field).
    ARTICLE = "article"
    BOOK = "book"
    BOOKLET = "booklet"
    CONFERENCE = "conference"
    INBOOK = "inbook"
    INCOLLECTION = "incollection"
    INPROCEEDINGS = "inproceedings"
    MANUAL = "manual"
    MASTERSTHESIS = "mastersthesis"
    MISC = "misc"
    PHDTHESIS = "phdthesis"
    PROCEEDINGS = "proceedings"
    TECHREPORT = "techreport"
    UNPUBLISHED = "unpublished"
    # (stored value, translated label) pairs for the 'type' field choices.
    ENTRY_TYPES_CHOICES = (
        (ARTICLE, _("Article")),
        (BOOK, _("Book")),
        (BOOKLET, _("Book (no publisher)")),
        (CONFERENCE, _("Conference")),
        (INBOOK, _("Book chapter")),
        (INCOLLECTION, _("Book from a collection")),
        (INPROCEEDINGS, _("Conference proceedings article")),
        (MANUAL, _("Technical documentation")),
        (MASTERSTHESIS, _("Master's Thesis")),
        (MISC, _("Miscellaneous")),
        (PHDTHESIS, _("PhD Thesis")),
        (PROCEEDINGS, _("Conference proceedings")),
        (TECHREPORT, _("Technical report")),
        (UNPUBLISHED, _("Unpublished work")),
    )
    # Named after the BibTeX field; shadows the builtin 'type' only inside
    # this class body.
    type = models.CharField(
        _("Entry type"), max_length=50, choices=ENTRY_TYPES_CHOICES, default=ARTICLE
    )
    # Base fields
    title = models.CharField(_("Title"), max_length=255)
    # Author ordering lives on the AuthorEntryRank through-model; use
    # get_authors() to retrieve authors in rank order.
    authors = models.ManyToManyField(
        "Author", related_name="entries", through="AuthorEntryRank"
    )
    # NOTE(review): blank=True without null=True on a ForeignKey lets forms
    # leave the field empty while the database still requires a value —
    # confirm this combination is intended.
    journal = models.ForeignKey(
        "Journal", related_name="entries", blank=True, on_delete=models.CASCADE
    )
    publication_date = models.DateField(_("Publication date"), null=True)
    # Flags dates where only part of the value (e.g. the year) is reliable.
    is_partial_publication_date = models.BooleanField(
        _("Partial publication date?"),
        default=True,
        help_text=_(
            "Check this if the publication date is incomplete (for example "
            "if only the year is valid)"
        ),
    )
    volume = models.CharField(
        _("Volume"),
        max_length=50,
        blank=True,
        help_text=_("The volume of a journal or multi-volume book"),
    )
    number = models.CharField(
        _("Number"),
        max_length=50,
        blank=True,
        help_text=_(
            "The '(issue) number' of a journal, magazine, or tech-report, if "
            "applicable. (Most publications have a 'volume', but no 'number' "
            "field.)"
        ),
    )
    pages = models.CharField(
        _("Pages"),
        max_length=50,
        blank=True,
        help_text=_("Page numbers, separated either by commas or " "double-hyphens"),
    )
    url = models.URLField(
        _("URL"), blank=True, help_text=_("The WWW address where to find this resource")
    )
    # Identifiers
    doi = models.CharField(
        _("DOI"),
        max_length=100,
        blank=True,
        help_text=_("Digital Object Identifier for this resource"),
    )
    issn = models.CharField(
        _("ISSN"),
        max_length=20,
        blank=True,
        help_text=_("International Standard Serial Number"),
    )
    isbn = models.CharField(
        _("ISBN"),
        max_length=20,
        blank=True,
        help_text=_("International Standard Book Number"),
    )
    pmid = models.CharField(
        _("PMID"), blank=True, max_length=20, help_text=_("Pubmed ID")
    )
    # Book
    booktitle = models.CharField(
        _("Book title"),
        max_length=50,
        blank=True,
        help_text=_("The title of the book, if only part of it is being cited"),
    )
    edition = models.CharField(
        _("Edition"),
        max_length=100,
        blank=True,
        help_text=_(
            "The edition of a book, long form (such as 'First' or " "'Second')"
        ),
    )
    chapter = models.CharField(_("Chapter number"), max_length=50, blank=True)
    # PhD Thesis
    school = models.CharField(
        _("School"),
        max_length=50,
        blank=True,
        help_text=_("The school where the thesis was written"),
    )
    # Proceedings
    organization = models.CharField(
        _("Organization"),
        max_length=50,
        blank=True,
        help_text=_("The conference sponsor"),
    )
    # Misc
    editors = models.ManyToManyField("Editor", related_name="entries", blank=True)
    publisher = models.ForeignKey(
        "Publisher",
        related_name="entries",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    address = models.CharField(
        _("Address"),
        max_length=250,
        blank=True,
        help_text=_(
            "Publisher's address (usually just the city, but can be the full "
            "address for lesser-known publishers)"
        ),
    )
    annote = models.CharField(
        _("Annote"),
        max_length=250,
        blank=True,
        help_text=_("An annotation for annotated bibliography styles (not typical)"),
    )
    note = models.TextField(
        _("Note"), blank=True, help_text=_("Miscellaneous extra information")
    )
    # Related publications
    crossref = models.ManyToManyField("self", blank=True)
    class Meta:
        verbose_name = _("Entry")
        verbose_name_plural = _("Entries")
        # Newest publications first by default.
        ordering = ("-publication_date",)
    def __str__(self):
        """Format entry with a default bibliography style"""
        # Authors
        # NOTE(review): assumes Author instances carry 'last_name' and
        # 'first_initial' attributes — confirm against the Author model.
        author_str = "%(last_name)s %(first_initial)s"
        s = ", ".join([author_str % a.__dict__ for a in self.get_authors()])
        s = ", and ".join(s.rsplit(", ", 1))  # last author case
        s += ", "
        # Title
        s += '"%(title)s", ' % self.__dict__
        # Journal
        if self.journal.abbreviation:
            s += "in %(abbreviation)s, " % self.journal.__dict__
        else:
            # fall back to the real name
            s += "in %(name)s, " % self.journal.__dict__
        # Misc
        if self.volume and self.pages:
            s += "vol. %(volume)s, pp. %(pages)s, " % self.__dict__
        if self.publication_date:
            s += "%s." % self.publication_date.strftime("%B %Y")
        return s
    def _get_first_author(self):
        """
        Get this entry first author
        """
        # Empty string (not None) when the entry has no authors.
        if not len(self.get_authors()):
            return ""
        return self.get_authors()[0]
    first_author = property(_get_first_author)
    def _get_last_author(self):
        """
        Get this entry last author
        """
        # Empty string (not None) when the entry has no authors.
        if not len(self.get_authors()):
            return ""
        return self.get_authors()[-1]
    last_author = property(_get_last_author)
    def get_authors(self):
        """
        Get ordered authors list
        Note that authorentryrank_set is ordered as expected while the authors
        queryset is not (M2M with a through case).
        """
        return [aer.author for aer in self.authorentryrank_set.all()]
class Collection(models.Model):
    """Define a collection of entries"""
    name = models.CharField(_("Name"), max_length=100)
    # Optional free-form description; both blank (forms) and NULL (database)
    # are allowed.
    short_description = models.TextField(_("Short description"), blank=True, null=True)
    entries = models.ManyToManyField("Entry", related_name="collections")
    class Meta:
        verbose_name = _("Collection")
        verbose_name_plural = _("Collections")
    def __str__(self):
        # A collection is identified by its name alone.
        return self.name
class AuthorEntryRank(models.Model):
    """Through-model ordering the Entry <-> Author many-to-many relation.

    Records the position of one author within one entry's author sequence.
    """
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    entry = models.ForeignKey(Entry, on_delete=models.CASCADE)
    rank = models.IntegerField(
        _("Rank"), help_text=_("Author rank in entry authors sequence")
    )
    class Meta:
        verbose_name = _("Author Entry Rank")
        verbose_name_plural = _("Author Entry Ranks")
        ordering = ("rank",)
    def __str__(self):
        # Compact "<author>:<rank>:<entry>" representation.
        values = {"author": self.author, "entry": self.entry, "rank": self.rank}
        return "%(author)s:%(rank)d:%(entry)s" % values
| |
import urllib
import urllib2
import json
import logging
import info
import re
import base64
import simplejson
import zlib
from HTMLParser import HTMLParser
# Endpoints of the (unofficial) Xiami web API; the "*_temp" templates have
# '%s'/'%d' slots filled with song/artist ids via the '%' operator.
lyric_url = "http://www.xiami.com/radio/lyric"
related_info_url = "http://www.xiami.com/radio/relate-info"
get_hq_url_temp = "http://www.xiami.com/song/gethqsong/sid/%s"
similar_artists_url_temp = "http://www.xiami.com/ajax/similar-artists?id=%s&c=%d"
song_url_temp = "http://www.xiami.com/song/%s"
# Pulls the numeric artist id out of an '/artist/<id>' href.
artist_id_rex = re.compile("/artist/([0-9]+)")
song_info_url_temp = "http://www.xiami.com/song/playlist/id/%s/object_name/default/object_id/0/cat/json"  # noqa
# Module-wide logger for request/parse diagnostics.
logger = logging.getLogger('song')
class SongPageParser(HTMLParser):
    """Extract song metadata from a Xiami song page.

    Collects the Open Graph <meta> values (title/artist/album/image), the
    album id from the '#albumCover' link, and the artist id from links
    inside the <div id="nav"> element. Attributes stay None when the page
    lacks the corresponding element.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        # Parsed results (filled in as the page is fed).
        self.title = None
        self.artist = None
        self.album = None
        self.image = None
        self.album_id = None
        self.artist_id = None
        # True while parsing is inside the <div id="nav"> element.
        self.in_nav = False
    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if tag == 'meta':
            # Open Graph metadata. Use dict.get() instead of the original
            # bare try/except: missing attributes are skipped explicitly and
            # unrelated errors are no longer silently swallowed.
            prop = attrs.get('property')
            content = attrs.get('content')
            if content is not None:
                if prop == 'og:title':
                    self.title = content
                elif prop == 'og:music:artist':
                    self.artist = content
                elif prop == 'og:music:album':
                    self.album = content
                elif prop == 'og:image':
                    self.image = content
        elif tag == 'a':
            href = attrs.get('href')
            if attrs.get('id') == 'albumCover' and href is not None:
                # Album id is the last path component of the cover link.
                self.album_id = href.split('/')[-1]
            if self.in_nav and href is not None:
                # Valueless href attributes parse as None; skip them instead
                # of passing None to the regex (would raise TypeError).
                ret = artist_id_rex.match(href)
                if ret:
                    self.artist_id = ret.group(1)
        elif tag == 'div':
            if attrs.get('id') == 'nav':
                self.in_nav = True
    def handle_endtag(self, tag):
        # Leave nav mode on the first closing </div>.
        if tag == 'div':
            if self.in_nav:
                self.in_nav = False
class Song(object):
    """A song on the Xiami music service.

    Instances are plain attribute bags populated from parsed API JSON
    (``load_info``) or a scraped song page (``load_info_from_page``).
    Expected attributes once info is loaded:
      * title, song_id, album_id, album_name, grade, artist, location,
        pic, length, artist_id, rec_note, hq_location
    """
    def __init__(self, parsed=None):
        """Copy every key of ``parsed`` (a dict) onto the instance.

        Defaults to None instead of a mutable ``{}`` default argument to
        avoid accidental state sharing between calls.
        """
        if parsed is None:
            parsed = {}
        for key in parsed:
            setattr(self, key, parsed[key])
        # The playlist API reply includes 'location'; its presence means the
        # full song info has already been loaded.
        self.info_loaded = hasattr(self, 'location')
    @staticmethod
    def from_id(song_id):
        """Build a stub Song that knows only its id; info loads lazily."""
        song_info = {'song_id': str(song_id)}
        return Song(song_info)
    @staticmethod
    def from_encoded(encoded):
        """Inverse of encode(); returns None for any undecodable input."""
        try:
            decoded = simplejson.loads(zlib.decompress(base64.b64decode(encoded)))
            song = Song()
            for key in decoded:
                setattr(song, key, decoded[key])
            return song
        except Exception:
            # Deliberate best-effort: corrupt input yields None. Narrowed
            # from a bare 'except:' so KeyboardInterrupt/SystemExit are no
            # longer swallowed.
            return None
    def encode(self):
        """Serialize all attributes to a compact base64 string."""
        return base64.b64encode(zlib.compress(simplejson.dumps(vars(self))))
    def dump_info(self):
        """Print a one-line summary: title and stream location."""
        # sys.stdout.write instead of the Python 2 'print' statement: same
        # output, but the module also parses under Python 3.
        import sys
        sys.stdout.write("%s %s\n" % (self.title, self.location))
    def get_title(self):
        """Return the display title ('songName' wins over 'title')."""
        if hasattr(self, 'songName'):
            return self.songName
        else:
            return self.title
    def get_lyric(self):
        """Fetch the lyric text for this song.

        Raises Exception when no song id is set.
        """
        if not hasattr(self, 'song_id'):
            raise Exception("missing song id")
        # use POST as the official one
        args = urllib.urlencode({'sid': self.song_id})
        lyric = urllib2.urlopen(lyric_url, args).read()
        return lyric
    def get_hq_location(self, state):
        """Fetch and decrypt a high-quality stream URL.

        The result must not be cached by callers: the URLs expire after a
        while, so this recomputes on every call.
        """
        if not hasattr(self, 'song_id'):
            raise Exception("missing song id")
        get_hq_url = get_hq_url_temp % self.song_id
        logger.debug("get hq req: %s" % get_hq_url)
        get_hq_req = urllib2.Request(get_hq_url)
        if 'player_path' in state:
            get_hq_req.add_header('Referer', state['player_path'])
        get_hq_rep = urllib2.urlopen(get_hq_req).read()
        try:
            get_hq_parsed = json.loads(get_hq_rep)
        except Exception:
            logger.exception("fail to parse get hq reply: %s", get_hq_rep)
            # bare 'raise' preserves the original traceback ('raise e' did not)
            raise
        if get_hq_parsed.get('status') != 1:
            # Use .get() in the message too: the original indexed ['status']
            # here and raised KeyError instead of the intended error when the
            # key was absent from the reply.
            raise Exception("fail to get hq url. status = %r"
                            % get_hq_parsed.get('status'))
        # should not be reused; timeout after a while
        self.hq_location = decrypt_location(get_hq_parsed['location'])
        return self.hq_location
    def get_related_info(self, state):
        """Fetch artist-related info (POST, mimicking the official client)."""
        if not hasattr(self, 'artist_id'):
            raise Exception("missing artist id")
        xiamitoken = info.get_xiamitoken(state)
        args = urllib.urlencode({'arid': self.artist_id, '_xiamitoken': xiamitoken})
        request = urllib2.Request(related_info_url)
        request.add_header('Referer', state['radio_page_path'])
        related_info = urllib2.urlopen(request, args).read()
        return related_info
    def get_similar_artists(self, count):
        """Return up to ``count`` similar artists as parsed JSON."""
        if not hasattr(self, 'artist_id'):
            raise Exception("missing artist id")
        similar_artists_url = similar_artists_url_temp % (self.artist_id, count)
        similar_artists = urllib2.urlopen(similar_artists_url).read()
        return json.loads(similar_artists)
    def get_song_url(self):
        """Return the public web page URL for this song."""
        if not hasattr(self, 'song_id'):
            raise Exception("missing song id")
        return song_url_temp % self.song_id
    def load_info(self):
        """Load full song info from the playlist API (no-op if loaded)."""
        if not hasattr(self, 'song_id'):
            raise Exception("missing song id")
        if self.info_loaded:
            return
        # Use the module logger: the original called logging.debug on the
        # root logger, inconsistent with the rest of this module.
        logger.debug("loading info of %s" % self.song_id)
        song_info_url = song_info_url_temp % self.song_id
        song_info_ret = urllib2.urlopen(song_info_url).read()
        song_info = json.loads(song_info_ret)
        if 'status' not in song_info or not song_info['status']:
            raise Exception("fail to load song info.%s"
                            % song_info.get('message', ""))
        my_info = song_info['data']['trackList'][0]
        # Re-run __init__ with the fetched dict; sets info_loaded as a
        # side effect (the reply contains 'location').
        self.__init__(my_info)
    def load_info_from_page(self):
        ''' Load info of this song, index by song_id (deprecated)'''
        # Still missing after this call:
        # * grade
        # * location
        # * length
        # * rec_note
        url = self.get_song_url()
        song_page = urllib2.urlopen(url).read().decode('utf-8')
        parser = SongPageParser()
        parser.feed(song_page)
        self.title = parser.title
        self.album_name = parser.album
        self.artist = parser.artist
        self.pic = parser.image
        self.album_id = parser.album_id
        self.artist_id = parser.artist_id
def decrypt_location(encrypted):
    """Unscramble the 'location' field of a gethqsong API reply.

    The payload is a columnar transposition: a leading decimal prefix gives
    the number of rows, and the remaining characters are read back column by
    column to rebuild the URL-quoted location string.
    NOTE(review): relies on Python 2 semantics ('/' floor division on ints,
    xrange, urllib.unquote) — under Python 3 this would need '//', range and
    urllib.parse.unquote.
    """
    output = ''
    # decryption method obtained from internet
    # characters of the URL is listed in a table vertically
    # and the encoding result is read out horizontally
    # first part is the number of rows
    i = 0
    while encrypted[i].isdigit():
        i += 1
    rows = int(encrypted[:i])
    encrypted = encrypted[i:]
    total_len = len(encrypted)
    # looks like this:
    # h******************** ^
    # t******************** | final_col_len
    # t******************** v
    # p*******************
    # %*******************
    # <- min_row_len ->
    r = 0  # current row index
    c = 0  # current column index
    pos = 0  # read position within 'encrypted'
    min_row_len = total_len / rows  # base row length (integer division)
    final_col_len = total_len % rows  # rows with index < this are 1 char longer
    for x in xrange(total_len):
        output += encrypted[pos]
        if r == rows - 1:
            # last row reached, reset to first row
            r = 0
            c += 1
            pos = c
        else:
            # move to next row
            pos += min_row_len
            if r < final_col_len:
                pos += 1
            r += 1
    # why 0 is replaced by ^.....
    return urllib.unquote(output).replace('^', '0')
| |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dhcp
import glob
import os.path
import subprocess
import sys
import wifi
client_regression = [
# devices for which we have a pcap but have decided not to add
# to the database
('', './testdata/pcaps/ASUS Transformer TF300 2.4GHz.pcap'),
('', './testdata/pcaps/Blackberry Bold 9930 2.4GHz GFRG210 Specific Probe.pcap'),
('', './testdata/pcaps/Blackberry Bold 9930 5GHz GFRG210 Specific Probe.pcap'),
('', './testdata/pcaps/HTC Evo 2.4GHz.pcap'),
('', './testdata/pcaps/HTC Incredible 2.4GHz.pcap'),
('', './testdata/pcaps/HTC Inspire 2.4GHz.pcap'),
('', './testdata/pcaps/HTC Sensation 2.4GHz.pcap'),
('', './testdata/pcaps/HTC Thunderbolt 2.4GHz.pcap'),
('', './testdata/pcaps/HTC Titan 2.4GHz.pcap'),
('', './testdata/pcaps/iPad Mini 4th gen 5GHz MK6L2LL Broadcast Probe.pcap'),
('', './testdata/pcaps/iPad Mini 4th gen 5GHz MK6L2LL Specific Probe.pcap'),
('', './testdata/pcaps/Lenovo_T440_80211ac_2x2_Windows8_2_4_GHz.pcap'),
('', './testdata/pcaps/LG E900 2.4GHz.pcap'),
('', './testdata/pcaps/LG G2X 2.4GHz.pcap'),
('', './testdata/pcaps/LG Revolution 2.4GHz.pcap'),
('', './testdata/pcaps/MediaTek MT7610U 2.4GHz.pcap'),
('', './testdata/pcaps/MacBook Air late 2014 (A1466) 5GHz.pcap'),
('', './testdata/pcaps/MacBook Pro early 2014 (A1502) 2.4GHz.pcap'),
('', './testdata/pcaps/MacBook Air late 2014 (A1466) 2.4GHz.pcap'),
('', './testdata/pcaps/MacBook Air late 2010 (A1369) 2.4GHz.pcap'),
('', './testdata/pcaps/MacBook Pro early 2014 (A1502) 5GHz.pcap'),
('', './testdata/pcaps/MacBook Air late 2010 (A1369) 5GHz.pcap'),
('', './testdata/pcaps/Motorola Droid 2 2.4GHz.pcap'),
('', './testdata/pcaps/Motorola Droid 3 2.4GHz.pcap'),
('', './testdata/pcaps/Motorola Droid Razr 2.4GHz XT910 Broadcast Probe.pcap'),
('', './testdata/pcaps/Motorola Droid Razr 2.4GHz XT910 Specific Probe.pcap'),
('', './testdata/pcaps/Motorola Droid Razr 2.4GHz XT910.pcap'),
('', './testdata/pcaps/Motorola Droid Razr 5GHz XT910.pcap'),
('', './testdata/pcaps/Motorola Droid Razr Maxx 2.4GHz.pcap'),
('', './testdata/pcaps/Nexus One 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Charge 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Captivate 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Continuum 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Epic 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Exhibit 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Fascinate 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Galaxy Tab 2 2.4GHz.pcap'),
('', './testdata/pcaps/Samsung Galaxy 4G 2.4GHz SGH-T959V.pcap'),
('', './testdata/pcaps/Samsung Infuse 5GHz.pcap'),
('', './testdata/pcaps/Samsung Vibrant 2.4GHz.pcap'),
('', './testdata/pcaps/Sony Ericsson Xperia X10 2.4GHz.pcap'),
('', './testdata/pcaps/Sony NSX-48GT1 2.4GHz Broadcast Probe.pcap'),
('', './testdata/pcaps/Sony NSX-48GT1 2.4GHz Specific Probe.pcap'),
# Names where the identified species doesn't exactly match the filename,
# usually because multiple devices are too similar to distinguish. We name
# the file for the specific device which was captured, and add an entry
# here for the best identification which we can manage.
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Paperwhite 2012 2.4GHz B024.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Voyage 2.4GHz B013.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Voyage 2.4GHz B054.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz Google Wifi OS 4.1.3 SN 9203 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz Google Wifi OS 4.1.3 SN 9203 Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz Google Wifi OS 4.1.3 SN B00E Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz Google Wifi OS 4.1.3 SN B00E Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz GFRG210 OS 4.1.3 SN 9203.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz GFRG210 OS 4.1.3 SN B00E.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz OnHub OS 4.1.3 SN B00E Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz OnHub OS 4.1.3 SN B00E Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz OnHub OS 4.1.3 SN B00E Broadcast Probe #2.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz OnHub OS 4.1.3 SN B00E Specific Probe #2.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz OnHub OS 4.1.3 SN 9203 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz OnHub OS 4.1.3 SN 9203 Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz WNDR3800 OS 4.1.3 SN 9203 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz WNDR3800 OS 4.1.3 SN 9203 Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz WNDR3800 OS 4.1.3 SN B00E Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle 4 2.4GHz WNDR3800 OS 4.1.3 SN B00E Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz OnHub OS 5.3.7.3 SN B011 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz OnHub OS 5.3.7.3 SN B011 Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz WNDR3800 OS 5.3.7.3 SN B011 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz WNDR3800 OS 5.3.7.3 SN B011 Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz GFRG210 OS 5.3.7.3 SN B011 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz GFRG210 OS 5.3.7.3 SN B011 Specific Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz Google Wifi OS 5.3.7.3 SN B011 Broadcast Probe.pcap'),
('Amazon Kindle', './testdata/pcaps/Amazon Kindle Touch 2.4GHz Google Wifi OS 5.3.7.3 SN B011 Specific Probe.pcap'),
('Apple Watch', './testdata/pcaps/Apple Watch Series 1 2.4GHz Google Wifi watchOS 3.2.3 MJ2V2LL Specific Probe.wcap.pcap'),
('Apple Watch', './testdata/pcaps/Apple Watch Series 1 2.4GHz GFRG210 watchOS 3.2.3 MJ2V2LL Specific Probe.wcap.pcap'),
('Apple Watch', './testdata/pcaps/Apple Watch Series 1 2.4GHz Technicolor DPC3941B watchOS 3.2.3 MJ2V2LL Specific Probe.wcap.pcap'),
('Apple Watch', './testdata/pcaps/Apple Watch Series 1 2.4GHz Onhub watchOS 3.2.3 MJ2V2LL Specific Probe.wcap.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam HD 2.4GHz GFRG210 Specific Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam Pro 2.4GHz GFRG210 Broadcast Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam HD 2.4GHz Google Wifi Specific Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam Pro 2.4GHz Google Wifi Broadcast Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam HD 2.4GHz GFRG210 Broadcast Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam Pro 2.4GHz Google Wifi Specific Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam HD 2.4GHz Google Wifi Broadcast Probe.pcap'),
('Dropcam HD or Pro', './testdata/pcaps/Dropcam Pro 2.4GHz GFRG210 Specific Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 1st gen 5GHz GFRG210 iOS5.1.1 MB292LL Specific Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 2nd gen 5GHz GFRG210 iOS9.3.5 FC979LL Specific Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 2nd gen 5GHz Google Wifi iOS9.3.5 FC979LL Specific Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 1st gen 5GHz OnHub iOS5.1.1 MB292LL Broadcast Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 1st gen 5GHz Google Wifi iOS5.1.1 MB292LL Broadcast Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 2nd gen 5GHz OnHub iOS9.3.5 FC979LL Broadcast Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 2nd gen 5GHz OnHub iOS9.3.5 FC979LL Specific Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 2nd gen 5GHz GFRG210 iOS9.3.5 FC979LL Broadcast Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 1st gen 5GHz GFRG210 iOS5.1.1 MB292LL Broadcast Probe.pcap'),
('iPad 1st or 2nd gen', './testdata/pcaps/iPad 2nd gen 5GHz Google Wifi iOS9.3.5 FC979LL Broadcast Probe.pcap'),
('iPhone 6/6+', './testdata/pcaps/iPhone 6 5GHz GFRG210 iOS 9 MG552LL.pcap'),
('iPhone 6/6+', './testdata/pcaps/iPhone 6+ 5GHz iOS 9.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 5GHz.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz RRM.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz MKRD2LL iOS 10.0.2 Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz iOS 10.0.2 Broadcast Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz iOS 10.0.2 Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 5GHz.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 5GHz RRM.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz GFRG210 iOS10.2 MKRD2LL Broadcast Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz GFRG210 iOS10.2 MKRD2LL Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz OnHub iOS10.2 MKRD2LL Broadcast Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz Google Wifi iOS10.2 MKRD2LL Broadcast Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz OnHub iOS10.2 MKRD2LL Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s 2.4GHz Google Wifi iOS10.2 MKRD2LL Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz Google Wifi iOS10.2 MKV22LL Broadcast Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz OnHub iOS10.2 MKV22LL Broadcast Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz OnHub iOS10.2 MKV22LL Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz Google Wifi iOS10.2 MKV22LL Specific Probe.pcap'),
('iPhone 6s/6s+', './testdata/pcaps/iPhone 6s+ 2.4GHz GFRG210 iOS10.2 MKV22LL Broadcast Probe.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7 2.4GHz GFRG210 iOS10.2 MN8H2LL Broadcast Probe.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7+ 2.4GHz.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7 2.4GHz GFRG210 iOS10.2 MN8H2LL Specific Probe.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7 2.4GHz Google Wifi iOS10.2 MN8H2LL Broadcast Probe.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7 2.4GHz OnHub iOS10.2 MN8H2LL Specific Probe.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7 2.4GHz OnHub iOS10.2 MN8H2LL Broadcast Probe.pcap'),
('iPhone 7/7+', './testdata/pcaps/iPhone 7 2.4GHz Google Wifi iOS10.2 MN8H2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz GFRG210 iOS 11.0.3 MQ7G2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz GFRG210 iOS 11.0.3 MQ7G2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz Technicolor DPC3941B iOS 11.0.3 MQ7G2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz Technicolor DPC3941B iOS 11.0.3 MQ7G2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz Onhub iOS 11.0.3 MQ7G2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz Onhub iOS 11.0.3 MQ7G2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz Google Wifi iOS 11.0.3 MQ7G2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone 8 2.4GHz Google Wifi iOS 11.0.3 MQ7G2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz GFRG210 iOS 11.2.5 MQCK2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz GFRG210 iOS 11.2.5 MQCK2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz Technicolor DPC3941B iOS 11.2.5 MQCK2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz Technicolor DPC3941B iOS 11.2.5 MQCK2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz Onhub iOS 11.2.5 MQCK2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz Onhub iOS 11.2.5 MQCK2LL Specific Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz Google Wifi iOS 11.2.5 MQCK2LL Broadcast Probe.pcap'),
('iPhone 8/X', './testdata/pcaps/iPhone X 2.4GHz Google Wifi iOS 11.2.5 MQCK2LL Specific Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz GFRG210 sw 4.2.1 hw MC086LL Broadcast Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz GFRG210 sw 4.2.1 hw MC086LL Specific Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz WNDR3800 sw 4.2.1 hw MC086LL Broadcast Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz WNDR3800 sw 4.2.1 hw MC086LL Specific Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz Google Wifi sw 4.2.1 hw MC086LL Broadcast Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz Google Wifi sw 4.2.1 hw MC086LL Specific Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz OnHub sw 4.2.1 hw MC086LL Broadcast Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPod Touch 2nd gen 2.4GHz OnHub sw 4.2.1 hw MC086LL Specific Probe.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPhone 3GS 2.4GHz.pcap'),
('iPod Touch 2nd gen or iPhone 3GS', './testdata/pcaps/iPhone 3GS 2.4GHz M137LL.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz OnHub An5.0 LG-D855 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG K7 2.4GHz Google Wifi Android 5.1.1 LG-AS330 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz GFRG210 An5.0 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz Google Wifi An5.0 LG-D855 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz OnHub An5.0 LG-D855 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz OnHub An5.0 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG K7 2.4GHz GFRG210 Android 5.1.1 LG-AS330 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG K7 2.4GHz Google Wifi Android 5.1.1 LG-AS330 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz OnHub An5.0 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG K7 2.4GHz GFRG210 Android 5.1.1 LG-AS330 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz GFRG210 An5.0 LG-D855 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG K7 2.4GHz OnHub Android 5.1.1 LG-AS330 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz GFRG210 An5.0 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz Google Wifi An5.0 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz Google Wifi An5.0 LG-D855 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG K7 2.4GHz OnHub Android 5.1.1 LG-AS330 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz GFRG210 An5.0 LG-D855 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz GFRG210 An5.0 LG-D855 #2 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz Google Wifi An5.0 LG-D855 #2 Broadcast Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz Google Wifi An5.0 LG-D855 #2 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz GFRG210 An5.0 LG-D855 #2 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz OnHub An5.0 LG-D855 #2 Specific Probe.pcap'),
('LG G3 or K7', './testdata/pcaps/LG G3 2.4GHz OnHub An5.0 LG-D855 #2 Broadcast Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/LG G4 5GHz OnHub Android 5.1 LG-H815 Broadcast Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz Google Wifi Android 6.0.1 Specific Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/LG G4 5GHz Google Wifi Android 5.1 LG-H815 Specific Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/LG G4 5GHz Google Wifi Android 5.1 LG-H815 Broadcast Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz GFRG210 Android 6.0.1 Specific Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/LG G4 5GHz GFRG210 Android 5.1 LG-H815 Specific Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz GFRG210 Android 6.0.1 Broadcast Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz Google Wifi Android 6.0.1 Broadcast Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz OnHub Android 6.0.1 Broadcast Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/LG G4 5GHz OnHub Android 5.1 LG-H815 Specific Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/Nexus 5 5GHz OnHub Android 6.0.1 Specific Probe.pcap'),
('LG G4 or Nexus 5', './testdata/pcaps/LG G4 5GHz GFRG210 Android 5.1 LG-H815 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2.4GHz Specific.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2.4GHz.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz GFRG210 An5.1 XT1032 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz WNDR3800 An5.1 XT1032 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz WNDR3800 An5.1 XT1032 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz Google Wifi An5.1 XT1032 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz GFRG210 An5.1 XT1032 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz Google Wifi An5.1 XT1032 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz OnHub An5.1 XT1032 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz OnHub An5.1 XT1032 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 3rd gen 2.4GHz OnHub Android 6.0 SKU XT1540 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz GFRG210 An6.0 XT1063 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz Google Wifi An6.0 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz GFRG210 An6.0 XT1063 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz Google Wifi An6.0 XT1063 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 3rd gen 2.4GHz OnHub Android 6.0 SKU XT1540 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 3rd gen 2.4GHz Google Wifi Android 6.0 SKU XT1540 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz OnHub An6.0 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 3rd gen 2.4GHz Google Wifi Android 6.0 SKU XT1540 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz OnHub An6.0 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz OnHub An6.0 XT1063 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz Google Wifi An6.0 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz OnHub An6.0 XT1063 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz Google Wifi An6.0 XT1063 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz WNDR3800 An6.0 XT1063 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz WNDR3800 An6.0 XT1063 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 1st gen 2.4GHz OnHub An5.1 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 1st gen 2.4GHz Google Wifi An5.1 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 1st gen 2.4GHz GFRG210 An5.1 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz Google Wifi An4.4.4 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz GFRG210 An5.0.2 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz OnHub An4.4.4 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz GFRG210 An4.4.4 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz OnHub An4.4.4 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz GFRG210 An4.4.4 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz OnHub An5.0.2 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz Google Wifi An5.0.2 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz WNDR3800 An5.0.2 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz WNDR3800 An5.0.2 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 1st gen 2.4GHz Google Wifi An4.4.4 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz GFRG210 An5.0.2 Specific Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz OnHub An5.0.2 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto G 2nd gen 2.4GHz Google Wifi An5.0.2 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 2nd gen 2.4GHz Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 1st gen 2.4GHz GFRG210 An5.1 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 1st gen 2.4GHz Google Wifi An5.1 Broadcast Probe.pcap'),
('Moto G or Moto X', './testdata/pcaps/Moto X 1st gen 2.4GHz OnHub An5.1 Broadcast Probe.pcap'),
('Nest Thermostat v1 or v2', './testdata/pcaps/Nest Thermostat v1 2.4GHz GFRG210 sw 5.6-7 hw Diamond 1.10 Broadcast Probe.pcap'),
('Nest Thermostat v1 or v2', './testdata/pcaps/Nest Thermostat v1 2.4GHz OnHub sw 5.6-7 hw Diamond 1.10 Broadcast Probe.pcap'),
('Nest Thermostat v1 or v2', './testdata/pcaps/Nest Thermostat v1 2.4GHz Google Wifi sw 5.6-7 hw Diamond 1.10 Broadcast Probe.pcap'),
('Playstation 3 or 4', './testdata/pcaps/Playstation 3 2.4GHz Google Wifi OS 4.8 model CECH-4301A Specific Probe.pcap'),
('Roku 2 or LT', './testdata/pcaps/Roku 2 XS 2.4GHz Google Wifi sw v7.2.0 build 4100-02 hw 3100X Broadcast Probe.pcap'),
('Roku 2 or LT', './testdata/pcaps/Roku 2 XS 2.4GHz Google Wifi sw v7.2.0 build 4100-02 hw 3100X Specific Probe.pcap'),
('Roku 2 or LT', './testdata/pcaps/Roku 2 XS 2.4GHz GFRG210 sw v7.2.0 build 4100-02 hw 3100X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz GFRG210 sw 7.6.0 build 4125004 hw 4230X1 Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku TV 2.4GHz GFRG210 sw 7.6.0 build 4111-12 hw 5315X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 2 2.4GHz GFRG210 sw 7.6.0 build 4125-04 model 4210X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku TV 5GHz GFRG210 sw 7.6.0 build 4111-12 hw 5315X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku TV 2.4GHz GFRG210 sw 7.6.0 build 4111-12 hw 5315X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku TV 2.4GHz Google Wifi sw 7.6.0 build 4111-12 hw 5315X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku Streaming Stick 5GHz GFRG210 model 3600 Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku Streaming Stick 5GHz GFRG210 model 3600 Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku TV 5GHz GFRG210 sw 7.6.0 build 4111-12 hw 5315X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 2 2.4GHz GFRG210 sw 7.6.0 build 4125-04 model 4210X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz GFRG210 sw 7.6.0 build 4125004 hw 4230X1 Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku TV 2.4GHz Google Wifi sw 7.6.0 build 4111-12 hw 5315X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 5GHz Google Wifi sw 7.6.0 build 4125-04 model 4230X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 5GHz Google Wifi sw 7.6.0 build 4125-04 model 4230X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 5GHz GFRG210 sw 7.6.0 build 4125-04 model 4230X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 5GHz GFRG210 sw 7.6.0 build 4125-04 model 4230X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 5GHz WNDR3800 sw 7.6.0 build 4125-04 model 4230X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz GFRG210 sw 7.6.0 build 4125-04 model 4230X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz GFRG210 sw 7.6.0 build 4125-04 model 4230X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz Google Wifi sw 7.6.0 build 4125-04 model 4230X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz Google Wifi sw 7.6.0 build 4125-04 model 4230X Specific Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz WNDR3800 sw 7.6.0 build 4125-04 model 4230X Broadcast Probe.pcap'),
('Roku 2, 3, TV, or Streaming Stick', './testdata/pcaps/Roku 3 2.4GHz WNDR3800 sw 7.6.0 build 4125-04 model 4230X Specific Probe.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku 4 2.4GHz.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku 4 5GHz.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku Premiere 2.4GHz Google Wifi sw 7.6.0 build 4113-29 model 4620X Specific Probe.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku Premiere 2.4GHz Google Wifi sw 7.6.0 build 4113-29 model 4620X Broadcast Probe.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku Premiere 2.4GHz GFRG210 sw 7.6.0 build 4113-29 model 4620X Specific Probe.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku Premiere 5GHz GFRG210 sw 7.6.0 build 4113-29 model 4620X Broadcast Probe.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku Premiere 2.4GHz GFRG210 sw 7.6.0 build 4113-29 model 4620X Broadcast Probe.pcap'),
('Roku 4, TV, or Premiere', './testdata/pcaps/Roku Premiere 5GHz GFRG210 sw 7.6.0 build 4113-29 model 4620X Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz GFRG210 An4.0.4 SGH-T879 Broadcast Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz GFRG210 An4.0.4 SGH-T879 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz OnHub An4.0.4 SGH-T879 Broadcast Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz OnHub An4.0.4 SGH-T879 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz Google Wifi An4.0.4 SGH-T879 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy Note 5GHz Google Wifi An4.0.4 SGH-T879 Broadcast Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz WNDR3800 An4.1.2 GT-I9105 Broadcast Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz Google Wifi An4.1.2 GT-I9105 Broadcast Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz GFRG210 An4.1.2 GT-I9105 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz OnHub An4.1.2 GT-I9105 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz WNDR3800 An4.1.2 GT-I9105 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz Google Wifi An4.1.2 GT-I9105 Specific Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz OnHub An4.1.2 GT-I9105 Broadcast Probe.pcap'),
('Samsung Galaxy Note or S2+', './testdata/pcaps/Samsung Galaxy S2+ 5GHz GFRG210 An4.1.2 GT-I9105 Broadcast Probe.pcap'),
('Samsung Galaxy Note 5 or S7 Edge', './testdata/pcaps/Samsung Galaxy S7 Edge 5GHz GFRG210 An6.0.1 SM-G935F Specific Probe.pcap'),
('Samsung Galaxy Note 5 or S7 Edge', './testdata/pcaps/Samsung Galaxy S7 Edge 5GHz GFRG210 An6.0.1 SM-G935F Broadcast Probe.pcap'),
('Samsung Galaxy Note 5 or S7 Edge', './testdata/pcaps/Samsung Galaxy Note 5 5GHz GFRG210 An6.0.1 SM-N920C Broadcast Probe.pcap'),
('Samsung Galaxy Note 5 or S7 Edge', './testdata/pcaps/Samsung Galaxy Note 5 5GHz GFRG210 An6.0.1 SM-N920C Specific Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Infuse 2.4GHz.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz WNDR3800 An4.0.3 GF-I9100 Broadcast Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz WNDR3800 An4.0.3 GF-I9100 Specific Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz GFRG210 An4.0.3 GF-I9100 Specific Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz Google Wifi An4.0.3 GF-I9100 Specific Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz Google Wifi An4.0.3 GF-I9100 Broadcast Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz GFRG210 An4.0.3 GF-I9100 Broadcast Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz OnHub An4.0.3 GF-I9100 Broadcast Probe.pcap'),
('Samsung Galaxy S2 or Infuse', './testdata/pcaps/Samsung Galaxy S2 2.4GHz OnHub An4.0.3 GF-I9100 Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz WNDR3800 Android 5.0.2 SM-T800 Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz GFRG210 Android 6.0.1 SM-G900H Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz OnHub Android 5.0.2 SM-T800 Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz OnHub Android 5.0 SM-G900F Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz WNDR3800 Android 5.0.2 SM-T800 Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz GFRG210 Android 5.0 SM-G900F Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz OnHub Android 5.0.2 SM-T800 Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Probe 1.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Google Wifi Android 6.0.1 SM-G900H Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Google Wifi Android 5.0 SM-G900F Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz GFRG210 Android 5.0.2 SM-T800 Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz OnHub Android 5.0 SM-G900F Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz GFRG210 Android 5.0 SM-G900F Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Probe 3.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz OnHub Android 6.0.1 SM-G900H Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Google Wifi Android 5.0 SM-G900F Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Google Wifi Android 6.0.1 SM-G900H Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz GFRG210 Android 5.0.2 SM-T800 Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz Google Wifi Android 5.0.2 SM-T800 Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy Tab S 2.4GHz Google Wifi Android 5.0.2 SM-T800 Specific Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz GFRG210 Android 6.0.1 SM-G900H Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz OnHub Android 6.0.1 SM-G900H Broadcast Probe.pcap'),
('Samsung Galaxy S5 or Tab S', './testdata/pcaps/Samsung Galaxy S5 2.4GHz Probe 2.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz GFRG210 An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz OnHub An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz OnHub An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz GFRG210 An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz GFRG210 An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz Google Wifi An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz Google Wifi An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz Google Wifi An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz OnHub An7.0 SM-G950U Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz Google Wifi An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 5GHz OnHub An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8 2.4GHz GFRG210 An7.0 SM-G950U Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 2.4GHz GFRG210 An7.0 Samsung Exp 8.1 SM-G955U1 Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 5GHz GFRG210 An7.0 Samsung Exp 8.1 SM-G955U1 Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 2.4GHz GFRG210 An7.0 Samsung Exp 8.1 SM-G955U1 Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 2.4GHz Google Wifi An7.0 Samsung Exp 8.1 SM-G955U1 Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 2.4GHz Google Wifi An7.0 Samsung Exp 8.1 SM-G955U1 Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 5GHz GFRG210 An7.0 Samsung Exp 8.1 SM-G955U1 Broadcast Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 5GHz Google Wifi An7.0 Samsung Exp 8.1 SM-G955U1 Specific Probe.pcap'),
('Samsung Galaxy S8/S8+', './testdata/pcaps/Samsung Galaxy S8+ 5GHz Google Wifi An7.0 Samsung Exp 8.1 SM-G955U1 Broadcast Probe.pcap'),
('Sony Xperia Z4 or Z5', './testdata/pcaps/Sony Xperia Z5 5GHz.pcap'),
('Sony Xperia Z4 or Z5', './testdata/pcaps/Sony Xperia Z5 2.4GHz.pcap'),
('Sony Xperia Z4 or Z5', './testdata/pcaps/Sony Xperia Z4 Tablet 5GHz.pcap'),
('Sony Xperia Z4 or Z5', './testdata/pcaps/Sony Xperia Z4 Tablet 2.4GHz.pcap'),
]
# AP-mode counterpart to the client regression list: (expected model, pcap
# path) pairs checked in access-point mode by the __main__ driver below.
ap_regression = [
    # Names where the identified species doesn't exactly match the filename,
    # usually because multiple devices are too similar to distinguish. We name
    # the file for the specific device which was captured, and add an entry
    # here for the best identification which we can manage.
    ('Aruba AP-22x', './testdata/ap_pcaps/Aruba AP-224 5GHz SVL-MAT3-1-ch132.pcap'),
    ('Aruba AP-22x', './testdata/ap_pcaps/Aruba AP-224 5GHz MTV-PR55-1-ch157.pcap'),
    ('Aruba AP-22x', './testdata/ap_pcaps/Aruba AP-225 5GHz MTV-45-1-ch100.pcap'),
    ('Aruba AP-22x', './testdata/ap_pcaps/Aruba AP-225 5GHz MTV-47-1-ch40.pcap'),
    ('Aruba AP-22x', './testdata/ap_pcaps/Aruba AP-225 5GHz MTV-GWC5-1-ch157.pcap'),
    ('Aruba AP-22x', './testdata/ap_pcaps/Aruba AP-225 5GHz SVL-MP2-1-ch60.pcap'),
    ('Aruba AP-27x', './testdata/ap_pcaps/Aruba AP-275 5GHz SLV-MP2-1-ch36.pcap'),
    ('Aruba AP-27x', './testdata/ap_pcaps/Aruba AP-277 5GHz MTV-CL2-2-ch128.pcap'),
    ('Google Fiber GFRG2x0', './testdata/ap_pcaps/Google Fiber GFRG210 5GHz gfrg200-46.51.2.pcap'),
]
def get_taxonomy_from_pcap(filename, ap_mode):
  """Run ./wifi_signature over a pcap and return its (mac, signature) pair.

  Args:
    filename: path to the pcap file to analyze.
    ap_mode: if true, pass -b so the tool examines beacons (AP mode) rather
      than probes (client mode).

  Returns:
    (mac, signature) tuple as printed by the wifi_signature binary.
  """
  cmd = ['./wifi_signature', '-f', filename]
  if ap_mode:
    cmd += ['-b']
  output = subprocess.check_output(cmd)
  mac, sig = output.split()
  return (mac, sig)
def get_model(filename):
  """Extract the device model name from a pcap filename.

  Filenames follow the pattern '<model> <band> <details>.pcap', where band is
  '2.4GHz' or '5GHz'; the model is everything before the first band marker.

  Args:
    filename: basename of a pcap file.

  Returns:
    The model string with surrounding whitespace stripped, or '' (after
    printing a diagnostic) if no band marker is present.
  """
  # Check 2.4GHz first, matching the original lookup order.
  for band in ('2.4GHz', '5GHz'):
    offset = filename.find(band)
    if offset >= 0:
      return filename[0:offset].strip()
  # Parenthesized single-argument print works identically on Python 2 and 3
  # (the original used a Python-2-only print statement).
  print('Invalid filename: %s' % filename)
  return ''
def check_pcap(expected_model, pcap, ap_mode=False):
  """Identify the device in a pcap and compare it to the expected model.

  Args:
    expected_model: model string expected from identification, or '' when the
      pcap is expected to be unidentifiable ('Unknown').
    pcap: path to the pcap file.
    ap_mode: True to identify an access point instead of a client.

  Returns:
    True if the identification mismatches expectations, False otherwise.
    (The original fell off the end and returned None on success; False is
    equally falsy but explicit.)
  """
  mac, sig = get_taxonomy_from_pcap(pcap, ap_mode)
  genus, species, _ = wifi.identify_wifi_device(sig, mac)
  actual_model = genus + " " + species if species else genus
  # Parenthesized single-argument prints are valid on both Python 2 and 3;
  # the original used Python-2-only print statements.
  if expected_model and expected_model != actual_model:
    print('Mismatch in %s: %s %s != %s' % (pcap, mac, expected_model,
                                           actual_model))
    return True
  if not expected_model and 'Unknown' not in actual_model:
    print('Mismatch in %s: %s %s != Unknown' % (pcap, mac, actual_model))
    return True
  return False
if __name__ == '__main__':
  # Point the dhcp module at checked-in test fixtures instead of live files.
  dhcp.DHCP_LEASES_FILE = 'testdata/dhcp.leases'
  dhcp.DHCP_SIGNATURE_FILE = 'testdata/dhcp.signatures'
  rc = 0
  # Client pcaps: regression entries are checked against their listed model;
  # every remaining pcap must match the model embedded in its filename.
  client_pcaps = glob.glob('./testdata/pcaps/*.pcap')
  for (expected_model, pcap) in client_regression:
    client_pcaps.remove(pcap)
    if check_pcap(expected_model, pcap, False):
      rc = 1
  for pcap in client_pcaps:
    expected_model = get_model(os.path.basename(pcap))
    if not expected_model or check_pcap(expected_model, pcap, False):
      rc = 1
  # AP pcaps: same scheme, but identification runs in access-point mode.
  ap_pcaps = glob.glob('./testdata/ap_pcaps/*.pcap')
  for (expected_model, pcap) in ap_regression:
    ap_pcaps.remove(pcap)
    if check_pcap(expected_model, pcap, True):
      rc = 1
  for pcap in ap_pcaps:
    expected_model = get_model(os.path.basename(pcap))
    if not expected_model or check_pcap(expected_model, pcap, True):
      rc = 1
  # Nonzero exit status signals that at least one pcap was misidentified.
  sys.exit(rc)
| |
# -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from goose.utils import ReplaceSequence
from lxml.html import clean
from urlparse import urlsplit
from goose.text import innerTrim
from configuration import Configuration
from host_utils import HostUtils
from goose.constant import _Const
# TODO: Uncomment and make changes in code if you want to read once from DB
# KNOWN_HOST_REMOVE_SELECTORS = _Const().get_known_host_remove_selectors
class OutputFormatterCleaner(clean.Cleaner):
    """lxml-based HTML cleaner used on formatted article output.

    Restricts attributes to a safe set (minus class/id/tabindex) and only
    allows embedded content from a small whitelist of media hosts.
    """

    config = Configuration()
    parser = config.get_parser()
    safe_attrs_only = True
    # Hosts whose embedded players survive cleaning (see allow_embedded_url).
    host_whitelist = ['www.youtube.com', 'player.vimeo.com', 'w.soundcloud.com',
                      'embed.spotify.com']

    def __init__(self, **kw):
        # BUG FIX: the original called `super(OutputFormatterCleaner, **kw)`,
        # which never invokes clean.Cleaner.__init__ (and raises TypeError as
        # soon as kw is non-empty, since super() takes no keyword arguments).
        # Delegate to the parent initializer properly.
        super(OutputFormatterCleaner, self).__init__(**kw)
        self.safe_attrs = self.__safe_attrs()

    def allow_embedded_url(self, el, url):
        """Return True if the embedded *url* on element *el* may be kept.

        Only http/https/relative URLs pointing at a whitelisted host pass.
        """
        if (self.whitelist_tags is not None
            and el.tag not in self.whitelist_tags):
            return False
        scheme, netloc, path, query, fragment = urlsplit(url)
        # Drop any :port suffix before comparing against the whitelist.
        netloc = netloc.lower().split(':', 1)[0]
        if scheme not in ('http', 'https', ''):
            return False
        if netloc in self.host_whitelist:
            return True
        return False

    def clean(self, node):
        """Serialize *node* to HTML, clean it, and return the trimmed string."""
        html_string = self.parser.nodeToString(node, method='html')
        clean_html_string = self.clean_html(html_string)
        return innerTrim(clean_html_string)

    def __safe_attrs(self):
        """Return lxml's default safe attributes minus class/id/tabindex."""
        attributes = set(clean.defs.safe_attrs)
        for remove_attribute in ['class', 'id', 'tabindex']:
            # discard (not remove) so a missing entry in a future lxml
            # version cannot raise KeyError.
            attributes.discard(remove_attribute)
        return attributes
class DocumentCleaner(object):
    """Strips boilerplate (nav bars, share widgets, comments, footers, ...)
    from an article DOM before content extraction.

    Operates on the lxml document held by ``article.doc``, driven by large
    regular expressions matched against id/class/name attributes plus
    per-host CSS selectors for known sites.
    """

    def __init__(self, config, article):
        """
        Args:
            config: Configuration object; supplies the HTML parser.
            article: the article being cleaned; ``article.doc`` is the DOM.
        """
        # config
        self.config = config
        # parser
        self.parser = self.config.get_parser()
        # article
        self.article = article
        # Regexp of id/class/name fragments marking boilerplate nodes.
        self.remove_nodes_re = (
            "^side$|combx|retweet|mediaarticlerelated|menucontainer|"
            "navbar|storytopbar-bucket|utility-bar|inline-share-tools"
            "|comment|PopularQuestions|contact|foot|footer|Footer|footnote"
            "|cnn_strycaptiontxt|cnn_html_slideshow|cnn_strylftcntnt"
            "|^links$|meta$|shoutbox|sponsor"
            "|tags|socialnetworking|socialNetworking|cnnStryHghLght"
            "|cnn_stryspcvbx|^inset$|pagetools|post-attributes"
            "|welcome_form|contentTools2|the_answers"
            "|communitypromo|runaroundLeft|subscribe|vcard|articleheadings"
            "|date|^print$|popup|author-dropdown|tools|socialtools|byline"
            "|konafilter|KonaFilter|breadcrumbs|^fn$|wp-caption-text"
            "|legende|ajoutVideo|timestamp|js_replies|printfriendly|share"
        )
        # dailymail remove nodes
        self.remove_nodes_re += "|related-carousel|xwv-related-videos-container"
        # nytimes remove nodes
        self.remove_nodes_re += "|visually-hidden|robots-nocontent"
        # *.wikipedia.org
        self.remove_nodes_re += "|mw-editsection|^cite_ref|noprint|References|siteSub"
        self.remove_nodes_re += "|collapsed|mw-headline-anchor|filetoc|noviewer"
        # *.wiktionary.org
        self.remove_nodes_re += "|ib-brac"
        # *.wikibooks.org
        self.remove_nodes_re += "|status-icon"
        # www.wikidata.org
        self.remove_nodes_re += "|wikibase-edittoolbar-container"
        # http://www.dailymail.co.uk/news/article-2742786/Complacent-Home-Office-loses-175-000-illegal-immigrants-Fresh-humiliation-officials-admit-went-missing-refused-permission-stay.html
        self.remove_nodes_re += "|most-read-news-wrapper|most-watched-videos-wrapper"
        self.regexp_namespace = "http://exslt.org/regular-expressions"
        # XPath queries applying remove_nodes_re to id/class/name attributes.
        self.nauthy_ids_re = "//*[re:test(@id, '%s', 'i')]" % self.remove_nodes_re
        self.nauthy_classes_re = "//*[re:test(@class, '%s', 'i')]" % self.remove_nodes_re
        self.nauthy_names_re = "//*[re:test(@name, '%s', 'i')]" % self.remove_nodes_re
        # Tags removed wholesale unless they contain images worth rescuing.
        self.nauthy_tags = ["noscript"]
        self.google_re = " google "
        self.entries_re = "^[^entry-]more.*$"
        self.facebook_re = "[^-]facebook"
        # NOTE: attribute name keeps the historical 'braodcasting' typo for
        # backward compatibility with any external readers.
        self.facebook_braodcasting_re = "facebook-broadcasting"
        self.twitter_re = "[^-]twitter"
        # Collapses newlines/tabs/whitespace-only lines in text nodes.
        self.tablines_replacements = ReplaceSequence()\
            .create("\n", "\n\n")\
            .append("\t")\
            .append("^\\s+$")

    def set_known_host_remove_selectors(self):
        """Load per-host CSS removal selectors for the article's domain."""
        self.known_host_remove_selectors = HostUtils.host_selectors(_Const().get_known_host_remove_selectors,
                                                                    self.article.domain)

    def clean(self):
        """Run the full cleaning pipeline and return the cleaned document.

        If host-specific selectors exist for this domain, they are applied
        INSTEAD of the generic pipeline (early return, as in the original).
        """
        doc_to_clean = self.article.doc
        doc_to_clean = self.remove_scripts_styles(doc_to_clean)
        self.set_known_host_remove_selectors()
        if self.known_host_remove_selectors:
            return self.remove_host_specific_nodes(doc_to_clean)
        doc_to_clean = self.clean_body_classes(doc_to_clean)
        doc_to_clean = self.clean_article_tags(doc_to_clean)
        doc_to_clean = self.remove_drop_caps(doc_to_clean)
        doc_to_clean = self.clean_bad_tags(doc_to_clean)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.google_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.entries_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.facebook_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.facebook_braodcasting_re)
        doc_to_clean = self.remove_nodes_regex(doc_to_clean, self.twitter_re)
        doc_to_clean = self.clean_para_spans(doc_to_clean)
        doc_to_clean = self.div_to_para(doc_to_clean, 'div')
        doc_to_clean = self.div_to_para(doc_to_clean, 'span')
        return doc_to_clean

    def clean_body_classes(self, doc):
        # we don't need body classes
        # in case it matches an unwanted class all the document
        # will be empty
        elements = self.parser.getElementsByTag(doc, tag="body")
        if elements:
            self.parser.delAttribute(elements[0], attr="class")
        return doc

    def clean_article_tags(self, doc):
        """Strip id/name/class from <article> tags so they survive cleaning."""
        articles = self.parser.getElementsByTag(doc, tag='article')
        for article in articles:
            for attr in ['id', 'name', 'class']:
                self.parser.delAttribute(article, attr=attr)
        return doc

    def remove_drop_caps(self, doc):
        """Unwrap decorative drop-cap spans, keeping their text."""
        items = self.parser.css_select(doc, "span[class~=dropcap], span[class~=drop_cap]")
        for item in items:
            self.parser.drop_tag(item)
        return doc

    def remove_scripts_styles(self, doc):
        """Remove <script>, <style> and comment nodes from the document."""
        # remove scripts
        scripts = self.parser.getElementsByTag(doc, tag='script')
        for item in scripts:
            self.parser.remove(item)
        # remove styles
        styles = self.parser.getElementsByTag(doc, tag='style')
        for item in styles:
            self.parser.remove(item)
        # remove comments
        comments = self.parser.getComments(doc)
        for item in comments:
            self.parser.remove(item)
        return doc

    def clean_bad_tags(self, doc):
        """Remove nodes whose id/class/name matches the boilerplate regexp,
        plus whole nauthy tags (rescuing any images they contain)."""
        # ids
        naughty_list = self.parser.xpath_re(doc, self.nauthy_ids_re)
        for node in naughty_list:
            self.parser.remove(node)
        # class
        naughty_classes = self.parser.xpath_re(doc, self.nauthy_classes_re)
        for node in naughty_classes:
            self.parser.remove(node)
        # name
        naughty_names = self.parser.xpath_re(doc, self.nauthy_names_re)
        for node in naughty_names:
            self.parser.remove(node)
        for nauthy_tag in self.nauthy_tags:
            nodes = self.parser.getElementsByTag(doc, tag=nauthy_tag)
            for node in nodes:
                images = self.parser.getElementsByTag(node, tag='img')
                if images:
                    # Hoist images out before the wrapper is discarded.
                    parent = node.getparent()
                    parent_index = parent.index(node)
                    for image in images:
                        parent.insert(parent_index, image)
                else:
                    self.parser.remove(node)
        return doc

    def remove_host_specific_nodes(self, doc):
        """Remove nodes matched by this host's configured CSS selectors."""
        nodes = self.parser.css_select(doc, self.known_host_remove_selectors)
        for node in nodes:
            self.parser.remove(node)
        return doc

    def remove_nodes_regex(self, doc, pattern):
        """Remove nodes whose id or class matches *pattern* (case-insensitive)."""
        for selector in ['id', 'class']:
            reg = "//*[re:test(@%s, '%s', 'i')]" % (selector, pattern)
            naughty_list = self.parser.xpath_re(doc, reg)
            for node in naughty_list:
                self.parser.remove(node)
        return doc

    def clean_para_spans(self, doc):
        """Unwrap <span> elements inside paragraphs, keeping their text."""
        spans = self.parser.css_select(doc, 'p span')
        for item in spans:
            self.parser.drop_tag(item)
        return doc

    def get_flushed_buffer(self, replacement_text, doc):
        """Wrap accumulated replacement text in a new paragraph node."""
        return self.parser.textToPara(replacement_text)

    def get_replacement_nodes(self, doc, div):
        """Compute the replacement children for *div*, merging loose text and
        adjacent <a> siblings into new <p> nodes.

        Returns the list of nodes that should become div's new children.
        """
        replacement_text = []
        nodes_to_return = []
        nodes_to_remove = []
        childs = self.parser.childNodesWithText(div)
        for kid in childs:
            # node is a p
            # and already have some replacement text
            if self.parser.getTag(kid) == 'p' and len(replacement_text) > 0:
                new_node = self.get_flushed_buffer(''.join(replacement_text), doc)
                nodes_to_return.append(new_node)
                replacement_text = []
                nodes_to_return.append(kid)
            # node is a text node
            elif self.parser.isTextNode(kid):
                kid_text_node = kid
                kid_text = self.parser.getText(kid)
                replace_text = self.tablines_replacements.replaceAll(kid_text)
                if len(replace_text) > 1:
                    # Absorb the run of <a> siblings immediately before this
                    # text node, marking each so it is consumed only once.
                    previous_sibling_node = self.parser.previousSibling(kid_text_node)
                    while previous_sibling_node is not None \
                        and self.parser.getTag(previous_sibling_node) == "a" \
                        and self.parser.getAttribute(previous_sibling_node, 'grv-usedalready') != 'yes':
                        outer = " " + self.parser.outerHtml(previous_sibling_node) + " "
                        replacement_text.append(outer)
                        nodes_to_remove.append(previous_sibling_node)
                        self.parser.setAttribute(previous_sibling_node,
                                    attr='grv-usedalready', value='yes')
                        previous_sibling_node = self.parser.previousSibling(previous_sibling_node)
                    # BUG FIX: the cleaned text itself was never appended, so
                    # text-node content was silently dropped when the div was
                    # rebuilt (upstream goose appends it here).
                    replacement_text.append(replace_text)
                    # Absorb the run of <a> siblings immediately after.
                    next_sibling_node = self.parser.nextSibling(kid_text_node)
                    while next_sibling_node is not None \
                        and self.parser.getTag(next_sibling_node) == "a" \
                        and self.parser.getAttribute(next_sibling_node, 'grv-usedalready') != 'yes':
                        outer = " " + self.parser.outerHtml(next_sibling_node) + " "
                        replacement_text.append(outer)
                        nodes_to_remove.append(next_sibling_node)
                        self.parser.setAttribute(next_sibling_node,
                                    attr='grv-usedalready', value='yes')
                        # BUG FIX: the original advanced previous_sibling_node
                        # here, so this loop consumed at most one following
                        # <a>; advance the correct cursor.
                        next_sibling_node = self.parser.nextSibling(next_sibling_node)
            # otherwise
            else:
                nodes_to_return.append(kid)
        # flush out anything still remaining
        if len(replacement_text) > 0:
            new_node = self.get_flushed_buffer(''.join(replacement_text), doc)
            nodes_to_return.append(new_node)
            replacement_text = []
        for n in nodes_to_remove:
            self.parser.remove(n)
        return nodes_to_return

    def replace_with_para(self, doc, div):
        """Convert *div* into a <p> element in place."""
        self.parser.replaceTag(div, 'p')

    def div_to_para(self, doc, dom_type):
        """Convert *dom_type* elements into paragraphs: leaf-like ones become
        <p> directly; others have their children rebuilt via
        get_replacement_nodes."""
        bad_divs = 0
        else_divs = 0
        divs = self.parser.getElementsByTag(doc, tag=dom_type)
        tags = ['a', 'blockquote', 'dl', 'div', 'img', 'ol', 'p', 'pre', 'table', 'ul']
        for div in divs:
            items = self.parser.getElementsByTags(div, tags)
            if div is not None and len(items) == 0:
                self.replace_with_para(doc, div)
                bad_divs += 1
            elif div is not None:
                replace_nodes = self.get_replacement_nodes(doc, div)
                for child in self.parser.childNodes(div):
                    div.remove(child)
                for c, n in enumerate(replace_nodes):
                    div.insert(c, n)
                else_divs += 1
        return doc
class StandardDocumentCleaner(DocumentCleaner):
    """Default cleaner; an extension point with no behavior of its own."""
    pass
| |
# Copyright (C) 2003-2006 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import stat
import time
from paramiko.common import *
from paramiko.sftp import *
class SFTPAttributes (object):
    """
    Representation of the attributes of a file (or proxied file) for SFTP in
    client or server mode.  It attempts to mirror the object returned by
    C{os.stat} as closely as possible, so it may have the following fields,
    with the same meanings as those returned by an C{os.stat} object:
        - st_size
        - st_uid
        - st_gid
        - st_mode
        - st_atime
        - st_mtime
    Because SFTP allows flags to have other arbitrary named attributes, these
    are stored in a dict named C{attr}.  Occasionally, the filename is also
    stored, in C{filename}.
    """
    # Bit flags of the SFTP ATTRS structure, marking which optional fields
    # are present on the wire (see the secsh-filexfer protocol draft).
    FLAG_SIZE = 1
    FLAG_UIDGID = 2
    FLAG_PERMISSIONS = 4
    FLAG_AMTIME = 8
    FLAG_EXTENDED = 0x80000000L
    def __init__(self):
        """
        Create a new (empty) SFTPAttributes object.  All fields will be empty.
        """
        self._flags = 0      # which FLAG_* fields are populated (wire format)
        self.st_size = None
        self.st_uid = None
        self.st_gid = None
        self.st_mode = None
        self.st_atime = None
        self.st_mtime = None
        self.attr = {}       # arbitrary extended (named) attributes
    def from_stat(cls, obj, filename=None):
        """
        Create an SFTPAttributes object from an existing C{stat} object (an
        object returned by C{os.stat}).
        @param obj: an object returned by C{os.stat} (or equivalent).
        @type obj: object
        @param filename: the filename associated with this file.
        @type filename: str
        @return: new L{SFTPAttributes} object with the same attribute fields.
        @rtype: L{SFTPAttributes}
        """
        attr = cls()
        attr.st_size = obj.st_size
        attr.st_uid = obj.st_uid
        attr.st_gid = obj.st_gid
        attr.st_mode = obj.st_mode
        attr.st_atime = obj.st_atime
        attr.st_mtime = obj.st_mtime
        if filename is not None:
            attr.filename = filename
        return attr
    # bound post-hoc; file predates use of the @classmethod decorator syntax
    from_stat = classmethod(from_stat)
    def __repr__(self):
        return '<SFTPAttributes: %s>' % self._debug_str()
    ### internals...
    def _from_msg(cls, msg, filename=None, longname=None):
        # Alternate constructor: decode an ATTRS structure from a wire Message.
        attr = cls()
        attr._unpack(msg)
        if filename is not None:
            attr.filename = filename
        if longname is not None:
            attr.longname = longname
        return attr
    _from_msg = classmethod(_from_msg)
    def _unpack(self, msg):
        # Decode fields in protocol order; only fields whose flag bit is set
        # are present on the wire.
        self._flags = msg.get_int()
        if self._flags & self.FLAG_SIZE:
            self.st_size = msg.get_int64()
        if self._flags & self.FLAG_UIDGID:
            self.st_uid = msg.get_int()
            self.st_gid = msg.get_int()
        if self._flags & self.FLAG_PERMISSIONS:
            self.st_mode = msg.get_int()
        if self._flags & self.FLAG_AMTIME:
            self.st_atime = msg.get_int()
            self.st_mtime = msg.get_int()
        if self._flags & self.FLAG_EXTENDED:
            count = msg.get_int()
            for i in range(count):
                self.attr[msg.get_string()] = msg.get_string()
    def _pack(self, msg):
        # Recompute the flag word from which fields are set, then encode in
        # the same order _unpack() reads them.
        self._flags = 0
        if self.st_size is not None:
            self._flags |= self.FLAG_SIZE
        if (self.st_uid is not None) and (self.st_gid is not None):
            self._flags |= self.FLAG_UIDGID
        if self.st_mode is not None:
            self._flags |= self.FLAG_PERMISSIONS
        if (self.st_atime is not None) and (self.st_mtime is not None):
            self._flags |= self.FLAG_AMTIME
        if len(self.attr) > 0:
            self._flags |= self.FLAG_EXTENDED
        msg.add_int(self._flags)
        if self._flags & self.FLAG_SIZE:
            msg.add_int64(self.st_size)
        if self._flags & self.FLAG_UIDGID:
            msg.add_int(self.st_uid)
            msg.add_int(self.st_gid)
        if self._flags & self.FLAG_PERMISSIONS:
            msg.add_int(self.st_mode)
        if self._flags & self.FLAG_AMTIME:
            # throw away any fractional seconds
            msg.add_int(long(self.st_atime))
            msg.add_int(long(self.st_mtime))
        if self._flags & self.FLAG_EXTENDED:
            msg.add_int(len(self.attr))
            for key, val in self.attr.iteritems():
                msg.add_string(key)
                msg.add_string(val)
        return
    def _debug_str(self):
        # Human-readable dump of whichever fields are set, for __repr__.
        out = '[ '
        if self.st_size is not None:
            out += 'size=%d ' % self.st_size
        if (self.st_uid is not None) and (self.st_gid is not None):
            out += 'uid=%d gid=%d ' % (self.st_uid, self.st_gid)
        if self.st_mode is not None:
            out += 'mode=' + oct(self.st_mode) + ' '
        if (self.st_atime is not None) and (self.st_mtime is not None):
            out += 'atime=%d mtime=%d ' % (self.st_atime, self.st_mtime)
        for k, v in self.attr.iteritems():
            out += '"%s"=%r ' % (str(k), v)
        out += ']'
        return out
    def _rwx(n, suid, sticky=False):
        """Render one 3-bit permission triplet (plus setuid/sticky) as 'rwx' text."""
        if suid:
            # any truthy suid selects the S/s (or T/t) variants below
            suid = 2
        out = '-r'[n >> 2] + '-w'[(n >> 1) & 1]
        if sticky:
            out += '-xTt'[suid + (n & 1)]
        else:
            out += '-xSs'[suid + (n & 1)]
        return out
    _rwx = staticmethod(_rwx)
    def __str__(self):
        "create a unix-style long description of the file (like ls -l)"
        if self.st_mode is not None:
            kind = stat.S_IFMT(self.st_mode)
            if kind == stat.S_IFIFO:
                ks = 'p'
            elif kind == stat.S_IFCHR:
                ks = 'c'
            elif kind == stat.S_IFDIR:
                ks = 'd'
            elif kind == stat.S_IFBLK:
                ks = 'b'
            elif kind == stat.S_IFREG:
                ks = '-'
            elif kind == stat.S_IFLNK:
                ks = 'l'
            elif kind == stat.S_IFSOCK:
                ks = 's'
            else:
                ks = '?'
            ks += self._rwx((self.st_mode & 0700) >> 6, self.st_mode & stat.S_ISUID)
            ks += self._rwx((self.st_mode & 070) >> 3, self.st_mode & stat.S_ISGID)
            ks += self._rwx(self.st_mode & 7, self.st_mode & stat.S_ISVTX, True)
        else:
            ks = '?---------'
        # compute display date
        if (self.st_mtime is None) or (self.st_mtime == 0xffffffff):
            # shouldn't really happen
            datestr = '(unknown date)'
        else:
            if abs(time.time() - self.st_mtime) > 15552000:
                # (15552000 = 6 months)
                datestr = time.strftime('%d %b %Y', time.localtime(self.st_mtime))
            else:
                datestr = time.strftime('%d %b %H:%M', time.localtime(self.st_mtime))
        filename = getattr(self, 'filename', '?')
        # not all servers support uid/gid
        uid = self.st_uid
        gid = self.st_gid
        if uid is None:
            uid = 0
        if gid is None:
            gid = 0
        # NOTE(review): st_size may still be None here (server omitted it);
        # '%8d' would then raise -- assumes size is always known. TODO confirm.
        return '%s 1 %-8d %-8d %8d %-12s %s' % (ks, uid, gid, self.st_size, datestr, filename)
| |
import os
import shutil
import logging
from pbcore.io import FastqReader, FastqWriter, FastqRecord
from pbhla.io.BlasrIO import BlasrReader
log = logging.getLogger()
def write_fastq( fasta_records, output_file):
    """
    Write one FastqRecord, or a list of them, out to output_file.

    The writer is opened before the type check, so an invalid input still
    creates/truncates the output file (matching the original behavior).
    Raises TypeError for unrecognized inputs.
    """
    with FastqWriter( output_file ) as handle:
        if isinstance( fasta_records, FastqRecord ):
            to_write = [fasta_records]
        elif isinstance( fasta_records, list):
            to_write = fasta_records
        else:
            msg = "Input Record(s) type not recognized"
            log.error( msg )
            raise TypeError( msg )
        for fastq_record in to_write:
            handle.writeRecord( fastq_record )
    check_output_file( output_file )
def consensus_filetype( file_list ):
    """
    Return 'fastq' if every file in file_list has a FASTQ extension,
    'fasta' if every file has a FASTA extension, and raise ValueError
    otherwise.  Fix: the ValueError now carries a diagnostic message
    instead of being raised bare.

    NOTE: an empty file_list returns 'fastq' (all() is vacuously true),
    preserving the original behavior.
    """
    if all([is_fastq(f) for f in file_list]):
        return 'fastq'
    elif all([is_fasta(f) for f in file_list]):
        return 'fasta'
    else:
        raise ValueError("Mixed or unrecognized sequence file types: %s" % (file_list,))
def is_fasta( filename ):
    """Return True if filename carries a FASTA extension ('.fa' or '.fasta')."""
    return filename.endswith(('.fa', '.fasta'))
def is_fastq( filename ):
    """Return True if filename carries a FASTQ extension ('.fastq' or '.fq')."""
    return filename.endswith(('.fastq', '.fq'))
def read_fastq_dict( fastq_input ):
    """
    Read FASTQ records from one file (str) or several (list) into a dict
    keyed on the first whitespace-delimited token of each record name.

    Fix: the per-file loop body was duplicated verbatim for the str and
    list cases; a str input is now normalized to a one-element list and
    read by a single loop.  Duplicate record names still raise
    AssertionError, and an input of any other type still yields an empty
    dict, exactly as before.
    """
    records = {}
    if isinstance( fastq_input, str ):
        fastq_input = [fastq_input]
    if isinstance( fastq_input, list ):
        for filename in fastq_input:
            for rec in FastqReader( filename ):
                name = rec.name.strip().split()[0]
                assert name not in records
                records[name] = rec
    return records
def count_hits( filename ):
    """Count the alignment records in a Blasr output file without materializing them."""
    return sum( 1 for _ in BlasrReader( filename ) )
def get_barcode( record ):
    """Extract the barcode id from a record named like 'barcodeNN_...' (drops the 7-char 'barcode' prefix)."""
    prefix = record.name.split('_')[0]
    return prefix[7:]
def get_file_source( filename ):
    """Return the source field of a filename: the second '_'-delimited token of its extension-less basename."""
    root_name = os.path.basename( filename ).split('.')[0]
    return root_name.split('_')[1]
def get_base_sequence_name( name ):
    """Normalize a sequence name: keep the first token, then strip a trailing '|quiver' or '_cns' decoration."""
    base = name.split()[0]
    if base.endswith('|quiver'):
        base = base.split('|')[0]
    if base.endswith('_cns'):
        base = base[:-len('_cns')]
    return base
def memoize(function):
    """Decorator caching *function* results keyed on its (hashable) positional args."""
    results = {}
    def decorated_function(*args):
        # EAFP: a cache hit is the common case after warm-up
        try:
            return results[args]
        except KeyError:
            results[args] = function(*args)
            return results[args]
    return decorated_function
def cleanup_directory( directory ):
for entry in os.listdir( directory ):
removal_flag = False
if entry.endswith('aln') or entry.endswith('aln_unsorted'):
removal_flag = True
if entry.startswith('tmp_cns_') or entry.startswith('tmp_reads_'):
removal_flag = True
if removal_flag:
try:
os.remove( os.path.join( directory, entry) )
except:
pass
def write_list_file( file_list, output_file ):
with open(output_file, 'w') as handle:
for filename in file_list:
print >> handle, filename
def read_list_file( list_file ):
list_contents = []
with open(list_file, 'r') as handle:
for line in handle:
value = line.strip().split()[0]
if value:
list_contents.append( value )
return list_contents
def read_dict_file( dict_file ):
dict_contents = {}
with open(dict_file, 'r') as handle:
for line in handle:
try:
key, value = line.strip().split()
dict_contents[key] = value
except:
pass
return dict_contents
def cross_ref_dict( query_dict, ref_dict ):
    """
    Map each key of query_dict to ref_dict[value], where value is the
    corresponding query_dict value; 'HLA:'-prefixed values are first
    truncated at the first '_'.  Values absent from ref_dict map to 'N/A'.

    Fix: replaces a bare try/except around the dict lookup (which swallowed
    every exception type) with dict.get() and its explicit 'N/A' default.
    """
    new_dict = {}
    for key, old_value in query_dict.items():
        if old_value.startswith('HLA:'):
            old_value = old_value.split('_')[0]
        new_dict[key] = ref_dict.get( old_value, 'N/A' )
    return new_dict
def validate_file( filename ):
    """Return the absolute path of filename when it is an existing non-empty file, else False."""
    if not os.path.isfile( filename ):
        return False
    if os.path.getsize( filename ) == 0:
        return False
    return os.path.abspath( filename )
# TODO: Replace all instances of this function with the above "validate_file"
def valid_file( filepath ):
    """Return True when filepath names an existing, non-empty file."""
    return os.path.isfile( filepath ) and os.path.getsize( filepath ) > 0
def check_output_file( filepath ):
    """Log and raise IOError unless filepath is a valid, non-empty output file."""
    if valid_file( filepath ):
        return
    msg = 'Expected output file not found! "{0}"'.format(filepath)
    log.error( msg )
    raise IOError( msg )
def copy_file( source, destination ):
    """Copy *source* to *destination*, verify the copy landed, and return its path."""
    shutil.copy( source, destination )
    check_output_file( destination )
    return destination
def remove_file( filepath ):
if os.path.isfile( filepath ):
try:
os.remove( filepath )
except:
basename = os.path.basename( filepath )
msg = 'Could not delete file! "%s"' % basename
log.error( msg )
raise IOError( msg )
def is_exe( file_path ):
    """Return True when file_path names an existing file with execute permission."""
    if file_path is None:
        return False
    if not os.path.isfile(file_path):
        return False
    return os.access(file_path, os.X_OK)
def which(program):
    """
    Find and return the path to a local executable, like the Unix `which`.

    A program name containing a directory component is tested directly;
    otherwise each entry of $PATH is searched in order.  Returns None when
    no executable is found.
    """
    directory, _basename = os.path.split(program)
    if directory:
        if is_exe(program):
            return program
        return None
    for path_entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(path_entry.strip('"'), program)
        if is_exe(candidate):
            return candidate
    return None
def create_directory( directory ):
# Skip if the directory exists
if os.path.isdir( directory ):
return
try: # Otherwise attempt to create it
os.mkdir( directory )
except:
msg = 'Could not create directory "{0}"'.format(directory)
log.info( msg )
raise IOError( msg )
def remove_directory( directory ):
try:
shutil.rmtree( directory )
except OSError:
log.warn("No directory named '%s' detected for deletion" % directory)
| |
# -*- coding: utf-8 -*-
import mock
from typing import Any, Dict, Iterable, List, Optional, Text, Tuple
from django.test import TestCase
from django.http import HttpResponse, HttpRequest
from django.test.client import RequestFactory
from django.conf import settings
from zerver.lib.actions import do_deactivate_realm, do_deactivate_user, \
do_reactivate_user, do_reactivate_realm
from zerver.lib.initial_password import initial_password
from zerver.lib.test_helpers import (
HostRequestMock,
)
from zerver.lib.test_classes import (
ZulipTestCase,
WebhookTestCase,
)
from zerver.lib.response import json_response
from zerver.lib.request import \
REQ, has_request_variables, RequestVariableMissingError, \
RequestVariableConversionError, JsonableError
from zerver.decorator import (
api_key_only_webhook_view,
authenticated_json_post_view, authenticated_json_view,
authenticate_notify,
get_client_name, internal_notify_view, is_local_addr,
rate_limit, validate_api_key, logged_in_and_active,
return_success_on_head_request
)
from zerver.lib.validator import (
check_string, check_dict, check_dict_only, check_bool, check_float, check_int, check_list, Validator,
check_variable_type, equals, check_none_or,
)
from zerver.models import \
get_realm, get_user, UserProfile, Client, Realm
import ujson
class DecoratorTestCase(TestCase):
    """Tests for the request decorators in zerver.decorator and zerver.lib.request."""
    def __init__(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        # This method should be removed when we migrate to version 3 of Python
        import six
        if six.PY2:
            self.assertRaisesRegex = self.assertRaisesRegexp
        # NOTE(review): super() is anchored on TestCase rather than
        # DecoratorTestCase, which skips TestCase's own __init__ in the
        # MRO -- confirm this is intentional.
        super(TestCase, self).__init__(*args, **kwargs)
    def test_get_client_name(self):
        # type: () -> None
        # Minimal stand-in for an HttpRequest exposing only GET/POST/META.
        class Request(object):
            def __init__(self, GET, POST, META):
                # type: (Dict[str, str], Dict[str, str], Dict[str, str]) -> None
                self.GET = GET
                self.POST = POST
                self.META = META
        req = Request(
            GET=dict(),
            POST=dict(),
            META=dict(),
        )
        # No hints at all: JSON views default to 'website', API to 'Unspecified'.
        self.assertEqual(get_client_name(req, is_json_view=True), 'website')
        self.assertEqual(get_client_name(req, is_json_view=False), 'Unspecified')
        req = Request(
            GET=dict(),
            POST=dict(),
            META=dict(HTTP_USER_AGENT='Mozilla/bla bla bla'),
        )
        self.assertEqual(get_client_name(req, is_json_view=True), 'website')
        self.assertEqual(get_client_name(req, is_json_view=False), 'Mozilla')
        req = Request(
            GET=dict(),
            POST=dict(),
            META=dict(HTTP_USER_AGENT='ZulipDesktop/bla bla bla'),
        )
        # A Zulip client user-agent wins even for JSON views.
        self.assertEqual(get_client_name(req, is_json_view=True), 'ZulipDesktop')
        self.assertEqual(get_client_name(req, is_json_view=False), 'ZulipDesktop')
        req = Request(
            GET=dict(client='fancy phone'),
            POST=dict(),
            META=dict(),
        )
        # An explicit ?client= parameter overrides everything.
        self.assertEqual(get_client_name(req, is_json_view=True), 'fancy phone')
        self.assertEqual(get_client_name(req, is_json_view=False), 'fancy phone')
    def test_REQ_converter(self):
        # type: () -> None
        def my_converter(data):
            # type: (str) -> List[str]
            lst = ujson.loads(data)
            if not isinstance(lst, list):
                raise ValueError('not a list')
            if 13 in lst:
                raise JsonableError('13 is an unlucky number!')
            return lst
        @has_request_variables
        def get_total(request, numbers=REQ(converter=my_converter)):
            # type: (HttpRequest, Iterable[int]) -> int
            return sum(numbers)
        class Request(object):
            GET = {}  # type: Dict[str, str]
            POST = {}  # type: Dict[str, str]
        request = Request()
        with self.assertRaises(RequestVariableMissingError):
            get_total(request)
        # Converter failures surface as RequestVariableConversionError...
        request.POST['numbers'] = 'bad_value'
        with self.assertRaises(RequestVariableConversionError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), "Bad value for 'numbers': bad_value")
        request.POST['numbers'] = ujson.dumps('{fun: unfun}')
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), 'Bad value for \'numbers\': "{fun: unfun}"')
        # ...except JsonableError, which propagates as-is.
        request.POST['numbers'] = ujson.dumps([2, 3, 5, 8, 13, 21])
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), "13 is an unlucky number!")
        request.POST['numbers'] = ujson.dumps([1, 2, 3, 4, 5, 6])
        result = get_total(request)
        self.assertEqual(result, 21)
    def test_REQ_converter_and_validator_invalid(self):
        # type: () -> None
        with self.assertRaisesRegex(AssertionError, "converter and validator are mutually exclusive"):
            @has_request_variables
            def get_total(request, numbers=REQ(validator=check_list(check_int),
                                               converter=lambda: None)):
                # type: (HttpRequest, Iterable[int]) -> int
                return sum(numbers)  # nocoverage -- isn't intended to be run
    def test_REQ_validator(self):
        # type: () -> None
        @has_request_variables
        def get_total(request, numbers=REQ(validator=check_list(check_int))):
            # type: (HttpRequest, Iterable[int]) -> int
            return sum(numbers)
        class Request(object):
            GET = {}  # type: Dict[str, str]
            POST = {}  # type: Dict[str, str]
        request = Request()
        with self.assertRaises(RequestVariableMissingError):
            get_total(request)
        # Unlike converters, validators require the value to be valid JSON.
        request.POST['numbers'] = 'bad_value'
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), 'argument "numbers" is not valid json.')
        request.POST['numbers'] = ujson.dumps([1, 2, "what?", 4, 5, 6])
        with self.assertRaises(JsonableError) as cm:
            get_total(request)
        self.assertEqual(str(cm.exception), 'numbers[2] is not an integer')
        request.POST['numbers'] = ujson.dumps([1, 2, 3, 4, 5, 6])
        result = get_total(request)
        self.assertEqual(result, 21)
    def test_REQ_argument_type(self):
        # type: () -> None
        @has_request_variables
        def get_payload(request, payload=REQ(argument_type='body')):
            # type: (HttpRequest, Dict[str, Dict]) -> Dict[str, Dict]
            return payload
        class MockRequest(object):
            body = {}  # type: Any
        request = MockRequest()
        request.body = 'notjson'
        with self.assertRaises(JsonableError) as cm:
            get_payload(request)
        self.assertEqual(str(cm.exception), 'Malformed JSON')
        request.body = '{"a": "b"}'
        self.assertEqual(get_payload(request), {'a': 'b'})
        # Test we properly handle an invalid argument_type.
        with self.assertRaises(Exception) as cm:
            @has_request_variables
            def test(request, payload=REQ(argument_type="invalid")):
                # type: (HttpRequest, Dict[str, Dict]) -> None
                pass  # nocoverage # this function isn't meant to be called
            test(request)
    def test_api_key_only_webhook_view(self):
        # type: () -> None
        @api_key_only_webhook_view('ClientName')
        def my_webhook(request, user_profile):
            # type: (HttpRequest, UserProfile) -> Text
            return user_profile.email
        class Request(HostRequestMock):
            GET = {}  # type: Dict[str, str]
            POST = {}  # type: Dict[str, str]
            COOKIES = {}  # type: Dict[str, str]
            META = {'PATH_INFO': ''}
        webhook_bot_email = 'webhook-bot@zulip.com'
        webhook_bot_realm = get_realm('zulip')
        webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
        webhook_bot_api_key = webhook_bot.api_key
        request = Request()  # type: Any
        request.host = settings.EXTERNAL_HOST
        request.POST['api_key'] = 'not_existing_api_key'
        with self.assertRaisesRegex(JsonableError, "Invalid API key"):
            my_webhook(request)
        # Start a valid request here
        request.POST['api_key'] = webhook_bot_api_key
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # Wrong subdomain (root, then 'acme') should be rejected and logged.
            with mock.patch('logging.warning') as mock_warning:
                with self.assertRaisesRegex(JsonableError,
                                            "Account is not associated with this subdomain"):
                    api_result = my_webhook(request)
                mock_warning.assert_called_with(
                    "User {} attempted to access webhook API on wrong "
                    "subdomain {}".format(webhook_bot_email, ''))
            with mock.patch('logging.warning') as mock_warning:
                with self.assertRaisesRegex(JsonableError,
                                            "Account is not associated with this subdomain"):
                    request.host = "acme." + settings.EXTERNAL_HOST
                    api_result = my_webhook(request)
                mock_warning.assert_called_with(
                    "User {} attempted to access webhook API on wrong "
                    "subdomain {}".format(webhook_bot_email, 'acme'))
        with self.settings(RATE_LIMITING=True):
            with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
                api_result = my_webhook(request)
        # Verify rate limiting was attempted.
        self.assertTrue(rate_limit_mock.called)
        # Verify decorator set the magic _email field used by some of our back end logging.
        self.assertEqual(request._email, webhook_bot_email)
        # Verify the main purpose of the decorator, which is that it passed in the
        # user_profile to my_webhook, allowing it return the correct
        # email for the bot (despite the API caller only knowing the API key).
        self.assertEqual(api_result, webhook_bot_email)
        # Now deactivate the user
        webhook_bot.is_active = False
        webhook_bot.save()
        with self.assertRaisesRegex(JsonableError, "Account not active"):
            my_webhook(request)
        # Reactive the user, but deactivate their realm.
        webhook_bot.is_active = True
        webhook_bot.save()
        webhook_bot.realm.deactivated = True
        webhook_bot.realm.save()
        with self.assertRaisesRegex(JsonableError, "Realm for account has been deactivated"):
            my_webhook(request)
class RateLimitTestCase(TestCase):
    """Tests that the rate_limit() decorator is applied (or skipped) correctly."""
    def errors_disallowed(self):
        # type: () -> mock
        # Due to what is probably a hack in rate_limit(),
        # some tests will give a false positive (or succeed
        # for the wrong reason), unless we complain
        # about logging errors. There might be a more elegant way
        # make logging errors fail than what I'm doing here.
        class TestLoggingErrorException(Exception):
            pass
        return mock.patch('logging.error', side_effect=TestLoggingErrorException)
    def test_internal_local_clients_skip_rate_limiting(self):
        # type: () -> None
        # Internal client + loopback address: no rate limiting.
        class Client(object):
            name = 'internal'
        class Request(object):
            client = Client()
            META = {'REMOTE_ADDR': '127.0.0.1'}
        req = Request()
        def f(req):
            # type: (Any) -> str
            return 'some value'
        f = rate_limit()(f)
        with self.settings(RATE_LIMITING=True):
            with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
                with self.errors_disallowed():
                    self.assertEqual(f(req), 'some value')
        self.assertFalse(rate_limit_mock.called)
    def test_debug_clients_skip_rate_limiting(self):
        # type: () -> None
        # DEBUG_RATE_LIMITING exempts internal clients even from remote addresses.
        class Client(object):
            name = 'internal'
        class Request(object):
            client = Client()
            META = {'REMOTE_ADDR': '3.3.3.3'}
        req = Request()
        def f(req):
            # type: (Any) -> str
            return 'some value'
        f = rate_limit()(f)
        with self.settings(RATE_LIMITING=True):
            with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
                with self.errors_disallowed():
                    with self.settings(DEBUG_RATE_LIMITING=True):
                        self.assertEqual(f(req), 'some value')
        self.assertFalse(rate_limit_mock.called)
    def test_rate_limit_setting_of_false_bypasses_rate_limiting(self):
        # type: () -> None
        class Client(object):
            name = 'external'
        class Request(object):
            client = Client()
            META = {'REMOTE_ADDR': '3.3.3.3'}
            user = 'stub'  # any non-None value here exercises the correct code path
        req = Request()
        def f(req):
            # type: (Any) -> str
            return 'some value'
        f = rate_limit()(f)
        with self.settings(RATE_LIMITING=False):
            with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
                with self.errors_disallowed():
                    self.assertEqual(f(req), 'some value')
        self.assertFalse(rate_limit_mock.called)
    def test_rate_limiting_happens_in_normal_case(self):
        # type: () -> None
        # External client, remote address, RATE_LIMITING on: limiter must run.
        class Client(object):
            name = 'external'
        class Request(object):
            client = Client()
            META = {'REMOTE_ADDR': '3.3.3.3'}
            user = 'stub'  # any non-None value here exercises the correct code path
        req = Request()
        def f(req):
            # type: (Any) -> str
            return 'some value'
        f = rate_limit()(f)
        with self.settings(RATE_LIMITING=True):
            with mock.patch('zerver.decorator.rate_limit_user') as rate_limit_mock:
                with self.errors_disallowed():
                    self.assertEqual(f(req), 'some value')
        self.assertTrue(rate_limit_mock.called)
class ValidatorTestCase(TestCase):
    """Tests for zerver.lib.validator: each check_* returns None on success or an error string."""
    def test_check_string(self):
        # type: () -> None
        x = "hello"  # type: Any
        self.assertEqual(check_string('x', x), None)
        x = 4
        self.assertEqual(check_string('x', x), 'x is not a string')
    def test_check_bool(self):
        # type: () -> None
        x = True  # type: Any
        self.assertEqual(check_bool('x', x), None)
        x = 4
        self.assertEqual(check_bool('x', x), 'x is not a boolean')
    def test_check_int(self):
        # type: () -> None
        x = 5  # type: Any
        self.assertEqual(check_int('x', x), None)
        x = [{}]
        self.assertEqual(check_int('x', x), 'x is not an integer')
    def test_check_float(self):
        # type: () -> None
        x = 5.5  # type: Any
        self.assertEqual(check_float('x', x), None)
        # check_float rejects ints -- it is not a general numeric check
        x = 5
        self.assertEqual(check_float('x', x), 'x is not a float')
        x = [{}]
        self.assertEqual(check_float('x', x), 'x is not a float')
    def test_check_list(self):
        # type: () -> None
        x = 999  # type: Any
        error = check_list(check_string)('x', x)
        self.assertEqual(error, 'x is not a list')
        x = ["hello", 5]
        error = check_list(check_string)('x', x)
        self.assertEqual(error, 'x[1] is not a string')
        # nested lists report the full index path
        x = [["yo"], ["hello", "goodbye", 5]]
        error = check_list(check_list(check_string))('x', x)
        self.assertEqual(error, 'x[1][2] is not a string')
        x = ["hello", "goodbye", "hello again"]
        error = check_list(check_string, length=2)('x', x)
        self.assertEqual(error, 'x should have exactly 2 items')
    def test_check_dict(self):
        # type: () -> None
        keys = [
            ('names', check_list(check_string)),
            ('city', check_string),
        ]  # type: List[Tuple[str, Validator]]
        x = {
            'names': ['alice', 'bob'],
            'city': 'Boston',
        }  # type: Any
        error = check_dict(keys)('x', x)
        self.assertEqual(error, None)
        x = 999
        error = check_dict(keys)('x', x)
        self.assertEqual(error, 'x is not a dict')
        x = {}
        error = check_dict(keys)('x', x)
        self.assertEqual(error, 'names key is missing from x')
        x = {
            'names': ['alice', 'bob', {}]
        }
        error = check_dict(keys)('x', x)
        self.assertEqual(error, 'x["names"][2] is not a string')
        x = {
            'names': ['alice', 'bob'],
            'city': 5
        }
        error = check_dict(keys)('x', x)
        self.assertEqual(error, 'x["city"] is not a string')
        # test dict_only
        x = {
            'names': ['alice', 'bob'],
            'city': 'Boston',
        }
        error = check_dict_only(keys)('x', x)
        self.assertEqual(error, None)
        # check_dict_only additionally rejects keys outside the schema
        x = {
            'names': ['alice', 'bob'],
            'city': 'Boston',
            'state': 'Massachusetts',
        }
        error = check_dict_only(keys)('x', x)
        self.assertEqual(error, 'Unexpected arguments: state')
    def test_encapsulation(self):
        # type: () -> None
        # There might be situations where we want deep
        # validation, but the error message should be customized.
        # This is an example.
        def check_person(val):
            # type: (Any) -> Optional[str]
            error = check_dict([
                ('name', check_string),
                ('age', check_int),
            ])('_', val)
            if error:
                return 'This is not a valid person'
            return None
        person = {'name': 'King Lear', 'age': 42}
        self.assertEqual(check_person(person), None)
        nonperson = 'misconfigured data'
        self.assertEqual(check_person(nonperson), 'This is not a valid person')
    def test_check_variable_type(self):
        # type: () -> None
        x = 5  # type: Any
        self.assertEqual(check_variable_type([check_string, check_int])('x', x), None)
        x = 'x'
        self.assertEqual(check_variable_type([check_string, check_int])('x', x), None)
        x = [{}]
        self.assertEqual(check_variable_type([check_string, check_int])('x', x), 'x is not an allowed_type')
    def test_equals(self):
        # type: () -> None
        x = 5  # type: Any
        self.assertEqual(equals(5)('x', x), None)
        self.assertEqual(equals(6)('x', x), 'x != 6 (5 is wrong)')
    def test_check_none_or(self):
        # type: () -> None
        x = 5  # type: Any
        self.assertEqual(check_none_or(check_int)('x', x), None)
        x = None
        self.assertEqual(check_none_or(check_int)('x', x), None)
        x = 'x'
        self.assertEqual(check_none_or(check_int)('x', x), 'x is not an integer')
class DeactivatedRealmTest(ZulipTestCase):
    """Verifies that every entry point rejects requests to a deactivated realm."""
    def test_send_deactivated_realm(self):
        # type: () -> None
        """
        rest_dispatch rejects requests in a deactivated realm, both /json and api
        """
        realm = get_realm("zulip")
        do_deactivate_realm(get_realm("zulip"))
        result = self.client_post("/json/messages", {"type": "private",
                                                     "content": "Test message",
                                                     "client": "test suite",
                                                     "to": self.example_email("othello")})
        self.assert_json_error_contains(result, "Not logged in", status_code=401)
        # Even if a logged-in session was leaked, it still wouldn't work
        realm.deactivated = False
        realm.save()
        self.login(self.example_email("hamlet"))
        realm.deactivated = True
        realm.save()
        result = self.client_post("/json/messages", {"type": "private",
                                                     "content": "Test message",
                                                     "client": "test suite",
                                                     "to": self.example_email("othello")})
        self.assert_json_error_contains(result, "has been deactivated", status_code=400)
        result = self.client_post("/api/v1/messages", {"type": "private",
                                                       "content": "Test message",
                                                       "client": "test suite",
                                                       "to": self.example_email("othello")},
                                  **self.api_auth(self.example_email("hamlet")))
        self.assert_json_error_contains(result, "has been deactivated", status_code=401)
    def test_fetch_api_key_deactivated_realm(self):
        # type: () -> None
        """
        authenticated_json_view views fail in a deactivated realm
        """
        realm = get_realm("zulip")
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        test_password = "abcd1234"
        user_profile.set_password(test_password)
        self.login(email)
        realm.deactivated = True
        realm.save()
        result = self.client_post("/json/fetch_api_key", {"password": test_password})
        self.assert_json_error_contains(result, "has been deactivated", status_code=400)
    def test_login_deactivated_realm(self):
        # type: () -> None
        """
        logging in fails in a deactivated realm
        """
        do_deactivate_realm(get_realm("zulip"))
        result = self.login_with_return(self.example_email("hamlet"))
        self.assert_in_response("has been deactivated", result)
    def test_webhook_deactivated_realm(self):
        # type: () -> None
        """
        Using a webhook while in a deactivated realm fails
        """
        do_deactivate_realm(get_realm("zulip"))
        email = self.example_email("hamlet")
        api_key = self.get_api_key(email)
        url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
        data = self.fixture_data('jira', "created_v2")
        result = self.client_post(url, data,
                                  content_type="application/json")
        self.assert_json_error_contains(result, "has been deactivated", status_code=400)
class LoginRequiredTest(ZulipTestCase):
    """Tests for the zulip_login_required decorator on browser endpoints."""
    def test_login_required(self):
        # type: () -> None
        """
        Verifies the zulip_login_required decorator blocks deactivated users.
        """
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        # Verify fails if logged-out
        result = self.client_get('/accounts/accept_terms/')
        self.assertEqual(result.status_code, 302)
        # Verify succeeds once logged-in
        self.login(email)
        result = self.client_get('/accounts/accept_terms/')
        self.assert_in_response("I agree to the", result)
        # Verify fails if user deactivated (with session still valid)
        user_profile.is_active = False
        user_profile.save()
        result = self.client_get('/accounts/accept_terms/')
        self.assertEqual(result.status_code, 302)
        # Verify succeeds if user reactivated
        do_reactivate_user(user_profile)
        self.login(email)
        result = self.client_get('/accounts/accept_terms/')
        self.assert_in_response("I agree to the", result)
        # Verify fails if realm deactivated
        user_profile.realm.deactivated = True
        user_profile.realm.save()
        result = self.client_get('/accounts/accept_terms/')
        self.assertEqual(result.status_code, 302)
class FetchAPIKeyTest(ZulipTestCase):
    """Tests for the /json/fetch_api_key endpoint's password handling."""
    def test_fetch_api_key_success(self):
        # type: () -> None
        email = self.example_email("cordelia")
        self.login(email)
        result = self.client_post("/json/fetch_api_key", {"password": initial_password(email)})
        self.assert_json_success(result)
    def test_fetch_api_key_wrong_password(self):
        # type: () -> None
        email = self.example_email("cordelia")
        self.login(email)
        result = self.client_post("/json/fetch_api_key", {"password": "wrong_password"})
        self.assert_json_error_contains(result, "password is incorrect")
class InactiveUserTest(ZulipTestCase):
    """Verifies that every entry point rejects requests from a deactivated user."""
    def test_send_deactivated_user(self):
        # type: () -> None
        """
        rest_dispatch rejects requests from deactivated users, both /json and api
        """
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        self.login(email)
        do_deactivate_user(user_profile)
        result = self.client_post("/json/messages", {"type": "private",
                                                     "content": "Test message",
                                                     "client": "test suite",
                                                     "to": self.example_email("othello")})
        self.assert_json_error_contains(result, "Not logged in", status_code=401)
        # Even if a logged-in session was leaked, it still wouldn't work
        do_reactivate_user(user_profile)
        self.login(email)
        user_profile.is_active = False
        user_profile.save()
        result = self.client_post("/json/messages", {"type": "private",
                                                     "content": "Test message",
                                                     "client": "test suite",
                                                     "to": self.example_email("othello")})
        self.assert_json_error_contains(result, "Account not active", status_code=400)
        result = self.client_post("/api/v1/messages", {"type": "private",
                                                       "content": "Test message",
                                                       "client": "test suite",
                                                       "to": self.example_email("othello")},
                                  **self.api_auth(self.example_email("hamlet")))
        self.assert_json_error_contains(result, "Account not active", status_code=401)
    def test_fetch_api_key_deactivated_user(self):
        # type: () -> None
        """
        authenticated_json_view views fail with a deactivated user
        """
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        test_password = "abcd1234"
        user_profile.set_password(test_password)
        user_profile.save()
        self.login(email, password=test_password)
        user_profile.is_active = False
        user_profile.save()
        result = self.client_post("/json/fetch_api_key", {"password": test_password})
        self.assert_json_error_contains(result, "Account not active", status_code=400)
    def test_login_deactivated_user(self):
        # type: () -> None
        """
        logging in fails with an inactive user
        """
        user_profile = self.example_user('hamlet')
        do_deactivate_user(user_profile)
        result = self.login_with_return(self.example_email("hamlet"))
        self.assert_in_response("Please enter a correct email and password", result)
    def test_webhook_deactivated_user(self):
        # type: () -> None
        """
        Deactivated users can't use webhooks
        """
        user_profile = self.example_user('hamlet')
        email = user_profile.email
        do_deactivate_user(user_profile)
        api_key = self.get_api_key(email)
        url = "/api/v1/external/jira?api_key=%s&stream=jira_custom" % (api_key,)
        data = self.fixture_data('jira', "created_v2")
        result = self.client_post(url, data,
                                  content_type="application/json")
        self.assert_json_error_contains(result, "Account not active", status_code=400)
class TestValidateApiKey(ZulipTestCase):
    """Tests for validate_api_key: missing users, mismatched keys,
    inactive accounts, incoming-webhook bots, and subdomain checks."""

    def setUp(self):
        # type: () -> None
        # Fix: run the base class setUp before touching the database so
        # each test starts from the standard fixture/test-client state.
        super(TestValidateApiKey, self).setUp()
        zulip_realm = get_realm('zulip')
        self.webhook_bot = get_user('webhook-bot@zulip.com', zulip_realm)
        self.default_bot = get_user('default-bot@zulip.com', zulip_realm)

    def test_validate_api_key_if_profile_does_not_exist(self):
        # type: () -> None
        with self.assertRaises(JsonableError):
            validate_api_key(HostRequestMock(), 'email@doesnotexist.com', 'api_key')

    def test_validate_api_key_if_api_key_does_not_match_profile_api_key(self):
        # type: () -> None
        # Both a malformed key and a well-formed key belonging to a
        # different user must be rejected.
        with self.assertRaises(JsonableError):
            validate_api_key(HostRequestMock(), self.webhook_bot.email, 'not_32_length')
        with self.assertRaises(JsonableError):
            validate_api_key(HostRequestMock(), self.webhook_bot.email, self.default_bot.api_key)

    def test_validate_api_key_if_profile_is_not_active(self):
        # type: () -> None
        self._change_is_active_field(self.default_bot, False)
        try:
            with self.assertRaises(JsonableError):
                validate_api_key(HostRequestMock(), self.default_bot.email, self.default_bot.api_key)
        finally:
            # Fix: reactivate even if the assertion fails, so a failure
            # here cannot leak an inactive bot into later tests.
            self._change_is_active_field(self.default_bot, True)

    def test_validate_api_key_if_profile_is_incoming_webhook_and_is_webhook_is_unset(self):
        # type: () -> None
        with self.assertRaises(JsonableError):
            validate_api_key(HostRequestMock(), self.webhook_bot.email, self.webhook_bot.api_key)

    def test_validate_api_key_if_profile_is_incoming_webhook_and_is_webhook_is_set(self):
        # type: () -> None
        profile = validate_api_key(HostRequestMock(), self.webhook_bot.email, self.webhook_bot.api_key, is_webhook=True)
        self.assertEqual(profile.pk, self.webhook_bot.pk)

    def test_valid_api_key_if_user_is_on_wrong_subdomain(self):
        # type: () -> None
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            with self.settings(RUNNING_INSIDE_TORNADO=False):
                # Root domain (empty subdomain) must not pass for a user
                # whose realm lives on a subdomain.
                with mock.patch('logging.warning') as mock_warning:
                    with self.assertRaisesRegex(JsonableError,
                                                "Account is not associated with this subdomain"):
                        validate_api_key(HostRequestMock(host=settings.EXTERNAL_HOST),
                                         self.default_bot.email,
                                         self.default_bot.api_key)

                    mock_warning.assert_called_with(
                        "User {} attempted to access API on wrong "
                        "subdomain {}".format(self.default_bot.email, ''))

                # Nor must a different realm's subdomain.
                with mock.patch('logging.warning') as mock_warning:
                    with self.assertRaisesRegex(JsonableError,
                                                "Account is not associated with this subdomain"):
                        validate_api_key(HostRequestMock(host='acme.' + settings.EXTERNAL_HOST),
                                         self.default_bot.email,
                                         self.default_bot.api_key)

                    mock_warning.assert_called_with(
                        "User {} attempted to access API on wrong "
                        "subdomain {}".format(self.default_bot.email, 'acme'))

    def _change_is_active_field(self, profile, value):
        # type: (UserProfile, bool) -> None
        # Flip is_active directly rather than via do_deactivate_user so
        # no deactivation side effects (e.g. session removal) run.
        profile.is_active = value
        profile.save()
class TestInternalNotifyView(TestCase):
    """Tests for internal_notify_view / authenticate_notify: only local
    requests carrying the shared secret may hit internal endpoints."""

    BORING_RESULT = 'boring'

    class Request(object):
        """Minimal stand-in for an HttpRequest with POST/META dicts."""

        def __init__(self, POST, META):
            # type: (Dict, Dict) -> None
            self.POST = POST
            self.META = META
            self.method = 'POST'

    def internal_notify(self, is_tornado, req):
        # type: (bool, HttpRequest) -> HttpResponse
        def boring_view(request):
            return self.BORING_RESULT
        return internal_notify_view(is_tornado)(boring_view)(req)

    def test_valid_internal_requests(self):
        # type: () -> None
        secret = 'random'
        request = self.Request(
            POST=dict(secret=secret),
            META=dict(REMOTE_ADDR='127.0.0.1'),
        )
        # Without a _tornado_handler, only the non-tornado decorator
        # accepts the request.
        with self.settings(SHARED_SECRET=secret):
            self.assertTrue(authenticate_notify(request))
            self.assertEqual(self.internal_notify(False, request), self.BORING_RESULT)
            self.assertEqual(request._email, 'internal')
            with self.assertRaises(RuntimeError):
                self.internal_notify(True, request)
        # Once the tornado handler attribute is set, the expectation flips.
        request._tornado_handler = 'set'
        with self.settings(SHARED_SECRET=secret):
            self.assertTrue(authenticate_notify(request))
            self.assertEqual(self.internal_notify(True, request), self.BORING_RESULT)
            self.assertEqual(request._email, 'internal')
            with self.assertRaises(RuntimeError):
                self.internal_notify(False, request)

    def test_internal_requests_with_broken_secret(self):
        # type: () -> None
        secret = 'random'
        request = self.Request(
            POST=dict(secret=secret),
            META=dict(REMOTE_ADDR='127.0.0.1'),
        )
        # A secret mismatch yields a 403 even from a local address.
        with self.settings(SHARED_SECRET='broken'):
            self.assertFalse(authenticate_notify(request))
            self.assertEqual(self.internal_notify(True, request).status_code, 403)

    def test_external_requests(self):
        # type: () -> None
        secret = 'random'
        request = self.Request(
            POST=dict(secret=secret),
            META=dict(REMOTE_ADDR='3.3.3.3'),
        )
        # A correct secret from a non-local address is still rejected.
        with self.settings(SHARED_SECRET=secret):
            self.assertFalse(authenticate_notify(request))
            self.assertEqual(self.internal_notify(True, request).status_code, 403)

    def test_is_local_address(self):
        # type: () -> None
        for addr in ('127.0.0.1', '::1'):
            self.assertTrue(is_local_addr(addr))
        self.assertFalse(is_local_addr('42.43.44.45'))
class TestHumanUsersOnlyDecorator(ZulipTestCase):
    """Endpoints restricted to human users must reject bot credentials."""

    def test_human_only_endpoints(self):
        # type: () -> None
        bot_email = 'default-bot@zulip.com'
        # (request method, endpoint) pairs, issued in the same order the
        # original test used.
        human_only_requests = [
            (self.client_post, "/api/v1/users/me/presence"),
            (self.client_patch, "/api/v1/settings/display"),
            (self.client_patch, "/api/v1/settings/notifications"),
            (self.client_patch, "/api/v1/settings/ui"),
        ]
        for issue_request, endpoint in human_only_requests:
            result = issue_request(endpoint, **self.api_auth(bot_email))
            self.assert_json_error(result, "This endpoint does not accept bot requests.")
class TestAuthenticatedJsonPostViewDecorator(ZulipTestCase):
    """End-to-end tests for authenticated_json_post_view: successful auth,
    subdomain mismatches, webhook bots, and deactivated users/realms."""
    def test_authenticated_json_post_view_if_everything_is_correct(self):
        # type: () -> None
        user_email = self.example_email('hamlet')
        user_realm = get_realm('zulip')
        self._login(user_email, user_realm)
        response = self._do_test(user_email)
        self.assertEqual(response.status_code, 200)
    def test_authenticated_json_post_view_if_subdomain_is_invalid(self):
        # type: () -> None
        user_email = self.example_email('hamlet')
        user_realm = get_realm('zulip')
        self._login(user_email, user_realm)
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # Root domain (empty subdomain) must not match the user's realm;
            # the rejection should also be logged with the subdomain seen.
            with mock.patch('logging.warning') as mock_warning, \
                    mock.patch('zerver.decorator.get_subdomain', return_value=''):
                self.assert_json_error_contains(self._do_test(user_email),
                                                "Account is not associated with this "
                                                "subdomain")
                mock_warning.assert_called_with(
                    "User {} attempted to access JSON API on wrong "
                    "subdomain {}".format(user_email, ''))
            # A different realm's subdomain must be rejected the same way.
            with mock.patch('logging.warning') as mock_warning, \
                    mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
                self.assert_json_error_contains(self._do_test(user_email),
                                                "Account is not associated with this "
                                                "subdomain")
                mock_warning.assert_called_with(
                    "User {} attempted to access JSON API on wrong "
                    "subdomain {}".format(user_email, 'acme'))
    def test_authenticated_json_post_view_if_user_is_incoming_webhook(self):
        # type: () -> None
        user_email = 'webhook-bot@zulip.com'
        user_realm = get_realm('zulip')
        self._login(user_email, user_realm, password="test")  # we set a password because user is a bot
        self.assert_json_error_contains(self._do_test(user_email), "Webhook bots can only access webhooks")
    def test_authenticated_json_post_view_if_user_is_not_active(self):
        # type: () -> None
        user_email = self.example_email('hamlet')
        user_realm = get_realm('zulip')
        self._login(user_email, user_realm, password="test")
        # Get user_profile after _login so that we have the latest data.
        user_profile = get_user(user_email, user_realm)
        # we deactivate user manually because do_deactivate_user removes user session
        user_profile.is_active = False
        user_profile.save()
        self.assert_json_error_contains(self._do_test(user_email), "Account not active")
        do_reactivate_user(user_profile)
    def test_authenticated_json_post_view_if_user_realm_is_deactivated(self):
        # type: () -> None
        user_email = self.example_email('hamlet')
        user_realm = get_realm('zulip')
        user_profile = get_user(user_email, user_realm)
        self._login(user_email, user_realm)
        # we deactivate user's realm manually because do_deactivate_user removes user session
        user_profile.realm.deactivated = True
        user_profile.realm.save()
        self.assert_json_error_contains(self._do_test(user_email), "Realm for account has been deactivated")
        do_reactivate_realm(user_profile.realm)
    def _do_test(self, user_email):
        # type: (Text) -> HttpResponse
        # Any authenticated_json_post_view endpoint works; tutorial_status
        # is a convenient side-effect-light choice.
        data = {"status": '"started"'}
        return self.client_post(r'/json/tutorial_status', data)
    def _login(self, user_email, user_realm, password=None):
        # type: (Text, Realm, str) -> None
        # Bots have no usable password by default, so callers may supply
        # one to be set on the profile before logging in.
        if password:
            user_profile = get_user(user_email, user_realm)
            user_profile.set_password(password)
            user_profile.save()
        self.login(user_email, password)
class TestAuthenticatedJsonViewDecorator(ZulipTestCase):
    """authenticated_json_view must reject a session whose realm does not
    match the subdomain of the incoming request."""
    def test_authenticated_json_view_if_subdomain_is_invalid(self):
        # type: () -> None
        user_email = self.example_email("hamlet")
        self.login(user_email)
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # Root domain (empty subdomain): rejected and logged.
            with mock.patch('logging.warning') as mock_warning, \
                    mock.patch('zerver.decorator.get_subdomain', return_value=''):
                self.assert_json_error_contains(self._do_test(str(user_email)),
                                                "Account is not associated with this "
                                                "subdomain")
                mock_warning.assert_called_with(
                    "User {} attempted to access JSON API on wrong "
                    "subdomain {}".format(user_email, ''))
            # A foreign realm's subdomain: rejected and logged likewise.
            with mock.patch('logging.warning') as mock_warning, \
                    mock.patch('zerver.decorator.get_subdomain', return_value='acme'):
                self.assert_json_error_contains(self._do_test(str(user_email)),
                                                "Account is not associated with this "
                                                "subdomain")
                mock_warning.assert_called_with(
                    "User {} attempted to access JSON API on wrong "
                    "subdomain {}".format(user_email, 'acme'))
    def _do_test(self, user_email):
        # type: (str) -> HttpResponse
        # Hit an arbitrary authenticated_json_view endpoint.
        data = {"status": '"started"'}
        return self.client_post(r'/json/tutorial_status', data)
class TestZulipLoginRequiredDecorator(ZulipTestCase):
    """zulip_login_required must only serve pages on the user's own
    subdomain; anything else redirects to login."""

    def test_zulip_login_required_if_subdomain_is_invalid(self):
        # type: () -> None
        self.login(self.example_email("hamlet"))
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            # (subdomain seen by the decorator, expected HTTP status)
            cases = [('zulip', 200), ('', 302), ('acme', 302)]
            for subdomain, expected_status in cases:
                with mock.patch('zerver.decorator.get_subdomain', return_value=subdomain):
                    result = self.client_get('/accounts/accept_terms/')
                    self.assertEqual(result.status_code, expected_status)
class TestRequireServerAdminDecorator(ZulipTestCase):
    """require_server_admin gates /activity on the is_staff flag."""

    def test_require_server_admin_decorator(self):
        # type: () -> None
        email = self.example_email('hamlet')
        realm = get_realm('zulip')
        self.login(email)
        # A non-staff user is redirected away from the activity page.
        self.assertEqual(self.client_get('/activity').status_code, 302)
        profile = get_user(email, realm)
        profile.is_staff = True
        profile.save()
        # Once flagged as staff, the same user can load it.
        self.assertEqual(self.client_get('/activity').status_code, 200)
class ReturnSuccessOnHeadRequestDecorator(ZulipTestCase):
    """return_success_on_head_request short-circuits HEAD requests with a
    generic success response and leaves other methods untouched."""

    def test_return_success_on_head_request_returns_200_if_request_method_is_head(self):
        # type: () -> None
        class FakeRequest(object):
            method = 'HEAD'

        @return_success_on_head_request
        def test_function(request):
            # type: (HttpRequest) -> HttpResponse
            return json_response(msg=u'from_test_function')  # nocoverage. isn't meant to be called

        response = test_function(FakeRequest())
        self.assert_json_success(response)
        # The wrapped view must not have run, so its message is absent.
        self.assertNotEqual(ujson.loads(response.content).get('msg'), u'from_test_function')

    def test_return_success_on_head_request_returns_normal_response_if_request_method_is_not_head(self):
        # type: () -> None
        class FakeRequest(object):
            method = 'POST'

        @return_success_on_head_request
        def test_function(request):
            # type: (HttpRequest) -> HttpResponse
            return json_response(msg=u'from_test_function')

        response = test_function(FakeRequest())
        # Non-HEAD methods pass straight through to the wrapped view.
        self.assertEqual(ujson.loads(response.content).get('msg'), u'from_test_function')
class RestAPITest(ZulipTestCase):
    """Behavior of the REST dispatch layer for unsupported methods,
    OPTIONS probes, and browser (text/html) clients."""

    def test_method_not_allowed(self):
        # type: () -> None
        self.login(self.example_email("hamlet"))
        response = self.client_patch('/json/users')
        self.assertEqual(response.status_code, 405)
        self.assert_in_response('Method Not Allowed', response)

    def test_options_method(self):
        # type: () -> None
        self.login(self.example_email("hamlet"))
        # Each OPTIONS probe should come back 204 with the route's
        # supported methods in the Allow header.
        for url, allowed in [('/json/users', 'GET, POST'),
                             ('/json/streams/15', 'DELETE, PATCH')]:
            response = self.client_options(url)
            self.assertEqual(response.status_code, 204)
            self.assertEqual(str(response['Allow']), allowed)

    def test_http_accept_redirect(self):
        # type: () -> None
        # A browser asking for HTML on a JSON route is redirected to login.
        response = self.client_get('/json/users',
                                   HTTP_ACCEPT='text/html')
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response["Location"].endswith("/login/?next=/json/users"))
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
# Newer google-api-core releases expose gapic_v1.method._MethodDefault;
# fall back to a plain ``object`` union on older versions so the alias
# always resolves at import time.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.cloud.talent_v4.services.tenant_service import pagers
from google.cloud.talent_v4.types import tenant
from google.cloud.talent_v4.types import tenant as gct_tenant
from google.cloud.talent_v4.types import tenant_service
from google.protobuf import field_mask_pb2 # type: ignore
from .transports.base import TenantServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import TenantServiceGrpcAsyncIOTransport
from .client import TenantServiceClient
class TenantServiceAsyncClient:
"""A service that handles tenant management, including CRUD and
enumeration.
"""
_client: TenantServiceClient
DEFAULT_ENDPOINT = TenantServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = TenantServiceClient.DEFAULT_MTLS_ENDPOINT
tenant_path = staticmethod(TenantServiceClient.tenant_path)
parse_tenant_path = staticmethod(TenantServiceClient.parse_tenant_path)
common_billing_account_path = staticmethod(
TenantServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
TenantServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(TenantServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(
TenantServiceClient.parse_common_folder_path
)
common_organization_path = staticmethod(
TenantServiceClient.common_organization_path
)
parse_common_organization_path = staticmethod(
TenantServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(TenantServiceClient.common_project_path)
parse_common_project_path = staticmethod(
TenantServiceClient.parse_common_project_path
)
common_location_path = staticmethod(TenantServiceClient.common_location_path)
parse_common_location_path = staticmethod(
TenantServiceClient.parse_common_location_path
)
    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.
        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            TenantServiceAsyncClient: The constructed client.
        """
        # Reuse the sync client's classmethod implementation via __func__,
        # but bind it to this class so the result is an async client.
        return TenantServiceClient.from_service_account_info.__func__(TenantServiceAsyncClient, info, *args, **kwargs)  # type: ignore
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            TenantServiceAsyncClient: The constructed client.
        """
        # Reuse the sync client's classmethod implementation via __func__,
        # but bind it to this class so the result is an async client.
        return TenantServiceClient.from_service_account_file.__func__(TenantServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

    # Alias kept for backward compatibility with earlier generated clients.
    from_service_account_json = from_service_account_file
    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.
        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.
        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.
        More details can be found at https://google.aip.dev/auth/4114.
        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.
        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
                client cert source to use.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        # Delegates entirely to the synchronous client's implementation.
        return TenantServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
    @property
    def transport(self) -> TenantServiceTransport:
        """Returns the transport used by the client instance.
        Returns:
            TenantServiceTransport: The transport used by the client instance.
        """
        # The transport is created and owned by the wrapped sync client.
        return self._client.transport
get_transport_class = functools.partial(
type(TenantServiceClient).get_transport_class, type(TenantServiceClient)
)
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, TenantServiceTransport] = "grpc_asyncio",
        client_options: Optional[ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the tenant service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.TenantServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All heavy lifting is delegated to the synchronous client; this
        # wrapper only differs in defaulting to the async gRPC transport.
        self._client = TenantServiceClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )
    async def create_tenant(
        self,
        request: Optional[Union[tenant_service.CreateTenantRequest, dict]] = None,
        *,
        parent: Optional[str] = None,
        tenant: Optional[gct_tenant.Tenant] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gct_tenant.Tenant:
        r"""Creates a new tenant entity.
        .. code-block:: python
            from google.cloud import talent_v4
            def sample_create_tenant():
                # Create a client
                client = talent_v4.TenantServiceClient()
                # Initialize request argument(s)
                tenant = talent_v4.Tenant()
                tenant.external_id = "external_id_value"
                request = talent_v4.CreateTenantRequest(
                    parent="parent_value",
                    tenant=tenant,
                )
                # Make the request
                response = client.create_tenant(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.talent_v4.types.CreateTenantRequest, dict]):
                The request object. The Request of the CreateTenant
                method.
            parent (:class:`str`):
                Required. Resource name of the project under which the
                tenant is created.
                The format is "projects/{project_id}", for example,
                "projects/foo".
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            tenant (:class:`google.cloud.talent_v4.types.Tenant`):
                Required. The tenant to be created.
                This corresponds to the ``tenant`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.talent_v4.types.Tenant:
                A Tenant resource represents a tenant
                in the service. A tenant is a group or
                entity that shares common access with
                specific privileges for resources like
                jobs. Customer may create multiple
                tenants to provide data isolation for
                different groups.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, tenant])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = tenant_service.CreateTenantRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if tenant is not None:
            request.tenant = tenant
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.  Creates are not retried by default
        # because they are not idempotent.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_tenant,
            default_timeout=30.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    async def get_tenant(
        self,
        request: Optional[Union[tenant_service.GetTenantRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> tenant.Tenant:
        r"""Retrieves specified tenant.
        .. code-block:: python
            from google.cloud import talent_v4
            def sample_get_tenant():
                # Create a client
                client = talent_v4.TenantServiceClient()
                # Initialize request argument(s)
                request = talent_v4.GetTenantRequest(
                    name="name_value",
                )
                # Make the request
                response = client.get_tenant(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.talent_v4.types.GetTenantRequest, dict]):
                The request object. Request for getting a tenant by
                name.
            name (:class:`str`):
                Required. The resource name of the tenant to be
                retrieved.
                The format is
                "projects/{project_id}/tenants/{tenant_id}", for
                example, "projects/foo/tenants/bar".
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.talent_v4.types.Tenant:
                A Tenant resource represents a tenant
                in the service. A tenant is a group or
                entity that shares common access with
                specific privileges for resources like
                jobs. Customer may create multiple
                tenants to provide data isolation for
                different groups.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = tenant_service.GetTenantRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.  Reads are idempotent, so transient
        # DEADLINE_EXCEEDED / UNAVAILABLE errors are retried by default.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_tenant,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=30.0,
            ),
            default_timeout=30.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    async def update_tenant(
        self,
        request: Optional[Union[tenant_service.UpdateTenantRequest, dict]] = None,
        *,
        tenant: Optional[gct_tenant.Tenant] = None,
        update_mask: Optional[field_mask_pb2.FieldMask] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gct_tenant.Tenant:
        r"""Updates specified tenant.
        .. code-block:: python
            from google.cloud import talent_v4
            def sample_update_tenant():
                # Create a client
                client = talent_v4.TenantServiceClient()
                # Initialize request argument(s)
                tenant = talent_v4.Tenant()
                tenant.external_id = "external_id_value"
                request = talent_v4.UpdateTenantRequest(
                    tenant=tenant,
                )
                # Make the request
                response = client.update_tenant(request=request)
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.talent_v4.types.UpdateTenantRequest, dict]):
                The request object. Request for updating a specified
                tenant.
            tenant (:class:`google.cloud.talent_v4.types.Tenant`):
                Required. The tenant resource to
                replace the current resource in the
                system.
                This corresponds to the ``tenant`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                Strongly recommended for the best service experience.
                If
                [update_mask][google.cloud.talent.v4.UpdateTenantRequest.update_mask]
                is provided, only the specified fields in
                [tenant][google.cloud.talent.v4.UpdateTenantRequest.tenant]
                are updated. Otherwise all the fields are updated.
                A field mask to specify the tenant fields to be updated.
                Only top level fields of
                [Tenant][google.cloud.talent.v4.Tenant] are supported.
                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.talent_v4.types.Tenant:
                A Tenant resource represents a tenant
                in the service. A tenant is a group or
                entity that shares common access with
                specific privileges for resources like
                jobs. Customer may create multiple
                tenants to provide data isolation for
                different groups.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([tenant, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = tenant_service.UpdateTenantRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if tenant is not None:
            request.tenant = tenant
        if update_mask is not None:
            request.update_mask = update_mask
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.  Updates are not retried by default
        # because they are not idempotent.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_tenant,
            default_timeout=30.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("tenant.name", request.tenant.name),)
            ),
        )
        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    async def delete_tenant(
        self,
        request: Optional[Union[tenant_service.DeleteTenantRequest, dict]] = None,
        *,
        name: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes specified tenant.
        .. code-block:: python
            from google.cloud import talent_v4
            def sample_delete_tenant():
                # Create a client
                client = talent_v4.TenantServiceClient()
                # Initialize request argument(s)
                request = talent_v4.DeleteTenantRequest(
                    name="name_value",
                )
                # Make the request
                client.delete_tenant(request=request)
        Args:
            request (Union[google.cloud.talent_v4.types.DeleteTenantRequest, dict]):
                The request object. Request to delete a tenant.
            name (:class:`str`):
                Required. The resource name of the tenant to be deleted.
                The format is
                "projects/{project_id}/tenants/{tenant_id}", for
                example, "projects/foo/tenants/bar".
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        request = tenant_service.DeleteTenantRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.  Deletes are idempotent, so transient
        # DEADLINE_EXCEEDED / UNAVAILABLE errors are retried by default.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_tenant,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=30.0,
            ),
            default_timeout=30.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        await rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
    async def list_tenants(
        self,
        request: Union[tenant_service.ListTenantsRequest, dict] = None,
        *,
        parent: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> pagers.ListTenantsAsyncPager:
        r"""Lists all tenants associated with the project.

        .. code-block:: python

            from google.cloud import talent_v4

            def sample_list_tenants():
                # Create a client
                client = talent_v4.TenantServiceClient()

                # Initialize request argument(s)
                request = talent_v4.ListTenantsRequest(
                    parent="parent_value",
                )

                # Make the request
                page_result = client.list_tenants(request=request)

                # Handle the response
                for response in page_result:
                    print(response)

        Args:
            request (Union[google.cloud.talent_v4.types.ListTenantsRequest, dict]):
                The request object. List tenants for which the client
                has ACL visibility.
            parent (:class:`str`):
                Required. Resource name of the project under which the
                tenant is created.

                The format is "projects/{project_id}", for example,
                "projects/foo".

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.talent_v4.services.tenant_service.pagers.ListTenantsAsyncPager:
                The List tenants response object.

                Iterating over this object will yield
                results and resolve additional pages
                automatically.

        Raises:
            ValueError: If both ``request`` and the flattened ``parent``
                argument are supplied at the same time.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Coercion accepts either a dict or an already-built request proto.
        request = tenant_service.ListTenantsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.  The retry policy below is
        # exponential backoff (0.1s initial, x1.3, capped at 60s) with a
        # 30s overall deadline, retrying only transient gRPC errors.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_tenants,
            default_retry=retries.Retry(
                initial=0.1,
                maximum=60.0,
                multiplier=1.3,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=30.0,
            ),
            default_timeout=30.0,
            client_info=DEFAULT_CLIENT_INFO,
        )
        # Certain fields should be provided within the metadata header;
        # add these here.  The routing header lets the backend route the
        # request based on the resource name.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListTenantsAsyncPager(
            method=rpc, request=request, response=response, metadata=metadata,
        )
        # Done; return the response.
        return response
    async def __aenter__(self):
        """Enter the async context manager, returning the client itself."""
        return self
    async def __aexit__(self, exc_type, exc, tb):
        """Exit the async context manager, closing the underlying transport."""
        await self.transport.close()
# Attach the installed package version to outgoing requests for telemetry.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata is unavailable (e.g. running from a source checkout);
    # fall back to a ClientInfo without a gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TenantServiceAsyncClient",)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  """Threshold `x` in place and convert it to a SparseTensor.

  Entries strictly below `thresh` are zeroed out (mutating `x`), and the
  surviving entries become the values of the returned SparseTensor.

  Returns:
    A (SparseTensor, nnz) pair, where nnz is the number of non-zeros kept.
  """
  x[x < thresh] = 0
  nz = np.where(x)
  vals = x[nz]
  idx = np.vstack(nz).astype(index_dtype).T
  st = sparse_tensor.SparseTensor(
      indices=idx, values=vals, dense_shape=x.shape)
  return st, len(vals)
class ShapeOpsTest(test.TestCase):
  """Tests tf.shape / shape_n / rank / size / expand_dims / squeeze vs numpy."""
  def _compareShape(self, x, use_gpu=False):
    # tf.shape with both int32 (default) and int64 out_type must match numpy.
    np_ans = np.array(np.shape(x))
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.shape(x)
      tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
      result = tf_ans.eval()
      result_64 = tf_ans_64.eval()
    self.assertAllEqual(np_ans, result)
    self.assertAllEqual(np_ans, result_64)
    self.assertShapeEqual(np_ans, tf_ans)
  def _compareShapeSparse(self, x_np, use_gpu=False):
    # tf.shape on the SparseTensor made from x_np must match np.shape(x_np).
    np_ans = np.array(np.shape(x_np))
    x_tf, unused_nnz = _sparsify(x_np)
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.shape(x_tf)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)
  def _compareShapeN(self, x, use_gpu=False):
    # shape_n over three copies of x; every output must agree with numpy.
    np_ans = np.array(np.shape(x))
    with self.test_session(use_gpu=use_gpu) as sess:
      tf_ans = array_ops.shape_n([x, x, x])
      tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
      result = sess.run(tf_ans)
      result_64 = sess.run(tf_ans_64)
    for i in range(3):
      self.assertAllEqual(np_ans, result[i])
      self.assertAllEqual(np_ans, result_64[i])
      self.assertShapeEqual(np_ans, tf_ans[i])
  def _compareRank(self, x, use_gpu=False):
    # tf.rank must equal np.ndim.
    np_ans = np.asarray(np.ndim(x))
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.rank(x)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)
  def _compareRankSparse(self, x_np, use_gpu=False):
    # tf.rank on a SparseTensor must equal np.ndim of the dense original.
    np_ans = np.asarray(np.ndim(x_np))
    x_tf, unused_nnz = _sparsify(x_np)
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.rank(x_tf)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)
  def _compareSize(self, x, use_gpu=False):
    # tf.size with both int32 (default) and int64 out_type must match np.size.
    np_ans = np.asarray(np.size(x))
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.size(x)
      result = tf_ans.eval()
      tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
      result_64 = tf_ans_64.eval()
    self.assertAllEqual(np_ans, result)
    self.assertAllEqual(np_ans, result_64)
    self.assertShapeEqual(np_ans, tf_ans)
  def _compareSizeSparse(self, x_np, use_gpu=False):
    # tf.size on a SparseTensor counts dense elements, like np.size.
    np_ans = np.asarray(np.size(x_np))
    x_tf, unused_nnz = _sparsify(x_np)
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.size(x_tf)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)
  def _testCpu(self, x):
    # Run all shape/rank/size comparisons (dense + sparse) on CPU.
    self._compareShape(x, use_gpu=False)
    self._compareShapeN(x, use_gpu=False)
    self._compareRank(x, use_gpu=False)
    self._compareSize(x, use_gpu=False)
    self._compareShapeSparse(x, use_gpu=False)
    self._compareRankSparse(x, use_gpu=False)
    self._compareSizeSparse(x, use_gpu=False)
  def _testGpu(self, x):
    # Run all shape/rank/size comparisons (dense + sparse) on GPU.
    self._compareShape(x, use_gpu=True)
    self._compareShapeN(x, use_gpu=True)
    self._compareRank(x, use_gpu=True)
    self._compareSize(x, use_gpu=True)
    self._compareShapeSparse(x, use_gpu=True)
    self._compareRankSparse(x, use_gpu=True)
    self._compareSizeSparse(x, use_gpu=True)
  def _testAll(self, x):
    self._testCpu(x)
    self._testGpu(x)
  def testBasic(self):
    # Float inputs across ranks 1 through 6.
    self._testAll(np.random.randn(2))
    self._testAll(np.random.randn(2, 3))
    self._testAll(np.random.randn(2, 3, 5))
    self._testAll(np.random.randn(2, 3, 5, 7))
    self._testAll(np.random.randn(2, 3, 5, 7, 11))
    self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
  def testBool(self):
    # Boolean inputs across ranks 1 through 6.
    self._testAll(np.random.choice((False, True), size=(2,)))
    self._testAll(np.random.choice((False, True), size=(2, 3)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11, 13)))
  # Disabled because it takes too long to run, but manually verified
  # as passing at time of writing.
  def _test64BitOutput(self):
    with self.test_session():
      inp = array_ops.zeros([2**31])
      num_elements = array_ops.size_internal(
          inp, optimize=False, out_type=dtypes.int64)
      self.assertEqual(2**31, num_elements.eval())
    # Too large for tf.int32 output.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      with self.test_session():
        inp = array_ops.zeros([2**31])
        num_elements = array_ops.size_internal(
            inp, optimize=False, out_type=dtypes.int32)
        self.assertEqual(2**31, num_elements.eval())
  def _compareExpandDims(self, x, dim, use_gpu):
    # expand_dims must match np.expand_dims for the given axis.
    np_ans = np.expand_dims(x, axis=dim)
    with self.test_session(use_gpu=use_gpu):
      tensor = array_ops.expand_dims(x, dim)
      tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)
  def _compareExpandDimsAll(self, x, dim):
    self._compareExpandDims(x, dim, False)
    self._compareExpandDims(x, dim, True)
  def testExpandDims(self):
    # All valid positive and negative axes for ranks 1-3.
    self._compareExpandDimsAll(np.zeros([2]), 0)
    self._compareExpandDimsAll(np.zeros([2]), 1)
    self._compareExpandDimsAll(np.zeros([2]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3]), 0)
    self._compareExpandDimsAll(np.zeros([2, 3]), 1)
    self._compareExpandDimsAll(np.zeros([2, 3]), 2)
    self._compareExpandDimsAll(np.zeros([2, 3]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3]), -2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
  def testExpandDimsBool(self):
    # Same axis coverage as testExpandDims, with boolean inputs.
    choice = lambda s: np.random.choice((False, True), size=s)
    self._compareExpandDimsAll(choice([2]), 0)
    self._compareExpandDimsAll(choice([2]), 1)
    self._compareExpandDimsAll(choice([2]), -1)
    self._compareExpandDimsAll(choice([2, 3]), 0)
    self._compareExpandDimsAll(choice([2, 3]), 1)
    self._compareExpandDimsAll(choice([2, 3]), 2)
    self._compareExpandDimsAll(choice([2, 3]), -1)
    self._compareExpandDimsAll(choice([2, 3]), -2)
    self._compareExpandDimsAll(choice([2, 3, 5]), 0)
    self._compareExpandDimsAll(choice([2, 3, 5]), 1)
    self._compareExpandDimsAll(choice([2, 3, 5]), 2)
    self._compareExpandDimsAll(choice([2, 3, 5]), 3)
    self._compareExpandDimsAll(choice([2, 3, 5]), -1)
    self._compareExpandDimsAll(choice([2, 3, 5]), -2)
    self._compareExpandDimsAll(choice([2, 3, 5]), -3)
    self._compareExpandDimsAll(choice([2, 3, 5]), -4)
  def testExpandDimsErrors(self):
    # Out-of-range axes (-5 and 4 for a rank-3 input) must raise ValueError.
    with self.test_session():
      self.assertRaises(ValueError, array_ops.expand_dims,
                        np.zeros([2, 3, 5]), -5)
      self.assertRaises(ValueError, array_ops.expand_dims,
                        [False, True, True], -5)
      self.assertRaises(ValueError, array_ops.expand_dims,
                        np.zeros([2, 3, 5]), 4)
      self.assertRaises(ValueError, array_ops.expand_dims,
                        [False, True, True], 4)
  def testExpandDimsGradient(self):
    # Numeric gradient of expand_dims must be accurate.
    with self.test_session():
      inp = constant_op.constant(
          np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
      squeezed = array_ops.expand_dims(inp, 1)
      err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
                                                    [4, 1, 2])
    self.assertLess(err, 1e-3)
  def testExpandDimsScalar(self):
    # Expanding a scalar yields a length-1 vector, for axis 0 and -1.
    with self.test_session():
      inp = constant_op.constant(7)
      self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
      self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
      inp = constant_op.constant(True)
      self.assertAllEqual([True], array_ops.expand_dims(inp, 0).eval())
      self.assertAllEqual([True], array_ops.expand_dims(inp, -1).eval())
  def testExpandDimsDimType(self):
    # The axis argument may be an int32 or int64 tensor.
    for dtype in [dtypes.int32, dtypes.int64]:
      x = np.zeros([2])
      np_ans = np.expand_dims(x, axis=0)
      with self.test_session(use_gpu=True):
        tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
        tf_ans = tensor.eval()
      self.assertShapeEqual(np_ans, tensor)
      self.assertAllEqual(np_ans, tf_ans)
  def _compareSqueeze(self, x, squeeze_dims, use_gpu):
    # squeeze with explicit dims (truthy list) or all size-1 dims (empty).
    with self.test_session(use_gpu=use_gpu):
      if squeeze_dims:
        np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
        tensor = array_ops.squeeze(x, squeeze_dims)
        tf_ans = tensor.eval()
      else:
        np_ans = np.squeeze(x)
        tensor = array_ops.squeeze(x)
        tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)
  def _compareSqueezeAll(self, x, squeeze_dims=None):
    if squeeze_dims is None:
      squeeze_dims = []
    self._compareSqueeze(x, squeeze_dims, False)
    self._compareSqueeze(x, squeeze_dims, True)
  def testSqueeze(self):
    # Nothing to squeeze.
    self._compareSqueezeAll(np.zeros([2]))
    self._compareSqueezeAll(np.zeros([2, 3]))
    # Squeeze the middle element away.
    self._compareSqueezeAll(np.zeros([2, 1, 2]))
    # Squeeze on both ends.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
  def testSqueezeBool(self):
    choice = lambda s: np.random.choice((False, True), size=s)
    # Nothing to squeeze.
    self._compareSqueezeAll(choice([2]))
    self._compareSqueezeAll(choice([2, 3]))
    # Squeeze the middle element away.
    self._compareSqueezeAll(choice([2, 1, 2]))
    # Squeeze on both ends.
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]))
  def testSqueezeSpecificDimension(self):
    # Positive squeeze dim index.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
    # Negative squeeze dim index.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
  def testSqueezeSpecificDimensionBool(self):
    choice = lambda s: np.random.choice((False, True), size=s)
    # Positive squeeze dim index.
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [2, 4])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0, 4, 2])
    # Negative squeeze dim index.
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-1])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5, -1])
  def testSqueezeAllOnes(self):
    # Numpy squeezes a 1 element tensor into a zero dimensional tensor.
    # Verify that we do the same.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
        self.assertEqual(np.shape(1), tensor.get_shape())
        tf_ans = tensor.eval()
        self.assertEqual(np.shape(1), tf_ans.shape)
  def testSqueezeAllOnesBool(self):
    # Numpy squeezes a 1 element tensor into a zero dimensional tensor.
    # Verify that we do the same.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        tensor = array_ops.squeeze([[[False]]], [])
        self.assertEqual(np.shape(1), tensor.get_shape())
        tf_ans = tensor.eval()
        self.assertEqual(np.shape(1), tf_ans.shape)
  def testSqueezeOnlyOnes(self):
    # Squeezing a non-1 dimension (index 2, size 3) must raise ValueError.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        input_1x1x3 = np.zeros([1, 1, 3])
        self._compareSqueezeAll(input_1x1x3)
        self._compareSqueezeAll(input_1x1x3, [0])
        self._compareSqueezeAll(input_1x1x3, [1])
        self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
  def testSqueezeErrors(self):
    # Out-of-range squeeze dims must raise ValueError.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [-4])
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [0, -4])
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [3])
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [2, 3])
  def testSqueezeGradient(self):
    # Gradient check for squeeze with no explicit dims.
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = array_ops.reshape(inp, [4, 1, 2])
      squeezed = array_ops.squeeze(a, [])
      err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
                                                    [4, 2])
    self.assertLess(err, 1e-3)
  def testSqueezeGradientWithSqueezeDims(self):
    # Gradient check for squeeze with explicit dims.
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = array_ops.reshape(inp, [4, 1, 2, 1])
      squeezed = array_ops.squeeze(a, [1])
      err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
                                                    [4, 2, 1])
    self.assertLess(err, 1e-3)
  def testSqueezeWithUnknownShape(self):
    # Static shape inference with a partially unknown input shape.
    with self.test_session():
      a = array_ops.placeholder(dtypes.float32, shape=[2, None])
      squeezed = array_ops.squeeze(a, [1])
      self.assertEqual([2], squeezed.get_shape().as_list())
      squeezed = array_ops.squeeze(a)
      self.assertEqual(None, squeezed.get_shape())
      self.assertRaises(ValueError, array_ops.squeeze, a, [0])
      self.assertRaises(ValueError, array_ops.squeeze, a, [100])
class TileTest(test.TestCase):
  """Tests tf.tile: forward results vs np.tile, shape inference, gradients."""
  def testScalar(self):
    # Tiling a scalar with empty multiples is the identity.
    for use_gpu in False, True:
      with self.test_session(use_gpu=use_gpu):
        a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
        tiled = array_ops.tile(a, [])
        result = tiled.eval()
      self.assertEqual(result.shape, ())
      self.assertEqual([], tiled.get_shape())
      self.assertEqual(7, result)
  def testSimple(self):
    # multiples could be int32 or int64
    for dtype in [dtypes.int32, dtypes.int64]:
      with self.test_session(use_gpu=True):
        inp = np.random.rand(4, 1).astype(np.float32)
        a = constant_op.constant(inp)
        tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
        result = tiled.eval()
      self.assertEqual(result.shape, (4, 4))
      self.assertEqual([4, 4], tiled.get_shape())
      self.assertTrue((result == np.tile(inp, (1, 4))).all())
  def testIdentityTileAndGrad(self):
    # Multiples of all ones leave the input unchanged.
    with self.test_session():
      inp = np.random.rand(4, 1).astype(np.float32)
      a = constant_op.constant(inp)
      tiled = array_ops.tile(a, [1, 1])
      result = tiled.eval()
    self.assertEqual(result.shape, (4, 1))
    self.assertEqual([4, 1], tiled.get_shape())
    self.assertTrue((result == np.tile(inp, (1, 1))).all())
  def testEmpty(self):
    # A zero multiple produces an empty (zero-sized) dimension.
    with self.test_session():
      inp = np.random.rand(2, 3).astype(np.float32)
      a = constant_op.constant(inp)
      tiled = array_ops.tile(a, [5, 0])
      result = tiled.eval()
    self.assertEqual(result.shape, (10, 0))
    self.assertEqual([10, 0], tiled.get_shape())
  def testUnknownInputShape(self):
    """Importing can call _TileShape without shape of <multiples> known."""
    with self.test_session():
      inp = array_ops.placeholder(dtypes.float32)  # unknown shape
      multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
      tiled = array_ops.tile(inp, multiples)
      gdef = tiled.graph.as_graph_def()
      # Move the tile op to the start of the graph so that shapes of its inputs
      # are not available when the shape function runs on import.
      swapped = False
      for i, n in enumerate(gdef.node):
        if n.op == "Tile":
          # Swap tile op to be first in gdef.node
          assert i != 0
          new_node = node_def_pb2.NodeDef()
          new_node.CopyFrom(gdef.node[i])
          gdef.node[i].CopyFrom(gdef.node[0])
          gdef.node[0].CopyFrom(new_node)
          swapped = True
      assert swapped
      tiled_imported, = importer.import_graph_def(
          gdef, return_elements=[tiled.name])
      self.assertEqual(4, tiled_imported.get_shape().ndims)
  def testTypes(self):
    # Tile must work for every supported element type; `cast` builds the
    # Python-level constants fed to constant_op for each dtype.
    types_to_test = {
        "bool": (dtypes.bool, bool),
        "float32": (dtypes.float32, float),
        "float64": (dtypes.float64, float),
        "complex64": (dtypes.complex64, complex),
        "complex128": (dtypes.complex128, complex),
        "uint8": (dtypes.uint8, int),
        "int32": (dtypes.int32, int),
        "int64": (dtypes.int64, int),
        bytes: (dtypes.string, bytes)
    }
    for dtype_np, (dtype_tf, cast) in types_to_test.items():
      with self.test_session(use_gpu=True):
        inp = np.random.rand(4, 1).astype(dtype_np)
        a = constant_op.constant(
            [cast(x) for x in inp.ravel(order="C")],
            shape=[4, 1],
            dtype=dtype_tf)
        tiled = array_ops.tile(a, [1, 4])
        result = tiled.eval()
      self.assertEqual(result.shape, (4, 4))
      self.assertEqual([4, 4], tiled.get_shape())
      self.assertAllEqual(result, np.tile(inp, (1, 4)))
  def testInvalidDim(self):
    with self.test_session():
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=[4, 1],
          dtype=dtypes.float32)
      # Wrong length of multiples.
      with self.assertRaises(ValueError):
        array_ops.tile(a, [1, 4, 2])
      # Wrong rank for multiples.
      with self.assertRaises(ValueError):
        array_ops.tile(a, [[2, 3], [3, 4]]).eval()
  def _RunAndVerifyResult(self, rank, use_gpu):
    # Tile a random tensor of the given rank with random multiples and
    # compare element-wise against np.tile.
    with self.test_session(use_gpu=use_gpu):
      # Random dims of given rank
      input_shape = np.random.randint(1, 4, size=rank)
      inp = np.random.rand(*input_shape).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
      tiled = array_ops.tile(a, multiples)
      result = tiled.eval()
    self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
        result.shape)).all())
    self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
    self.assertShapeEqual(result, tiled)
  def testRandom(self):
    # test low rank, like 5
    for _ in range(5):
      self._RunAndVerifyResult(5, use_gpu=False)
    for _ in range(5):
      self._RunAndVerifyResult(5, use_gpu=True)
    # test high rank, like 10
    for _ in range(5):
      self._RunAndVerifyResult(10, use_gpu=False)
    for _ in range(5):
      self._RunAndVerifyResult(10, use_gpu=True)
  def testGradientSimpleReduction(self):
    # The tile gradient sums the incoming gradient over the tiled copies.
    with self.test_session():
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 4])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      self.assertShapeEqual(inp, grad)
      result = grad.eval()
    self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
  def testGradientStridedReduction(self):
    # Gradient when the tiled copies interleave (stride 2 along axis 1).
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 2])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      self.assertShapeEqual(inp, grad)
      result = grad.eval()
    expected_shape = [4, 2]
    expected = np.zeros(expected_shape)
    expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
    expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
    self.assertTrue((np.abs(expected - result) < 1e-3).all())
  def testGradientSimpleReductionOnGPU(self):
    # GPU variant of testGradientSimpleReduction.
    with self.test_session(use_gpu=True):
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 4])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      result = grad.eval()
    self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
  def testGradientStridedReductionOnGPU(self):
    # GPU variant of testGradientStridedReduction.
    with self.test_session(use_gpu=True):
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 2])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      result = grad.eval()
    expected_shape = [4, 2]
    expected = np.zeros(expected_shape)
    expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
    expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
    self.assertAllClose(expected, result, 1e-3)
  def _RunAndVerifyGradientResult(self, input_shape, multiples):
    # Numeric gradient check of tile for the given shape/multiples pair.
    for use_gpu in False, True:
      with self.test_session(use_gpu=use_gpu):
        # Random values
        inp = np.asarray(np.random.rand(*input_shape))
        a = constant_op.constant(inp, dtype=dtypes.float64)
        tiled = array_ops.tile(a, multiples)
        grad_shape = list(np.array(multiples) * np.array(inp.shape))
        err = gradient_checker.compute_gradient_error(
            a, list(input_shape), tiled, grad_shape, x_init_value=inp)
      print("tile(float) error = ", err)
      self.assertLess(err, 1e-3)
  def testGradientRandomScalar(self):
    self._RunAndVerifyGradientResult([], [])
  def testGradientRandom(self):
    self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
    self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
    self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
    self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
  def testGradientStridedReductionGC(self):
    # Gradient-checker version of the strided-reduction gradient test.
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 2])
      err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
    self.assertLess(err, 1e-3)
  def testGradientWithSparseGradWithRank1(self):
    # gather() on a tiled tensor produces an IndexedSlices (sparse) gradient.
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
                                  dtype=dtypes.float32)
    outputs = array_ops.gather(array_ops.tile(inputs, [3]),
                               [1, 5, 9, 3, 7, 2, 2, 2])
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
    self.assertLess(error, 1e-4)
  def testGradientWithSparseGradWithRank3(self):
    # Rank-3 variant of the sparse-gradient test.
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
                                  dtype=dtypes.float32)
    inputs = array_ops.reshape(inputs, [-1, 1, 1])
    outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
                               [1, 5, 9, 3, 7, 2, 2, 2])
    with self.test_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
    self.assertLess(error, 1e-4)
  def testShapeFunctionEdgeCases(self):
    # Static shape inference when input shape and/or multiples are unknown.
    # Unknown multiples shape.
    inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
    tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
    # Unknown input shape.
    inp = array_ops.placeholder(dtypes.float32)
    tiled = array_ops.tile(inp, [2, 2, 2, 2])
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
    # Unknown input and multiples shape.
    inp = array_ops.placeholder(dtypes.float32)
    tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
    self.assertIs(None, tiled.get_shape().ndims)
    # Known input and partially known multiples.
    inp = constant_op.constant(0.0, shape=[1, 1])
    tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
    self.assertEqual([None, 7], tiled.get_shape().as_list())
    # Mismatched input rank and multiples length.
    inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
    with self.assertRaises(ValueError):
      tiled = array_ops.tile(
          inp, array_ops.placeholder(
              dtypes.int32, shape=[3]))
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| |
from __future__ import division, absolute_import, print_function
r''' Test the .npy file format.
Set up:
>>> import sys
>>> from io import BytesIO
>>> from numpy.lib import format
>>>
>>> scalars = [
... np.uint8,
... np.int8,
... np.uint16,
... np.int16,
... np.uint32,
... np.int32,
... np.uint64,
... np.int64,
... np.float32,
... np.float64,
... np.complex64,
... np.complex128,
... object,
... ]
>>>
>>> basic_arrays = []
>>>
>>> for scalar in scalars:
... for endian in '<>':
... dtype = np.dtype(scalar).newbyteorder(endian)
... basic = np.arange(15).astype(dtype)
... basic_arrays.extend([
... np.array([], dtype=dtype),
... np.array(10, dtype=dtype),
... basic,
... basic.reshape((3,5)),
... basic.reshape((3,5)).T,
... basic.reshape((3,5))[::-1,::2],
... ])
...
>>>
>>> Pdescr = [
... ('x', 'i4', (2,)),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> PbufferT = [
... ([3,2], [[6.,4.],[6.,4.]], 8),
... ([4,3], [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> Ndescr = [
... ('x', 'i4', (2,)),
... ('Info', [
... ('value', 'c16'),
... ('y2', 'f8'),
... ('Info2', [
... ('name', 'S2'),
... ('value', 'c16', (2,)),
... ('y3', 'f8', (2,)),
... ('z3', 'u4', (2,))]),
... ('name', 'S2'),
... ('z2', 'b1')]),
... ('color', 'S2'),
... ('info', [
... ('Name', 'U8'),
... ('Value', 'c16')]),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> NbufferT = [
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> record_arrays = [
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
... ]
Test the magic string writing.
>>> format.magic(1, 0)
'\x93NUMPY\x01\x00'
>>> format.magic(0, 0)
'\x93NUMPY\x00\x00'
>>> format.magic(255, 255)
'\x93NUMPY\xff\xff'
>>> format.magic(2, 5)
'\x93NUMPY\x02\x05'
Test the magic string reading.
>>> format.read_magic(BytesIO(format.magic(1, 0)))
(1, 0)
>>> format.read_magic(BytesIO(format.magic(0, 0)))
(0, 0)
>>> format.read_magic(BytesIO(format.magic(255, 255)))
(255, 255)
>>> format.read_magic(BytesIO(format.magic(2, 5)))
(2, 5)
Test the header writing.
>>> for arr in basic_arrays + record_arrays:
... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
...     print(repr(f.getvalue()))
...
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
import sys
import os
import shutil
import tempfile
import warnings
from io import BytesIO
import numpy as np
from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
run_module_suite, assert_, assert_array_equal, assert_raises, raises,
dec, SkipTest
)
from numpy.lib import format
tempdir = None

# Module-level setup.
def setup_module():
    """Create the scratch directory used by the file-based tests."""
    global tempdir
    tempdir = tempfile.mkdtemp()

def teardown_module():
    """Remove the scratch directory, if it was ever created."""
    global tempdir
    if tempdir is not None and os.path.isdir(tempdir):
        shutil.rmtree(tempdir)
        tempdir = None
# Generate some basic arrays to test with.
scalars = [
    np.uint8,
    np.int8,
    np.uint16,
    np.int16,
    np.uint32,
    np.int32,
    np.uint64,
    np.int64,
    np.float32,
    np.float64,
    np.complex64,
    np.complex128,
    object,
]
basic_arrays = []
for scalar in scalars:
    for endian in '<>':
        dtype = np.dtype(scalar).newbyteorder(endian)
        # 15 elements reshaped to (3, 5), matching the doctest headers in
        # the module docstring ((15,), (3, 5), (5, 3), (3, 3)); the previous
        # arange(1500)/(30, 50) disagreed with them and only slowed tests.
        basic = np.arange(15).astype(dtype)
        basic_arrays.extend([
            # Empty
            np.array([], dtype=dtype),
            # Rank-0
            np.array(10, dtype=dtype),
            # 1-D
            basic,
            # 2-D C-contiguous
            basic.reshape((3, 5)),
            # 2-D F-contiguous
            basic.reshape((3, 5)).T,
            # 2-D non-contiguous
            basic.reshape((3, 5))[::-1, ::2],
        ])
# More complicated record arrays.
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+

# Structure of a plain array description:
Pdescr = [
    ('x', 'i4', (2,)),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]

# A plain list of tuples with values for testing:
PbufferT = [
    # x     y                  z
    ([3, 2], [[6., 4.], [6., 4.]], 8),
    ([4, 3], [[7., 5.], [7., 5.]], 9),
]

# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info                             |color|info      |y|z|
# | +-----+--+----------------+----+--+     +----+-----+ | |
# | |value|y2|Info2           |name|z2|     |Name|Value| | |
# | |     |  +----+-----+--+--+    |  |     |    |     | | |
# | |     |  |name|value|y3|z3|    |  |     |    |     | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#

# The corresponding nested array description:
Ndescr = [
    ('x', 'i4', (2,)),
    ('Info', [
        ('value', 'c16'),
        ('y2', 'f8'),
        ('Info2', [
            ('name', 'S2'),
            ('value', 'c16', (2,)),
            ('y3', 'f8', (2,)),
            ('z3', 'u4', (2,))]),
        ('name', 'S2'),
        ('z2', 'b1')]),
    ('color', 'S2'),
    ('info', [
        ('Name', 'U8'),
        ('Value', 'c16')]),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]

# A list of tuples with values matching Ndescr, for testing:
NbufferT = [
    # x     Info                                                color info        y                  z
    #       value y2 Info2                            name z2         Name Value
    #                name   value    y3       z3
    ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
     'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
    ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
     'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
]

# Each structured sample in both little- and big-endian byte order.
record_arrays = [
    np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
    np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
    np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
    np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
]
#BytesIO that reads a random number of bytes at a time
class BytesIOSRandomSize(BytesIO):
    """BytesIO whose read() returns 1..size bytes, chosen at random.

    Used to exercise the npy reader against short reads.
    """

    def read(self, size=None):
        import random
        if size is None:
            # Previously this crashed in random.randint(1, None); follow
            # the BytesIO contract and read everything remaining.
            return super(BytesIOSRandomSize, self).read()
        size = random.randint(1, size)
        return super(BytesIOSRandomSize, self).read(size)
def roundtrip(arr):
    """Write arr with format.write_array and read it back from memory."""
    sink = BytesIO()
    format.write_array(sink, arr)
    return format.read_array(BytesIO(sink.getvalue()))
def roundtrip_randsize(arr):
    """Round-trip arr through a stream that returns random-sized reads."""
    sink = BytesIO()
    format.write_array(sink, arr)
    return format.read_array(BytesIOSRandomSize(sink.getvalue()))
def roundtrip_truncated(arr):
    """Round-trip arr through a stream that is missing its final byte."""
    sink = BytesIO()
    format.write_array(sink, arr)
    #BytesIO is one byte short
    return format.read_array(BytesIO(sink.getvalue()[:-1]))
def assert_equal_(o1, o2):
    """Fail (via numpy's assert_) unless o1 == o2."""
    equal = (o1 == o2)
    assert_(equal)
def test_roundtrip():
    """Yield one round-trip equality check per sample array."""
    for original in basic_arrays + record_arrays:
        yield assert_array_equal, original, roundtrip(original)
def test_roundtrip_randsize():
    """Round-trip non-object arrays through random-sized reads."""
    for original in basic_arrays + record_arrays:
        if original.dtype == object:
            continue
        yield assert_array_equal, original, roundtrip_randsize(original)
def test_roundtrip_truncated():
    """A one-byte-short stream must raise ValueError (non-object arrays)."""
    for original in basic_arrays:
        if original.dtype == object:
            continue
        yield assert_raises, ValueError, roundtrip_truncated, original
def test_long_str():
    # Items larger than the format module's internal buffer size must
    # survive a round trip (regression test for gh-4027).
    width = format.BUFFER_SIZE + 1
    original = np.ones(1, dtype=np.dtype((str, width)))
    assert_array_equal(original, roundtrip(original))
@dec.slow
def test_memmap_roundtrip():
    """Check open_memmap produces files byte-identical to write_array.

    Yields one byte-comparison per non-object sample array, then checks the
    normally-written file can be opened back through open_memmap.
    """
    # Fixme: test crashes nose on windows.
    if not (sys.platform == 'win32' or sys.platform == 'cygwin'):
        for arr in basic_arrays + record_arrays:
            if arr.dtype.hasobject:
                # Skip these since they can't be mmap'ed.
                continue
            # Write it out normally and through mmap.
            nfn = os.path.join(tempdir, 'normal.npy')
            mfn = os.path.join(tempdir, 'memmap.npy')
            fp = open(nfn, 'wb')
            try:
                format.write_array(fp, arr)
            finally:
                fp.close()
            # Preserve Fortran order only for purely F-contiguous arrays.
            fortran_order = (
                arr.flags.f_contiguous and not arr.flags.c_contiguous)
            ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
                                    shape=arr.shape, fortran_order=fortran_order)
            ma[...] = arr
            # Deleting the memmap flushes it to disk.
            del ma
            # Check that both of these files' contents are the same.
            fp = open(nfn, 'rb')
            normal_bytes = fp.read()
            fp.close()
            fp = open(mfn, 'rb')
            memmap_bytes = fp.read()
            fp.close()
            yield assert_equal_, normal_bytes, memmap_bytes
            # Check that reading the file using memmap works.
            ma = format.open_memmap(nfn, mode='r')
            del ma
def test_compressed_roundtrip():
    """savez_compressed/load must round-trip a random array exactly."""
    original = np.random.rand(200, 200)
    target = os.path.join(tempdir, 'compressed.npz')
    np.savez_compressed(target, arr=original)
    restored = np.load(target)['arr']
    assert_array_equal(original, restored)
def test_python2_python3_interoperability():
    """An array saved by one major Python version must load on the other."""
    # Each interpreter reads the file written by the opposite version.
    fname = 'win64python2.npy' if sys.version_info[0] >= 3 else 'python3.npy'
    path = os.path.join(os.path.dirname(__file__), 'data', fname)
    assert_array_equal(np.load(path), np.ones(2))
def test_pickle_python2_python3():
    # Test that loading object arrays saved on Python 2 works both on
    # Python 2 and Python 3 and vice versa
    data_dir = os.path.join(os.path.dirname(__file__), 'data')

    if sys.version_info[0] >= 3:
        xrange = range
    else:
        import __builtin__
        xrange = __builtin__.xrange

    # Object array mixing None, a builtin, unicode text, and non-ascii bytes.
    expected = np.array([None, xrange, sixu('\u512a\u826f'),
                         asbytes('\xe4\xb8\x8d\xe8\x89\xaf')],
                        dtype=object)

    for fname in ['py2-objarr.npy', 'py2-objarr.npz',
                  'py3-objarr.npy', 'py3-objarr.npz']:
        path = os.path.join(data_dir, fname)

        if (fname.endswith('.npz') and sys.version_info[0] == 2 and
                sys.version_info[1] < 7):
            # Reading object arrays directly from zipfile appears to fail
            # on Py2.6, see cfae0143b4
            continue

        for encoding in ['bytes', 'latin1']:
            if (sys.version_info[0] >= 3 and sys.version_info[1] < 4 and
                    encoding == 'bytes'):
                # The bytes encoding is available starting from Python 3.4
                continue

            data_f = np.load(path, encoding=encoding)
            if fname.endswith('.npz'):
                data = data_f['x']
                data_f.close()
            else:
                data = data_f

            if sys.version_info[0] >= 3:
                if encoding == 'latin1' and fname.startswith('py2'):
                    # latin1-decoding Py2 bytes yields str for the bytes item.
                    assert_(isinstance(data[3], str))
                    assert_array_equal(data[:-1], expected[:-1])
                    # mojibake occurs
                    assert_array_equal(data[-1].encode(encoding), expected[-1])
                else:
                    assert_(isinstance(data[3], bytes))
                    assert_array_equal(data, expected)
            else:
                assert_array_equal(data, expected)

        # On Py3, loading Py2 pickles without compatible options must fail.
        if sys.version_info[0] >= 3:
            if fname.startswith('py2'):
                if fname.endswith('.npz'):
                    data = np.load(path)
                    assert_raises(UnicodeError, data.__getitem__, 'x')
                    data.close()
                    data = np.load(path, fix_imports=False, encoding='latin1')
                    assert_raises(ImportError, data.__getitem__, 'x')
                    data.close()
                else:
                    assert_raises(UnicodeError, np.load, path)
                    assert_raises(ImportError, np.load, path,
                                  encoding='latin1', fix_imports=False)
def test_pickle_disallow():
    """With allow_pickle=False, object arrays must fail to load and save."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    # Plain .npy file containing an object array: load fails immediately.
    assert_raises(ValueError, np.load,
                  os.path.join(data_dir, 'py2-objarr.npy'),
                  allow_pickle=False, encoding='latin1')
    # .npz archive: the failure surfaces on item access.
    archive = np.load(os.path.join(data_dir, 'py2-objarr.npz'),
                      allow_pickle=False, encoding='latin1')
    assert_raises(ValueError, archive.__getitem__, 'x')
    # Saving an object array with pickling disabled must also fail.
    out_path = os.path.join(tempdir, 'pickle-disabled.npy')
    assert_raises(ValueError, np.save, out_path,
                  np.array([None], dtype=object), allow_pickle=False)
def test_version_2_0():
f = BytesIO()
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
d = np.ones(1000, dtype=dt)
format.write_array(f, d, version=(2, 0))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', UserWarning)
format.write_array(f, d)
assert_(w[0].category is UserWarning)
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
# 1.0 requested but data cannot be saved this way
assert_raises(ValueError, format.write_array, f, d, (1, 0))
def test_version_2_0_memmap():
    """open_memmap must honor explicit and auto-selected format versions."""
    # requires more than 2 byte for header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    tf = tempfile.mktemp('', 'mmap', dir=tempdir)

    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
                  shape=d.shape, version=(1, 0))

    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(2, 0))
    ma[...] = d
    # Deleting the memmap flushes it to disk.
    del ma

    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        # version=None auto-selects 2.0 here and must warn about it.
        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                                shape=d.shape, version=None)
        assert_(w[0].category is UserWarning)
        ma[...] = d
        del ma

    ma = format.open_memmap(tf, mode='r')
    assert_array_equal(ma, d)
def test_write_version():
    """Only versions (1, 0), (2, 0) and None are accepted by write_array."""
    stream = BytesIO()
    data = np.arange(1)
    # Accepted version specifiers, each followed by a default-version write.
    for good in [(1, 0), None, (2, 0)]:
        format.write_array(stream, data, version=good)
        format.write_array(stream, data)
    # Every other version must raise ValueError.
    bad_versions = [
        (1, 1),
        (0, 0),
        (0, 1),
        (2, 2),
        (255, 255),
    ]
    for bad in bad_versions:
        try:
            format.write_array(stream, data, version=bad)
        except ValueError:
            pass
        else:
            raise AssertionError("we should have raised a ValueError for the bad version %r" % (bad,))
# Well-formed magic strings advertising unsupported format versions.
bad_version_magic = [
    b'\x93NUMPY\x01\x01',
    b'\x93NUMPY\x00\x00',
    b'\x93NUMPY\x00\x01',
    b'\x93NUMPY\x02\x00',
    b'\x93NUMPY\x02\x02',
    b'\x93NUMPY\xff\xff',
]
# Byte strings that are not valid npy magic at all (wrong bytes / too short).
malformed_magic = [
    b'\x92NUMPY\x01\x00',
    b'\x00NUMPY\x01\x00',
    b'\x93numpy\x01\x00',
    b'\x93MATLB\x01\x00',
    b'\x93NUMPY\x01',
    b'\x93NUMPY',
    b'',
]
def test_read_magic():
    """read_magic returns the version and consumes exactly MAGIC_LEN bytes."""
    arr = np.ones((3, 6), dtype=float)
    streams = []
    for version in [(1, 0), (2, 0)]:
        stream = BytesIO()
        format.write_array(stream, arr, version=version)
        stream.seek(0)
        streams.append(stream)
    assert_(format.read_magic(streams[0]) == (1, 0))
    assert_(format.read_magic(streams[1]) == (2, 0))
    assert_(streams[0].tell() == format.MAGIC_LEN)
    assert_(streams[1].tell() == format.MAGIC_LEN)
def test_read_magic_bad_magic():
    """Every malformed magic string must make read_magic raise ValueError."""
    for magic in malformed_magic:
        yield raises(ValueError)(format.read_magic), BytesIO(magic)
def test_read_version_1_0_bad_magic():
    """read_array must reject unsupported versions and malformed magic."""
    for magic in bad_version_magic + malformed_magic:
        yield raises(ValueError)(format.read_array), BytesIO(magic)
def test_bad_magic_args():
    """magic() must reject major/minor numbers outside 0..255."""
    for major, minor in [(-1, 1), (256, 1), (1, -1), (1, 256)]:
        assert_raises(ValueError, format.magic, major, minor)
def test_large_header():
    """A version-1.0 header may not exceed 64KiB."""
    small = {'a': 1, 'b': 2}
    format.write_array_header_1_0(BytesIO(), small)
    # Inflating the header past 65535 bytes must be rejected.
    big = {'a': 1, 'b': 2, 'c': 'x' * 256 * 256}
    assert_raises(ValueError, format.write_array_header_1_0, BytesIO(), big)
def test_read_array_header_1_0():
    """The parsed 1.0 header matches the written array's metadata."""
    stream = BytesIO()
    arr = np.ones((3, 6), dtype=float)
    format.write_array(stream, arr, version=(1, 0))
    stream.seek(format.MAGIC_LEN)
    header = format.read_array_header_1_0(stream)
    assert_(header == ((3, 6), False, float))
def test_read_array_header_2_0():
    """The parsed 2.0 header matches the written array's metadata."""
    stream = BytesIO()
    arr = np.ones((3, 6), dtype=float)
    format.write_array(stream, arr, version=(2, 0))
    stream.seek(format.MAGIC_LEN)
    header = format.read_array_header_2_0(stream)
    assert_(header == ((3, 6), False, float))
def test_bad_header():
    """Malformed or incomplete 1.0 headers must raise ValueError."""
    # Fewer than the two required header-length bytes.
    assert_raises(ValueError, format.read_array_header_1_0, BytesIO())
    assert_raises(ValueError, format.read_array_header_1_0, BytesIO(b'1'))
    # Header shorter than its declared size.
    assert_raises(ValueError, format.read_array_header_1_0,
                  BytesIO(b'\x01\x00'))
    # Missing a required key ('fortran_order').
    stream = BytesIO()
    format.write_array_header_1_0(stream, {"shape": (1, 2), "descr": "x"})
    assert_raises(ValueError, format.read_array_header_1_0, stream)
    # An unexpected extra key.
    stream = BytesIO()
    format.write_array_header_1_0(stream, {"shape": (1, 2),
                                           "fortran_order": False,
                                           "descr": "x",
                                           "extrakey": -1})
    assert_raises(ValueError, format.read_array_header_1_0, stream)
def test_large_file_support():
    """Save/load a small array positioned past the 5GB mark of a sparse file."""
    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
        raise SkipTest("Unknown if Windows has sparse filesystems")
    # try creating a large sparse file
    tf_name = os.path.join(tempdir, 'sparse_file')
    try:
        # seek past end would work too, but linux truncate somewhat
        # increases the chances that we have a sparse filesystem and can
        # avoid actually writing 5GB
        import subprocess as sp
        sp.check_call(["truncate", "-s", "5368709120", tf_name])
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are not swallowed into a skip.
        raise SkipTest("Could not create 5GB large file")
    # write a small array to the end
    with open(tf_name, "wb") as f:
        f.seek(5368709120)
        d = np.arange(5)
        np.save(f, d)
    # read it back
    with open(tf_name, "rb") as f:
        f.seek(5368709120)
        r = np.load(f)
    assert_array_equal(r, d)
# Run the full module test suite when executed as a script.
if __name__ == "__main__":
    run_module_suite()
| |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs
import collections
import re
import string
import sys
from fontTools import ttLib
from fontTools.pens.boundsPen import BoundsPen
from os import path
from nototools import font_data
from nototools import tool_utils
from nototools import unicode_data
from nototools.py23 import basestring
from nototools.py23 import unichr
"""Generate html comparison of codepoints in various fonts."""
_HTML_HEADER_TEMPLATE = """<!DOCTYPE html>
<html lang='en'>
<head>
<meta charset="utf-8">
<title>$title</title>
<style>
$styles
</style>
<style>
table { background-color: #eee; font-size: 20pt; text-align: center }
tr.head { font-weight: bold; font-size: 12pt;
border-style: solid; border-width: 1px; border-color: black;
border-collapse: separate }
td:nth-of-type(1), td:nth-last-of-type(3) { font-size: 12pt; text-align:left }
$mstyles
td:nth-last-of-type(1), td:nth-last-of-type(2) {
font-size: 10pt; text-align:left; max-width: 20em }
.key { background-color: white; font-size: 12pt; border-collapse: separate;
margin-top: 0; border-spacing: 10px 0; text-align: left }
.line { font-size: 20pt; word-break: break-all }
h3 { -webkit-margin-before: 1.75em; -webkit-margin-after: .25em }
.ctx { font-family: $contextfont }
</style>
</head>
<body>
<h3>$title</h3>
"""
_METRICS_STYLES = (
", ".join("td:nth-last-of-type(%d)" % i for i in range(4, 9))
+ " { font-size: 10pt; text-align:right; font-family: sansserif }"
)
# hardcoded for now, this assumes 'noto' is one of the defined font names
_CONTEXT_FONT = "noto"

# Closing boilerplate appended after the comparison tables.
_HTML_FOOTER = """
</body>
</html>
"""
def _cleanlines(textfile):
"""Strip comments and blank lines from textfile, return list of lines."""
result = []
with open(textfile, "r") as f:
for line in f:
ix = line.find("#")
if ix >= 0:
line = line[:ix]
line = line.strip()
if line:
result.append(line)
return result
class CodeList(object):
"""An ordered list of code points (ints). These might map to other (PUA) code
points that the font knows how to display."""
@staticmethod
def fromfile(filename):
if filename.endswith("_codes.txt"):
return CodeList.frompairfile(filename)
elif filename.endswith("_cmap.txt"):
return CodeList.fromrangefile(filename)
elif filename.endswith(".ttf") or filename.endswith(".otf"):
return CodeList.fromfontcmap(filename)
else:
raise Exception(
"unrecognized file type %s for CodeList.fromfile" % filename
)
@staticmethod
def fromspec(spec):
codelist_type, text = [t.strip() for t in spec.split(":")]
return CodeList.fromtext(text, codelist_type)
@staticmethod
def fromtext(text, codelist_type):
if codelist_type == "cmap":
return CodeList.fromrangetext(text)
elif codelist_type == "codes":
return CodeList.frompairtext(text)
elif codelist_type == "list":
return CodeList.fromlisttext(text)
else:
raise Exception('unknown codelist type "%s"' % codelist_type)
@staticmethod
def fromfontcmap(fontname):
font = ttLib.TTFont(fontname)
return CodeList.fromset(font_data.get_cmap(font))
@staticmethod
def fromset(cpset):
return UnicodeCodeList(cpset)
@staticmethod
def fromrangetext(cpranges):
return CodeList.fromset(
tool_utils.parse_int_ranges(cpranges, allow_compressed=True)
)
@staticmethod
def fromrangefile(cprange_file):
with open(cprange_file, "r") as f:
return CodeList.fromrangetext(f.read())
@staticmethod
def fromlist(cplist):
return OrderedCodeList(cplist)
@staticmethod
def fromlisttext(cplist):
codes = tool_utils.parse_int_ranges(
cplist, allow_duplicates=True, return_set=False, allow_compressed=True
)
return CodeList.fromlist(codes)
@staticmethod
def fromlistfile(cplist_file):
return CodeList.fromlisttext(_cleanlines(cplist_file))
@staticmethod
def frompairs(cppairs):
return MappedCodeList(cppairs)
@staticmethod
def frompairtext(cppairs_text):
# if no pairs, will treat as listtext. cppairs must have only one item
# or pair per line, however.
pair_list = None
single_list = []
for line in cppairs_text.splitlines():
parts = [int(s, 16) for s in line.split(";")]
if pair_list:
if len(parts) < 2:
parts.append(parts[0])
pair_list.append(tuple(parts)[:2])
elif len(parts) > 1:
pair_list = [(cp, cp) for cp in single_list]
pair_list.append(tuple(parts[:2]))
else:
single_list.append(parts[0])
if pair_list:
return CodeList.frompairs(pair_list)
return CodeList.fromlist(single_list)
    @staticmethod
    def frompairfile(cppairs_file):
        """Build a CodeList from a file of 'cp' or 'cp;cp' hex lines."""
        return CodeList.frompairtext("\n".join(_cleanlines(cppairs_file)))
    def contains(self, cp):
        """Returns True if cp is in the code list."""
        # Abstract; implemented by the CodeList subclasses below.
        raise NotImplementedError
    def codes(self):
        """Returns the codes in preferred order."""
        # Abstract; implemented by the CodeList subclasses below.
        raise NotImplementedError
    def codeset(self):
        """Returns the frozenset of codes."""
        # Abstract; implemented by the CodeList subclasses below.
        raise NotImplementedError
    def mapped_code(self, cp):
        """Returns the mapped code for this code point."""
        # Abstract; implemented by the CodeList subclasses below.
        raise NotImplementedError
class UnicodeCodeList(CodeList):
    """A codelist based on unicode code point order with no mapping."""

    def __init__(self, codeset):
        # Fix: super() must name this class, not the base.  The old
        # super(CodeList, self) started the MRO lookup *after* CodeList,
        # skipping any CodeList initialization (the sibling subclasses
        # already do this correctly).
        super(UnicodeCodeList, self).__init__()
        self._codeset = frozenset(codeset)

    def contains(self, cp):
        """True when cp is one of the codes."""
        return cp in self._codeset

    def codes(self):
        """The codes sorted in code point order."""
        return sorted(self._codeset)

    def codeset(self):
        """Frozenset of the codes."""
        return self._codeset

    def mapped_code(self, cp):
        """Identity mapping: cp when present, else None."""
        return cp if cp in self._codeset else None
class MappedCodeList(CodeList):
    """A codelist whose listed codes map to different codes for rendering."""

    def __init__(self, codepairs):
        super(MappedCodeList, self).__init__()
        # hack, TODO: change the column order in the input files
        # The map is keyed by the second column (the listed code) and
        # yields the first column (the code actually rendered).
        self._codemap = {v: k for k, v in codepairs}
        self._codes = tuple(p[1] for p in codepairs)

    def contains(self, cp):
        # Membership is in terms of the listed (second-column) codes.
        return cp in self._codemap

    def codes(self):
        # Preserves the order of the input pairs.
        return self._codes

    def codeset(self):
        return frozenset(self._codes)

    def mapped_code(self, cp):
        # None when cp is not in the list.
        return self._codemap.get(cp)
class OrderedCodeList(CodeList):
    """A codelist preserving the caller-supplied order, with no mapping."""

    def __init__(self, codes):
        super(OrderedCodeList, self).__init__()
        self._codes = tuple(codes)
        # Fix: build the set from the stored tuple.  frozenset(codes) read
        # the input a second time, so a generator/iterator argument (already
        # exhausted by tuple()) silently produced an empty codeset.
        self._codeset = frozenset(self._codes)

    def contains(self, cp):
        """True when cp is one of the listed codes."""
        return cp in self._codeset

    def codes(self):
        """The codes in their original order."""
        return self._codes

    def codeset(self):
        """Frozenset of the codes."""
        return self._codeset

    def mapped_code(self, cp):
        """Identity mapping: cp when present, else None."""
        return cp if cp in self._codeset else None
def _load_codelist(codelist_spec, data_dir, codelistfile_map):
    """Resolve a codelist spec ('type:value' or bare filename) to a CodeList.

    Known type prefixes are 'file', 'cmap', 'codes', and 'list'; a spec with
    no prefix must end in '.txt' and is treated as a file.  File specs are
    resolved relative to data_dir and cached in codelistfile_map so a file
    shared by several callers is parsed only once."""
    for codelist_type in ["file", "cmap", "codes", "list", None]:
        if codelist_type and codelist_spec.startswith(codelist_type + ":"):
            codelist_spec = codelist_spec[len(codelist_type) + 1 :].strip()
            break
    if not codelist_type:
        if codelist_spec.endswith(".txt"):
            codelist_type = "file"
        else:
            raise Exception(
                'cannot determine type of codelist spec "%s"' % codelist_spec
            )
    if codelist_type != "file":
        codelist = CodeList.fromtext(codelist_spec, codelist_type)
    else:
        fullpath = path.join(data_dir, codelist_spec)
        if not path.isfile(fullpath):
            raise Exception('codelist file "%s" not found' % codelist_spec)
        codelist = codelistfile_map.get(fullpath)
        if codelist is None:
            codelist = CodeList.fromfile(fullpath)
            # Fix: key the cache by fullpath to match the .get() above.
            # It was keyed by the bare spec, so the cache never hit.
            codelistfile_map[fullpath] = codelist
    return codelist
class SequenceList(object):
    """A list of strings generated by a spec."""

    def __init__(self, codelists, suffix):
        self.codelists = codelists
        self.suffix = suffix

    def codes(self):
        """Return the set of codepoints any generated string can contain."""
        used = set(ord(ch) for ch in self.suffix)
        for codelist in self.codelists:
            used |= codelist.codeset()
        return used

    def __iter__(self):
        """Yield one string per codelist: its chars joined by the suffix."""
        for codelist in self.codelists:
            chars = [unichr(cp) for cp in codelist.codes()]
            yield self.suffix.join(chars) + self.suffix
class Target(object):
    """A named collection of data that renders to html or text."""

    @staticmethod
    def from_table_data(name, codelist, used_fonts):
        """Build a table-style target from a codelist and its fonts."""
        return CodeTableTarget(name, codelist, used_fonts)

    @staticmethod
    def from_sequence_data(name, codelists, suffix, font):
        """Build a sequence-style target rendered with a single font."""
        sequencelist = SequenceList(codelists, suffix)
        return SequenceListTarget(name, sequencelist, font)

    def __init__(self, name):
        # Callers read .name directly (see _generate_fontkey and
        # _build_cp_to_targets).  The former name() accessor was dead code:
        # this instance attribute shadowed it, so it could never be called.
        self.name = name

    def codes(self):
        """Returns the set of codepoints used in this target."""
        raise NotImplementedError

    def generate_text(self, metrics, flag_sets):
        """Render this target as plain text."""
        raise NotImplementedError

    def generate_html(self, tindex, context, metrics, flag_sets, cp_to_targets):
        """Render this target as an html fragment."""
        raise NotImplementedError
class SequenceListTarget(Target):
    """Target that renders a SequenceList as lines styled with one font."""

    def __init__(self, name, sequencelist, used_font):
        super(SequenceListTarget, self).__init__(name)
        self.sequencelist = sequencelist
        self.used_font = used_font

    def codes(self):
        """Codepoints used by the sequences (including the suffix)."""
        return self.sequencelist.codes()

    def generate_text(self, metrics, flag_sets):
        # Text rendering of sequence targets is not supported.
        raise NotImplementedError

    def generate_html(self, tindex, context, metrics, flag_sets, cp_to_targets):
        """Return an html fragment: a heading plus one line per sequence."""
        parts = ['<h3 id="target_%d">%s</h3>' % (tindex, self.name)]
        # used_font is a (key, keyinfos) pair; the key is the css class.
        parts.append('<div class="%s line">' % self.used_font[0])
        parts.extend(seq + "<br/>" for seq in self.sequencelist)
        parts.append("</div>")
        return "\n".join(parts)
class CodeTableTarget(Target):
    """Target that renders a codelist as a table: one row per codepoint,
    one column per font key, plus age and name columns."""

    def __init__(self, name, codelist, used_fonts):
        # used_fonts is a list of (key, keyinfos) pairs where keyinfos is a
        # list of (fontpath, name, codelist) tuples (see _load_fonts).
        super(CodeTableTarget, self).__init__(name)
        self.codelist = codelist
        self.used_fonts = used_fonts

    def codes(self):
        """Codepoints of this target's codelist, in preferred order."""
        return self.codelist.codes()

    def generate_text(self, metrics, flag_sets):
        """Render the table as plain text; the metrics argument is unused."""
        lines = [self.name]
        header = ["idx code"]
        header.extend(f[0] for f in self.used_fonts)
        header.append("age name")
        lines.append(" ".join(header))
        for index, cp in enumerate(self.codelist.codes()):
            line = ["%3d" % index]
            line.append("%5s" % ("%04x" % cp))
            for rkey, keyinfos in self.used_fonts:
                # Show the key when any font in the column covers cp,
                # else dashes of the same width to keep columns aligned.
                match = any(codelist.contains(cp) for _, _, codelist in keyinfos)
                line.append(rkey if match else ("-" * len(rkey)))
            line.append(unicode_data.age(cp))
            line.append(_flagged_name(cp, flag_sets))
            lines.append(" ".join(line))
        return "\n".join(lines)

    def generate_html(self, tindex, context, metrics, flag_sets, cp_to_targets):
        """Render the table as html.

        tindex is this target's anchor index, context an optional '%s'
        pattern wrapping each glyph, metrics an optional map of cp to
        (lsb, adv, cy) overrides (None disables the metrics columns), and
        cp_to_targets maps cp to (index, name) pairs for cross-links."""
        # Flip to True locally to dump a metrics-file skeleton to stdout.
        dump_metrics = False
        if dump_metrics:
            print("$ %s" % self.name)

        def context_string(codelist, cp):
            # Render the mapped code for cp, optionally inside the context.
            cps = unichr(codelist.mapped_code(cp))
            return (context % cps) if context else cps

        def _target_line(cp, tindex, tinfo):
            # Links to the other targets containing cp (this one excluded).
            info = []
            for ix, name in tinfo:
                if ix == tindex:
                    continue
                info.append('<a href="#target_%d">%s</a>' % (ix, name))
            if not info:
                return "(no group)"
            return "; ".join(info)

        def _generate_header():
            # One <th> per font column; metrics columns only when requested.
            header_parts = ['<tr class="head"><th>CP']
            for key, _ in self.used_fonts:
                header_parts.append("<th>" + key)
            if metrics is not None:
                header_parts.append("<th>lsb<th>mid<th>rsb<th>wid<th>cy")
            header_parts.append("<th>Age<th>Name")
            return "".join(header_parts)

        if metrics is not None:
            # the metrics apply to the rightmost font
            fontname = self.used_fonts[-1][1][0][0]
            if fontname:
                metrics_font = _get_font(fontname)
            else:
                metrics_font = None
                sys.stderr.write("no metrics font\n")
        lines = ['<h3 id="target_%d">%s</h3>' % (tindex, self.name)]
        char_line = _character_string_html(self.codelist, self.used_fonts[-1])
        if char_line:
            lines.append(char_line)
        lines.append("<table>")
        header = _generate_header()
        linecount = 0
        for cp in self.codelist.codes():
            # Repeat the column header every 20 rows for readability.
            if linecount % 20 == 0:
                lines.append(header)
            linecount += 1
            line = ["<tr>"]
            line.append("<td>U+%04x" % cp)
            for rkey, keyinfos in self.used_fonts:
                cell_class = None
                cell_text = None
                index = 0
                # The first font in the column that covers cp wins.
                for font, _, rcodelist in keyinfos:
                    if rcodelist.contains(cp):
                        if len(keyinfos) > 1:
                            cell_class = "%s_%d" % (rkey, index)
                        else:
                            cell_class = rkey
                        cell_class = replace_nonalpha(cell_class)
                        if font:
                            cell_text = context_string(rcodelist, cp)
                        else:
                            # No font file: show a star instead of a glyph.
                            cell_text = " * "
                            cell_class += " star"
                        break
                    index += 1
                if cell_class:
                    line.append('<td class="%s">%s' % (cell_class, cell_text))
                else:
                    line.append("<td> ")
            name = _flagged_name(cp, flag_sets)
            if metrics is not None:
                cp_metrics = _get_cp_metrics(metrics_font, cp) if metrics_font else None
                if cp_metrics:
                    lsb, rsb, wid, adv, cy = cp_metrics
                    if dump_metrics:
                        print("%04x # %4d, %4d, %4d, %s" % (cp, lsb, adv, cy, name))
                    # Values overridden in the metrics map render 'old→new'.
                    if cp in metrics:
                        nlsb, nadv, ncy = metrics[cp]
                    else:
                        nlsb, nadv, ncy = lsb, adv, cy
                    nrsb = nadv - wid - nlsb
                    line.append(
                        "<td>%d%s"
                        % (lsb, "→<b>%d</b>" % nlsb if lsb != nlsb else "")
                    )
                    line.append("<td>%d" % wid)
                    line.append(
                        "<td>%d%s"
                        % (rsb, "→<b>%d</b>" % nrsb if rsb != nrsb else "")
                    )
                    line.append(
                        "<td>%d%s"
                        % (adv, "→<b>%d</b>" % nadv if adv != nadv else "")
                    )
                    line.append(
                        "<td>%d%s" % (cy, "→<b>%d</b>" % ncy if cy != ncy else "")
                    )
                else:
                    # cp not in the metrics font: empty metrics cells.
                    line.append("<td><td><td><td><td>")
            line.append("<td>%s" % unicode_data.age(cp))
            line.append("<td>%s" % name)
            line.append("<td>%s" % _target_line(cp, tindex, cp_to_targets.get(cp)))
            lines.append("".join(line))
        lines.append("</table>")
        return "\n".join(lines)
def _load_fonts(data_list, data_dir, codelist_map):
    """data_list is a list of tuples of two to four items.  The first item is
    the key, the second is the name of the font file in data_dir.  The
    second can be None, otherwise it must exist.  The third item, if
    present, is the name to use for the font, otherwise it will be read
    from the font; it must be present where there is no font.  The
    fourth item, if present, is the name of a codelist file; it must be present
    where there is no font.  If present and None, the unicode cmap from the
    font is used, otherwise the font file name is stripped of its extension
    to try to find a file from which to create a codelist.

    Multiple tuples can share the same key; these form one column and the order
    of the files composing the tuple defines the order in which they are
    searched for a glyph.

    Returns a list of tuples of key, keyinfo, where keyinfo is
    a list of tuples of filepath, name, codelist."""

    def _load_font(data, codelist_map):
        # Resolve one data tuple to (key, fontpath, name, codelist).
        if len(data) < 4:
            # Pad missing trailing fields with None.
            data = data + tuple([None] * (4 - len(data)))
        key, fname, name, codelistfile = data
        if not fname:
            if not name:
                raise Exception("must have name if no font provided")
            if not codelistfile:
                raise Exception("must have codelist file if no font provided")
            fontpath = None
        else:
            fontpath = path.join(data_dir, fname)
            if not path.isfile(fontpath):
                raise Exception('font "%s" not found' % fontpath)
        if codelistfile:
            codelist = _load_codelist(codelistfile, data_dir, codelist_map)
        # Open the font only if we still need its name or its cmap.
        if fname and (not codelistfile or not name):
            font = ttLib.TTFont(fontpath)
            if not name:
                # Prefer the typographic family name (id 16), then the
                # family name (id 1).
                names = font_data.get_name_records(font)
                name = names[16] if 16 in names else names[1] if 1 in names else None
                if not name:
                    raise Exception('cannot read name from font "%s"' % fontpath)
            if not codelistfile:
                codelist = CodeList.fromset(font_data.get_cmap(font))
        return key, fontpath, name, codelist

    # group by key
    keyorder = []
    keyinfo = collections.defaultdict(list)
    for data in data_list:
        key, fontpath, name, codelist = _load_font(data, codelist_map)
        if key not in keyinfo:
            keyorder.append(key)
        keyinfo[key].append((fontpath, name, codelist))
    return [(key, keyinfo[key]) for key in keyorder]
def _select_used_fonts(codelist, fonts, prefer_fonts, omit_fonts):
    """Return the fonts we want to use to display the codelist, in order.

    If not None, prefer_fonts is a key or list of keys for fonts to order
    at the end.  If not None, omit_fonts is a key or list of keys to omit
    even if they would otherwise be used by default; prefer_fonts takes
    precedence over omit_fonts if the same key is in both.  The special
    key '_all_' in omit_fonts omits every font key."""
    if prefer_fonts is not None:
        if isinstance(prefer_fonts, basestring):
            prefer_fonts = [prefer_fonts]
        preferred = [None] * len(prefer_fonts)
    else:
        prefer_fonts = []
        preferred = []
    if omit_fonts is not None:
        # Fix: wrap only a bare string key.  Previously a list argument
        # without '_all_' was wrapped into a list-of-lists, so none of its
        # keys ever matched and nothing was omitted.
        if isinstance(omit_fonts, basestring):
            omit_fonts = [omit_fonts]
        if "_all_" in omit_fonts:
            omit_fonts = [k for k, _ in fonts]
        if prefer_fonts:
            omit_fonts = [k for k in omit_fonts if k not in prefer_fonts]
    else:
        omit_fonts = []
    regular = []
    codes = codelist.codes()
    for f in fonts:
        key, keyinfo = f
        if key in omit_fonts:
            continue
        # Use the column if any of its fonts covers some code.
        for _, _, cl in keyinfo:
            if any(cl.contains(cp) for cp in codes):
                is_preferred = False
                for i, k in enumerate(prefer_fonts):
                    if key == k:
                        preferred[i] = f
                        is_preferred = True
                        break
                if not is_preferred:
                    regular.append(f)
                break
    # Preferred fonts go last, in prefer_fonts order; unfilled slots drop.
    return tuple(regular + [p for p in preferred if p])
def _load_targets(target_data, fonts, data_dir, codelist_map):
    """Target data is a list of tuples of target names, codelist files, an
    optional preferred font key or list of keys, and an optional omitted font
    key or list of keys.  All files should be in data_dir.  Codelist_map is a
    cache in case the codelist file has already been read.  Returns a list of
    tuples of target name, codelist, and fontlist."""

    def _create_suffix(charlist):
        # Suffixes are written with escapes (e.g. '\\u200d') in the data.
        return charlist.decode("unicode-escape")

    def _select_font(fonts, font_id):
        # Return the (key, keyinfos) tuple whose key equals font_id.
        for f in fonts:
            if f[0] == font_id:
                return f
        raise Exception('no font with id "%s"' % font_id)

    result = []
    for target in target_data:
        target_type, name, codelist_spec = target[:3]
        if target_type == "table":
            codelist = _load_codelist(codelist_spec, data_dir, codelist_map)
            prefer_fonts = target[3] if len(target) > 3 else None
            omit_fonts = target[4] if len(target) > 4 else None
            used_fonts = _select_used_fonts(codelist, fonts, prefer_fonts, omit_fonts)
            if not used_fonts:
                raise Exception("no fonts used by target %s" % name)
            result.append(Target.from_table_data(name, codelist, used_fonts))
        elif target_type == "sequence":
            if len(target) < 5:
                raise Exception("sequence target too short")
            # The codelist field may hold several comma-separated lists.
            lists = codelist_spec.split(",")
            codelists = [CodeList.fromlisttext(cl) for cl in lists]
            suffix = _create_suffix(target[3])
            font_tuple = _select_font(fonts, target[4])
            result.append(
                Target.from_sequence_data(name, codelists, suffix, font_tuple)
            )
    return tuple(result)
def _create_codeset_from_expr(expr_list, flag_sets, data_dir, codelist_map):
    """Processes expr_list in order, building a codeset.

    See _read_flag_data_from_file for information on expr_list.
    This can modify flag_sets and codelist_map."""
    result = ()
    for op, exp in expr_list:
        if exp not in flag_sets:
            # it's a codelist
            codes = _load_codelist(exp, data_dir, codelist_map).codeset()
        else:
            codes_or_spec = flag_sets[exp]
            if isinstance(codes_or_spec, (set, frozenset)):
                codes = codes_or_spec
            else:
                # replace the spec with the actual codes
                if codes_or_spec is None:
                    # we only know about '_emoji_' and '_math_'
                    if exp == "_emoji_":
                        codes = (
                            unicode_data.get_emoji()
                            - unicode_data.get_unicode_emoji_variants("proposed_extra")
                        )
                    elif exp == "_math_":
                        codes = unicode_data.chars_with_property("Math")
                    else:
                        raise Exception('unknown special codeset "%s"' % exp)
                else:
                    codes = _load_codelist(
                        codes_or_spec, data_dir, codelist_map
                    ).codeset()
                # Cache the resolved codes so the spec is expanded once.
                flag_sets[exp] = codes
        if op == "|":
            if not result:
                # it appears that python 'optimizes' |= by replacing the lhs by
                # rhs if lhs is an empty set, but this changes the type of lhs
                # to frozenset...
                result = set(codes)
            else:
                result |= codes
        elif op == "&":
            result &= codes
        elif op == "-":
            result -= codes
        else:
            raise Exception('unknown op "%s"' % op)
    return result
def _load_flags(flag_data, data_dir, codelist_map):
    """Flag data is a list of tuples of defined sets or flags and expressions,
    see _read_flag_data_from_file for more info.

    This returns a map from flag name to a tuple of (cp_set, bool) where True
    means the flag is set for a cp if it is in the cp_set, and False means the
    flag is set if the cp is not in the cp_set.

    This can fail, since the code that processes flag_data does not actually
    try to load the codelists."""
    flag_sets = {}
    flag_map = {}
    for flag_info in flag_data:
        t0, t1, t2 = flag_info
        if t0 == "!define":
            set_name = t1
            if set_name in ["_emoji_", "_math_"]:
                # Special sets get created lazily by _create_codeset_from_expr.
                set_codes = None
            else:
                set_codes = _load_codelist(t2, data_dir, codelist_map).codeset()
            flag_sets[set_name] = set_codes
        else:
            flag_name = t0
            flag_in = t1
            flag_set = _create_codeset_from_expr(t2, flag_sets, data_dir, codelist_map)
            flag_map[flag_name] = (flag_set, flag_in)
    return flag_map
def _load_fonts_targets_flags(font_data, target_data, flag_data, data_dir):
    """Resolve the font, target, and flag specs, sharing one codelist cache.

    The cache avoids building a codelist twice when fonts and targets
    reference the same file — not a big deal, but cheap to do."""
    shared_codelists = {}
    fonts = _load_fonts(font_data, data_dir, shared_codelists)
    targets = _load_targets(target_data, fonts, data_dir, shared_codelists)
    flags = _load_flags(flag_data, data_dir, shared_codelists)
    return fonts, targets, flags
def strip_comments_from_file(filename):
    """Yield the non-empty lines of filename with '#' comments removed.

    Each yielded line is stripped of surrounding whitespace; lines that
    are blank after comment removal are skipped."""
    with open(filename, "r") as infile:
        for raw in infile:
            stripped = raw.split("#", 1)[0].strip()
            if stripped:
                yield stripped
def _read_font_data_from_file(filename):
    """Read font spec lines into 4-tuples, padding short rows with None.

    Each line holds up to four ';'-separated fields: key, font file, font
    name, codelist file (see _load_fonts for their meaning)."""
    records = []
    for line in strip_comments_from_file(filename):
        fields = line.split(";")
        fields.extend([None] * (4 - len(fields)))
        records.append(tuple(fields))
    return records
def _read_target_data_from_file(filename):
    """Target data uses # to indicate a comment to end of line.
    Comments are stripped, then an empty or blank line is ignored.

    Targets are either tables or sequences, the default
    is a table.

    Each line in a table target defines a tuple of four values:
    target name, codelist, preferred font ids, and omitted font
    ids.  Each line in a sequence target defines a tuple of
    four values: target name, codelist, suffix, and font id.

    A line can also start with one of three directives,
    !define, !default, or !type.

    If a line starts with '!define ' we expect a key followed
    by '=' and then one or more names separated by space.  The
    names are turned into a list, and entered into a dictionary
    for the key.  Once defined a key cannot be redefined.

    If a line starts with '!default ' we expect a key of either
    'prefer' or 'omit' optionally followed by '=' and a list of
    names to prefer or omit; these will become the default
    values until the next '!default ' directive.  If there is
    no '=' the value is reset.  An omitted or empty prefer or
    omit field will get the fallback; to explicitly request None
    and override the fallback the field should contain 'None'.

    If a line starts with '!type ' we expect either 'table' or
    'sequence' to follow.  This will become the type of the
    following lines until the next '!type ' directive.

    Normally, a line consists of 2-4 fields separated by ';'.
    The first two are a target name and a codelist spec.

    For table targets, the third is the preferred font ids
    separated by space; previously !defined keys can be used
    here instead of this list and the list defined for that key
    will be used.  The fourth is the omitted font ids separated
    by space, they are treated similarly.  If the preferred or
    omit field is missing or empty and a default value for it
    has been set, that value is used.

    For sequence targets, the third is a hex sequence indicating
    the suffix string to apply after each codepoint, and the
    fourth is the font id; these must both be present.

    This returns a list of the tuples of the type name followed
    by the data for that type.
    """

    def add_index_list_or_defined(info, index, fallback, defines):
        """Extend or update info[index], possibly using defines"""
        if len(info) <= index:
            info.append(fallback)
        elif info[index] is not None:
            # info fields come from str.split, so this branch always runs
            # for present fields; '' (empty field) resolves to the fallback.
            item = info[index]
            if item in defines:
                items = defines[item]
            elif item == "None":
                items = None
            elif item:
                items = item.split()
            else:
                items = fallback
            info[index] = items

    prefer_fallback = None
    omit_fallback = None
    target_type = "table"
    defines = {}
    target_data = []
    kDefineDirective = "!define "
    kDefaultDirective = "!default "
    kTypeDirective = "!type "
    for line in strip_comments_from_file(filename):
        if line.startswith(kDefineDirective):
            # !define key=val val...
            name, rest = line[len(kDefineDirective) :].split("=")
            name = name.strip()
            if name in defines:
                raise Exception("name %s already defined in %s" % (name, filename))
            rest = rest.strip().split()
            defines[name] = tuple(rest)
            continue
        if line.startswith(kDefaultDirective):
            # !default prefer|omit=val val...
            values = line[len(kDefaultDirective) :].split("=")
            name = values[0].strip()
            rest = values[1].strip().split() if len(values) > 1 else None
            if not rest:
                # A bare '!default prefer' (no '=') resets the fallback.
                rest = None
            if name == "prefer":
                prefer_fallback = rest
            elif name == "omit":
                omit_fallback = rest
            else:
                raise Exception("default only understands 'prefer' or 'omit'")
            continue
        if line.startswith(kTypeDirective):
            # !type table|sequence
            value = line[len(kTypeDirective) :]
            if value in {"table", "sequence"}:
                target_type = value
            else:
                raise Exception("type only understands 'table' or 'sequence'")
            continue
        info = [k.strip() for k in line.split(";")]
        if len(info) < 2:
            raise Exception('need at least two fields in "%s"' % line)
        if target_type == "table":
            # name;character spec or filename;prefer_id... or empty;omit_id... or empty
            add_index_list_or_defined(info, 2, prefer_fallback, defines)  # preferred
            add_index_list_or_defined(info, 3, omit_fallback, defines)  # omitted
            target_data.append(tuple(["table"] + info))
        elif target_type == "sequence":
            if len(info) < 4:
                raise Exception('need four fields in sequence data in "%s"' % line)
            target_data.append(tuple(["sequence"] + info))
    return target_data
def _flagged_name(cp, flag_sets):
    """Prepend any flags to cp's unicode name, and return.

    Flag_sets is a map from flag name to a tuple of cp set and boolean.
    True means add the flag if cp is in the set, False means add it if
    cp is not in the set."""
    try:
        name = unicode_data.name(cp)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are not swallowed here.
        raise Exception("no name for %04X" % cp)
    flags = [k for k, v in sorted(flag_sets.items()) if (cp in v[0]) == v[1]]
    if flags:
        name = "(%s) %s" % (", ".join(flags), name)
    return name
def generate_text(outfile, title, fonts, targets, flag_sets, metrics, data_dir):
    """Write the text report: title, a font key listing, then each target."""
    outfile.write(title + "\n")
    outfile.write("\n")
    outfile.write("Fonts:\n")
    # Right-align the keys to the longest key for a tidy listing.
    max_keylen = max(len(key) for key, _ in fonts)
    fmt = " %%%ds: %%s (%%s)" % max_keylen
    for key, keyinfos in fonts:
        for font, name, _ in keyinfos:
            rel_font = path.relpath(font, data_dir) if font else "(no font)"
            outfile.write(fmt % (key, name, rel_font) + "\n")
    outfile.write("\n")
    for target in targets:
        outfile.write("\n")
        # Fix: Target.generate_text's signature is (metrics, flag_sets);
        # the arguments were previously passed in the reverse order, so
        # _flagged_name received the metrics map instead of the flag sets.
        outfile.write(target.generate_text(metrics, flag_sets) + "\n")
def _generate_fontkey(fonts, targets, data_dir):
    """Return an html key: anchor links to each target, then the font table."""
    lines = ['<p style="margin-bottom:5px"><b>Targets</b>']
    lines.append('<div style="margin-left:20px"><table class="key">')
    for tid, target in enumerate(targets):
        # Anchor ids match those emitted by the targets' generate_html.
        lines.append('<tr><th><a href="#target_%s">%s</a>' % (tid, target.name))
    lines.append("</table></div>")
    lines.append('<p style="margin-bottom:5px"><b>Fonts</b>')
    lines.append('<div style="margin-left:20px"><table class="key">')
    for key, keyinfos in fonts:
        for font, name, _ in keyinfos:
            rel_font = path.relpath(font, data_dir) if font else "(no font)"
            lines.append("<tr><th>%s<td>%s<td>%s" % (key, name, rel_font))
    lines.append("</table></div>")
    return "\n".join(lines)
# Matches any character that is not valid in a css class name fragment.
_nonalpha_re = re.compile(r"\W")


def replace_nonalpha(key):
    """Return key with each non-word character replaced by '_'.

    Used to turn font keys into usable css class names."""
    return _nonalpha_re.sub("_", key)
def _generate_styles(fonts, relpath):
    """Return css @font-face and class rules for the font columns.

    relpath is the path from the html file to the font files; when None,
    absolute file:// urls are used instead.  Keys with no font file get a
    size-only class so their cells still render."""
    face_pat = """@font-face {
      font-family: "%s"; src:url("%s")
    }"""
    facelines = []
    classlines = []
    for key, keyinfos in fonts:
        index = 0
        for font, _, _ in keyinfos:
            # Columns with several fonts get numbered class names.
            if len(keyinfos) > 1:
                kname = "%s_%d" % (replace_nonalpha(key), index)
            else:
                kname = replace_nonalpha(key)
            index += 1
            if not font:
                classlines.append(".%s { font-size: 12pt }" % kname)
            else:
                if relpath is None:
                    font = "file://" + font
                else:
                    font = path.join(relpath, path.basename(font))
                facelines.append(face_pat % (kname, font))
                classlines.append(
                    '.%s { font-family: "%s", "noto_0" }' % (kname, kname)
                )
    lines = []
    lines.extend(facelines)
    lines.append("")
    lines.extend(classlines)
    return "\n      ".join(lines)
def _character_string_html(codelist, used_font):
    """Return one html line holding every codelist char the font covers.

    used_font is a (key, keyinfos) pair; only the first font of the column
    is consulted.  C0 control characters are excluded.  Returns None when
    no characters remain."""
    C0_controls = frozenset(range(0, 0x20))
    rkey, rinfo = used_font
    _, _, f_codelist = rinfo[0]
    f_codeset = frozenset(f_codelist.codeset() - C0_controls)
    cps = [cp for cp in codelist.codes() if cp in f_codeset]
    if not cps:
        return None
    # bdo/ltr keeps rtl characters from reordering the sample line.
    line = ['<bdo class="', rkey, ' line" dir="ltr">']
    line.extend(unichr(cp) for cp in cps)
    line.append("</bdo>")
    return "".join(line)
# Cache of fontname -> TTFont, so metrics lookups reopen nothing.
_FONT_CACHE = {}


def _get_font(fontname):
    """Return the TTFont for fontname, loading and caching on first use."""
    font = _FONT_CACHE.get(fontname)
    if not font:
        font = ttLib.TTFont(fontname)
        _FONT_CACHE[fontname] = font
    return font
# lsb: left side bearing, rsb: right side bearing, wid: bbox width,
# adv: advance width, cy: vertical center of the bounding box.
GMetrics = collections.namedtuple("GMetrics", "lsb, rsb, wid, adv, cy")


def _get_cp_metrics(font, cp):
    # returns metrics for nominal glyph for cp, or None if cp not in font
    # (or the glyph has an empty outline).
    cmap = font_data.get_cmap(font)
    if cp not in cmap:
        return None
    glyphs = font.getGlyphSet()
    g = glyphs[cmap[cp]]
    pen = BoundsPen(glyphs)
    g.draw(pen)
    if not pen.bounds:
        return None
    xmin, ymin, xmax, ymax = pen.bounds
    # NOTE(review): under python 2 the '/ 2' is integer division for int
    # bounds — presumably intended, since callers format cy with %d.
    return GMetrics(xmin, g.width - xmax, xmax - xmin, g.width, (ymin + ymax) / 2)
_expr_re = re.compile(r"(\||&|(?<![0-9a-fA-F])-(?![0-9a-fA-F]))")
def _scan_expr(expr, def_names, used_names):
"""Scans the expression, building a list of operation tuples."""
result = []
op_str = "|"
while expr:
op = op_str
m = _expr_re.search(expr)
if not m:
exp = expr.strip()
expr = None
op_str = None
else:
exp = expr[: m.start()].strip()
expr = expr[m.end() :]
op_str = m.group(1)
if not exp:
raise Exception("empty expression after op %s" % op)
result.append((op, exp))
if exp in def_names:
used_names.add(exp)
return result
def _read_flag_data_from_file(filename):
    """Read flag data file and generate a list of tuples for creating
    the flag data map.  If filename is None, returns an empty list.

    Lines in the file either define a set used by a flag, or define
    a flag.  Define lines start with '!define ' followed by the name
    of the set (_0-9A-Za-z), '=', and the definition (a codelist).
    Definition lines have three fields separated by semicolon,
    the name of the flag, 'in' or 'not in', and the definition
    which can either be a codelist or an expression formed from
    names of !defined sets joined with '&' (intersection), '|'
    (union), or '-' (set difference).  These operations are performed
    in order left to right, there's no precedence.
    Predefined sets are '_emoji_', the unicode extended emoji values,
    and '_math_', codepoints with the 'Math' property.
    '#' is a comment to end-of line.  Blank lines are ignored.
    It's an error if there are multiple defined sets
    with the same name or multiple flags with the same name.

    This returns a list of 3-tuples, one for each set used by a
    flag, then one for each flag.  Tuples for defined sets are
    ('!define', set_name, set_spec),
    where set_spec is None if the set_name is special, like '_emoji_'.
    Tuples for flags are
    (flag_name, True/False, [(op,expr)]),
    where the list of op, expr tuples has the op character
    ('|' '&', '-') and a define name or a codelist."""
    if not filename:
        return []
    predefined = ["_emoji_", "_math_"]
    def_names = set(predefined)
    def_re = re.compile(r"!define ([a-zA-Z][a-zA-Z0-9_]*)\s*=\s*(.*)\s*")
    flag_re = re.compile(r"([^;]+);\s*(in|not in)\s*;\s*(.*)\s*")
    def_info = [("!define", item, None) for item in predefined]
    flag_info = []
    with open(filename, "r") as f:
        for line in f.readlines():
            # Strip comments and skip blank lines.
            ix = line.find("#")
            if ix > -1:
                line = line[:ix]
            line = line.strip()
            if not line:
                continue
            if line.startswith("!"):
                m = def_re.match(line)
                if not m:
                    raise Exception('could not match definition line "%s"' % line)
                def_name = m.group(1)
                def_codelist = m.group(2)
                if def_name in def_names:
                    raise Exception(
                        'more than one flag definition named "%s"' % def_name
                    )
                def_names.add(def_name)
                def_info.append(("!define", def_name, def_codelist))
            else:
                m = flag_re.match(line)
                if not m:
                    raise Exception('could not match set definition line "%s"' % line)
                flag_name = m.group(1)
                flag_in_str = m.group(2)
                if flag_in_str == "in":
                    flag_in = True
                elif flag_in_str == "not in":
                    flag_in = False
                else:
                    raise Exception(
                        "found \"%s\" but expected 'in' or 'not in'" % flag_in_str
                    )
                flag_expr = m.group(3)
                flag_info.append([flag_name, flag_in, flag_expr])
    # Tokenize flag expressions and record which defined sets they use,
    # so unused defines can be dropped from the result.
    used_names = set()
    flag_expr_info = []
    for flag_name, flag_in, flag_expr in flag_info:
        expr_list = _scan_expr(flag_expr, def_names, used_names)
        flag_expr_info.append((flag_name, flag_in, expr_list))
    used_defs = [t for t in def_info if t[1] in used_names]
    return used_defs + flag_expr_info
"""
def _generate_html_lines(outfile, fontkey):
ascii_chars = u'#*0123456789 '
epact_chars = u''.join(unichr(cp) for cp in range(0x102e1, 0x102fb + 1)) + ' '
phaistos_chars = u''.join(unichr(cp) for cp in range(0x101d0, 0x101fc + 1)) + ' '
stringlist = [
ascii_chars,
u''.join(u'%s\u20e3' % c for c in ascii_chars),
epact_chars,
u''.join(u'%s\U000102e0' % c for c in epact_chars),
phaistos_chars,
u''.join(u'%s\U000101fd' % c for c in phaistos_chars),
]
lines = ['<h3>Sequences</h3>']
lines.append('<div class="%s line">' % fontkey)
for string in stringlist:
lines.append(string + '<br/>')
lines.append('</div>')
outfile.write('\n'.join(lines) + '\n')
"""
def generate_html(
    outfile,
    title,
    fonts,
    targets,
    flag_sets,
    context,
    metrics,
    cp_to_targets,
    data_dir,
    relpath,
):
    """If not None, relpath is the relative path from the outfile to
    the datadir, for use when generating font paths.

    Writes the html header (with generated styles), the font/target key,
    then each target's html, and finally the footer."""
    template = string.Template(_HTML_HEADER_TEMPLATE)
    styles = _generate_styles(fonts, relpath)
    # Metrics columns need extra styles; context glyphs use a special font.
    mstyles = _METRICS_STYLES if metrics is not None else ""
    contextfont = _CONTEXT_FONT if context else "sansserif"
    outfile.write(
        template.substitute(
            title=title, styles=styles, mstyles=mstyles, contextfont=contextfont
        )
        + "\n"
    )
    outfile.write(_generate_fontkey(fonts, targets, data_dir) + "\n")
    # hardcode font key for now
    # _generate_html_lines(outfile, 'sym4')
    for index, target in enumerate(targets):
        outfile.write(
            target.generate_html(index, context, metrics, flag_sets, cp_to_targets)
            + "\n"
        )
    outfile.write(_HTML_FOOTER + "\n")
def _build_cp_to_targets(targets):
"""Return a map from cp to a list of pairs of target group index and
name."""
cp_to_targets = collections.defaultdict(list)
# for i, (name, codelist, _) in enumerate(targets):
for i, target in enumerate(targets):
tinfo = (i, target.name)
for cp in target.codes():
cp_to_targets[cp].append(tinfo)
return cp_to_targets
def generate(
    outfile,
    fmt,
    data_dir,
    font_spec,
    target_spec,
    flag_spec,
    title=None,
    context=None,
    metrics=False,
    relpath=None,
):
    """Read the spec files from data_dir and write the report to outfile.

    fmt is 'txt' or 'html'.  flag_spec may be falsy (no flags).  metrics
    and context only affect html output; relpath is the path from the
    output file to data_dir, used for font urls."""
    if not path.isdir(data_dir):
        raise Exception('data dir "%s" does not exist' % data_dir)
    font_data = _read_font_data_from_file(path.join(data_dir, font_spec))
    target_data = _read_target_data_from_file(path.join(data_dir, target_spec))
    flag_data = _read_flag_data_from_file(
        None if not flag_spec else path.join(data_dir, flag_spec)
    )
    fonts, targets, flag_sets = _load_fonts_targets_flags(
        font_data, target_data, flag_data, data_dir
    )
    if fmt == "txt":
        generate_text(outfile, title, fonts, targets, flag_sets, metrics, data_dir)
    elif fmt == "html":
        cp_to_targets = _build_cp_to_targets(targets)
        generate_html(
            outfile,
            title,
            fonts,
            targets,
            flag_sets,
            context,
            metrics,
            cp_to_targets,
            data_dir,
            relpath,
        )
    else:
        raise Exception('unrecognized format "%s"' % fmt)
def _parse_metrics_file(filename):
"""format is 'cp;lsb;adv' with cp in hex."""
metrics = {}
with open(filename, "r") as f:
for line in f:
ix = line.find("#")
if ix >= 0:
line = line[:ix]
line = line.strip()
if not line:
continue
cp, lsb, adv, cy = line.split(";")
cp = int(cp, 16)
lsb = int(lsb)
adv = int(adv)
cy = int(cy)
if cp in metrics:
raise Exception("cp %04x listed twice in %s" % (cp, filename))
metrics[cp] = (lsb, adv, cy)
return metrics
def _call_generate(
    outfile,
    fmt,
    data_dir,
    font_spec,
    target_spec,
    flag_spec,
    title=None,
    context=None,
    metrics=None,
):
    """Resolve paths, format, and metrics, then invoke generate.

    metrics may be None (no metrics), '-' (metrics with no override file),
    or the name of an override file relative to data_dir.  When outfile is
    given, its extension and fmt must agree (either may imply the other);
    otherwise output goes to stdout, defaulting to text."""
    data_dir = path.realpath(path.abspath(data_dir))
    if metrics is not None:
        if metrics == "-":
            # Metrics requested with no override file.
            metrics = {}
        else:
            metrics = _parse_metrics_file(path.join(data_dir, metrics))
    if outfile:
        outfile = path.realpath(path.abspath(outfile))
        base, ext = path.splitext(outfile)
        if ext:
            ext = ext[1:]
        # Reconcile the output extension with the requested format.
        if not ext:
            if not fmt:
                fmt = "txt"
                ext = "txt"
            else:
                ext = fmt
        elif not fmt:
            if ext not in ["html", "txt"]:
                raise Exception('don\'t understand "%s" format' % ext)
            fmt = ext
        elif ext != fmt:
            raise Exception(
                'mismatching format "%s" and output extension "%s"' % (fmt, ext)
            )
        outfile = base + "." + ext
        # Compute the html-relative path from the output dir to the data
        # dir; None means fall back to absolute file:// font urls.
        outdir = path.dirname(outfile)
        if data_dir == outdir:
            relpath = ""
        elif data_dir.startswith(outdir):
            relpath = data_dir[len(outdir) + 1 :]
        else:
            relpath = None
        with codecs.open(outfile, "w", "utf-8") as f:
            generate(
                f,
                fmt,
                data_dir,
                font_spec,
                target_spec,
                flag_spec,
                title,
                context,
                metrics,
                relpath,
            )
    else:
        if not fmt:
            fmt = "txt"
        generate(
            sys.stdout,
            fmt,
            data_dir,
            font_spec,
            target_spec,
            flag_spec,
            title,
            context,
            metrics,
        )
def main():
    """Command-line driver: parse options and produce the comparison report."""
    DEFAULT_OUT = "dingbats_compare"
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-o", "--outfile", metavar="file", nargs="?", const=DEFAULT_OUT,
        help="Path to output file (will use %s)" % DEFAULT_OUT)
    parser.add_argument(
        "-t", "--output_type", choices=["txt", "html"],
        help='output format (defaults based on outfile extension, else "txt")')
    parser.add_argument(
        "-d", "--data_dir", metavar="dir", required=True,
        help="Path to directory containing fonts and data")
    parser.add_argument(
        "--font_spec", metavar="file", default="font_data.txt",
        help="Name of font spec file relative to data dir (default 'font_data.txt')")
    parser.add_argument(
        "--target_spec", metavar="file", default="target_data.txt",
        help="Name of target spec file relative to data dir (default 'target_data.txt')")
    parser.add_argument(
        "--flag_spec", metavar="file", nargs="?", const="flag_data.txt",
        help="Name of flag spec file relative to data dir (uses 'flag_data.txt' with no arg)")
    parser.add_argument(
        "--title", metavar="title", default="Character and Font Comparison",
        help="Title on html page")
    parser.add_argument(
        "--context", metavar="ctx", nargs="?",
        const='<span class="ctx">O</span>%s<span class="ctx">g</span>',
        help="Context pattern for glyphs (e.g. 'O%%sg')")
    parser.add_argument(
        "-m", "--metrics", metavar="file", nargs="?", const="-",
        help="Report metrics of target font, optionally with preferred metrics file")
    args = parser.parse_args()
    _call_generate(
        args.outfile,
        args.output_type,
        args.data_dir,
        args.font_spec,
        args.target_spec,
        args.flag_spec,
        args.title,
        args.context,
        args.metrics,
    )
# Script entry point: run the command-line driver when executed directly.
if __name__ == "__main__":
    main()
| |
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" BGP Open message"""
import struct
import netaddr
from yabgp.common import exception as excp
from yabgp.common import constants as bgp_cons
class Open(object):
    """BGP OPEN message (RFC 4271, Section 4.2).

    After a TCP connection is established, the first message sent by each
    side is an OPEN message.  If the OPEN message is acceptable, a
    KEEPALIVE message confirming the OPEN is sent back.
    """

    def __init__(self, version=None, asn=None, hold_time=None,
                 bgp_id=None, opt_para_len=None, opt_paras=None):
        """
        :param version: BGP Protocol version.
        :param asn: AS number.
        :param hold_time: Hold time
        :param bgp_id: BGP Router ID
        :param opt_para_len: Optional Parameters length
        :param opt_paras: Optional Parameters
        """
        # Wire layout:
        #  1-octet
        # +-----------+
        # |  Version  |
        # +-----------+-----------+
        # |  My Autonomous System |
        # +-----------+-----------+
        # |        Hold Time      |
        # +-----------+-----------+-----------+-----------+
        # |                BGP Identifier                 |
        # +-----------+-----------+-----------+-----------+
        # |OptParm Len|
        # +-----------+-----------+-----------+-----------+
        # |       Optional Parameters (variable)          |
        # +-----------+-----------+-----------+-----------+
        self.version = version
        self.asn = asn
        self.hold_time = hold_time
        self.bgp_id = bgp_id
        self.opt_para_len = opt_para_len
        self.opt_paras = opt_paras
        # Parsed Capabilities are collected here as {name_or_code: value}.
        self.capa_dict = {}

    def parse(self, message):
        """Parse a BGP OPEN message body and return a summary dict.

        :param message: raw OPEN payload (message header already stripped)
        :return: dict with Version/ASN/holdTime/bgpID/Capabilities keys
        :raises excp.MessageHeaderError: buffer shorter than the fixed part
        :raises excp.OpenMessageError: bad version, peer ASN or BGP id,
            or an unsupported optional parameter type
        """
        try:
            self.version, self.asn, self.hold_time, \
                self.bgp_id, self.opt_para_len = struct.unpack('!BHHIB', message[:10])
        except struct.error:
            # Narrowed from a bare `except:`; only a short/truncated
            # buffer can make unpack fail here.
            raise excp.MessageHeaderError(
                sub_error=bgp_cons.ERR_MSG_HDR_BAD_MSG_LEN,
                data=message[:10])

        if self.version != 4:
            # Here we just support BGP-4
            raise excp.OpenMessageError(
                sub_error=bgp_cons.ERR_MSG_OPEN_UNSUP_VERSION,
                data=self.version)

        # NOTE(review): struct.unpack('!H', ...) always yields an int, so
        # this float branch looks unreachable; kept for compatibility.
        if isinstance(self.asn, float):
            self.asn = str(self.asn).split('.')
            self.asn = 65536 * (int(self.asn[0])) + int(self.asn[1])
        if self.asn in (0, 2 ** 16 - 1):
            # bad peer asn
            raise excp.OpenMessageError(
                sub_error=bgp_cons.ERR_MSG_OPEN_BAD_PEER_AS,
                data=self.asn)

        # Hold Time negotiation is out of this scope

        # Validate the router id while it is still an integer.  (This fix:
        # the old code converted bgp_id to a dotted-quad string *before*
        # this check, so comparing against ints could never match.)
        if self.bgp_id in (0, 2 ** 32 - 1):
            raise excp.OpenMessageError(
                sub_error=bgp_cons.ERR_MSG_OPEN_BAD_BGP_ID,
                data=self.bgp_id)
        self.bgp_id = str(netaddr.IPAddress(self.bgp_id))

        # Optional Parameters
        if self.opt_para_len:
            self.opt_paras = message[10:]
            # While Loop: Parse one Optional Parameter(Capability) each time
            while self.opt_paras:
                #  1 octet     1 octet       variable
                # --------------------------------------+
                # para_type | para_length | para_value  |
                # --------------------------------------+
                opt_para_type, opt_para_length = struct.unpack('!BB', self.opt_paras[:2])

                # Parameter Type 1: Authentication (deprecated) [RFC4271] [RFC5492]
                # Parameter Type 2: Capabilities [RFC5492]
                # Here we only support Type 2
                if opt_para_type != 2:
                    # if type is not type 2, return an suberror used to Notification
                    raise excp.OpenMessageError(
                        sub_error=bgp_cons.ERR_MSG_OPEN_UNSUP_OPT_PARAM,
                        data=message[10:])

                # ---------------------- Parse Capabilities ------------------#
                # capabilities belongs to one Optional Parameter Capability
                capabilities = self.opt_paras[2:opt_para_length + 2]
                while capabilities:
                    # ---- Parse every capability in this Optional Parameter
                    capability = Capability()
                    capability.parse(capabilities)
                    # (1) for 4 bytes ASN
                    if capability.capa_code == capability.FOUR_BYTES_ASN:
                        asn = struct.unpack('!I', capability.capa_value)[0]
                        self.asn = asn
                        self.capa_dict['four_bytes_as'] = True
                    # (2) Multiprotocol Extensions for BGP-4
                    elif capability.capa_code == capability.MULTIPROTOCOL_EXTENSIONS:
                        if 'afi_safi' not in self.capa_dict:
                            self.capa_dict['afi_safi'] = []
                        afi, res, safi = struct.unpack('!HBB', capability.capa_value)
                        self.capa_dict['afi_safi'].append((afi, safi))
                    # (3) Route Refresh
                    elif capability.capa_code == capability.ROUTE_REFRESH:
                        self.capa_dict['route_refresh'] = True
                    # (4) Cisco Route Refresh
                    elif capability.capa_code == capability.CISCO_ROUTE_REFRESH:
                        self.capa_dict['cisco_route_refresh'] = True
                    # (5) Graceful Restart
                    elif capability.capa_code == capability.GRACEFUL_RESTART:
                        self.capa_dict['graceful_restart'] = True
                    # (6) Cisco MultiSession
                    elif capability.capa_code == capability.CISCO_MULTISESSION_BGP:
                        self.capa_dict['cisco_multi_session'] = True
                    # (7) enhanced route refresh
                    elif capability.capa_code == capability.ENHANCED_ROUTE_REFRESH:
                        self.capa_dict['enhanced_route_refresh'] = True
                    # (8) add path
                    elif capability.capa_code == capability.ADD_PATH:
                        self.capa_dict['add_path'] = True
                    else:
                        # Unknown capabilities are kept keyed by their code.
                        self.capa_dict[str(capability.capa_code)] = capability.capa_value
                    capabilities = capabilities[2 + capability.capa_length:]
                # Go to next Optional Parameter
                self.opt_paras = self.opt_paras[opt_para_length + 2:]
        return {
            'Version': self.version,
            'ASN': self.asn,
            'holdTime': self.hold_time,
            'bgpID': self.bgp_id,
            'Capabilities': self.capa_dict
        }

    @staticmethod
    def construct_header(msg):
        """Prepend the mandatory BGP header (type 1 = OPEN) to `msg`.

        #    16-octet     2-octet  1-octet
        # ---------------+--------+---------+------+
        #     Marker     | Length |  Type   | msg  |
        # ---------------+--------+---------+------+
        """
        return b'\xff'*16 + struct.pack('!HB',
                                        len(msg) + 19,
                                        1) + msg

    def construct(self, my_capability):
        """ Construct a BGP Open message """
        capas = b''
        # Construct Capabilities Optional Parameter (Parameter Type 2)
        if 'afi_safi' in my_capability:
            # Multiprotocol extentions capability
            capas += Capability(capa_code=1, capa_length=4).construct(my_capability)
        if my_capability.get('cisco_route_refresh'):
            # Cisco Route refresh capability
            capas += Capability(capa_code=128, capa_length=0).construct(my_capability)
        if my_capability.get('route_refresh'):
            # Route Refresh capability
            capas += Capability(capa_code=2, capa_length=0).construct(my_capability)
        # 4 bytes ASN
        if self.asn > 65535:
            capas += Capability(capa_code=65, capa_length=4, capa_value=self.asn).construct(my_capability)
            # The 2-octet ASN field cannot hold a 4-byte ASN; advertise
            # the real ASN via the capability and put 23456 (AS_TRANS)
            # in the fixed header.
            self.asn = 23456
        else:
            if my_capability.get('four_bytes_as'):
                capas += Capability(capa_code=65, capa_length=4, capa_value=self.asn).construct(my_capability)
        # for add path
        if my_capability.get('add_path'):
            capas += Capability(capa_code=69, capa_length=4, capa_value=my_capability['add_path']).construct()
        if my_capability.get('enhanced_route_refresh'):
            capas += Capability(capa_code=70, capa_length=0).construct()
        open_header = struct.pack('!BHHIB', self.version, self.asn, self.hold_time,
                                  self.bgp_id, len(capas))
        message = open_header + capas
        return self.construct_header(message)
# ========================================================================== Optional Parameters
class Capability(object):
    """BGP OPEN Optional Parameter: Capability (RFC 5492).

    The parameter contains one or more triples <Capability Code,
    Capability Length, Capability Value>, where each triple is encoded as
    shown below:

        +------------------------------+
        | Capability Code (1 octet)    |
        +------------------------------+
        | Capability Length (1 octet)  |
        +------------------------------+
        | Capability Value (variable)  |
        ~                              ~
        +------------------------------+

    Capability Code: one-octet unsigned integer that unambiguously
    identifies an individual capability.

    Capability Length: one-octet unsigned integer giving the length of
    the Capability Value field in octets.

    Capability Value: variable-length field interpreted according to the
    Capability Code.
    """
    # Capability Codes (IANA): http://www.iana.org/assignments/capability-codes/
    #   1-63    IETF Review
    #   64-127  First Come First Served
    #   128-255 Reserved for Private Use (IANA does not assign)
    RESERVED = 0x00  # [RFC5492]
    MULTIPROTOCOL_EXTENSIONS = 0x01  # [RFC2858]
    ROUTE_REFRESH = 0x02  # [RFC2918]
    OUTBOUND_ROUTE_FILTERING = 0x03  # [RFC5291]
    MULTIPLE_ROUTES = 0x04  # [RFC3107]
    EXTENDED_NEXT_HOP = 0x05  # [RFC5549]
    # 6-63 Unassigned
    GRACEFUL_RESTART = 0x40  # [RFC4724]
    FOUR_BYTES_ASN = 0x41  # [RFC4893]
    # 66 Deprecated (2003-03-06)
    DYNAMIC_CAPABILITY = 0x43  # [draft-ietf-idr-dynamic-cap]
    MULTISESSION_BGP = 0x44  # [Chandra_Appanna]
    ADD_PATH = 0x45  # [draft-ietf-idr-add-paths]
    ENHANCED_ROUTE_REFRESH = 0x46  # [draft-keyur-bgp-enhanced-route-refresh]
    # 70-127 Unassigned
    CISCO_ROUTE_REFRESH = 0x80  # I Can only find reference to this in the router logs
    # 128-255 Reserved for Private Use [RFC5492]
    CISCO_MULTISESSION_BGP = 0x83  # [Multisession BGP draft-ietf-idr-bgp-multisession-06]
    unassigned = range(70, 128)
    reserved = range(128, 256)

    def __init__(self, capa_code=None, capa_length=None, capa_value=None):
        """
        +------------------------------+
        | Capability Code (1 octet)    |
        +------------------------------+
        | Capability Length (1 octet)  |
        +------------------------------+
        | Capability Value (variable)  |
        ~                              ~
        +------------------------------+
        """
        self.capa_code = capa_code
        self.capa_length = capa_length
        self.capa_value = capa_value

    def parse(self, message):
        """Parse one capability triple from the head of `message`.

        :raises excp.OpenMessageError: if the 2-byte code/length header
            cannot be unpacked (buffer too short).
        """
        try:
            self.capa_code, self.capa_length = struct.unpack('!BB', message[:2])
        except struct.error:
            # Narrowed from a bare `except:`; only a short buffer fails here.
            raise excp.OpenMessageError(
                sub_error=bgp_cons.ERR_MSG_HDR_BAD_MSG_LEN,
                data=message[:2])
        self.capa_value = message[2:self.capa_length + 2]

    def construct(self, my_capability=None):
        """Construct a capability PDU (wrapped in an Optional Parameter
        type-2 header).

        :param my_capability: configuration dict; only consulted for the
            multiprotocol-extensions capability ('afi_safi' key).
        :return: packed bytes, or None for capability codes not handled
            below (callers must only pass supported codes).
        """
        # for 4 bytes as
        if self.capa_code == self.FOUR_BYTES_ASN:
            # capa_value holds the 4-octet ASN as an int here.
            return struct.pack('!BBBBI', 2, 6, self.FOUR_BYTES_ASN, self.capa_length, self.capa_value)
        # for route refresh
        elif self.capa_code == self.ROUTE_REFRESH:
            return struct.pack('!BBBB', 2, 2, self.ROUTE_REFRESH, 0)
        # for cisco route refresh
        elif self.capa_code == self.CISCO_ROUTE_REFRESH:
            return struct.pack('!BBBB', 2, 2, self.CISCO_ROUTE_REFRESH, 0)
        # graceful restart
        elif self.capa_code == self.GRACEFUL_RESTART:
            return struct.pack('!BBBB', 2, 2, self.GRACEFUL_RESTART, 0)
        # for multiprotocol extentions
        elif self.capa_code == self.MULTIPROTOCOL_EXTENSIONS:
            # One 4-byte AFI/SAFI entry per configured (afi, safi) pair,
            # e.g. <ipv4,unicast> and <ipv4,mplsvpn>.
            afisafi = b''
            for (afi, safi) in my_capability['afi_safi']:
                afisafi += struct.pack('!BBBBHBB', 2, 6, self.MULTIPROTOCOL_EXTENSIONS, 4, afi, 0, safi)
            return afisafi
        # for add path
        elif self.capa_code == self.ADD_PATH:
            # Hard-coded to one <AFI=1, SAFI=1, send/receive=capa_value> tuple.
            add_path = struct.pack('!BBBBHBB', 2, 6, self.ADD_PATH, self.capa_length, 1, 1, self.capa_value)
            return add_path
        elif self.capa_code == self.ENHANCED_ROUTE_REFRESH:
            return struct.pack('!BBBB', 2, 2, self.ENHANCED_ROUTE_REFRESH, 0)
        # NOTE(review): unsupported codes fall through and return None.
| |
import collections
import logging
from typing import Dict, List, Optional
import numpy as np
from ray.tune import trial_runner
from ray.tune.result import DEFAULT_METRIC
from ray.tune.trial import Trial
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
logger = logging.getLogger(__name__)
class MedianStoppingRule(FIFOScheduler):
    """Implements the median stopping rule as described in the Vizier paper:

    https://research.google.com/pubs/pub46180.html

    Args:
        time_attr (str): The training result attr to use for comparing time.
            Note that you can pass in something non-temporal such as
            `training_iteration` as a measure of progress, the only requirement
            is that the attribute should increase monotonically.
        metric (str): The training result objective value attribute. Stopping
            procedures will use this attribute. If None but a mode was passed,
            the `ray.tune.result.DEFAULT_METRIC` will be used per default.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        grace_period (float): Only stop trials at least this old in time.
            The mean will only be computed from this time onwards. The units
            are the same as the attribute named by `time_attr`.
        min_samples_required (int): Minimum number of trials to compute median
            over.
        min_time_slice (float): Each trial runs at least this long before
            yielding (assuming it isn't stopped). Note: trials ONLY yield if
            there are not enough samples to evaluate performance for the
            current result AND there are other trials waiting to run.
            The units are the same as the attribute named by `time_attr`.
        hard_stop (bool): If False, pauses trials instead of stopping
            them. When all other trials are complete, paused trials will be
            resumed and allowed to run FIFO.
    """

    def __init__(self,
                 time_attr: str = "time_total_s",
                 reward_attr: Optional[str] = None,
                 metric: Optional[str] = None,
                 mode: Optional[str] = None,
                 grace_period: float = 60.0,
                 min_samples_required: int = 3,
                 min_time_slice: int = 0,
                 hard_stop: bool = True):
        # `reward_attr` is the legacy spelling of (metric, mode="max");
        # translate it and warn.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        FIFOScheduler.__init__(self)
        # Trials this rule has already stopped (or paused, if not hard_stop).
        self._stopped_trials = set()
        self._grace_period = grace_period
        self._min_samples_required = min_samples_required
        self._min_time_slice = min_time_slice
        self._metric = metric
        # "Worst possible value" sentinel and comparison op, both derived
        # from `mode` (may also be filled in via set_search_properties).
        self._worst = None
        self._compare_op = None
        self._mode = mode
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
            self._worst = float("-inf") if self._mode == "max" else float(
                "inf")
            self._compare_op = max if self._mode == "max" else min
        self._time_attr = time_attr
        self._hard_stop = hard_stop
        self._trial_state = {}
        # Last time (in `time_attr` units) each trial was paused by this rule.
        self._last_pause = collections.defaultdict(lambda: float("-inf"))
        # Every result reported so far, keyed by trial.
        self._results = collections.defaultdict(list)

    def set_search_properties(self, metric: Optional[str],
                              mode: Optional[str]) -> bool:
        # Adopt metric/mode from the search algorithm unless already set;
        # returns False on a conflicting configuration.
        if self._metric and metric:
            return False
        if self._mode and mode:
            return False
        if metric:
            self._metric = metric
        if mode:
            self._mode = mode
        self._worst = float("-inf") if self._mode == "max" else float("inf")
        self._compare_op = max if self._mode == "max" else min
        if self._metric is None and self._mode:
            # If only a mode was passed, use anonymous metric
            self._metric = DEFAULT_METRIC
        return True

    def on_trial_add(self, trial_runner: "trial_runner.TrialRunner",
                     trial: Trial):
        # Fail fast if metric/mode were never provided by user or searcher.
        if not self._metric or not self._worst or not self._compare_op:
            raise ValueError(
                "{} has been instantiated without a valid `metric` ({}) or "
                "`mode` ({}) parameter. Either pass these parameters when "
                "instantiating the scheduler, or pass them as parameters "
                "to `tune.run()`".format(self.__class__.__name__, self._metric,
                                         self._mode))
        super(MedianStoppingRule, self).on_trial_add(trial_runner, trial)

    def on_trial_result(self, trial_runner: "trial_runner.TrialRunner",
                        trial: Trial, result: Dict) -> str:
        """Callback for early stopping.

        This stopping rule stops a running trial if the trial's best objective
        value by step `t` is strictly worse than the median of the running
        averages of all completed trials' objectives reported up to step `t`.
        """
        # Results lacking the time or metric attribute cannot be judged.
        if self._time_attr not in result or self._metric not in result:
            return TrialScheduler.CONTINUE
        if trial in self._stopped_trials:
            assert not self._hard_stop
            # Fall back to FIFO
            return TrialScheduler.CONTINUE
        time = result[self._time_attr]
        self._results[trial].append(result)
        if time < self._grace_period:
            # Too early to judge this trial.
            return TrialScheduler.CONTINUE
        # Peer trials that have progressed at least as far as this one.
        trials = self._trials_beyond_time(time)
        trials.remove(trial)
        if len(trials) < self._min_samples_required:
            action = self._on_insufficient_samples(trial_runner, trial, time)
            if action == TrialScheduler.PAUSE:
                self._last_pause[trial] = time
                action_str = "Yielding time to other trials."
            else:
                action_str = "Continuing anyways."
            logger.debug(
                "MedianStoppingRule: insufficient samples={} to evaluate "
                "trial {} at t={}. {}".format(
                    len(trials), trial.trial_id, time, action_str))
            return action
        median_result = self._median_result(trials, time)
        best_result = self._best_result(trial)
        logger.debug("Trial {} best res={} vs median res={} at t={}".format(
            trial, best_result, median_result, time))
        # Stop/pause when the trial's best result is strictly worse than
        # the median of its peers' running means.
        if self._compare_op(median_result, best_result) != best_result:
            logger.debug("MedianStoppingRule: early stopping {}".format(trial))
            self._stopped_trials.add(trial)
            if self._hard_stop:
                return TrialScheduler.STOP
            else:
                return TrialScheduler.PAUSE
        else:
            return TrialScheduler.CONTINUE

    def on_trial_complete(self, trial_runner: "trial_runner.TrialRunner",
                          trial: Trial, result: Dict):
        # Keep the final result so completed trials still contribute to
        # the median computation.
        self._results[trial].append(result)

    def debug_string(self) -> str:
        """One-line status string for progress output."""
        return "Using MedianStoppingRule: num_stopped={}.".format(
            len(self._stopped_trials))

    def _on_insufficient_samples(self,
                                 trial_runner: "trial_runner.TrialRunner",
                                 trial: Trial, time: float) -> str:
        """Decide PAUSE vs CONTINUE when there are too few peer samples."""
        # Pause only if the trial has held its time slice long enough AND
        # some other trial is actually waiting to run.
        pause = time - self._last_pause[trial] > self._min_time_slice
        pause = pause and [
            t for t in trial_runner.get_live_trials()
            if t.status in (Trial.PENDING, Trial.PAUSED)
        ]
        return TrialScheduler.PAUSE if pause else TrialScheduler.CONTINUE

    def _trials_beyond_time(self, time: float) -> List[Trial]:
        """Trials whose most recent reported time is at least `time`."""
        trials = [
            trial for trial in self._results
            if self._results[trial][-1][self._time_attr] >= time
        ]
        return trials

    def _median_result(self, trials: List[Trial], time: float):
        """Median of the given trials' running means up to `time`."""
        return np.median([self._running_mean(trial, time) for trial in trials])

    def _running_mean(self, trial: Trial, time: float) -> np.ndarray:
        """Mean of a trial's metric between the grace period and `time`."""
        results = self._results[trial]
        # TODO(ekl) we could do interpolation to be more precise, but for now
        # assume len(results) is large and the time diffs are roughly equal
        scoped_results = [
            r for r in results
            if self._grace_period <= r[self._time_attr] <= time
        ]
        return np.mean([r[self._metric] for r in scoped_results])

    def _best_result(self, trial):
        """Best (per `mode`) metric value the trial has reported so far."""
        results = self._results[trial]
        return self._compare_op([r[self._metric] for r in results])
| |
# -*- coding: utf-8 -*-
import cStringIO
import contextlib
import datetime
import hashlib
import inspect
import logging
import math
import mimetypes
import unicodedata
import os
import re
import time
import urlparse
from PIL import Image
from sys import maxint
import werkzeug
# optional python-slugify import (https://github.com/un33k/python-slugify)
try:
import slugify as slugify_lib
except ImportError:
slugify_lib = None
import openerp
from openerp.osv import orm, osv, fields
from openerp.tools import html_escape as escape, ustr, image_resize_and_sharpen, image_save_for_web
from openerp.tools.safe_eval import safe_eval
from openerp.addons.web.http import request
logger = logging.getLogger(__name__)
def url_for(path_or_uri, lang=None):
    """Return `path_or_uri` rewritten with the appropriate language prefix.

    Only local URLs (no scheme, no netloc) are rewritten.  When `lang` is
    given it is forced into the path; otherwise the request context
    language is used.  The website's default language never gets a prefix.
    Returns a unicode string.
    """
    if isinstance(path_or_uri, unicode):
        path_or_uri = path_or_uri.encode('utf-8')
    current_path = request.httprequest.path
    if isinstance(current_path, unicode):
        current_path = current_path.encode('utf-8')
    location = path_or_uri.strip()
    force_lang = lang is not None
    url = urlparse.urlparse(location)
    if request and not url.netloc and not url.scheme and (url.path or force_lang):
        # Resolve relative to the current request path before prefixing.
        location = urlparse.urljoin(current_path, location)
        lang = lang or request.context.get('lang')
        langs = [lg[0] for lg in request.website.get_languages()]
        if (len(langs) > 1 or force_lang) and is_multilang_url(location, langs):
            ps = location.split('/')
            if ps[1] in langs:
                # Replace the language only if we explicitly provide a language to url_for
                if force_lang:
                    ps[1] = lang
                # Remove the default language unless it's explicitly provided
                elif ps[1] == request.website.default_lang_code:
                    ps.pop(1)
            # Insert the context language or the provided language
            elif lang != request.website.default_lang_code or force_lang:
                ps.insert(1, lang)
            location = '/'.join(ps)
    return location.decode('utf-8')
def is_multilang_url(local_url, langs=None):
    """Return True when `local_url` resolves to a website controller that
    supports per-language URLs (routing 'multilang'), False otherwise."""
    if not langs:
        langs = [lg[0] for lg in request.website.get_languages()]
    parts = local_url.split('/')
    # A language code already embedded in the path is stripped before matching.
    if parts[1] in langs:
        parts.pop(1)
        local_url = '/'.join(parts)
    try:
        # Try to match an endpoint in werkzeug's routing table
        pieces = local_url.split('?')
        path = pieces[0]
        query_string = pieces[1] if len(pieces) > 1 else None
        router = request.httprequest.app.get_db_router(request.db).bind('')
        func = router.match(path, query_args=query_string)[0]
        return func.routing.get('website', False) and func.routing.get('multilang', True)
    except Exception:
        # Anything that fails to match is simply not a multilang URL.
        return False
def slugify(s, max_length=None):
    """Transform a string into a slug usable in a url path.

    Tries python-slugify first when available; otherwise normalizes the
    unicode string to ascii, lowercases it, and collapses non-word
    characters, spaces and underscores into single hyphens.

    :param s: str
    :param max_length: int
    :rtype: str
    """
    s = ustr(s)
    if slugify_lib:
        # Only python-slugify is supported; other `slugify` distributions
        # have an incompatible signature (hence the TypeError fallback).
        try:
            return slugify_lib.slugify(s, max_length=max_length)
        except TypeError:
            pass
    ascii_text = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore').decode('ascii')
    cleaned = re.sub('[\W_]', ' ', ascii_text).strip().lower()
    cleaned = re.sub('[-\s]+', '-', cleaned)
    return cleaned[:max_length]
def slug(value):
    """Return a 'name-id' slug for a browse record or (id, name) pair."""
    if isinstance(value, orm.browse_record):
        # [(id, name)] = value.name_get()
        rec_id, rec_name = value.id, value.display_name
    else:
        # assume name_search result tuple
        rec_id, rec_name = value
    base = slugify(rec_name or '').strip().strip('-')
    # Fall back to the bare id when the name slugifies to nothing.
    if not base:
        return str(rec_id)
    return "%s-%d" % (base, rec_id)
# NOTE: as the pattern is used as it for the ModelConverter (ir_http.py), do not use any flags
_UNSLUG_RE = re.compile(r'(?:(\w{1,2}|\w[A-Za-z0-9-_]+?\w)-)?(-?\d+)(?=$|/)')


def unslug(s):
    """Extract slug and id from a string.

    Always return un 2-tuple (str|None, int|None)
    """
    match = _UNSLUG_RE.match(s)
    if match is None:
        return None, None
    return match.group(1), int(match.group(2))
def urlplus(url, params):
    """Return `url` with `params` appended as a query string (werkzeug.Href)."""
    href = werkzeug.Href(url)
    return href(params or None)
class website(osv.osv):
def _get_menu_website(self, cr, uid, ids, context=None):
# IF a menu is changed, update all websites
return self.search(cr, uid, [], context=context)
def _get_menu(self, cr, uid, ids, name, arg, context=None):
root_domain = [('parent_id', '=', False)]
menus = self.pool.get('website.menu').search(cr, uid, root_domain, order='id', context=context)
menu = menus and menus[0] or False
return dict( map(lambda x: (x, menu), ids) )
_name = "website" # Avoid website.website convention for conciseness (for new api). Got a special authorization from xmo and rco
_description = "Website"
_columns = {
'name': fields.char('Domain'),
'company_id': fields.many2one('res.company', string="Company"),
'language_ids': fields.many2many('res.lang', 'website_lang_rel', 'website_id', 'lang_id', 'Languages'),
'default_lang_id': fields.many2one('res.lang', string="Default language"),
'default_lang_code': fields.related('default_lang_id', 'code', type="char", string="Default language code", store=True),
'social_twitter': fields.char('Twitter Account'),
'social_facebook': fields.char('Facebook Account'),
'social_github': fields.char('GitHub Account'),
'social_linkedin': fields.char('LinkedIn Account'),
'social_youtube': fields.char('Youtube Account'),
'social_googleplus': fields.char('Google+ Account'),
'google_analytics_key': fields.char('Google Analytics Key'),
'user_id': fields.many2one('res.users', string='Public User'),
'partner_id': fields.related('user_id','partner_id', type='many2one', relation='res.partner', string='Public Partner'),
'menu_id': fields.function(_get_menu, relation='website.menu', type='many2one', string='Main Menu',
store= {
'website.menu': (_get_menu_website, ['sequence','parent_id','website_id'], 10)
})
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool['ir.model.data'].xmlid_to_res_id(cr, openerp.SUPERUSER_ID, 'base.public_user'),
}
    # cf. Wizard hack in website_views.xml
    def noop(self, *args, **kwargs):
        """Intentional no-op used as a stub action (see wizard hack note)."""
        pass
def write(self, cr, uid, ids, vals, context=None):
self._get_languages.clear_cache(self)
return super(website, self).write(cr, uid, ids, vals, context)
    def new_page(self, cr, uid, name, template='website.default_page', ispage=True, context=None):
        """Create a new page view by copying `template`, unless a view with
        the slugified `name` already exists in the template's module.

        Returns the fully-qualified xmlid of the (new or existing) page.
        """
        context = context or {}
        imd = self.pool.get('ir.model.data')
        view = self.pool.get('ir.ui.view')
        template_module, template_name = template.split('.')

        # completely arbitrary max_length
        page_name = slugify(name, max_length=50)
        page_xmlid = "%s.%s" % (template_module, page_name)

        try:
            # existing page
            imd.get_object_reference(cr, uid, template_module, page_name)
        except ValueError:
            # new page: copy the template view, point its arch at the new
            # xmlid, then register that xmlid in ir.model.data.
            _, template_id = imd.get_object_reference(cr, uid, template_module, template_name)
            page_id = view.copy(cr, uid, template_id, context=context)
            page = view.browse(cr, uid, page_id, context=context)
            page.write({
                'arch': page.arch.replace(template, page_xmlid),
                'name': page_name,
                'page': ispage,
            })
            # noupdate=True so module upgrades do not overwrite user pages.
            imd.create(cr, uid, {
                'name': page_name,
                'module': template_module,
                'model': 'ir.ui.view',
                'res_id': page_id,
                'noupdate': True
            }, context=context)
        return page_xmlid
def page_for_name(self, cr, uid, ids, name, module='website', context=None):
# whatever
return '%s.%s' % (module, slugify(name, max_length=50))
def page_exists(self, cr, uid, ids, name, module='website', context=None):
try:
name = (name or "").replace("/page/website.", "").replace("/page/", "")
if not name:
return False
return self.pool["ir.model.data"].get_object_reference(cr, uid, module, name)
except:
return False
    @openerp.tools.ormcache(skiparg=3)
    def _get_languages(self, cr, uid, id, context=None):
        # Cached (per website id) list of (code, name) pairs for the
        # website's languages; write() clears this cache.
        website = self.browse(cr, uid, id)
        return [(lg.code, lg.name) for lg in website.language_ids]
def get_languages(self, cr, uid, ids, context=None):
return self._get_languages(cr, uid, ids[0], context=context)
    def get_alternate_languages(self, cr, uid, ids, req=None, context=None):
        """Build hreflang link data for every website language.

        Returns a list of dicts with 'hreflang', 'short' and 'href' keys
        describing the current request URL in each available language.
        """
        langs = []
        if req is None:
            req = request.httprequest
        default = self.get_current_website(cr, uid, context=context).default_lang_code
        uri = req.path
        if req.query_string:
            uri += '?' + req.query_string
        shorts = []
        for code, name in self.get_languages(cr, uid, ids, context=context):
            # The default language lives at the root; others get a /code prefix.
            lg_path = ('/' + code) if code != default else ''
            lg = code.split('_')
            shorts.append(lg[0])
            lang = {
                'hreflang': ('-'.join(lg)).lower(),
                'short': lg[0],
                'href': req.url_root[0:-1] + lg_path + uri,
            }
            langs.append(lang)
        # When a short code is unambiguous among the languages, advertise
        # it instead of the full locale (e.g. 'fr' rather than 'fr-fr').
        for lang in langs:
            if shorts.count(lang['short']) == 1:
                lang['hreflang'] = lang['short']
        return langs
def get_current_website(self, cr, uid, context=None):
# TODO: Select website, currently hard coded
return self.pool['website'].browse(cr, uid, 1, context=context)
def is_publisher(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
is_website_publisher = Access.check(cr, uid, 'ir.ui.view', 'write', False, context=context)
return is_website_publisher
def is_user(self, cr, uid, ids, context=None):
Access = self.pool['ir.model.access']
return Access.check(cr, uid, 'ir.ui.menu', 'read', False, context=context)
def get_template(self, cr, uid, ids, template, context=None):
if isinstance(template, (int, long)):
view_id = template
else:
if '.' not in template:
template = 'website.%s' % template
module, xmlid = template.split('.', 1)
model, view_id = request.registry["ir.model.data"].get_object_reference(cr, uid, module, xmlid)
return self.pool["ir.ui.view"].browse(cr, uid, view_id, context=context)
def _render(self, cr, uid, ids, template, values=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return self.pool['ir.ui.view'].render(cr, uid, template, values=values, context=context)
def render(self, cr, uid, ids, template, values=None, status_code=None, context=None):
# TODO: remove this. (just kept for backward api compatibility for saas-3)
return request.render(template, values, uid=uid)
    def pager(self, cr, uid, ids, url, total, page=1, step=30, scope=5, url_args=None, context=None):
        """Compute pagination data for `total` records at `step` per page.

        Returns a dict with page_count, offset and url/num mappings for
        the current, first, previous, next and last pages plus the visible
        window of page links (at most `scope` entries wide).
        """
        # Compute Pager
        page_count = int(math.ceil(float(total) / step))
        # Clamp the requested page into [1, page_count]; non-numeric -> 1.
        page = max(1, min(int(page if str(page).isdigit() else 1), page_count))
        scope -= 1

        # Window of page links roughly centred on the current page.
        pmin = max(page - int(math.floor(scope/2)), 1)
        pmax = min(pmin + scope, page_count)

        if pmax - pmin < scope:
            pmin = pmax - scope if pmax - scope > 0 else 1

        def get_url(page):
            # Page 1 is the bare url; later pages get a /page/N suffix.
            _url = "%s/page/%s" % (url, page) if page > 1 else url
            if url_args:
                _url = "%s?%s" % (_url, werkzeug.url_encode(url_args))
            return _url

        return {
            "page_count": page_count,
            "offset": (page - 1) * step,
            "page": {
                'url': get_url(page),
                'num': page
            },
            "page_start": {
                'url': get_url(pmin),
                'num': pmin
            },
            "page_previous": {
                'url': get_url(max(pmin, page - 1)),
                'num': max(pmin, page - 1)
            },
            "page_next": {
                'url': get_url(min(pmax, page + 1)),
                'num': min(pmax, page + 1)
            },
            "page_end": {
                'url': get_url(pmax),
                'num': pmax
            },
            # NOTE: xrange is Python 2 only.
            "pages": [
                {'url': get_url(page), 'num': page}
                for page in xrange(pmin, pmax+1)
            ]
        }
def rule_is_enumerable(self, rule):
""" Checks that it is possible to generate sensible GET queries for
a given rule (if the endpoint matches its own requirements)
:type rule: werkzeug.routing.Rule
:rtype: bool
"""
endpoint = rule.endpoint
methods = rule.methods or ['GET']
converters = rule._converters.values()
if not ('GET' in methods
and endpoint.routing['type'] == 'http'
and endpoint.routing['auth'] in ('none', 'public')
and endpoint.routing.get('website', False)
and all(hasattr(converter, 'generate') for converter in converters)
and endpoint.routing.get('website')):
return False
# dont't list routes without argument having no default value or converter
spec = inspect.getargspec(endpoint.method.original_func)
# remove self and arguments having a default value
defaults_count = len(spec.defaults or [])
args = spec.args[1:(-defaults_count or None)]
# check that all args have a converter
return all( (arg in rule._converters) for arg in args)
def enumerate_pages(self, cr, uid, ids, query_string=None, context=None):
    """ Available pages in the website/CMS. This is mostly used for links
    generation and can be overridden by modules setting up new HTML
    controllers for dynamic pages (e.g. blog).

    By default, returns template views marked as pages.

    :param str query_string: a (user-provided) string, fetches pages
                             matching the string
    :returns: a generator of mappings with (at least) the key ``loc``, the
              URL of the page; keys prefixed with ``__`` in the converter
              values are copied in (prefix stripped)
    :rtype: generator(dict)
    """
    router = request.httprequest.app.get_db_router(request.db)
    # Force enumeration to be performed as public user
    url_list = []  # urls already yielded, used for de-duplication
    for rule in router.iter_rules():
        if not self.rule_is_enumerable(rule):
            continue
        converters = rule._converters or {}
        # Static rules (no converter) can be filtered on their URL alone.
        if query_string and not converters and (query_string not in rule.build([{}], append_unknown=False)[1]):
            continue
        values = [{}]
        convitems = converters.items()
        # converters with a domain are processed after the other ones
        gd = lambda x: hasattr(x[1], 'domain') and (x[1].domain <> '[]')
        convitems.sort(lambda x, y: cmp(gd(x), gd(y)))
        # Cartesian product: each converter expands every partial value
        # dict produced by the previous converters.
        for (i,(name, converter)) in enumerate(convitems):
            newval = []
            for val in values:
                # Only the last converter receives the search string.
                query = i==(len(convitems)-1) and query_string
                for v in converter.generate(request.cr, uid, query=query, args=val, context=context):
                    newval.append( val.copy() )
                    # The converter yields its value under 'loc'; remap it
                    # to the converter's own argument name before merging.
                    v[name] = v['loc']
                    del v['loc']
                    newval[-1].update(v)
            values = newval
        for value in values:
            domain_part, url = rule.build(value, append_unknown=False)
            page = {'loc': url}
            # '__'-prefixed keys are result metadata, not URL arguments.
            for key,val in value.items():
                if key.startswith('__'):
                    page[key[2:]] = val
            if url in ('/sitemap.xml',):
                continue
            if url in url_list:
                continue
            url_list.append(url)
            yield page
def search_pages(self, cr, uid, ids, needle=None, limit=None, context=None):
    """Return up to ``limit`` enumerated pages whose URL contains ``needle``.

    :param needle: user-provided search string, possibly a full
                   "/page/..." path; None (or empty) matches every page
    :param limit: maximum number of results (None for no limit)
    :returns: list of page dicts as yielded by :meth:`enumerate_pages`
    """
    # Strip the controller prefixes so the enumeration query matches the
    # bare page/template name instead of the full path.
    name = (needle or "").replace("/page/website.", "").replace("/page/", "")
    res = []
    for page in self.enumerate_pages(cr, uid, ids, query_string=name, context=context):
        # Fix: the original tested ``needle in page['loc']``, which raises
        # TypeError when needle is None (its default); treat a missing
        # needle as "match everything" instead.
        if not needle or needle in page['loc']:
            res.append(page)
            if len(res) == limit:
                break
    return res
def kanban(self, cr, uid, ids, model, domain, column, template, step=None, scope=None, orderby=None, context=None):
    """Render a kanban board for ``model`` grouped by ``column``, with one
    independently paginated column per distinct value of ``column``.

    Per-column page numbers travel in the ``kanban`` query-string
    parameter as a comma-separated list of ``<column_id>-<page>`` pairs.
    """
    step = step and int(step) or 10    # records per column page
    scope = scope and int(scope) or 5  # pager window width
    orderby = orderby or "name"

    get_args = dict(request.httprequest.args or {})
    model_obj = self.pool[model]
    # ``column`` must be a relational field; its comodel provides the
    # kanban column headers.
    relation = model_obj._columns.get(column)._obj
    relation_obj = self.pool[relation]

    get_args.setdefault('kanban', "")
    kanban = get_args.pop('kanban')
    # Base url of the per-column pagers: current query-string without the
    # 'kanban' parameter, which each column re-appends with its own page.
    kanban_url = "?%s&kanban=" % werkzeug.url_encode(get_args)

    # Parse "<column_id>-<page>" pairs into {column_id: page}.
    pages = {}
    for col in kanban.split(","):
        if col:
            col = col.split("-")
            pages[int(col[0])] = int(col[1])

    objects = []
    for group in model_obj.read_group(cr, uid, domain, ["id", column], groupby=column):
        obj = {}

        # browse column
        relation_id = group[column][0]
        obj['column_id'] = relation_obj.browse(cr, uid, relation_id)

        # Keep the other columns' page positions in this column's url.
        obj['kanban_url'] = kanban_url
        for k, v in pages.items():
            if k != relation_id:
                obj['kanban_url'] += "%s-%s" % (k, v)

        # pager
        number = model_obj.search(cr, uid, group['__domain'], count=True)
        obj['page_count'] = int(math.ceil(float(number) / step))
        obj['page'] = pages.get(relation_id) or 1
        if obj['page'] > obj['page_count']:
            obj['page'] = obj['page_count']
        offset = (obj['page']-1) * step
        obj['page_start'] = max(obj['page'] - int(math.floor((scope-1)/2)), 1)
        obj['page_end'] = min(obj['page_start'] + (scope-1), obj['page_count'])

        # view data
        obj['domain'] = group['__domain']
        obj['model'] = model
        obj['step'] = step
        obj['orderby'] = orderby

        # browse objects
        object_ids = model_obj.search(cr, uid, group['__domain'], limit=step, offset=offset, order=orderby)
        obj['object_ids'] = model_obj.browse(cr, uid, object_ids)

        objects.append(obj)

    values = {
        'objects': objects,
        'range': range,
        'template': template,
    }
    return request.website._render("website.kanban_contain", values)
def kanban_col(self, cr, uid, ids, model, domain, page, template, step, orderby, context=None):
    """Render one kanban column: the requested page of ``model`` records
    matching ``domain``, each record rendered with ``template``."""
    model_obj = self.pool[model]
    # ``domain`` arrives as a string (it travels through the request).
    search_domain = safe_eval(domain)
    page_size = int(step)
    offset = (int(page) - 1) * page_size
    record_ids = model_obj.search(cr, uid, search_domain, limit=page_size, offset=offset, order=orderby)
    records = model_obj.browse(cr, uid, record_ids)
    return "".join(request.website._render(template, {'object_id': record})
                   for record in records)
def _image_placeholder(self, response):
    """Fill ``response`` with the generic placeholder PNG shipped with the
    web module, and return it as a conditional response."""
    placeholder_path = os.path.join('web', 'static', 'src', 'img', 'placeholder.png')
    # file_open may return a StringIO; StringIO instances can be closed but
    # are not context managers in Python 2, hence contextlib.closing.
    placeholder = openerp.tools.misc.file_open(placeholder_path, mode='rb')
    with contextlib.closing(placeholder) as image_file:
        response.data = image_file.read()
    return response.make_conditional(request.httprequest)
def _image(self, cr, uid, model, id, field, response, max_width=maxint, max_height=maxint, cache=None, context=None):
    """ Fetches the requested field and ensures it does not go above
    (max_width, max_height), resizing it if necessary.

    Resizing is bypassed if the object provides a $field_big, which will
    be interpreted as a pre-resized version of the base field.

    If the record is not found or does not have the requested field,
    returns a placeholder image via :meth:`~._image_placeholder`.

    Sets and checks conditional response parameters:
    * :mailheader:`ETag` is always set (and checked)
    * :mailheader:`Last-Modified` is set iff the record has a concurrency
      field (``__last_update``)

    The requested field is assumed to be base64-encoded image data in
    all cases.
    """
    Model = self.pool[model]
    id = int(id)

    # Record readable by the current user?
    ids = Model.search(cr, uid,
                       [('id', '=', id)], context=context)
    if not ids and 'website_published' in Model._all_columns:
        # Fall back to superuser for records explicitly published on the
        # website, even if the current user cannot read them directly.
        ids = Model.search(cr, openerp.SUPERUSER_ID,
                           [('id', '=', id), ('website_published', '=', True)], context=context)
    if not ids:
        return self._image_placeholder(response)

    concurrency = '__last_update'
    [record] = Model.read(cr, openerp.SUPERUSER_ID, [id],
                          [concurrency, field],
                          context=context)

    if concurrency in record:
        server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
        try:
            response.last_modified = datetime.datetime.strptime(
                record[concurrency], server_format + '.%f')
        except ValueError:
            # just in case we have a timestamp without microseconds
            response.last_modified = datetime.datetime.strptime(
                record[concurrency], server_format)

    # Field does not exist on model or field set to False
    if not record.get(field):
        # FIXME: maybe a field which does not exist should be a 404?
        return self._image_placeholder(response)

    # ETag over the raw (still base64) payload; make_conditional may turn
    # the response into a 304 if the client already holds this version.
    response.set_etag(hashlib.sha1(record[field]).hexdigest())
    response.make_conditional(request.httprequest)

    if cache:
        response.cache_control.max_age = cache
        response.expires = int(time.time() + cache)

    # conditional request match: no need to decode/resize anything
    if response.status_code == 304:
        return response

    data = record[field].decode('base64')
    image = Image.open(cStringIO.StringIO(data))
    response.mimetype = Image.MIME[image.format]

    filename = '%s_%s.%s' % (model.replace('.', '_'), id, str(image.format).lower())
    response.headers['Content-Disposition'] = 'inline; filename="%s"' % filename

    # Explicit falsy bounds (0/None) disable resizing entirely.
    if (not max_width) and (not max_height):
        response.data = data
        return response

    w, h = image.size
    max_w = int(max_width) if max_width else maxint
    max_h = int(max_height) if max_height else maxint

    if w < max_w and h < max_h:
        response.data = data
    else:
        size = (max_w, max_h)
        img = image_resize_and_sharpen(image, size)
        image_save_for_web(img, response.stream, format=image.format)
        # invalidate content-length computed by make_conditional as
        # writing to response.stream does not do it (as of werkzeug 0.9.3)
        del response.headers['Content-Length']

    return response
def image_url(self, cr, uid, record, field, size=None, context=None):
    """Return a local URL serving the image stored in ``field`` of the given
    browse record.

    The record id is suffixed with a short hash of the record's write date,
    so the URL changes whenever the record is modified (cache busting).
    """
    cache_buster = hashlib.sha1(record.sudo().write_date).hexdigest()[:7]
    unique_id = '%s_%s' % (record.id, cache_buster)
    suffix = '' if size is None else '/%s' % size
    return '/website/image/%s/%s/%s%s' % (record._name, unique_id, field, suffix)
class website_menu(osv.osv):
    _name = "website.menu"
    _description = "Website Menu"

    _columns = {
        'name': fields.char('Menu', required=True, translate=True),
        'url': fields.char('Url'),
        'new_window': fields.boolean('New Window'),
        'sequence': fields.integer('Sequence'),
        # TODO: support multiwebsite once done for ir.ui.views
        'website_id': fields.many2one('website', 'Website'),
        'parent_id': fields.many2one('website.menu', 'Parent Menu', select=True, ondelete="cascade"),
        'child_id': fields.one2many('website.menu', 'parent_id', string='Child Menus'),
        # Nested-set columns maintained by the ORM (_parent_store below).
        'parent_left': fields.integer('Parent Left', select=True),
        'parent_right': fields.integer('Parent Right', select=True),
    }

    def __defaults_sequence(self, cr, uid, context):
        # Default 'sequence' of a new menu: the highest existing sequence
        # value (0 when no menu exists yet), so new entries sort at the end.
        menu = self.search_read(cr, uid, [(1,"=",1)], ["sequence"], limit=1, order="sequence DESC", context=context)
        return menu and menu[0]["sequence"] or 0

    _defaults = {
        'url': '',
        'sequence': __defaults_sequence,
        'new_window': False,
    }

    _parent_store = True
    _parent_order = 'sequence'
    _order = "sequence"

    # would be better to take a menu_id as argument
    def get_tree(self, cr, uid, website_id, context=None):
        """Return the given website's menu as a nested structure of dicts
        (id, name, url, new_window, sequence, parent_id, children),
        rooted at the website's top-level menu."""
        def make_tree(node):
            menu_node = dict(
                id=node.id,
                name=node.name,
                url=node.url,
                new_window=node.new_window,
                sequence=node.sequence,
                parent_id=node.parent_id.id,
                children=[],
            )
            for child in node.child_id:
                menu_node['children'].append(make_tree(child))
            return menu_node
        menu = self.pool.get('website').browse(cr, uid, website_id, context=context).menu_id
        return make_tree(menu)

    def save(self, cr, uid, website_id, data, context=None):
        """Persist a full menu edition: ``data['data']`` holds the edited
        menu dicts and ``data['to_delete']`` the ids to remove.

        New menus arrive with a client-generated string id: they are
        created first and their temporary id replaced everywhere (own id
        and children's parent_id) before the final writes."""
        def replace_id(old_id, new_id):
            for menu in data['data']:
                if menu['id'] == old_id:
                    menu['id'] = new_id
                if menu['parent_id'] == old_id:
                    menu['parent_id'] = new_id
        to_delete = data['to_delete']
        if to_delete:
            self.unlink(cr, uid, to_delete, context=context)
        for menu in data['data']:
            mid = menu['id']
            # A string id means "not in database yet": create a stub record
            # and propagate its real id through the edited data.
            if isinstance(mid, str):
                new_id = self.create(cr, uid, {'name': menu['name']}, context=context)
                replace_id(mid, new_id)
        for menu in data['data']:
            self.write(cr, uid, [menu['id']], menu, context=context)
        return True
class ir_attachment(osv.osv):
    _inherit = "ir.attachment"

    def _website_url_get(self, cr, uid, ids, name, arg, context=None):
        # Function field: URL-type attachments keep their own url, binary
        # ones get a local /website/image/... url (see website.image_url).
        result = {}
        for attach in self.browse(cr, uid, ids, context=context):
            if attach.url:
                result[attach.id] = attach.url
            else:
                result[attach.id] = self.pool['website'].image_url(cr, uid, attach, 'datas')
        return result

    def _datas_checksum(self, cr, uid, ids, name, arg, context=None):
        # Function field: sha1 of the (base64) payload for the attachments
        # eligible for de-duplication, None otherwise (see _compute_checksum).
        return dict(
            (attach['id'], self._compute_checksum(attach))
            for attach in self.read(
                cr, uid, ids, ['res_model', 'res_id', 'type', 'datas'],
                context=context)
        )

    def _compute_checksum(self, attachment_dict):
        """Return the sha1 hex digest of the attachment payload, but only
        for binary data attached to ir.ui.view with no res_id and no url;
        return None for anything else."""
        if attachment_dict.get('res_model') == 'ir.ui.view'\
                and not attachment_dict.get('res_id') and not attachment_dict.get('url')\
                and attachment_dict.get('type', 'binary') == 'binary'\
                and attachment_dict.get('datas'):
            return hashlib.new('sha1', attachment_dict['datas']).hexdigest()
        return None

    def _datas_big(self, cr, uid, ids, name, arg, context=None):
        # Function field: a resized ("big") version of the image payload.
        result = dict.fromkeys(ids, False)
        # In bin_size mode the ORM returns sizes, not contents: skip.
        if context and context.get('bin_size'):
            return result

        for record in self.browse(cr, uid, ids, context=context):
            if not record.datas: continue
            try:
                result[record.id] = openerp.tools.image_resize_image_big(record.datas)
            except IOError: # apparently the error PIL.Image.open raises
                pass

        return result

    _columns = {
        'datas_checksum': fields.function(_datas_checksum, size=40,
            string="Datas checksum", type='char', store=True, select=True),
        'website_url': fields.function(_website_url_get, string="Attachment URL", type='char'),
        'datas_big': fields.function (_datas_big, type='binary', store=True,
                                      string="Resized file content"),
        'mimetype': fields.char('Mime Type', readonly=True),
    }

    def _add_mimetype_if_needed(self, values):
        # Guess the mimetype from the file name, defaulting to a generic
        # binary stream when the extension is unknown.
        if values.get('datas_fname'):
            values['mimetype'] = mimetypes.guess_type(values.get('datas_fname'))[0] or 'application/octet-stream'

    def create(self, cr, uid, values, context=None):
        # De-duplicate website assets: reuse an existing attachment with
        # the same payload checksum instead of creating a copy.
        chk = self._compute_checksum(values)
        if chk:
            match = self.search(cr, uid, [('datas_checksum', '=', chk)], context=context)
            if match:
                return match[0]
        self._add_mimetype_if_needed(values)
        return super(ir_attachment, self).create(
            cr, uid, values, context=context)

    def write(self, cr, uid, ids, values, context=None):
        self._add_mimetype_if_needed(values)
        return super(ir_attachment, self).write(cr, uid, ids, values, context=context)

    def try_remove(self, cr, uid, ids, context=None):
        """ Removes a web-based image attachment if it is used by no view
        (template)

        Returns a dict mapping attachments which would not be removed (if any)
        mapped to the views preventing their removal
        """
        Views = self.pool['ir.ui.view']
        attachments_to_remove = []
        # views blocking removal of the attachment
        removal_blocked_by = {}

        for attachment in self.browse(cr, uid, ids, context=context):
            # in-document URLs are html-escaped, a straight search will not
            # find them
            url = escape(attachment.website_url)
            # Match the url wrapped in either double or single quotes.
            ids = Views.search(cr, uid, ["|", ('arch', 'like', '"%s"' % url), ('arch', 'like', "'%s'" % url)], context=context)
            if ids:
                removal_blocked_by[attachment.id] = Views.read(
                    cr, uid, ids, ['name'], context=context)
            else:
                attachments_to_remove.append(attachment.id)

        if attachments_to_remove:
            self.unlink(cr, uid, attachments_to_remove, context=context)

        return removal_blocked_by
class res_partner(osv.osv):
    _inherit = "res.partner"

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """Return a Google static-map image URL centered on the partner's
        address (first id of ``ids``)."""
        partner = self.browse(cr, uid, ids[0], context=context)
        country_name = partner.country_id and partner.country_id.name_get()[0][1] or ''
        address = '%s, %s %s, %s' % (partner.street or '', partner.city or '',
                                     partner.zip or '', country_name)
        params = {
            'center': address,
            'size': "%sx%s" % (height, width),
            'zoom': zoom,
            'sensor': 'false',
        }
        return urlplus('//maps.googleapis.com/maps/api/staticmap', params)

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """Return a Google Maps link pointing at the partner's address."""
        partner = self.browse(cr, uid, ids[0], context=context)
        country_name = partner.country_id and partner.country_id.name_get()[0][1] or ''
        address = '%s, %s %s, %s' % (partner.street or '', partner.city or '',
                                     partner.zip or '', country_name)
        params = {
            'q': address,
            'z': 10,
        }
        return urlplus('https://maps.google.com/maps', params)
class res_company(osv.osv):
    _inherit = "res.company"

    def google_map_img(self, cr, uid, ids, zoom=8, width=298, height=298, context=None):
        """Delegate to the company partner's static-map image URL (None when
        the company has no partner)."""
        # Browse as superuser so the map does not depend on the current
        # user's read rights on the partner.
        company = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context)
        partner = company.partner_id
        if not partner:
            return None
        return partner.google_map_img(zoom, width, height, context=context) or None

    def google_map_link(self, cr, uid, ids, zoom=8, context=None):
        """Delegate to the company partner's Google Maps link (None when the
        company has no partner)."""
        company = self.browse(cr, openerp.SUPERUSER_ID, ids[0], context=context)
        partner = company.partner_id
        if not partner:
            return None
        return partner.google_map_link(zoom, context=context) or None
class base_language_install(osv.osv_memory):
    _inherit = "base.language.install"

    _columns = {
        'website_ids': fields.many2many('website', string='Websites to translate'),
    }

    def default_get(self, cr, uid, fields, context=None):
        # Pre-select the website whose id was passed in context['params']
        # (if any) so the wizard translates it by default.
        if context is None:
            context = {}
        defaults = super(base_language_install, self).default_get(cr, uid, fields, context)
        website_id = context.get('params', {}).get('website_id')
        if website_id:
            if 'website_ids' not in defaults:
                defaults['website_ids'] = []
            defaults['website_ids'].append(website_id)
        return defaults

    def lang_install(self, cr, uid, ids, context=None):
        """Install the language, attach it to the selected websites, and —
        when the context provides ``params['url_return']`` — redirect back
        to that url with ``[lang]`` substituted by the installed code."""
        if context is None:
            context = {}
        action = super(base_language_install, self).lang_install(cr, uid, ids, context)
        language_obj = self.browse(cr, uid, ids)[0]
        website_ids = [website.id for website in language_obj['website_ids']]
        lang_id = self.pool['res.lang'].search(cr, uid, [('code', '=', language_obj['lang'])])
        if website_ids and lang_id:
            # (4, id) command: link the new language to each website.
            data = {'language_ids': [(4, lang_id[0])]}
            self.pool['website'].write(cr, uid, website_ids, data)
        params = context.get('params', {})
        if 'url_return' in params:
            return {
                'url': params['url_return'].replace('[lang]', language_obj['lang']),
                'type': 'ir.actions.act_url',
                'target': 'self'
            }
        return action
class website_seo_metadata(osv.Model):
    """Translatable SEO metadata fields (meta title, description, keywords)."""
    _name = 'website.seo.metadata'
    _description = 'SEO metadata'

    _columns = {
        'website_meta_title': fields.char("Website meta title", translate=True),
        'website_meta_description': fields.text("Website meta description", translate=True),
        'website_meta_keywords': fields.char("Website meta keywords", translate=True),
    }
# vim:et:
| |
#
#Copyright (c) 2012-2021, NVIDIA CORPORATION.
#SPDX-License-Identifier: Apache-2.0
import copy
import json
import signal
import logging
import msgpack
import itertools
import ssbench
from ssbench.importer import random
from ssbench.ordered_dict import OrderedDict
class Scenario(object):
    """Encapsulation of a benchmark "CRUD" scenario."""

    class StopGeneratingException(Exception):
        # Raised by consumers of the job generators to abort early.
        pass

    def __init__(self, scenario_filename=None, container_count=None,
                 user_count=None, operation_count=None, run_seconds=None,
                 block_size=None, _scenario_data=None,
                 version=ssbench.version, delete_after=None, policy=None):
        """Initializes the object from a scenario file on disk.

        Explicit keyword arguments override the corresponding values in the
        scenario data.

        :scenario_filename: path to a scenario file
        """
        self.version = version
        if _scenario_data is not None:
            # This is a "private" way to construct a Scenario object from the
            # raw JSON without a file lying around.
            self._scenario_data = _scenario_data
        elif scenario_filename is not None:
            try:
                fp = open(scenario_filename)
                self._scenario_data = json.load(fp)
            except:
                logging.exception('Error loading scenario file %r',
                                  scenario_filename)
                raise
        else:
            raise ValueError('Scenario() must get one of scenario_filename '
                             'or _scenario_data')

        # Sanity-check user_count
        if user_count is not None:
            self.user_count = user_count
        else:
            self.user_count = self._scenario_data['user_count']
        if self.user_count < 1:
            raise ValueError('user_count must be >= 1')

        # Command-line-specified values trump values in the scenario, and
        # within each of those levels, run_seconds trumps operation_count.
        if run_seconds is not None:
            self.run_seconds = run_seconds
            self.operation_count = None
        elif operation_count is not None:
            self.run_seconds = None
            self.operation_count = operation_count
        else:
            self.run_seconds = self._scenario_data.get('run_seconds', None)
            if self.run_seconds is None:
                self.operation_count = self._scenario_data.get(
                    'operation_count', None)
            else:
                self.operation_count = None
        if self.run_seconds is None and self.operation_count is None:
            raise ValueError('A scenario requires run_seconds or '
                             'operation_count')

        # storage policy to use for containers
        if policy is not None:
            self.policy = str(policy)
        else:
            self.policy = self._scenario_data.get('policy', None)
            if self.policy is not None:
                self.policy = str(self.policy)

        self.block_size = block_size
        self.name = self._scenario_data['name']
        self.container_base = self._scenario_data.get('container_base',
                                                      'ssbench')
        if container_count is not None:
            self.container_count = container_count
        else:
            self.container_count = self._scenario_data.get(
                'container_count', 100)
        # Container names embed the policy name so runs with different
        # policies never share containers.
        policy_name = 'default_policy' if self.policy is None else self.policy
        self.containers = ['%s_%06d_%s' % (self.container_base, i, policy_name)
                           for i in xrange(self.container_count)]
        self.container_concurrency = self._scenario_data.get(
            'container_concurrency', 10)

        # Set up sizes
        self.sizes_by_name = OrderedDict()
        for size_data in self._scenario_data['sizes']:
            size_data_copy = copy.deepcopy(size_data)
            self.sizes_by_name[size_data_copy['name']] = size_data_copy
            crud_profile = size_data_copy.get(
                'crud_profile', self._scenario_data['crud_profile'])
            crud_total = sum(crud_profile)
            size_data_copy['crud_pcts'] = [
                float(c) / crud_total * 100 for c in crud_profile]

            # Calculate probability thresholds for each CRUD element for this
            # object size category (defaulting to global crud profile).
            size_data_copy['crud_thresholds'] = [1, 1, 1, 1]
            self._thresholds_for(size_data_copy['crud_thresholds'],
                                 range(4), crud_profile)

        # Calculate probability thresholds for each size (from the
        # initial_files)
        self.bench_size_thresholds = OrderedDict()
        self._thresholds_for(
            self.bench_size_thresholds,
            filter(lambda n: n in self._scenario_data['initial_files'],
                   self.sizes_by_name.keys()),
            self._scenario_data['initial_files'])

        # Expiring time(sec) for create object.
        if delete_after is not None:
            self.delete_after = delete_after
        else:
            self.delete_after = self._scenario_data.get('delete_after')

    def packb(self):
        """Serialize this scenario to a msgpack byte-string (see unpackb)."""
        return msgpack.packb({
            '_scenario_data': self._scenario_data,
            'name': self.name,
            'version': self.version,
            'user_count': self.user_count,
            'operation_count': self.operation_count,
            'run_seconds': self.run_seconds,
            'container_base': self.container_base,
            'container_count': self.container_count,
            'container_concurrency': self.container_concurrency,
            'delete_after': self.delete_after,
        })

    @classmethod
    def unpackb(cls, packed_or_unpacker):
        """Rebuild a Scenario from msgpack bytes or an Unpacker stream."""
        if isinstance(packed_or_unpacker, msgpack.Unpacker):
            data = packed_or_unpacker.next()
        else:
            data = msgpack.unpackb(packed_or_unpacker)
        scenario = cls(container_count=data['container_count'],
                       user_count=data['user_count'],
                       operation_count=data['operation_count'],
                       run_seconds=data['run_seconds'],
                       version=data['version'],
                       _scenario_data=data['_scenario_data'],
                       delete_after=data.get('delete_after'))
        return scenario

    @property
    def crud_pcts(self):
        # Global Create/Read/Update/Delete percentages derived from the
        # scenario's top-level crud_profile weights.
        total = sum(self._scenario_data['crud_profile'])
        return [float(c) / total * 100
                for c in self._scenario_data['crud_profile']]

    def _thresholds_for(self, target, indices, data):
        # Store in target[idx] the cumulative probability (in (0, 1]) of
        # each index, weighted by data[idx]; a uniform random number in
        # [0, 1) can then be compared against these thresholds to pick an
        # index according to the weights.
        initial_sum = sum(map(lambda i: data[i], indices))
        last = 0
        for idx in indices:
            last = last + float(data[idx]) / initial_sum
            target[idx] = last

    def job(self, size_str, **kwargs):
        # Base job dict; kwargs extend/override it (including size_str).
        job = {'size_str': size_str}
        job.update(kwargs)
        return job

    def create_job(self, size_str, i, container=None, head_first=False):
        """
        Creates job dict which will create an object.

        The object size is drawn uniformly from the [size_min, size_max]
        range of the given size category; the target container defaults to
        a random one.
        """
        if container is None:
            container = random.choice(self.containers)
        return self.job(size_str,
                        type=ssbench.CREATE_OBJECT,
                        container=container,
                        name='%s_%06d' % (size_str, i),
                        size=random.randint(
                            self.sizes_by_name[size_str]['size_min'],
                            self.sizes_by_name[size_str]['size_max']),
                        block_size=self.block_size,
                        head_first=head_first,
                        delete_after=self.delete_after)

    def bench_job(self, size_str, crud_index, i):
        """Creates a benchmark work job dict of a given size and crud "index"
        (where 0 is Create, 1 is Read, etc.).

        :size_str: One of the size strings defined in the scenario file
        :crud_index: An index into the CRUD array (0 is Create, etc.)
        :i: The job index
        :returns: A dictionary representing benchmark work job
        """
        if crud_index == 0:
            return self.create_job(size_str, i)
        elif crud_index == 1:
            return self.job(size_str, type=ssbench.READ_OBJECT,
                            block_size=self.block_size)
        elif crud_index == 2:
            return self.job(
                size_str, type=ssbench.UPDATE_OBJECT,
                block_size=self.block_size,
                size=random.randint(
                    self.sizes_by_name[size_str]['size_min'],
                    self.sizes_by_name[size_str]['size_max']))
        elif crud_index == 3:
            return self.job(size_str, type=ssbench.DELETE_OBJECT)

    def initial_jobs(self):
        """
        Generator for the worker jobs necessary to initialize the cluster
        contents for the scenario.

        :returns: A generator which yields job objects (dicts)
        """
        count_by_size = copy.copy(self._scenario_data['initial_files'])
        index_per_size = dict.fromkeys(count_by_size.iterkeys(), 1)
        container_iter = itertools.cycle(self.containers)
        yielded = True
        # Round-robin across the size categories (and the containers) until
        # every size has had its initial_files count of objects created.
        while yielded:
            yielded = False
            for size_str in filter(
                    lambda n: n in self._scenario_data['initial_files'],
                    self.sizes_by_name.keys()):
                if count_by_size[size_str]:
                    yield self.create_job(size_str, index_per_size[size_str],
                                          container=container_iter.next(),
                                          head_first=True)
                    count_by_size[size_str] -= 1
                    index_per_size[size_str] += 1
                    yielded = True

    def bench_jobs(self):
        """
        Generator for the worker jobs necessary to actually run the scenario.

        If self.run_seconds is set, jobs will be for about that many seconds,
        regardless of any value for self.operation_count.

        If self.run_seconds is not set, exactly self.operation_count jobs will
        be yielded.

        :returns: A generator which yields job objects (dicts)
        """
        # Job indices continue after the initial population so created
        # object names do not collide with initial_jobs() names.
        max_index_size = max(self._scenario_data['initial_files'].itervalues())
        keep_running = [True]
        prev_alarm = None
        if self.run_seconds:
            # A SIGALRM flips keep_running after run_seconds, stopping the
            # generation loop below.
            def _stop_running(signal, frame):
                signal = signal # appease the linter
                frame = frame # appease the linter
                keep_running[0] = False
            prev_alarm = signal.signal(signal.SIGALRM, _stop_running)
            signal.alarm(self.run_seconds)

        index = max_index_size + 1
        yielded = 0
        while (self.run_seconds and keep_running[0]) or \
                yielded < self.operation_count:
            # Pick an object size according to the initial_files weights.
            r = random.random() # uniform on [0, 1)
            for size_str, prob in self.bench_size_thresholds.iteritems():
                if r < prob:
                    this_size_str = size_str
                    break
            # Determine which C/R/U/D type this job will be
            size_crud = self.sizes_by_name[this_size_str]['crud_thresholds']
            r = random.random() # uniform on [0, 1)
            for crud_index, prob in enumerate(size_crud):
                if r < prob:
                    this_crud_index = crud_index
                    break
            yield self.bench_job(this_size_str, this_crud_index, index)
            index += 1
            yielded += 1

        if prev_alarm:
            # Deliberately avoiding the complexity of trying to handle a
            # pre-existing alarm timer value, since that shouldn't be
            # necessary for all known applications of Scenario.
            signal.signal(signal.SIGALRM, prev_alarm)
class ScenarioNoop(Scenario):
    """
    A Scenario variant which yields only no-op jobs.
    """

    def job(self, size_str, **kwargs):
        # Same contract as Scenario.job, with the 'noop' marker pre-set;
        # kwargs still take precedence over the defaults.
        noop_job = dict(size_str=size_str, noop=True)
        noop_job.update(kwargs)
        return noop_job
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 model for Keras.
Adapted from tf.keras.applications.resnet50.ResNet50().
This is ResNet model version 1.5.
Related papers/blogs:
- https://arxiv.org/abs/1512.03385
- https://arxiv.org/pdf/1603.05027v2.pdf
- http://torch.ch/blog/2016/02/04/resnets.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tf2_resnet import imagenet_preprocessing
from tensorflow.python.keras import backend
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers as tf_python_keras_layers
from tensorflow.python.keras import models
from tensorflow.python.keras import regularizers
# Hyper-parameters shared by every batch-normalization layer in the model.
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5

FLAGS = flags.FLAGS
flags.DEFINE_float(
    'weight_decay',
    default=1e-4,
    # Fixed typo in the user-visible help text ("coefficiant").
    help=('Weight decay coefficient for l2 regularization.'))

# Module-level alias switched at runtime by change_keras_layer() between
# tensorflow.python.keras.layers (default) and tf.keras.layers.
layers = tf_python_keras_layers
def change_keras_layer(use_tf_keras_layers=False):
  """Change layers to either tf.keras.layers or tf.python.keras.layers.

  Layer version of tf.keras.layers depends on the tensorflow version, while
  tf.python.keras.layers checks the TF2_BEHAVIOR environment variable. This
  temporary switch lets benchmarks track both implementations (tf v2
  batchnorm is currently slower than tf v1 batchnorm) and will be removed
  once tf.keras.layers becomes the default.

  TODO(b/146939027): Remove this function when tf v2 batchnorm reaches
  training speed parity with tf v1 batchnorm.

  Args:
    use_tf_keras_layers: whether to use tf.keras.layers.
  """
  global layers
  layers = tf.keras.layers if use_tf_keras_layers else tf_python_keras_layers
def _gen_l2_regularizer(use_l2_regularizer=True):
  """Return an L2 kernel regularizer using FLAGS.weight_decay, or None."""
  if not use_l2_regularizer:
    return None
  return regularizers.l2(FLAGS.weight_decay)
def identity_block(input_tensor,
                   kernel_size,
                   filters,
                   stage,
                   block,
                   use_l2_regularizer=True):
  """The identity block is the block that has no conv layer at shortcut.

  Args:
    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    use_l2_regularizer: whether to use L2 regularizer on Conv layer.

  Returns:
    Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  # BatchNorm acts on the channel axis, whose position depends on the
  # image data format.
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  conv_prefix = 'res' + str(stage) + block + '_branch'
  bn_prefix = 'bn' + str(stage) + block + '_branch'

  def batch_norm(tensor, suffix):
    # All BN layers of the block share the same hyper-parameters.
    return layers.BatchNormalization(
        axis=bn_axis,
        momentum=BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        name=bn_prefix + suffix)(tensor)

  # 1x1 reduce.
  x = layers.Conv2D(
      filters1, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '2a')(input_tensor)
  x = batch_norm(x, '2a')
  x = layers.Activation('relu')(x)

  # kernel_size x kernel_size conv.
  x = layers.Conv2D(
      filters2,
      kernel_size,
      padding='same',
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '2b')(x)
  x = batch_norm(x, '2b')
  x = layers.Activation('relu')(x)

  # 1x1 expand.
  x = layers.Conv2D(
      filters3, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '2c')(x)
  x = batch_norm(x, '2c')

  # Residual sum directly with the (shape-compatible) block input.
  x = layers.add([x, input_tensor])
  return layers.Activation('relu')(x)
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2),
               use_l2_regularizer=True):
  """A block that has a conv layer at shortcut.

  Note that from stage 3,
  the second conv layer at main path is with strides=(2, 2)
  And the shortcut should have strides=(2, 2) as well

  Args:
    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    strides: Strides for the second conv layer in the block.
    use_l2_regularizer: whether to use L2 regularizer on Conv layer.

  Returns:
    Output tensor for the block.
  """
  filters1, filters2, filters3 = filters
  # BatchNorm acts on the channel axis, whose position depends on the
  # image data format.
  bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
  conv_prefix = 'res' + str(stage) + block + '_branch'
  bn_prefix = 'bn' + str(stage) + block + '_branch'

  def batch_norm(tensor, suffix):
    # All BN layers of the block share the same hyper-parameters.
    return layers.BatchNormalization(
        axis=bn_axis,
        momentum=BATCH_NORM_DECAY,
        epsilon=BATCH_NORM_EPSILON,
        name=bn_prefix + suffix)(tensor)

  # 1x1 reduce.
  x = layers.Conv2D(
      filters1, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '2a')(input_tensor)
  x = batch_norm(x, '2a')
  x = layers.Activation('relu')(x)

  # Strided middle conv: in this file (ResNet v1.5) the downsampling
  # happens on the 3x3 conv rather than the first 1x1.
  x = layers.Conv2D(
      filters2,
      kernel_size,
      strides=strides,
      padding='same',
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '2b')(x)
  x = batch_norm(x, '2b')
  x = layers.Activation('relu')(x)

  # 1x1 expand.
  x = layers.Conv2D(
      filters3, (1, 1),
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '2c')(x)
  x = batch_norm(x, '2c')

  # Projection shortcut so the residual sum has matching shape/channels.
  shortcut = layers.Conv2D(
      filters3, (1, 1),
      strides=strides,
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name=conv_prefix + '1')(input_tensor)
  shortcut = batch_norm(shortcut, '1')

  x = layers.add([x, shortcut])
  return layers.Activation('relu')(x)
def resnet50(num_classes,
             batch_size=None,
             use_l2_regularizer=True,
             rescale_inputs=False):
  """Instantiates the ResNet50 architecture.

  Args:
    num_classes: `int` number of classes for image classification.
    batch_size: Size of the batches for each step.
      NOTE(review): this argument is currently unused in the body — confirm
      whether it was meant to be forwarded to `layers.Input`.
    use_l2_regularizer: whether to use L2 regularizer on Conv/Dense layer.
    rescale_inputs: whether to rescale inputs from 0 to 1.

  Returns:
    A Keras model instance.
  """
  input_shape = (224, 224, 3)
  img_input = layers.Input(shape=input_shape)
  if rescale_inputs:
    # Hub image modules expect inputs in the range [0, 1]. This rescales these
    # inputs to the range expected by the trained model.
    x = layers.Lambda(
        lambda x: x * 255.0 - backend.constant(
            imagenet_preprocessing.CHANNEL_MEANS,
            shape=[1, 1, 3],
            dtype=x.dtype),
        name='rescale')(
            img_input)
  else:
    x = img_input

  if backend.image_data_format() == 'channels_first':
    # Inputs arrive as NHWC; transpose to NCHW for channels_first backends.
    x = layers.Lambda(
        lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
        name='transpose')(x)
    bn_axis = 1
  else:  # channels_last
    bn_axis = 3

  # Stem: padded 7x7/2 conv + batch norm + relu + 3x3/2 max pool.
  x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
  x = layers.Conv2D(
      64, (7, 7),
      strides=(2, 2),
      padding='valid',
      use_bias=False,
      kernel_initializer='he_normal',
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='conv1')(
          x)
  x = layers.BatchNormalization(
      axis=bn_axis,
      momentum=BATCH_NORM_DECAY,
      epsilon=BATCH_NORM_EPSILON,
      name='bn_conv1')(
          x)
  x = layers.Activation('relu')(x)
  x = layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

  # Stage 2: one conv block (stride 1, since the max pool above already
  # downsampled) followed by two identity blocks.
  x = conv_block(
      x,
      3, [64, 64, 256],
      stage=2,
      block='a',
      strides=(1, 1),
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [64, 64, 256],
      stage=2,
      block='b',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [64, 64, 256],
      stage=2,
      block='c',
      use_l2_regularizer=use_l2_regularizer)

  # Stage 3: conv block (default stride) + three identity blocks.
  x = conv_block(
      x,
      3, [128, 128, 512],
      stage=3,
      block='a',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [128, 128, 512],
      stage=3,
      block='b',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [128, 128, 512],
      stage=3,
      block='c',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [128, 128, 512],
      stage=3,
      block='d',
      use_l2_regularizer=use_l2_regularizer)

  # Stage 4: conv block + five identity blocks.
  x = conv_block(
      x,
      3, [256, 256, 1024],
      stage=4,
      block='a',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [256, 256, 1024],
      stage=4,
      block='b',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [256, 256, 1024],
      stage=4,
      block='c',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [256, 256, 1024],
      stage=4,
      block='d',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [256, 256, 1024],
      stage=4,
      block='e',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [256, 256, 1024],
      stage=4,
      block='f',
      use_l2_regularizer=use_l2_regularizer)

  # Stage 5: conv block + two identity blocks.
  x = conv_block(
      x,
      3, [512, 512, 2048],
      stage=5,
      block='a',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [512, 512, 2048],
      stage=5,
      block='b',
      use_l2_regularizer=use_l2_regularizer)
  x = identity_block(
      x,
      3, [512, 512, 2048],
      stage=5,
      block='c',
      use_l2_regularizer=use_l2_regularizer)

  # Global average pooling over the spatial axes, then the classifier head.
  rm_axes = [1, 2] if backend.image_data_format() == 'channels_last' else [2, 3]
  x = layers.Lambda(lambda x: backend.mean(x, rm_axes), name='reduce_mean')(x)
  x = layers.Dense(
      num_classes,
      kernel_initializer=initializers.RandomNormal(stddev=0.01),
      kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
      name='fc1000')(
          x)
  # A softmax that is followed by the model loss cannot be done in float16
  # due to numeric issues, so we force dtype=float32 here.
  x = layers.Activation('softmax', dtype='float32')(x)

  # Create model.
  return models.Model(img_input, x, name='resnet50')
| |
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.management.ObjectName;
import javax.management.MBeanInfo;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanOperationInfo;
import javax.management.MBeanParameterInfo;
import java.lang.management.ManagementFactory;
import javax.management.AttributeNotFoundException;
#Python Dependencies
import sys, cmd, optparse
#from urlparse import urljoin
from cmd import Cmd
# Error types used by JmxClient / JmxCmd. They now subclass Exception so
# they can be raised and caught on both Python 2 and Python 3 (raising an
# old-style `class X: pass` only worked on Python 2).
# NOTE: ConnectionError predates (and here shadows) the Python 3 builtin of
# the same name; the name is kept so existing callers keep working.
class ConnectionError(Exception):
    """Connecting to the JMX server failed."""
    pass

class DomainNotFoundError(Exception):
    """The requested JMX domain does not exist on the server."""
    pass

class DomainIsNoneError(Exception):
    """An operation required a selected domain but none is set."""
    pass

class ObjectNameNotFoundError(Exception):
    """The requested ObjectName could not be resolved."""
    pass

class MBeanNotFoundError(Exception):
    """No MBean exists at the current domain/path."""
    pass

class MBeanAttributeNotFoundError(Exception):
    """The MBean has no attribute with the requested name."""
    pass

class MBeanOperationNotFoundError(Exception):
    """The MBean has no operation with the requested name."""
    pass

class SetAttributeError(Exception):
    """Setting an MBean attribute failed or the attribute is not writable."""
    pass

class OperationNotFoundError(Exception):
    """The requested operation is not defined on the MBean."""
    pass

class InvokeError(Exception):
    """Invoking an MBean operation failed."""
    pass
class JmxClient :
host = None
port = None
domain = None
MBeansPath = None
remote = None
connector = None
def connect(self, h, p) :
if JmxClient.remote :
self.disconnect()
try :
serviceURL = str()
serviceURL = "service:jmx:rmi:///jndi/rmi://"
serviceURL = serviceURL + h + ":" + p + "/jmxrmi"
try :
url = javax.management.remote.JMXServiceURL(serviceURL)
JmxClient.connector = javax.management.remote.JMXConnectorFactory.connect(url)
JmxClient.remote = self.connector.getMBeanServerConnection()
except :
JmxClient.remote = None
JmxClient.connector = None
raise ConnectionError
finally :
if JmxClient.remote :
JmxClient.host = h
JmxClient.port = p
def disconnect(self) :
try :
if JmxClient.remote :
print "diconnect from " + JmxClient.host + ":" + JmxClient.port
try :
JmxClient.connector.close()
except :
pass
finally:
JmxClient.host = None
JmxClient.port = None
JmxClient.remote = None
JmxClient.connector = None
JmxClient.MBeansPath = None
JmxClient.domaisn = None
def domains(self) :
if JmxClient.remote :
domainList = []
domainList = JmxClient.remote.getDomains()
for element in domainList :
print element
else :
print "remote connection is None"
def setDomain(self, d) :
if JmxClient.remote :
if d == '' :
JmxClient.domain = None
JmxClient.MBeansPath = None
return
for element in JmxClient.remote.getDomains() :
if element == d :
JmxClient.domain = d
JmxClient.MBeansPath = []
return
raise DomainNotFoundError
def ls(self) :
if JmxClient.remote :
if JmxClient.domain :
objectName = JmxClient.domain + ":"
if len(JmxClient.MBeansPath) :
objectName = objectName + ','.join(JmxClient.MBeansPath)
objectName2 = objectName + ","
else :
objectName2 = objectName
pool = javax.management.ObjectName(objectName2 + "*")
paths = {}
print objectName
print "-----"
qNames = JmxClient.remote.queryNames(pool, None)
try :
for mbean in qNames :
p = mbean.toString().split(objectName2)[1].split(',')[0]
paths[p] = p
for p in paths :
print "M " + p
except IndexError :
pass
try :
mbean = JmxClient.remote.getMBeanInfo(javax.management.ObjectName(objectName))
for attr in mbean.getAttributes() :
try :
value = JmxClient.remote.getAttribute(javax.management.ObjectName(objectName), attr.getName())
valueStr = str(value)
except :
valueStr = "-- " + attr.getType() + " --"
if attr.isReadable() :
readable = "r"
else :
readable = "-"
if attr.isWritable() :
writable = "w"
else :
writable = "-"
print "A " + readable + writable + " " + attr.getName() + " : " + valueStr
except :
pass
try :
mbean = JmxClient.remote.getMBeanInfo(javax.management.ObjectName(objectName))
for ops in mbean.getOperations() :
params = []
for p in ops.getSignature() :
params.append(p.getType())
print "O " + ops.getReturnType() + " " + ops.getName() + " ( " + ",".join(params) + ")"
pass
except :
pass
else :
raise DomainIsNoneError
def cd(self, path) :
if JmxClient.remote :
if JmxClient.domain :
if path == ".." :
if len(JmxClient.MBeansPath) :
JmxClient.MBeansPath.pop()
else :
for p in path.split(',') :
JmxClient.MBeansPath.append(p)
def get(self, att) :
if JmxClient.remote :
if JmxClient.domain :
objectName = JmxClient.domain + ":"
if len(JmxClient.MBeansPath) :
objectName = objectName + ','.join(JmxClient.MBeansPath)
try :
mbean = JmxClient.remote.getMBeanInfo(javax.management.ObjectName(objectName))
except :
raise MBeanNotFoundError
attr = None
for a in mbean.getAttributes() :
if a.getName() == att :
attr = a
break
if not attr :
raise MBeanAttributeNotFoundError
try :
value = JmxClient.remote.getAttribute(javax.management.ObjectName(objectName), att)
valueStr = str(value)
except :
valueStr = "-- " + attr.getType() + " --"
if attr.isReadable() :
readable = "Y"
else :
readable = "N"
if attr.isWritable() :
writable = "Y"
else :
writable = "N"
print "ObjectName :" + objectName
print "Attribute :" + attr.getName()
print "Value :" + valueStr
print "isReadable : " + readable
print "isWritable : " + writable
def set(self, att, val) :
if JmxClient.remote :
if JmxClient.domain :
objectName = JmxClient.domain + ":"
if len(JmxClient.MBeansPath) :
objectName = objectName + ','.join(JmxClient.MBeansPath)
try :
mbean = JmxClient.remote.getMBeanInfo(javax.management.ObjectName(objectName))
except :
raise MBeanNotFoundError
attr = None
for a in mbean.getAttributes() :
if a.getName() == att :
attr = a
break
if not attr :
raise MBeanAttributeNotFoundError
if attr.isWritable() :
try :
a = javax.management.Attribute(att, val)
JmxClient.remote.setAttribute(javax.management.ObjectName(objectName), a)
except :
raise SetAttributeError
else :
raise SetAttributeError
def invoke(self, op, params) :
if JmxClient.remote :
if JmxClient.domain :
objectName = JmxClient.domain + ":"
if len(JmxClient.MBeansPath) :
objectName = objectName + ','.join(JmxClient.MBeansPath)
try :
mbean = JmxClient.remote.getMBeanInfo(javax.management.ObjectName(objectName))
except :
raise MBeanNotFoundError
ops = None
for o in mbean.getOperations() :
if o.getName() == op :
ops = o
break
if not ops :
raise OperationNotFoundError
sig = []
for s in ops.getSignature() :
sig.append(p.getType())
try :
JmxClient.remote.invoke(javax.management.ObjectName(objectName), op, params, sig)
except :
raise InvokeError
def pwd(self) :
name = ''
if JmxClient.domain :
name = JmxClient.domain + ":" + ",".join(JmxClient.MBeansPath)
return name
class JmxCmd(Cmd):
jmxClient = None
domain = ""
if len(sys.argv) > 1:
prompt = ''
else:
prompt = 'jmx> ' + domain
intro = "Simple Command-line JMX Client"
def do_connect(self, line):
"""connect <hostname or ip_addr> <port>
Establish a connection to the JMX Server. Uses jmxrmi protocol by default"""
JmxCmd.connectArgs = line.split(' ')
JmxCmd.jmxClient = JmxClient()
try :
if len(JmxCmd.connectArgs) < 2:
raise ConnectionError
JmxCmd.jmxClient.connect(JmxCmd.connectArgs[0], JmxCmd.connectArgs[1])
except ConnectionError :
JmxCmd.jmxClient = None
print "Error: failed to connect to '" + line + "'"
def do_disconnect(self, line):
"""Close the JMX Connection"""
try :
if JmxCmd.jmxClient :
JmxCmd.jmxClient.disconnect()
finally :
JmxCmd.jmxClient = None
prompt = 'jmx>'
def do_domains(self, line):
"""getDomains
Retrieve a list of all available JMX Domains"""
if JmxCmd.jmxClient :
JmxCmd.jmxClient.domains()
def do_domain(self, arg):
"""Set the current domain to perform operations on"""
if JmxCmd.jmxClient :
try :
JmxCmd.jmxClient.setDomain(arg)
print "set current doamin to " + arg
except DomainNotFoundError :
print "Error: Domain '" + arg + "' not found. "
def do_cd(self, line) :
"""Change the directory of the MBean path"""
try :
if JmxCmd.jmxClient :
JmxCmd.jmxClient.cd(line)
except ObjectNameNotFoundError :
print "Invalide path"
def do_ls(self, line) :
"""List the mbeans/values in the current path"""
try :
if JmxCmd.jmxClient :
JmxCmd.jmxClient.ls()
except DomainIsNoneError :
print "Domain is none"
def do_pwd(self,line) :
"""print the working directory of the mbean path"""
if JmxCmd.jmxClient :
print JmxCmd.jmxClient.pwd()
def do_get(self, line) :
"""Get the attribute of an mbean"""
if JmxCmd.jmxClient :
try :
JmxCmd.jmxClient.get(line)
except MBeanAttributeNotFoundError :
print "Error: Attribute '" + line + "' not found."
except MBeanNotFoundError :
print "Error: MBean '" + JmxCmd.jmxClient.pwd() + "'1 not found."
def do_set(self, line) :
"""Set an attribute of an mbean"""
if JmxCmd.jmxClient :
JmxCmd.setArgs = line.split(' ')
try :
if len(JmxCmd.setArgs) < 2:
raise SetAttributeError
JmxCmd.jmxClient.set(JmxCmd.setArgs[0], JmxCmd.setArgs[1])
except SetAttributeError :
print "Error: failed to set attrbute value."
def do_invoke(self, line) :
"""Invoke a remote method"""
if JmxCmd.jmxClient :
JmxCmd.invokeArgs = line.split(' ')
try :
JmxCmd.jmxClient.invoke(JmxCmd.invokeArgs[0], JmxCmd.invokeArgs[1:])
except :
print "Error: failed to invoke: " + line
def do_quit(self, arg):
"""Exit"""
if JmxCmd.jmxClient :
JmxCmd.jmxClient.disconnect()
print("bye.")
return True
default_to_shell = True
if __name__ == '__main__':
    # Run commands from a script file when one is given as argv[1],
    # otherwise start an interactive shell.
    jmxCmd = None  # BUG FIX: was unbound in the finally below if setup failed
    try :
        if len(sys.argv) > 1:
            # BUG FIX: open() was inside the try whose finally closed it, so
            # a failed open raised NameError on "input". Open first instead.
            input = open(sys.argv[1], 'rt')
            try:
                sys.stdin = input
                jmxCmd = JmxCmd(input)
                jmxCmd.cmdloop()
            finally:
                input.close()
        else:
            jmxCmd = JmxCmd()
            jmxCmd.cmdloop()
    finally :
        # Best-effort cleanup of any open JMX connection.
        if jmxCmd :
            try :
                # BUG FIX: JmxCmd has no disconnect() method; the old call
                # always raised AttributeError (silently swallowed).
                jmxCmd.do_disconnect('')
            except :
                pass
| |
# coding=utf-8
"""
Reproject :class:`xarray.DataArray` objects.
Makes assumptions on the data that it matches certain NetCDF-CF criteria
The CRS is stored as the 'spatial_ref' attribute of the 'crs' data variable
Spatial dimensions are either 'latitude' / 'longitude' or 'x' / 'y',
although this should probably instead check the 'standard_name' as defined by CF
"""
from __future__ import division, absolute_import, print_function
import copy
import numpy as np
import rasterio
import rasterio.warp
from rasterio.warp import RESAMPLING as Resampling
from rasterio import Affine
import xarray as xr
def reproject_like(src_data_array, like_data_array, resampling=Resampling.nearest):
    """
    Reproject a DataArray to match the grid (resolution and CRS) of another DataArray.

    Note: Only 2D arrays with dimensions named 'latitude'/'longitude' or 'x'/'y'
    are currently supported, and both arrays must carry a valid CRS in a
    'spatial_ref' attribute (if using a WKT, make sure it is an OGC WKT).

    :param src_data_array: the `xarray.DataArray` to reproject
    :param like_data_array: the `xarray.DataArray` supplying the target grid
    :return: a new `xarray.DataArray` with the source data on the target grid
    """
    source_crs = src_data_array.attrs['spatial_ref']
    target_crs = like_data_array.attrs['spatial_ref']

    if 'latitude' in like_data_array.dims and 'longitude' in like_data_array.dims:
        x_dim, y_dim = 'longitude', 'latitude'
    elif 'x' in like_data_array.dims and 'y' in like_data_array.dims:
        x_dim, y_dim = 'x', 'y'
    else:
        raise ValueError

    # Average the per-axis cell sizes of the target grid into one resolution.
    x_coords = like_data_array[x_dim]
    y_coords = like_data_array[y_dim]
    res_x = (float(x_coords[-1]) - float(x_coords[0])) / (x_coords.size - 1)
    res_y = (float(y_coords[-1]) - float(y_coords[0])) / (y_coords.size - 1)
    target_resolution = (res_x + res_y) / 2

    return reproject(src_data_array, source_crs, target_crs, target_resolution,
                     resampling=resampling)
def reproject(src_data_array, src_crs, dst_crs, resolution=None, resampling=Resampling.nearest,
              set_nan=False, copy_attrs=True):
    """
    Reproject :class:`xarray.DataArray` objects

    Note: Only 2D arrays with dimensions named 'latitude'/'longitude' or 'x'/'y' are currently supported.
    Requires an attr 'spatial_ref' to be set containing a valid CRS.
    If using a WKT (e.g. from spatialreference.org), make sure it is an OGC WKT.

    :param src_data_array: `xarray.DataArray`
    :param src_crs: EPSG code, OGC WKT string, etc
    :param dst_crs: EPSG code, OGC WKT string, etc
    :param resolution: Size of a destination pixel in destination projection units (eg degrees or metres)
    :param resampling: Resampling method - see rasterio.warp.reproject for more details
        Possible values are:
            Resampling.nearest,
            Resampling.bilinear,
            Resampling.cubic,
            Resampling.cubic_spline,
            Resampling.lanczos,
            Resampling.average,
            Resampling.mode
    :param set_nan: If nodata values from the source and any nodata areas created by the reproject should be
        set to NaN. Note: this causes the data type to be cast to float.
    :param copy_attrs: Should the attributes be copied to the destination.
        Note: No attempt is made to update spatial attributes, e.g. spatial_ref, bounds, etc
    :return: A reprojected :class:`xarray.DataArray`
    """
    #TODO: Support lazy loading of data with dask imperative function
    src_data = np.copy(src_data_array.load().data)
    src_affine = _make_src_affine(src_data_array)
    dst_affine, dst_width, dst_height = _make_dst_affine(src_data_array, src_crs, dst_crs, resolution)
    dst_data = np.zeros((dst_height, dst_width), dtype=src_data_array.dtype)

    # BUG FIX: this used "_get_nodata_value(...) or -999", which replaced a
    # legitimate falsy nodata value (e.g. 0) with -999.
    nodata = _get_nodata_value(src_data_array)
    if nodata is None:
        nodata = -999

    with rasterio.drivers():
        rasterio.warp.reproject(source=src_data,
                                destination=dst_data,
                                src_transform=src_affine,
                                src_crs=src_crs,
                                src_nodata=nodata,
                                dst_transform=dst_affine,
                                dst_crs=dst_crs,
                                dst_nodata=nodata,
                                resampling=resampling)
    if set_nan:
        # np.float was a deprecated alias of the builtin float (float64);
        # use the explicit dtype so this keeps working on modern numpy.
        dst_data = dst_data.astype(np.float64)
        dst_data[dst_data == nodata] = np.nan
    return xr.DataArray(data=dst_data,
                        coords=_make_coords(src_data_array, dst_affine, dst_width, dst_height),
                        dims=copy.deepcopy(src_data_array.dims),
                        attrs=copy.deepcopy(src_data_array.attrs) if copy_attrs else None)
def append_solar_day(dataset, longitude=None):
    """
    Append a ``solar_day`` data variable to the given dataset.

    The resulting dataset can then be grouped by local solar day, e.g. to
    find each day's maximum::

        dataset = api.get_dataset(...)
        geo_xarray.append_solar_day(dataset)
        solar_day_data = dataset.groupby('solar_day').max(dim='time')

    :param dataset: An ``xarray.Dataset`` with a ``time`` dimension.
        If a ``longitude`` parameter is not specified, the dataset must also
        contain spatial dimensions (``x, y`` or ``longitude, latitude``) and
        a ``crs`` variable with a ``spatial_ref`` attribute.
    :param longitude: mean longitude of the dataset in WGS84
    """
    if longitude is None:
        longitude = _get_mean_longitude(dataset)
    shifted = [_solar_day(timestamp, longitude) for timestamp in dataset.time.values]
    day_values = np.array(shifted).astype('datetime64[D]')
    dataset['solar_day'] = xr.DataArray(day_values, coords={'time': dataset.time}, dims=['time'])
def _solar_day(utc, longitude):
    """Shift a UTC timestamp to local mean solar time at *longitude*.

    The Earth rotates 360 degrees in ~24 hours, i.e. 240 seconds per degree
    of longitude. (The parameter was previously misnamed ``latitude``; the
    offset depends on longitude only, as the caller confirms.)

    :param utc: a ``numpy.datetime64`` timestamp in UTC
    :param longitude: longitude in degrees east (WGS84)
    :return: the timestamp shifted by the solar offset
    """
    seconds_per_degree = 240
    offset_seconds = int(longitude * seconds_per_degree)
    offset = np.timedelta64(offset_seconds, 's')
    return utc + offset
def _get_mean_longitude(dataset):
    """Return the mean longitude of *dataset*, in WGS84 (EPSG:4326) degrees.

    The dataset's centre point is transformed from the dataset CRS to WGS84
    and the transformed x value (longitude) is returned.
    """
    x_dim, y_dim = _get_spatial_dims(dataset)
    # BUG FIX: the lat/lon means were swapped - the x dimension yields the
    # longitude and the y dimension the latitude, not vice versa, so the
    # transform was fed coordinates in the wrong slots.
    mean_lon = float(dataset[x_dim][0] + dataset[x_dim][-1]) / 2.
    mean_lat = float(dataset[y_dim][0] + dataset[y_dim][-1]) / 2.
    bounds = {'left': mean_lon, 'right': mean_lon, 'top': mean_lat, 'bottom': mean_lat}
    left, bottom, right, top = rasterio.warp.transform_bounds(str(dataset.crs), 'EPSG:4326', **bounds)
    return left
def _make_coords(src_data_array, dst_affine, dst_width, dst_height):
    """Build the coordinate mapping for a reprojected array.

    Starts from a deep copy of the source coordinates, then overwrites the
    spatial (x/y) entries with coordinates derived from the destination
    affine transform.
    """
    warped = _warp_spatial_coords(src_data_array, dst_affine, dst_width, dst_height)
    result = copy.deepcopy(src_data_array.coords)
    result.update(warped)
    return result
def _make_dst_affine(src_data_array, src_crs, dst_crs, dst_resolution=None):
    """Compute the destination affine transform and raster size.

    :return: (dst_affine, dst_width, dst_height) as produced by
        rasterio.warp.calculate_default_transform
    """
    left, bottom, right, top = _get_bounds(src_data_array)
    width, height = _get_shape(src_data_array)
    with rasterio.drivers():
        return rasterio.warp.calculate_default_transform(
            src_crs, dst_crs,
            width, height,
            left, bottom, right, top,
            resolution=dst_resolution)
def _make_src_affine(src_data_array):
    """Derive the source affine transform from the array's bounds and
    per-axis resolution (translation to the top-left corner, then scale)."""
    left, _bottom, _right, top = _get_bounds(src_data_array)
    res_x, res_y = _get_resolution(src_data_array, as_tuple=True)
    return Affine.translation(left, top) * Affine.scale(res_x, res_y)
def _get_spatial_dims(data_array):
    """Return the (x, y) spatial dimension names of *data_array*.

    Supports 'longitude'/'latitude' and 'x'/'y' dimension naming.

    :raises KeyError: if no recognised pair of spatial dimensions is present
        (previously raised as a bare KeyError with no message).
    """
    dims = data_array.dims
    if 'latitude' in dims and 'longitude' in dims:
        return 'longitude', 'latitude'
    if 'x' in dims and 'y' in dims:
        return 'x', 'y'
    raise KeyError('no spatial dimensions found among %s' % (tuple(dims),))
def _get_bounds(data_array):
    """Return (left, bottom, right, top) taken from the first and last
    coordinate values of the spatial axes (top/bottom follow storage order)."""
    x_dim, y_dim = _get_spatial_dims(data_array)
    xs = data_array[x_dim]
    ys = data_array[y_dim]
    return float(xs[0]), float(ys[-1]), float(xs[-1]), float(ys[0])
def _get_shape(data_array):
    """Return (width, height): the sizes of the x and y spatial dimensions."""
    return tuple(data_array[dim].size for dim in _get_spatial_dims(data_array))
def _get_resolution(data_array, get_avg_res=True, as_tuple=False):
    """Compute the pixel resolution of *data_array* from bounds and shape.

    :param get_avg_res: when True (and as_tuple is False), return the mean
        of the x and y resolutions; otherwise require them to be equal
    :param as_tuple: when True return (res_x, res_y); takes precedence
    :return: a float resolution, or an (x, y) tuple when as_tuple is True
    """
    left, bottom, right, top = _get_bounds(data_array)
    width, height = _get_shape(data_array)
    res_x = (right - left) / (width - 1)
    res_y = (bottom - top) / (height - 1)
    if as_tuple:
        return (res_x, res_y)
    if get_avg_res:
        return (res_x + res_y) / 2
    assert res_x == res_y
    return res_x
def _get_nodata_value(data_array):
    """Return the declared nodata/fill value of *data_array*, or None.

    Checks the conventional attribute names in priority order. Uses explicit
    None checks rather than ``or``-chaining so that a legitimate falsy fill
    value (e.g. 0) is not silently skipped (that was the previous bug).
    """
    for attr_name in ('_FillValue', 'missing_value', 'fill_value'):
        value = data_array.attrs.get(attr_name)
        if value is not None:
            return value
    return None
def _warp_spatial_coords(data_array, affine, width, height):
    """Build new x/y coordinate arrays for a grid described by *affine*.

    The upper-left and lower-right corners are mapped through the affine
    transform and each axis is filled with evenly spaced values between them.
    """
    left, top = affine * (0, 0)
    right, bottom = affine * (width, height)
    x_dim, y_dim = _get_spatial_dims(data_array)
    return {
        x_dim: np.linspace(left, right, num=width),
        y_dim: np.linspace(top, bottom, num=height),
    }
| |
from totalimpactwebapp.snap import Snap
from totalimpactwebapp.product import Product
from totalimpactwebapp.profile import Profile
from totalimpactwebapp.profile import get_profile_summary_dict
from totalimpactwebapp.product import refresh_products_from_tiids
from totalimpactwebapp.pinboard import Pinboard
# from totalimpactwebapp.pinboard import auto_populate_pinboard
from totalimpactwebapp.reference_set import save_all_reference_set_lists
from totalimpactwebapp.reference_set import RefsetBuilder
from totalimpactwebapp.product_deets import populate_product_deets
from totalimpactwebapp.drip_email import log_drip_email
from totalimpactwebapp.tweeter import Tweeter
from totalimpactwebapp.tweeter import get_and_save_tweeter_followers
from totalimpact.providers.provider import ProviderFactory
from totalimpact import default_settings
from util import commit
from util import dict_from_dir
from core_tasks import provider_method_wrapper
from totalimpactwebapp import db
from totalimpactwebapp import ti_queues
from db_backup_to_s3 import upload_to_s3
import tasks
from sqlalchemy import and_, or_, func, between
import datetime
import os
import requests
import argparse
import logging
import time
import pickle
from collections import defaultdict, Counter, OrderedDict
import StringIO
import csv
import urllib
import hashlib
import json
# logger is set below, in main
"""
requires these env vars be set in this environment:
DATABASE_URL
"""
try:
# set jason's env variables for local running.
import config
config.set_env_vars_from_dot_env()
except ImportError:
pass
def page_query(q):
    """Iterate over every row of query *q* in pages of 50.

    Issues repeated LIMIT/OFFSET queries and stops after the first page that
    yields no rows, so the whole result set is never loaded at once.
    """
    page_size = 50
    offset = 0
    while True:
        got_rows = False
        for row in q.limit(page_size).offset(offset):
            got_rows = True
            yield row
        if not got_rows:
            return
        offset += page_size
# from https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes/WindowedRangeQuery
# cited from many webpages as good way to do paging
def column_windows(session, column, windowsize):
    """Return a series of WHERE clauses against
    a given column that break it into windows.

    Result is an iterable of whereclauses suitable for q.filter(); each
    clause selects the rows whose *column* value falls within one window of
    (at most) *windowsize* rows.

    Requires a database that supports window functions,
    i.e. Postgresql, SQL Server, Oracle.

    Enhance this yourself ! Add a "where" argument
    so that windows of just a subset of rows can
    be computed.
    """
    def int_for_range(start_id, end_id):
        # Half-open interval [start_id, end_id); the final window is
        # unbounded on the right (end_id is None).
        if end_id:
            return and_(
                column>=start_id,
                column<end_id
            )
        else:
            return column>=start_id

    # Number every row in *column* order, then keep only every
    # windowsize-th value: those column values are the window boundaries.
    q = session.query(
                column,
                func.row_number().\
                        over(order_by=column).\
                        label('rownum')
                ).\
                from_self(column)
    if windowsize > 1:
        # NOTE(review): string-based filter() was removed in newer
        # SQLAlchemy versions; this relies on the older API.
        q = q.filter("rownum %% %d=1" % windowsize)

    intervals = [id for id, in q]

    # Walk consecutive boundary pairs and emit one whereclause per window.
    while intervals:
        start = intervals.pop(0)
        if intervals:
            end = intervals[0]
        else:
            end = None
        yield int_for_range(start, end)
def windowed_query(q, column, windowsize, desc=False):
    """Break a Query into windows on a given column and yield its rows.

    Rows are streamed window by window, ordered by *column* (descending when
    *desc* is True), so the full result set is never held in memory at once.
    """
    ordering = column.desc() if desc else column
    for whereclause in column_windows(q.session, column, windowsize):
        for row in q.filter(whereclause).order_by(ordering):
            yield row
def csv_of_dict(mydicts):
    """Render a list of dicts as a CSV string: header row plus one row per dict."""
    fieldnames, rows = build_csv_rows_from_dict(mydicts)
    buffer = StringIO.StringIO()
    writer = csv.DictWriter(buffer, delimiter=',', dialect=csv.excel, fieldnames=fieldnames)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
    try:
        return buffer.getvalue()
    finally:
        buffer.close()
def build_csv_rows_from_dict(mydicts):
    """Build (header, rows) for csv.DictWriter from a list of dicts.

    The header is an OrderedDict whose keys are the sorted union of all keys
    seen across *mydicts*; each row maps every header column to a utf-8
    encoded string (empty when the dict lacks that column).
    """
    all_keys = set()
    for record in mydicts:
        all_keys.update(record.keys())

    # make header row
    header = OrderedDict((key, None) for key in sorted(all_keys))

    # body rows
    rows = []
    for record in mydicts:
        row = OrderedDict()
        for key in header:
            try:
                row[key] = unicode(record[key]).encode("utf8")
            except (AttributeError, KeyError):
                row[key] = u""
        rows.append(row)
    return (header, rows)
def profile_deets(url_slug=None,
                  min_url_slug=None,
                  start_days_ago=44,
                  end_days_ago=30):
    """Collect profile-summary rows for profiles created inside a date
    window, print them as CSV, and upload the CSV to S3.

    :param url_slug: when given, the date window is ignored and only this
        exact profile is processed
    :param min_url_slug: lower bound on url_slug, applied on top of the window
    :param start_days_ago: start of the creation-date window, days before now
    :param end_days_ago: end of the creation-date window, days before now
    """
    start_date = datetime.datetime.utcnow() - datetime.timedelta(days=start_days_ago)
    end_date = datetime.datetime.utcnow() - datetime.timedelta(days=end_days_ago)
    q = db.session.query(Profile) \
        .filter(Profile.created.between(start_date, end_date))
    if url_slug:
        # An exact slug replaces the whole query (the date filter is dropped).
        q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
    elif min_url_slug:
        q = q.filter(Profile.url_slug>=min_url_slug)

    profile_deets = []
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"profile_deets: {url_slug}".format(
            url_slug=profile.url_slug))
        profile_deets += [get_profile_summary_dict(profile)]
        # Detach the processed profile from the session so it can be GC'd.
        db.session.expunge(profile)

    # print csv_of_dict(profile_deets)
    # with open("profile_deets.pickle", "wb") as handle:
    #     pickle.dump(profile_deets, handle)
    # print json.dumps(profile_deets, sort_keys=True, indent=4)
    print "****"
    csv_contents = csv_of_dict(profile_deets)
    print csv_contents
    import tempfile
    # Write the CSV to a temp file that survives close() so S3 can read it.
    temp_csv_file = tempfile.NamedTemporaryFile(delete=False)
    temp_csv_file.write(csv_contents)
    temp_csv_file.close()
    upload_to_s3(temp_csv_file.name, "exploring/profile_deets.csv")
    # NOTE(review): presumably gives the upload/logging time to settle
    # before the process exits - confirm this sleep is still needed.
    time.sleep(30)
def profile_deets_live(args):
    """Collect profile-summary rows for profiles selected by slug, print
    them as CSV, and upload the CSV to S3.

    :param args: dict with optional "url_slug" (exact profile) and
        "min_url_slug" (lower bound) keys, forwarded to profile_query().
        NOTE(review): profile_query() is not defined in this chunk -
        presumably it builds a Profile query from these filters; confirm.
    """
    url_slug = args.get("url_slug", None)
    min_url_slug = args.get("min_url_slug", None)
    q = profile_query(url_slug, min_url_slug)

    number_considered = 0.0  # NOTE(review): assigned but never used here
    start_time = datetime.datetime.utcnow()  # NOTE(review): likewise unused

    profile_deets = []
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"profile_deets: {url_slug}".format(
            url_slug=profile.url_slug))
        profile_deets += [get_profile_summary_dict(profile)]
        # Detach the processed profile from the session so it can be GC'd.
        db.session.expunge(profile)

    # print csv_of_dict(profile_deets)
    # with open("profile_deets.pickle", "wb") as handle:
    #     pickle.dump(profile_deets, handle)
    # print json.dumps(profile_deets, sort_keys=True, indent=4)
    print "****"
    csv_contents = csv_of_dict(profile_deets)
    print csv_contents
    import tempfile
    # Write the CSV to a temp file that survives close() so S3 can read it.
    temp_csv_file = tempfile.NamedTemporaryFile(delete=False)
    temp_csv_file.write(csv_contents)
    temp_csv_file.close()
    upload_to_s3(temp_csv_file.name, "exploring/profile_deets.csv")
    # NOTE(review): presumably gives the upload/logging time to settle
    # before the process exits - confirm this sleep is still needed.
    time.sleep(30)
def add_product_deets_for_everyone(url_slug=None, skip_until_url_slug=None):
    """Recompute and store product deets for every profile (or one profile).

    :param url_slug: when given, process only this exact profile
    :param skip_until_url_slug: skip profiles whose slug sorts before this
        value (case-insensitive); useful for resuming an interrupted run
    """
    if url_slug:
        profile_iterator = [Profile.query.filter_by(url_slug=url_slug).first()]
    else:
        q = db.session.query(Profile)
        profile_iterator = windowed_query(q, Profile.url_slug, 25)
        # profile_iterator = page_query(Profile.query.order_by(Profile.url_slug.asc()))

    # Tag every row written in this pass with a shared run identifier.
    run_id = datetime.datetime.utcnow().isoformat()
    for profile in profile_iterator:
        if skip_until_url_slug and skip_until_url_slug.lower() > profile.url_slug.lower():
            logger.info(u"in add_product_deets_for_everyone and skipping {url_slug}".format(
                url_slug=profile.url_slug))
            continue
        logger.info(u"add_product_deets_for_everyone: {url_slug}".format(
            url_slug=profile.url_slug))
        for product in profile.products_not_removed:
            # logger.info(u"add_product_deets_for_everyone: {url_slug}, tiid={tiid}".format(
            #     url_slug=profile.url_slug, tiid=product.tiid))
            product_deets = populate_product_deets(profile, product)  # not delayed
            product_deets.run_id = run_id
            db.session.add(product_deets)
        # Commit once per profile.
        db.session.commit()
def dedup_everyone(url_slug=None, min_url_slug=None):
    """Remove duplicate products from every live profile.

    :param url_slug: when given, only process this exact profile
    :param min_url_slug: otherwise, when given, only process profiles whose
        slug is >= this value
    """
    query = db.session.query(Profile)
    if url_slug:
        query = query.filter(Profile.url_slug == url_slug)
    elif min_url_slug:
        query = query.filter(Profile.url_slug >= min_url_slug)

    for profile in windowed_query(query, Profile.url_slug, 25):
        if not profile.is_live:
            continue
        logger.info(u"dedup: {url_slug}".format(url_slug=profile.url_slug))
        profile.remove_duplicates()
def mint_stripe_customers_for_all_profiles():
    """Create a Stripe customer for every profile that does not have one yet.

    Advisors get the "base" plan with the advisor coupon applied; everyone
    else gets the plain "base" plan. The new Stripe customer id is written
    back to the profile and committed per profile.

    NOTE(review): the ``stripe`` module is used here but no ``import stripe``
    is visible in this chunk - confirm it is imported elsewhere, otherwise
    calling this raises NameError.
    """
    stripe.api_key = os.getenv("STRIPE_API_KEY")
    for profile in page_query(Profile.query.order_by(Profile.email.asc())):
        if profile.stripe_id:
            # Idempotent: skip profiles that already have a Stripe customer.
            print u"Already a Stripe customer for {email}; skipping".format(
                email=profile.email
            )
            continue
        full_name = u"{first} {last}".format(
            first=profile.given_name,
            last=profile.surname
        )
        if profile.is_advisor:
            print u"making an Advisor Stripe customer for {email} ".format(email=profile.email)
            stripe_customer = stripe.Customer.create(
                description=full_name,
                email=profile.email,
                plan="base",
                coupon="ADVISOR_96309"
            )
        else:
            print u"making a regular Stripe customer for {email} ".format(email=profile.email)
            stripe_customer = stripe.Customer.create(
                description=full_name,
                email=profile.email,
                plan="base"
            )
        print u"Successfully made stripe id " + stripe_customer.id
        profile.stripe_id = stripe_customer.id
        db.session.merge(profile)
        db.session.commit()
def write_500_random_profile_urls():
urls = []
sample_size = 500
for profile in page_query(Profile.query):
products_count = len(profile.tiids)
if products_count > 0:
url = "https://staging-impactstory.org/" + profile.url_slug
urls.append([products_count, url])
logger.info(u"getting a new profile url out: {url}".format(
url=url
))
sampled_urls = random.sample(urls, sample_size)
logger.info(u"writing our {sample_size} sampled profile URLs".format(
sample_size=sample_size
))
for row in sampled_urls:
try:
print "{products_count},{url}".format(
products_count=row[0],
url=row[1]
)
except UnicodeEncodeError:
pass # whatever, we don't need exactly 500
def email_report_to_live_profiles(url_slug=None, min_url_slug=None, max_emails=None):
    """Send the notification email to each eligible live profile.

    A profile is skipped when it is not live, has no usable email address,
    has unsubscribed ("none" frequency), or already received an email in
    the last 7 days. Per-profile exceptions are logged and swallowed so one
    bad profile cannot stop the whole run.

    :param url_slug: restrict to a single profile slug
    :param min_url_slug: or start from this slug onward
    :param max_emails: stop after this many emails have been sent
    """
    number_emails_sent = 0
    q = profile_query(url_slug, min_url_slug)
    for profile in windowed_query(q, Profile.url_slug, 25):
        # logger.debug(u"in email_report_to_live_profiles for {url_slug}".format(
        #     url_slug=profile.url_slug))
        try:
            # NOTE(review): the bare "pass" before each log call below is a
            # no-op - each skip branch just logs and sends nothing.
            if not profile.is_live:
                pass
                logger.info(u"not sending, profile is not live {url_slug}".format(url_slug=profile.url_slug))
            elif not profile.email or (u"@" not in profile.email):
                pass
                logger.info(u"not sending, no email address for {url_slug}".format(url_slug=profile.url_slug))
            elif profile.notification_email_frequency == "none":
                pass
                logger.info(u"not sending, {url_slug} is unsubscribed".format(url_slug=profile.url_slug))
            elif profile.last_email_sent and ((datetime.datetime.utcnow() - profile.last_email_sent).days < 7):
                pass
                logger.info(u"not sending, {url_slug} already got email this week".format(url_slug=profile.url_slug))
            else:
                # logger.info(u"checking email for {url_slug}".format(url_slug=profile.url_slug))
                # status = tasks.send_email_if_new_diffs.delay(profile.id)
                status = tasks.send_email_if_new_diffs(profile)
                if status=="email sent":
                    number_emails_sent += 1
                    if max_emails:
                        logger.info(u"sent an email, have {num} left before hitting max".format(
                            num = max_emails-number_emails_sent))
                logger.info(u"checked email for {url_slug}, status={status}".format(
                    url_slug=profile.url_slug, status=status))
        except Exception as e:
            # Log and keep going with the next profile.
            logger.warning(u"EXCEPTION in email_report_to_everyone_who_needs_one for {url_slug}, skipping to next profile. Error {e}".format(
                url_slug=profile.url_slug, e=e))
            pass
        if max_emails and number_emails_sent >= max_emails:
            logger.info(u"Reached max_number_profiles_to_consider, so no done queueing email")
            break
    return
def email_all_profiles_about_tng(url_slug=None, min_url_slug=None, max_emails=None):
    """Send the one-off "tng" announcement email to every selected profile
    that has a usable email address.

    max_emails is accepted for CLI symmetry with email_report_to_live_profiles
    but is not enforced here.
    """
    q = profile_query(url_slug, min_url_slug)
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.debug(u"in email_all_profiles_about_tng for {url_slug}".format(
            url_slug=profile.url_slug))
        try:
            if not profile.email or (u"@" not in profile.email):
                logger.info(u"not sending, no email address for {url_slug}".format(url_slug=profile.url_slug))
            else:
                logger.info(u"emailing {url_slug} about tng".format(url_slug=profile.url_slug))
                tasks.send_tng_email(profile)
        except Exception as e:
            # best-effort batch: log and move on to the next profile
            logger.warning(u"EXCEPTION in email_all_profiles_about_tng for {url_slug}, skipping to next profile. Error {e}".format(
                url_slug=profile.url_slug, e=e))
    return
def build_refsets(save_after_every_profile=False):
    """Rebuild the reference sets from every profile in the database.

    When save_after_every_profile is True the reference-set lists are
    persisted after each profile is processed (slow; debugging aid);
    they are always persisted once at the end.
    """
    builder = RefsetBuilder()
    all_profiles = db.session.query(Profile)
    for profile in windowed_query(all_profiles, Profile.url_slug, 25):
        builder.process_profile(profile)
        if save_after_every_profile:
            save_all_reference_set_lists(builder)
    save_all_reference_set_lists(builder)
def collect_embed(url_slug=None, min_url_slug=None):
    """Backfill embed markup for display products that don't have any yet.

    Walks the selected profiles, asks each markup-less product for embed
    markup, and commits whatever is found; progress is printed as it goes.
    """
    q = profile_query(url_slug, min_url_slug)
    start_time = datetime.datetime.utcnow()
    # floats so the rate division at the bottom never truncates (Python 2)
    number_considered = 0.0
    number_markups = 0.0
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.debug("-->collecting embed for {url_slug}".format(
            url_slug=profile.url_slug))
        for product in profile.display_products:
            if not product.embed_markup:
                number_considered += 1
                try:
                    embed_markup = product.get_embed_markup()
                except Exception:
                    # best-effort backfill: skip products whose provider errors out
                    print "got an exception, skipping", product.aliases.best_url
                    continue
                if embed_markup:
                    print "GOT MARKUP for", product.tiid, product.host, product.aliases.best_url, embed_markup
                    # print " got an embed for", product.genre, "!"
                    product.embed_markup = embed_markup
                    db.session.add(product)
                    # commit per product so partial progress survives a crash
                    commit(db)
                    number_markups += 1
        # NOTE(review): rate report placed per-profile here; original
        # indentation was lost -- confirm it wasn't meant to run once at the end
        elapsed_seconds = (datetime.datetime.utcnow() - start_time).seconds
        print "elapsed seconds=", elapsed_seconds, "; number per second=", number_considered/(0.1+elapsed_seconds)
def live_profile_query():
    """Query for "live" profiles: advisors, paying (stripe) customers, or
    anyone still inside the free-trial window."""
    from totalimpactwebapp.profile import default_free_trial_days
    trial_cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=default_free_trial_days)
    return db.session.query(Profile).filter(
        or_(Profile.is_advisor != None,
            Profile.stripe_id != None,
            Profile.created >= trial_cutoff))
def profile_query(url_slug=None, min_url_slug=None):
    """Build the standard profile selection: a single profile when url_slug
    is given, otherwise all live profiles; optionally bounded below by
    min_url_slug (useful for resuming long batch runs)."""
    query = (db.session.query(Profile).filter(Profile.url_slug == url_slug)
             if url_slug
             else live_profile_query())
    if min_url_slug:
        query = query.filter(Profile.url_slug >= min_url_slug)
    return query
def borked_pinboards_for_life_profiles(url_slug=None, min_url_slug=None):
problem_urls = []
blank_urls = []
if url_slug:
q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
else:
q = live_profile_query()
if min_url_slug:
q = q.filter(Profile.url_slug>=min_url_slug)
for profile in windowed_query(q, Profile.url_slug, 25):
all_profile_tiids = [product.tiid for product in profile.products] #includes removed
print profile.url_slug, profile.id
board = Pinboard.query.filter_by(profile_id=profile.id).first()
if board:
tiids = [tiid for (dummy, tiid) in board.contents["one"]]
if not tiids:
logger.debug("{url} has blank pinboard".format(
url=profile.url_slug))
blank_urls.append(profile.url_slug)
for tiid in tiids:
if tiid not in all_profile_tiids:
logger.debug("{url} does not own this pinboard {board}".format(
url=profile.url_slug, board=board))
problem_urls.append(profile.url_slug)
logger.debug("problem urls: {urls}".format(
urls=problem_urls))
logger.debug("blank urls: {urls}".format(
urls=blank_urls))
def new_metrics_for_live_profiles(url_slug=None, min_url_slug=None, start_days_ago=7):
    """Refresh products for live-ish profiles not refreshed in the last
    start_days_ago days, sleeping between profiles to throttle providers."""
    if url_slug:
        q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
    else:
        # "live-ish": advisors, paying customers, or created in the last 30 days
        min_created_date = datetime.datetime.utcnow() - datetime.timedelta(days=30)
        q = db.session.query(Profile).filter(or_(Profile.is_advisor!=None, Profile.stripe_id!=None, Profile.created>=min_created_date))
    if min_url_slug:
        q = q.filter(Profile.url_slug>=min_url_slug)
    # also, only if not refreshed recently
    min_last_refreshed = datetime.datetime.utcnow() - datetime.timedelta(days=start_days_ago)
    q = q.filter(Profile.last_refreshed <= min_last_refreshed)
    start_time = datetime.datetime.utcnow()
    number_profiles = 0.0  # float: keeps the rate division below from truncating
    total_refreshes = 0
    for profile in windowed_query(q, Profile.url_slug, 25):
        number_profiles += 1
        print profile.url_slug, profile.id, profile.last_refreshed, len(profile.display_products)
        number_refreshes = len(profile.display_products)
        if number_refreshes:
            profile.refresh_products(source="scheduled")
            total_refreshes += number_refreshes
            # throttle: ~3s per product, capped at 2 minutes
            pause_length = min(number_refreshes * 3, 120)
            print "pausing", pause_length, "seconds after refreshing", number_refreshes, "products"
            time.sleep(pause_length)
    print total_refreshes, "total refreshes across", number_profiles, "profiles"
    elapsed_seconds = (datetime.datetime.utcnow() - start_time).seconds
    print "elapsed seconds=", elapsed_seconds, "; number profiles per second=", number_profiles/(0.1+elapsed_seconds)
from totalimpactwebapp.countries_info import country_iso_by_name
def update_mendeley_countries_for_live_profiles(url_slug=None, min_url_slug=None):
    """Migrate mendeley "countries" snaps from country names to ISO codes.

    Rewrites each snap's raw_value dict, mapping country names to codes via
    country_iso_by_name; keys not in the lookup are kept as-is (two-letter
    keys are presumably already ISO codes), so no counts are lost.
    """
    q = profile_query(url_slug, min_url_slug)
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"{url_slug} processing mendeley countries".format(
            url_slug=profile.url_slug))
        for product in profile.display_products:
            metric = product.get_metric_by_name("mendeley", "countries")
            if metric:
                snap = metric.most_recent_snap
                if not snap.raw_value:
                    # logger.error(u"{url_slug} has NO SNAP for tiid {tiid}".format(
                    #     url_slug=profile.url_slug, tiid=product.tiid))
                    # don't save this one to the db
                    continue
                new_snap_value = {}
                for country_name, country_count in snap.raw_value.iteritems():
                    if country_name in country_iso_by_name:
                        iso = country_iso_by_name[country_name]
                        new_snap_value[iso] = country_count
                        # logger.error(u"{country_name} -> {iso}".format(
                        #     country_name=country_name, iso=iso))
                    else:
                        # only complain when the key doesn't look like an ISO code
                        if len(country_name) != 2:
                            logger.error(u"Can't find country {country} in lookup".format(
                                country=country_name))
                        # keep the key unchanged so the count survives
                        new_snap_value[country_name] = country_count
                if new_snap_value:
                    logger.info(u"New snap value {snap}".format(
                        snap=new_snap_value))
                    snap.raw_value = new_snap_value
                    db.session.add(snap)
                    # commit per snap so partial progress survives a crash
                    commit(db)
def collect_new_mendeley(url_slug=None, min_url_slug=None):
    """Re-refresh products that already have mendeley "readers" metrics,
    for advisor/subscriber profiles, pausing between profiles."""
    if url_slug:
        q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
    else:
        q = db.session.query(Profile).filter(or_(Profile.is_advisor!=None, Profile.stripe_id!=None))
    if min_url_slug:
        q = q.filter(Profile.url_slug>=min_url_slug)
    start_time = datetime.datetime.utcnow()
    number_profiles = 0.0  # floats keep the rate division below from truncating
    total_refreshes = 0
    for profile in windowed_query(q, Profile.url_slug, 25):
        number_profiles += 1
        number_refreshes = 0.0
        print profile.url_slug, profile.id
        for product in profile.display_products:
            if product.get_metric_by_name("mendeley", "readers"):
                number_refreshes += 1
                refresh_products_from_tiids(product.profile_id, [product.tiid], source="scheduled")
        if number_refreshes:
            total_refreshes += number_refreshes
            # throttle: ~2s per refreshed product, capped at one minute
            pause_length = min(number_refreshes * 2, 60)
            print "pausing", pause_length, "seconds after refreshing", number_refreshes, "products"
            time.sleep(pause_length)
    print total_refreshes, "total refreshes across", number_profiles, "profiles"
    elapsed_seconds = (datetime.datetime.utcnow() - start_time).seconds
    print "elapsed seconds=", elapsed_seconds, "; number profiles per second=", number_profiles/(0.1+elapsed_seconds)
def linked_accounts(account_type, url_slug=None, min_url_slug=None):
    """Ensure each profile with a linked <account_type> account has its
    account-level product.

    Existing account products with followers are left alone; ones without
    followers get refreshed; missing ones are created from the linked account.
    """
    column_name = account_type+"_id"  # e.g. "twitter_id" column on Profile
    if url_slug:
        q = db.session.query(Profile).filter(getattr(Profile, column_name) != None).filter(Profile.url_slug==url_slug)
    else:
        if min_url_slug:
            q = db.session.query(Profile).filter(getattr(Profile, column_name) != None).filter(Profile.url_slug>=min_url_slug)
        else:
            q = db.session.query(Profile).filter(getattr(Profile, column_name) != None)
    number_considered = 0.0
    for profile in windowed_query(q, Profile.url_slug, 25):
        number_considered += 1
        logger.info(u"{url_slug} previous number of account products: {num}".format(
            url_slug=profile.url_slug, num=len(profile.account_products)))
        existing_account_product_list = [p for p in profile.account_products if p.index_name==account_type]
        if existing_account_product_list:
            existing_account_product = existing_account_product_list[0]
            if existing_account_product.followers:
                logger.info(u"{url_slug} already has an account_product for {account_type}, so skipping".format(
                    url_slug=profile.url_slug, account_type=account_type))
            else:
                logger.info(u"{url_slug} already has an account_product for {account_type}, but no followers, so refreshing".format(
                    url_slug=profile.url_slug, account_type=account_type))
                refresh_products_from_tiids(existing_account_product.profile_id, [existing_account_product.tiid], source="scheduled")
        else:
            logger.info(u"{url_slug} had no account_product for {account_type}, so adding".format(
                url_slug=profile.url_slug, account_type=account_type))
            tiids = profile.update_products_from_linked_account(account_type, update_even_removed_products=False)
            if tiids:
                logger.info(u"{url_slug} added {num} products for {account_type}".format(
                    url_slug=profile.url_slug, num=len(tiids), account_type=account_type))
# One-off list of product tiids whose Twitter metrics still need collecting;
# refresh_twitter() below uses this as its default scope.
tiids_that_need_twitter = """sl1uu922rwpl1htii64upwjs
zh61suvqesowwqi1qcn0v413
5jugcbn9anjkrb84y3zyqwix""".split()
def refresh_twitter(min_tiid=None):
if min_tiid:
q = db.session.query(Product).filter(Product.profile_id != None).filter(Product.tiid>min_tiid)
else:
q = db.session.query(Product).filter(Product.profile_id != None).filter(Product.tiid.in_(tiids_that_need_twitter))
start_time = datetime.datetime.utcnow()
number_considered = 0.0
number_refreshed = 0
for product in windowed_query(q, Product.tiid, 25):
number_considered += 1
try:
if product.biblio.repository=="Twitter" and len(product.metrics)==0:
print "refreshing", product.tiid, number_refreshed
refresh_products_from_tiids(product.profile_id, [product.tiid], source="scheduled")
number_refreshed += 1
if number_refreshed >= 15:
#api limit
print "refreshed 15, so breaking now"
break
except AttributeError:
pass
def refresh_tweeted_products(min_tiid=None):
    """Refresh every product that has altmetric.com tweet metrics,
    throttled to roughly two refreshes per second."""
    if min_tiid:
        # resume support: only products past min_tiid
        q = db.session.query(Product).filter(Product.profile_id != None).filter(Product.tiid>min_tiid)
    else:
        q = db.session.query(Product).filter(Product.profile_id != None)
    start_time = datetime.datetime.utcnow()
    number_considered = 0.0  # float: keeps the rate division from truncating
    number_refreshed = 0
    for product in windowed_query(q, Product.tiid, 25):
        number_considered += 1
        try:
            if product.get_metric_by_name("altmetric_com", "tweets"):
                print number_refreshed, ". refreshing: ", product.tiid
                refresh_products_from_tiids(product.profile_id, [product.tiid], source="scheduled")
                number_refreshed += 1
                time.sleep(0.5)  # throttle
                # NOTE(review): progress report placed after each refresh;
                # original indentation was lost -- confirm placement
                elapsed_seconds = (datetime.datetime.utcnow() - start_time).seconds
                print "elapsed seconds=", elapsed_seconds, "; number per second=", number_considered/(0.1+elapsed_seconds)
        except AttributeError:
            # product missing an expected attribute; skip it
            pass
def run_through_altmetric_tweets(url_slug=None, min_url_slug=None):
    """Save tweet objects for each live profile's products (altmetric data),
    printing a running total as it goes."""
    q = profile_query(url_slug, min_url_slug)
    total_objects_saved = 0
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"{url_slug}".format(
            url_slug=profile.url_slug))
        new_objects = save_product_tweets_for_profile(profile)
        total_objects_saved += len(new_objects)
        # NOTE(review): running total printed per profile; original
        # indentation was lost -- confirm it wasn't a once-at-the-end summary
        print "total_objects_saved", total_objects_saved
def run_through_twitter_pages(url_slug=None, min_url_slug=None):
    """Save recent tweets for every selected profile that has a linked
    Twitter account (advisors/subscribers, or one profile by slug)."""
    from totalimpactwebapp.tweet import save_recent_tweets

    with_twitter = db.session.query(Profile).filter(Profile.twitter_id != None)
    if url_slug:
        query = with_twitter.filter(Profile.url_slug == url_slug)
    else:
        query = with_twitter.filter(or_(Profile.is_advisor != None, Profile.stripe_id != None))
    if min_url_slug:
        query = query.filter(Profile.url_slug >= min_url_slug)

    for profile in windowed_query(query, Profile.url_slug, 25):
        logger.info(u"{url_slug} has twitter handle {twitter_handle}, now saving tweets".format(
            url_slug=profile.url_slug, twitter_handle=profile.twitter_id))
        save_recent_tweets(profile.id, profile.twitter_id)
# def star_best_products(args):
# url_slug = args.get("url_slug", None)
# min_url_slug = args.get("min_url_slug", None)
# q = profile_query(url_slug, min_url_slug)
# number_considered = 0.0
# start_time = datetime.datetime.utcnow()
# for profile in windowed_query(q, Profile.url_slug, 25):
# number_considered += 1
# board = Pinboard.query.filter_by(profile_id=profile.id).first()
# if board:
# # already has one! skip and keep going
# continue
# if not profile.products:
# # print "no products"
# continue
# logger.info(u"*******saved pinboard for {url_slug}".format(
# url_slug=profile.url_slug))
# contents = auto_populate_pinboard(profile)
# board = Pinboard(profile_id=profile.id, contents=contents)
# db.session.add(board)
# commit(db)
# elapsed_seconds = (datetime.datetime.utcnow() - start_time).seconds
# print "elapsed seconds=", elapsed_seconds, "; number per second=", number_considered/(0.1+elapsed_seconds)
def count_news_for_subscribers(url_slug=None, min_url_slug=None):
    """Tally how many paid subscribers have products with news mentions.

    Logs a running summary (totals, percentage, and a histogram of
    products-with-news per profile) after each counted subscriber.
    """
    if url_slug:
        q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
    else:
        if min_url_slug:
            q = db.session.query(Profile).filter(Profile.stripe_id!=None).filter(Profile.url_slug>=min_url_slug)
        else:
            q = db.session.query(Profile).filter(Profile.stripe_id!=None)
    number_considered = 0.0  # float so the percent division below doesn't truncate
    total_with_news = 0
    # histogram: {number_of_products_with_news: number_of_profiles}
    total_number_of_products_with_news = defaultdict(int)
    start_time = datetime.datetime.utcnow()  # NOTE(review): assigned but never used
    for profile in windowed_query(q, Profile.url_slug, 25):
        if profile.is_paid_subscriber:
            number_considered += 1
            logger.info(u"count_news_for_subscribers: {url_slug}".format(
                url_slug=profile.url_slug))
            number_of_products_with_news = 0
            for product in profile.products_not_removed:
                metric = product.get_metric_by_name("altmetric_com", "news")
                if metric:
                    number_of_products_with_news += 1
            if number_of_products_with_news:
                total_number_of_products_with_news[number_of_products_with_news] += 1
                total_with_news += 1
            # running summary, re-logged after every counted subscriber
            logger.info(u"of {total} profiles, total_with_news:{total_with_news} ({percent}%)\ntotal_number_of_products_with_news:{total_number_of_products_with_news}".format(
                total=number_considered,
                total_with_news=total_with_news,
                percent=100*total_with_news/number_considered,
                total_number_of_products_with_news=total_number_of_products_with_news))
        else:
            logger.info(u"count_news_for_subscribers: not counting {url_slug} because not a subscriber".format(
                url_slug=profile.url_slug))
def send_drip_emails(url_slug=None, min_url_slug=None):
    """Send the "last-chance" drip email to trialing, non-paying profiles
    at least 28 days old that haven't already received that milestone."""
    MIN_AGE_DAYS_FOR_DRIP_EMAIL = 28
    DRIP_MILESTONE = "last-chance"
    if url_slug:
        q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
    else:
        drip_email_create_date = datetime.datetime.utcnow() - datetime.timedelta(days=MIN_AGE_DAYS_FOR_DRIP_EMAIL)
        logger.info(u"in send_drip_emails with drip_email_create_date:{drip_email_create_date}".format(
            drip_email_create_date=drip_email_create_date))
        # only profiles that have null stripe ids and are at least drip email days old
        q = db.session.query(Profile) \
            .filter(Profile.stripe_id==None) \
            .filter(Profile.created <= drip_email_create_date)
    if min_url_slug:
        q = q.filter(Profile.url_slug >= min_url_slug)
    for profile in windowed_query(q, Profile.url_slug, 25):
        # logger.info(u"in send_drip_emails with {url_slug}".format(
        #     url_slug=profile.url_slug))
        if profile.is_trialing and not profile.received_drip_email(DRIP_MILESTONE):
            logger.info(u"in send_drip_emails, sending email to: {url_slug}".format(
                url_slug=profile.url_slug))
            tasks.send_drip_email(profile, DRIP_MILESTONE)
            # record the send so the same milestone isn't mailed twice
            drip_log = log_drip_email(profile, DRIP_MILESTONE)
            logger.info(u"in send_drip_emails, SENT EMAIL to: {url_slug}".format(
                url_slug=profile.url_slug))
        else:
            pass
            # logger.info(u"in send_drip_emails, but NOT sending email to: {url_slug}".format(
            #     url_slug=profile.url_slug))
def ip_deets():
from totalimpactwebapp.interaction import Interaction
from totalimpactwebapp.interaction import get_ip_insights
q = db.session.query(Interaction)
cache = {}
for interaction in windowed_query(q, Interaction.ip, 25):
if interaction.country:
continue
if interaction.ip in cache:
interaction.country, interaction.user_type = cache[interaction.ip]
else:
insights = get_ip_insights(interaction.ip)
interaction.country = insights.country.iso_code
interaction.user_type = insights.traits.user_type
cache[interaction.ip] = interaction.country, interaction.user_type
print interaction.country, interaction.user_type
db.session.add(interaction)
commit(db)
def countries_for_all_profiles(url_slug=None, min_created_date=None):
    """Touch country data for up to 500 advisor/subscriber profiles.

    Returns True once 500 profiles have been processed; otherwise falls
    off the end (returns None).
    """
    if url_slug:
        q = db.session.query(Profile).filter(Profile.url_slug==url_slug)
    else:
        q = db.session.query(Profile).filter(or_(Profile.is_advisor!=None, Profile.stripe_id!=None))
    if min_created_date:
        q = q.filter(Profile.created >= min_created_date)
    countries = {}  # NOTE(review): never populated or read
    start_time = datetime.datetime.utcnow()  # NOTE(review): unused
    number_profiles = 0.0
    total_refreshes = 0  # NOTE(review): unused
    for profile in windowed_query(q, Profile.created, 25): # sort by created
        if number_profiles > 500:
            print "ok, got country data for 500 profiles. quitting."
            return True
        number_profiles += 1
        number_refreshes = 0.0  # NOTE(review): assigned but never used
        print "getting countries for", profile.url_slug, profile.id, profile.created
        for my_country in profile.countries:
            # iterating presumably forces profile.countries to be computed
            # (and cached/persisted) as a side effect -- TODO confirm
            pass
def refresh_tiid(tiid):
    """Kick off a refresh for a single product by tiid; echoes and returns
    whatever refresh_products_from_tiids reports back."""
    refreshed = refresh_products_from_tiids(None, [tiid])
    print(refreshed)
    return refreshed
def update_twitter_followers(max_pages):
    """Collect follower data for the stalest tweeters, 100 per page, for up
    to max_pages pages (default 10 when max_pages is falsy).

    Only tweeters not collected in the last day are considered; each pass
    re-queries so freshly collected tweeters fall out of the next page.
    """
    stale_cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    pages_to_run = max_pages or 10
    for _ in range(pages_to_run):
        batch = Tweeter.query.filter(Tweeter.is_deleted==None) \
            .filter(Tweeter.last_collected_date < stale_cutoff) \
            .order_by(Tweeter.last_collected_date.asc()) \
            .limit(100) \
            .all()
        if batch:
            get_and_save_tweeter_followers(batch)
def update_tweet_text_for_live_profiles(url_slug=None, min_url_slug=None):
    """Re-parse and persist tweet text for each selected live profile."""
    selected = profile_query(url_slug, min_url_slug)
    for profile in windowed_query(selected, Profile.url_slug, 25):
        logger.info(u"in update_tweet_text_for_live_profiles for {url_slug}".format(
            url_slug=profile.url_slug))
        profile.parse_and_save_tweets()
def update_this_profile(profile):
    """Pull new products from a live profile's linked accounts, then queue a
    metrics refresh for all of its products.

    Never raises: any exception is logged and swallowed so batch callers
    can continue with the next profile.
    """
    logger.info(u"**updating {url_slug: <16} is_live: {is_live}, next_refresh: {next_refresh}".format(
        url_slug=profile.url_slug, is_live=profile.is_live, next_refresh=profile.next_refresh.isoformat()[0:10]))
    try:
        if profile.is_live:
            number_products_before = len(profile.tiids)
            added_tiids = profile.update_all_linked_accounts(add_even_if_removed=False)
            number_products_after = number_products_before + len(added_tiids)
            if len(added_tiids)==0:
                logger.info(u" NO CHANGE on update for {url_slug}, {number_products_before} products".format(
                    number_products_before=number_products_before,
                    url_slug=profile.url_slug))
            else:
                logger.info(u" BEFORE={number_products_before}, AFTER={number_products_after}; {percent} for {url_slug}".format(
                    number_products_before=number_products_before,
                    number_products_after=number_products_after,
                    percent=100.0*(number_products_after-number_products_before)/number_products_before,
                    url_slug=profile.url_slug))
        # refresh all profiles, live and not, after the update from linked accounts is done
        profile.refresh_products("scheduled") # puts them on celery
    except Exception as e:
        logger.exception(e)
        logger.debug(u"Exception in main loop on {url_slug}, so skipping".format(
            url_slug=profile.url_slug))
def update_profiles(limit=5, url_slug=None):
    """Update up to `limit` profiles whose next_refresh is due; profiles
    with more than 500 products are skipped as too expensive."""
    # shouldn't need this, but hack to get around the fact that we limit profiles
    # before knowing how many have too many products
    # don't want to get too few profiles, then updates stall out
    hack_fudge_factor_for_too_big_profiles = 25
    if url_slug:
        q = db.session.query(Profile.id).filter(Profile.url_slug==url_slug)
    else:
        q = db.session.query(Profile.id).filter(Profile.next_refresh <= datetime.datetime.utcnow())
    q = q.limit(limit + hack_fudge_factor_for_too_big_profiles)
    number_profiles = 0.0
    # NOTE(review): q selects Profile.id, so each row is a one-element tuple;
    # it is used directly in the filter and in .get() below -- presumably
    # SQLAlchemy tolerates this, but confirm
    for profile_id in q.all():
        product_count = db.session.query(Product.tiid).filter(Product.profile_id==profile_id, Product.removed==None).count()
        logger.info(u"profile {profile_id} has {product_count} products".format(
            profile_id=profile_id, product_count=product_count))
        if product_count > 500:
            logger.warning(u"Too many products (n={product_count}) for profile {profile_id}, skipping update".format(
                product_count=product_count, profile_id=profile_id))
        else:
            profile = Profile.query.get(profile_id)
            # stop once `limit` profiles have actually been updated
            if limit and number_profiles >= limit:
                logger.info(u"updated all {limit} profiles, done for now.".format(
                    limit=limit))
                return
            update_this_profile(profile)
            number_profiles += 1
def live_profile_emails(args):
    """CLI helper: print the email address of every selected live profile.

    args keys used: url_slug, min_url_slug.
    """
    url_slug = args.get("url_slug", None)
    min_url_slug = args.get("min_url_slug", None)
    q = profile_query(url_slug, min_url_slug)
    # removed unused local: number_profiles_updated was assigned but never read
    for profile in windowed_query(q, Profile.url_slug, 25):
        print(profile.email)
def update_all_live_profiles(args):
    """CLI entry: update every live profile that is due for a refresh.

    args keys: url_slug, min_url_slug, force_all (skip the next_refresh
    due-date check), limit (max profiles per run; forced to 1 when a single
    url_slug is given).  Sleeps between profiles to throttle providers.
    """
    url_slug = args.get("url_slug", None)
    min_url_slug = args.get("min_url_slug", None)
    force_all = args.get("force_all", None)
    q = profile_query(url_slug, min_url_slug)
    if not force_all:
        q = q.filter(Profile.next_refresh <= datetime.datetime.utcnow())
    limit = args.get("limit", 5)
    if url_slug:
        limit = 1
    number_profiles_updated = 0.0
    for profile in windowed_query(q, Profile.next_refresh, 25, desc=False):
        product_count = len(profile.products_not_removed)
        logger.info(u"profile {url_slug} has {product_count} products".format(
            url_slug=profile.url_slug, product_count=product_count))
        if product_count > 500:
            # too expensive to refresh; skip it
            logger.warning(u"Too many products (n={product_count}) for profile {url_slug}, skipping update".format(
                product_count=product_count, url_slug=profile.url_slug))
        else:
            update_this_profile(profile)
            number_profiles_updated += 1
            if limit and number_profiles_updated >= limit:
                logger.info(u"updated all {limit} profiles, done for now.".format(
                    limit=limit))
                return
            # throttle: ~3s per product, capped at 2 minutes
            pause_length = min(product_count * 3, 120)
            print "pausing", pause_length, "seconds after refreshing", product_count, "products"
            time.sleep(pause_length)
def say_hi(one, two, three):
    """Smoke-test entry point: ignores its three arguments and prints a greeting."""
    print("hi!")
def rq_metrics_for_all_live_profiles(args):
    """Queue (or, with --no-rq, run inline) a metrics collection job for the
    least-recently-refreshed products of live profiles, one job per
    (product, metrics provider) pair.

    args keys: url_slug (one profile), tiid (one product), no_rq, limit
    (max products; forced to 1 for a single url_slug).
    """
    url_slug = args.get("url_slug", None)
    tiid = args.get("tiid", None)
    no_rq = args.get("no_rq", False)
    limit = args.get("limit", 5)
    if url_slug:
        limit = 1
    queue_number = 0  # all jobs go on the first rq queue
    q = db.session.query(Product.tiid).select_from(Profile)
    q = q.filter(Product.removed == None)
    q = q.join(Profile.products)
    if url_slug:
        q = q.filter(Profile.url_slug==url_slug)
    elif tiid:
        q = q.filter(Product.tiid==tiid)
    else:
        # default scope: advisors, subscribers, and free-trial-window profiles
        from totalimpactwebapp.profile import default_free_trial_days
        min_created_date = datetime.datetime.utcnow() - datetime.timedelta(days=default_free_trial_days)
        q = q.filter(or_(Profile.is_advisor!=None, Profile.stripe_id!=None, Profile.created>=min_created_date))
        # q = q.filter(Profile.next_refresh <= datetime.datetime.utcnow())
    q = q.order_by(Product.last_refresh_finished) # oldest first
    q = q.limit(limit)
    print "q=", q
    all_metrics_provider_names = [p.provider_name for p in ProviderFactory.get_providers(default_settings.PROVIDERS, "metrics")]
    # NOTE(review): q selects Product.tiid, so each row is a one-element
    # tuple; it's passed to the provider wrapper as-is -- confirm intended
    for tiid in q.all():
        print "tiid", tiid
        for provider_name in all_metrics_provider_names:
            print "putting {} on rq queue to run metrics through {}".format(
                tiid, provider_name)
            if no_rq:
                print "asked for no-rq, so calling right now"
                provider_method_wrapper(tiid, provider_name, "metrics")
            else:
                job = ti_queues[queue_number].enqueue_call(
                    func=provider_method_wrapper,
                    args=(tiid, provider_name, "metrics"),
                    timeout=60 * 10,
                    result_ttl=0 # number of seconds
                )
                job.save()
def debug_biblio_for_live_profiles(args):
    """Find products whose biblio was mis-parsed (raw "journal =" bibtex
    leaked into the journal field) and re-derive their biblio from the
    stored full_citation via the Bibtex provider.

    args keys used: url_slug, min_url_slug.
    """
    url_slug = args.get("url_slug", None)
    min_url_slug = args.get("min_url_slug", None)
    q = profile_query(url_slug, min_url_slug)
    from totalimpact.providers.bibtex import Bibtex
    bibtex_provider = Bibtex()
    from totalimpactwebapp.product import put_biblio_in_product
    for profile in windowed_query(q, Profile.url_slug, 25):
        logger.info(u"in debug_biblio_for_live_profiles for {url_slug}".format(
            url_slug=profile.url_slug))
        for product in profile.products_not_removed:
            # "journal =" inside the journal field means raw bibtex survived parsing
            if product.biblio \
                    and hasattr(product.biblio, "journal") \
                    and "journal =" in product.biblio.journal \
                    and hasattr(product.biblio, "full_citation") \
                    and "journal" in product.biblio.full_citation:
                print "got one:", product.tiid, product.biblio.full_citation
                aliases = bibtex_provider.member_items(product.biblio.full_citation)
                print aliases
                for alias in aliases:
                    (ns, nid) = alias
                    if ns=="biblio":
                        # overwrite the broken biblio and persist immediately
                        product = put_biblio_in_product(product, nid, provider_name="bibtex")
                        print product.biblio
                        db.session.merge(product)
                        commit(db)
            else:
                pass
                # print ".",
def main(function, args):
    """Dispatch a CLI command name to its implementation.

    Known names get their specific positional arguments pulled out of the
    parsed-args dict; any other name is looked up in this module's globals
    and called with the whole args dict.
    """
    dispatch = {
        "emailreports": lambda: email_report_to_live_profiles(args["url_slug"], args["min_url_slug"], args["max_emails"]),
        "email_tng": lambda: email_all_profiles_about_tng(args["url_slug"], args["min_url_slug"], args["max_emails"]),
        "dedup": lambda: dedup_everyone(args["url_slug"], args["min_url_slug"]),
        "productdeets": lambda: add_product_deets_for_everyone(args["url_slug"], args["skip_until_url_slug"]),
        "refsets": lambda: build_refsets(args["save_after_every_profile"]),
        "embed": lambda: collect_embed(args["url_slug"], args["min_url_slug"]),
        "linked_accounts": lambda: linked_accounts(args["account_type"], args["url_slug"], args["min_url_slug"]),
        "refresh_tweeted_products": lambda: refresh_tweeted_products(args["min_tiid"]),
        "run_through_twitter_pages": lambda: run_through_twitter_pages(args["url_slug"], args["min_url_slug"]),
        "count_news": lambda: count_news_for_subscribers(args["url_slug"], args["min_url_slug"]),
        "drip_email": lambda: send_drip_emails(args["url_slug"], args["min_url_slug"]),
        "profile_deets": lambda: profile_deets(args["url_slug"], args["min_url_slug"], args["start_days_ago"], args["end_days_ago"]),
        "new_mendeley": lambda: collect_new_mendeley(args["url_slug"], args["min_url_slug"]),
        "ip_deets": lambda: ip_deets(),
        "run_through_altmetric_tweets": lambda: run_through_altmetric_tweets(args["url_slug"], args["min_url_slug"]),
        "new_metrics_for_live_profiles": lambda: new_metrics_for_live_profiles(args["url_slug"], args["min_url_slug"], args["start_days_ago"]),
        "borked_pinboards_for_life_profiles": lambda: borked_pinboards_for_life_profiles(args["url_slug"], args["min_url_slug"]),
        "update_mendeley_countries_for_live_profiles": lambda: update_mendeley_countries_for_live_profiles(args["url_slug"], args["min_url_slug"]),
        "update_profiles": lambda: update_profiles(args["limit"], args["url_slug"]),
        "refresh_tiid": lambda: refresh_tiid(args["tiid"]),
        "update_twitter_followers": lambda: update_twitter_followers(args["max_pages"]),
        "update_tweet_text_for_live_profiles": lambda: update_tweet_text_for_live_profiles(args["url_slug"], args["min_url_slug"]),
    }
    if function in dispatch:
        dispatch[function]()
    else:
        # fall back: call the function by its name in this module, with all args
        # http://stackoverflow.com/a/4605/596939
        globals()[function](args)
if __name__ == "__main__":
    # make sure all tables exist before any command touches the db
    db.create_all()

    # get args from the command line:
    parser = argparse.ArgumentParser(description="Run stuff.")
    parser.add_argument('function', type=str, help="one of emailreports, refsets, dedup, productdeets")
    parser.add_argument('--url_slug', default=None, type=str, help="url slug")
    parser.add_argument('--min_url_slug', default=None, type=str, help="min_url_slug")
    parser.add_argument('--tiid', default=None, type=str, help="tiid")
    parser.add_argument('--min_tiid', default=None, type=str, help="min_tiid")
    parser.add_argument('--save_after_every_profile', action='store_true', help="use to debug refsets, saves refsets to db after every profile. slow.")
    parser.add_argument('--max_emails', default=None, type=int, help="max number of emails to send")
    parser.add_argument('--account_type', default=None, type=str, help="account_type")
    parser.add_argument('--start_days_ago', type=int)
    parser.add_argument('--end_days_ago', type=int)
    parser.add_argument('--limit', type=int, default=5)
    parser.add_argument('--max_pages', type=int)
    parser.add_argument('--force_all', type=int)
    parser.add_argument('--no-rq', action="store_true", help="do jobs in this thread")
    args = vars(parser.parse_args())
    function = args["function"]

    # echo the command (minus the function name itself) for the logs
    arg_string = dict((k, v) for (k, v) in args.iteritems() if v and k!="function")
    print u"daily.py {function} with {arg_string}".format(
        function=function.upper(), arg_string=arg_string)

    # NOTE(review): `global` is a no-op at module scope; this just rebinds
    # the module-level `logger` that all the functions above reference, so
    # log lines are tagged with the command being run
    global logger
    logger = logging.getLogger("ti.daily.{function}".format(
        function=function))

    main(function, args)

    # drop the scoped session before the process exits
    db.session.remove()
| |
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2019 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Asher Elmquist
# =============================================================================
#
# Chrono demonstration of a multiple sensors
# Creates a few bodies to be sensed
#
# =============================================================================
import pychrono.core as chrono
import pychrono.sensor as sens
import time
import math
import numpy as np
def main():
    """Run the multi-sensor PyChrono demo.

    Builds an NSC system containing a fixed floor plus a falling box and
    sphere, attaches camera, lidar, IMU and GPS sensors (each with its
    own filter graph) to a sensor manager, then steps the simulation to
    ``end_time`` while printing a summary of every buffer that has data.

    All sensor/simulation parameters (update rates, lags, collection
    windows, ``step_size``, ``end_time``, ...) are read from the
    module-level globals defined below this function.
    """
    # -----------------
    # Create the system
    # -----------------
    mphysicalSystem = chrono.ChSystemNSC()
    mphysicalSystem.Set_G_acc(chrono.ChVectorD(0, 0, -9.81))

    # ----------------------------------------
    # add a floor, box and sphere to the scene
    # ----------------------------------------
    phys_mat = chrono.ChMaterialSurfaceNSC()
    phys_mat.SetFriction(0.5)
    phys_mat.SetDampingF(0.00000)
    phys_mat.SetCompliance(1e-9)
    phys_mat.SetComplianceT(1e-9)

    floor = chrono.ChBodyEasyBox(10, 10, 1, 1000, True, True, phys_mat)
    floor.SetPos(chrono.ChVectorD(0, 0, -1))
    floor.SetBodyFixed(True)
    mphysicalSystem.Add(floor)

    box = chrono.ChBodyEasyBox(1, 1, 1, 1000, True, True, phys_mat)
    box.SetPos(chrono.ChVectorD(0, 0, 5))
    box.SetRot(chrono.Q_from_AngAxis(.2, chrono.ChVectorD(1, 0, 0)))
    mphysicalSystem.Add(box)

    sphere = chrono.ChBodyEasySphere(.5, 1000, True, True, phys_mat)
    sphere.SetPos(chrono.ChVectorD(0, 0, 8))
    sphere.SetRot(chrono.Q_from_AngAxis(.2, chrono.ChVectorD(1, 0, 0)))
    mphysicalSystem.Add(sphere)

    # Give the sphere a blue-ish visual material so it is distinguishable
    # in the rendered camera images.
    sphere_asset = sphere.GetAssets()[0]
    visual_asset = chrono.CastToChVisualization(sphere_asset)

    vis_mat = chrono.ChVisualMaterial()
    vis_mat.SetAmbientColor(chrono.ChVectorF(0, 0, 0))
    vis_mat.SetDiffuseColor(chrono.ChVectorF(.2, .2, .9))
    vis_mat.SetSpecularColor(chrono.ChVectorF(.9, .9, .9))

    visual_asset.material_list.append(vis_mat)

    # -----------------------
    # Create a sensor manager
    # -----------------------
    manager = sens.ChSensorManager(mphysicalSystem)
    manager.scene.AddPointLight(chrono.ChVectorF(100, 100, 100), chrono.ChVectorF(1, 1, 1), 1000.0)
    manager.scene.AddPointLight(chrono.ChVectorF(-100, -100, 100), chrono.ChVectorF(1, 1, 1), 1000.0)

    # ------------------------------------------------
    # Create a camera and add it to the sensor manager
    # ------------------------------------------------
    offset_pose = chrono.ChFrameD(chrono.ChVectorD(-8, 0, 1), chrono.Q_from_AngAxis(0, chrono.ChVectorD(0, 1, 0)))
    cam = sens.ChCameraSensor(
        floor,            # body camera is attached to
        cam_update_rate,  # update rate in Hz
        offset_pose,      # offset pose
        image_width,      # number of horizontal samples
        image_height,     # number of vertical channels
        cam_fov           # camera field of view (cam_fov is documented above as horizontal FOV)
    )
    cam.SetName("Camera Sensor")
    cam.SetLag(cam_lag)
    cam.SetCollectionWindow(cam_collection_time)

    # ------------------------------------------------------------------
    # Create a filter graph for post-processing the data from the camera
    # ------------------------------------------------------------------
    # Visualizes the image
    if vis:
        cam.PushFilter(sens.ChFilterVisualize(image_width, image_height, "RGB Image"))
    # Save the current image to a png file at the specified path
    if save:
        cam.PushFilter(sens.ChFilterSave(out_dir + "/rgb/"))
    # Provides the host access to this RGBA8 buffer
    cam.PushFilter(sens.ChFilterRGBA8Access())
    # Filter the sensor to grayscale
    cam.PushFilter(sens.ChFilterGrayscale())
    # Render the buffer again to see the new grayscaled image
    if vis:
        cam.PushFilter(sens.ChFilterVisualize(int(image_width / 2), int(image_height / 2), "Grayscale Image"))
    # Save the grayscaled image at the specified path
    if save:
        cam.PushFilter(sens.ChFilterSave(out_dir + "/gray/"))
    # Access the grayscaled buffer as R8 pixels
    cam.PushFilter(sens.ChFilterR8Access())
    # Add a camera to a sensor manager
    manager.AddSensor(cam)

    # ------------------------------------------------
    # Create a lidar and add it to the sensor manager
    # ------------------------------------------------
    offset_pose = chrono.ChFrameD(chrono.ChVectorD(-8, 0, 1), chrono.Q_from_AngAxis(0, chrono.ChVectorD(0, 1, 0)))
    lidar = sens.ChLidarSensor(
        floor,               # body lidar is attached to
        lidar_update_rate,   # scanning rate in Hz
        offset_pose,         # offset pose
        horizontal_samples,  # number of horizontal samples
        vertical_samples,    # number of vertical channels
        horizontal_fov,      # horizontal field of view
        max_vert_angle,
        min_vert_angle,      # vertical field of view
        100                  # max lidar range
    )
    lidar.SetName("Lidar Sensor")
    lidar.SetLag(lidar_lag)
    lidar.SetCollectionWindow(lidar_collection_time)

    # -----------------------------------------------------------------
    # Create a filter graph for post-processing the data from the lidar
    # -----------------------------------------------------------------
    if vis:
        # Renders the raw lidar data
        lidar.PushFilter(sens.ChFilterVisualize(horizontal_samples, vertical_samples, "Raw Lidar Depth Data"))
    # Provides the host access to the Depth,Intensity data
    lidar.PushFilter(sens.ChFilterDIAccess())
    # Convert Depth,Intensity data to XYZI point cloud data
    lidar.PushFilter(sens.ChFilterPCfromDepth())
    if vis:
        # Visualize the point cloud
        lidar.PushFilter(sens.ChFilterVisualizePointCloud(640, 480, 1.0, "Lidar Point Cloud"))
    # Provides the host access to the XYZI data
    lidar.PushFilter(sens.ChFilterXYZIAccess())
    # Add the lidar to the sensor manager
    manager.AddSensor(lidar)

    # ----------------------------------------------
    # Create an IMU sensor and add it to the manager
    # ----------------------------------------------
    offset_pose = chrono.ChFrameD(chrono.ChVectorD(-8, 0, 1), chrono.Q_from_AngAxis(0, chrono.ChVectorD(0, 1, 0)))
    imu = sens.ChIMUSensor(box,              # body imu is attached to
                           imu_update_rate,  # update rate in Hz
                           offset_pose,      # offset pose
                           imu_noise_none)   # noise model
    imu.SetName("IMU Sensor")
    imu.SetLag(imu_lag)
    imu.SetCollectionWindow(imu_collection_time)
    # Provides the host access to the imu data
    imu.PushFilter(sens.ChFilterIMUAccess())
    # Add the imu to the sensor manager
    manager.AddSensor(imu)

    # ----------------------------------------------
    # Create a GPS sensor and add it to the manager
    # ----------------------------------------------
    offset_pose = chrono.ChFrameD(chrono.ChVectorD(-8, 0, 1), chrono.Q_from_AngAxis(0, chrono.ChVectorD(0, 1, 0)))
    gps = sens.ChGPSSensor(box,              # body gps is attached to
                           gps_update_rate,  # update rate in Hz
                           offset_pose,      # offset pose
                           gps_reference,    # reference location (defined above)
                           gps_noise_none)   # noise model
    gps.SetName("GPS Sensor")
    gps.SetLag(gps_lag)
    gps.SetCollectionWindow(gps_collection_time)
    # Provides the host access to the gps data
    gps.PushFilter(sens.ChFilterGPSAccess())
    # Add the gps to the sensor manager
    manager.AddSensor(gps)

    # ---------------
    # Simulate system
    # ---------------
    t1 = time.time()
    ch_time = 0
    while ch_time < end_time:
        # Access the most recent buffer from each sensor
        camera_data_RGBA8 = cam.GetMostRecentRGBA8Buffer()
        camera_data_R8 = cam.GetMostRecentR8Buffer()
        lidar_data_DI = lidar.GetMostRecentDIBuffer()
        lidar_data_XYZI = lidar.GetMostRecentXYZIBuffer()
        gps_data = gps.GetMostRecentGPSBuffer()
        imu_data = imu.GetMostRecentIMUBuffer()

        # Check data is present; if so, print its shape and max value.
        if camera_data_RGBA8.HasData():
            print("Camera RGBA8:", camera_data_RGBA8.GetRGBA8Data().shape, "max:", np.max(camera_data_RGBA8.GetRGBA8Data()))
        if camera_data_R8.HasData():
            print("Camera R8:", camera_data_R8.GetChar8Data().shape, "max:", np.max(camera_data_R8.GetChar8Data()))
        if lidar_data_DI.HasData():
            print("Lidar DI:", lidar_data_DI.GetDIData().shape, "max:", np.max(lidar_data_DI.GetDIData()))
        if lidar_data_XYZI.HasData():
            print("Lidar XYZI:", lidar_data_XYZI.GetXYZIData().shape, "max:", np.max(lidar_data_XYZI.GetXYZIData()))
        if gps_data.HasData():
            print("GPS:", gps_data.GetGPSData().shape, "max:", np.max(gps_data.GetGPSData()))
        if imu_data.HasData():
            print("IMU:", imu_data.GetIMUData().shape, "max:", np.max(imu_data.GetIMUData()))

        # Update sensor manager
        # Will render/save/filter automatically
        manager.Update()

        # Perform step of dynamics
        mphysicalSystem.DoStepDynamics(step_size)

        # Get the current time of the simulation
        ch_time = mphysicalSystem.GetChTime()

    # Loop exits when ch_time >= end_time, so sim time ~= end_time.
    print("Sim time:", end_time, "Wall time:", time.time() - t1)
# -----------------
# Sensor parameters
# -----------------
# NOTE: these module-level names are read as globals by main(), which is
# defined above but only called at the bottom of this file.

# Update rate of each sensor in Hz
cam_update_rate = 5
lidar_update_rate = 5
imu_update_rate = 200
gps_update_rate = 2

# Image width and height
image_width = 1280
image_height = 720
# Camera's horizontal field of view
cam_fov = 1.408

# Lidar horizontal and vertical samples
horizontal_samples = 4500
vertical_samples = 32
# Lidar horizontal and vertical field of view (radians)
horizontal_fov = 2 * chrono.CH_C_PI  # 360 degrees
max_vert_angle = chrono.CH_C_PI / 12.
min_vert_angle = -chrono.CH_C_PI / 6.

# Lag time for each sensor (seconds between data collection and availability)
cam_lag = 0
lidar_lag = 0
imu_lag = 0
gps_lag = 0

# Collection window for each sensor
# Typically 1 / update rate
cam_collection_time = 1. / float(cam_update_rate)
lidar_collection_time = 1. / float(lidar_update_rate)
imu_collection_time = 0  # instant
gps_collection_time = 0  # instant

# GPS reference point (longitude, latitude, altitude)
# Located in Madison, WI
gps_reference = chrono.ChVectorD(-89.400, 43.070, 260.0)

# IMU and GPS noise models
# Setting to none (does not affect the data)
imu_noise_none = sens.ChIMUNoiseNone()
gps_noise_none = sens.ChGPSNoiseNone()

# ---------------------
# Simulation parameters
# ---------------------

# Simulation step size
step_size = 1e-3

# Simulation end time
end_time = 20.0

# Save camera images
save = False

# Render camera images
vis = True

# Output directory
out_dir = "SENSOR_OUTPUT/SENSORS_PY"

# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
# chrono.SetChronoDataPath('path/to/data')

# Entry point: run the demo.
main()
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
import django.test
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
# Network-topology panel URLs, resolved once at import time.
JSON_URL = reverse('horizon:project:network_topology:json')
INDEX_URL = reverse('horizon:project:network_topology:index')
class NetworkTopologyTests(test.TestCase):
    """Tests for the network-topology JSON view.

    The view aggregates servers, networks, routers and ports into one
    JSON payload consumed by the topology diagram.  API calls are
    stubbed with mox; the recorded call order below is the order the
    view is expected to make them in.
    """

    @test.create_stubs({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'network_list',
                                      'router_list',
                                      'port_list')})
    def test_json_view(self):
        """JSON view with routers enabled (default settings)."""
        self._test_json_view()

    @django.test.utils.override_settings(
        OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
    @test.create_stubs({api.nova: ('server_list',),
                        api.neutron: ('network_list_for_tenant',
                                      'port_list')})
    def test_json_view_router_disabled(self):
        """With enable_router=False no router/external-net calls are made."""
        self._test_json_view(router_enable=False)

    def _test_json_view(self, router_enable=True):
        """Record the expected API calls, fetch JSON_URL and verify the
        servers/routers/networks/ports sections of the response."""
        api.nova.server_list(
            IsA(http.HttpRequest)).AndReturn([self.servers.list(), False])
        tenant_networks = [net for net in self.networks.list()
                           if not net['router:external']]
        external_networks = [net for net in self.networks.list()
                             if net['router:external']]
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest),
            self.tenant.id).AndReturn(tenant_networks)
        if router_enable:
            api.neutron.network_list(
                IsA(http.HttpRequest),
                **{'router:external': True}).AndReturn(external_networks)

        # router1 : gateway port not in the port list
        # router2 : no gateway port
        # router3 : gateway port included in port list
        routers = self.routers.list() + self.routers_with_rules.list()
        if router_enable:
            api.neutron.router_list(
                IsA(http.HttpRequest),
                tenant_id=self.tenant.id).AndReturn(routers)
        api.neutron.port_list(
            IsA(http.HttpRequest)).AndReturn(self.ports.list())
        self.mox.ReplayAll()

        res = self.client.get(JSON_URL)

        self.assertEqual('text/json', res['Content-Type'])
        data = json.loads(res.content)

        # servers
        expect_server_urls = [
            {'id': server.id,
             'name': server.name,
             'status': server.status,
             'task': None,
             'console': 'vnc',
             'url': '/project/instances/%s/' % server.id}
            for server in self.servers.list()]
        self.assertEqual(expect_server_urls, data['servers'])

        # routers
        if router_enable:
            expect_router_urls = [
                {'id': router.id,
                 'external_gateway_info':
                 router.external_gateway_info,
                 'name': router.name,
                 'status': router.status,
                 'url': '/project/routers/%s/' % router.id}
                for router in routers]
            self.assertEqual(expect_router_urls, data['routers'])
        else:
            self.assertFalse(data['routers'])

        # networks: external networks first (no detail URL), then tenant
        # networks (with detail URL).
        expect_net_urls = []
        if router_enable:
            expect_net_urls += [{'id': net.id,
                                 'url': None,
                                 'name': net.name,
                                 'router:external': net.router__external,
                                 'subnets': [{'cidr': subnet.cidr}
                                             for subnet in net.subnets]}
                                for net in external_networks]
        expect_net_urls += [{'id': net.id,
                             'url': '/project/networks/%s/detail' % net.id,
                             'name': net.name,
                             'router:external': net.router__external,
                             'subnets': [{'cidr': subnet.cidr}
                                         for subnet in net.subnets]}
                            for net in tenant_networks]
        # A None url means "no link": the view omits the key entirely.
        for exp_net in expect_net_urls:
            if exp_net['url'] is None:
                del exp_net['url']
        self.assertEqual(expect_net_urls, data['networks'])

        # ports
        expect_port_urls = [
            {'id': port.id,
             'device_id': port.device_id,
             'device_owner': port.device_owner,
             'fixed_ips': port.fixed_ips,
             'network_id': port.network_id,
             'status': port.status,
             'url': '/project/networks/ports/%s/detail' % port.id}
            for port in self.ports.list()]
        if router_enable:
            # fake port for router1 gateway (router1 on ext_net)
            router1 = routers[0]
            ext_net = external_networks[0]
            expect_port_urls.append(
                {'id': 'gateway%s' % ext_net.id,
                 'device_id': router1.id,
                 'network_id': ext_net.id,
                 'fixed_ips': []})
        self.assertEqual(expect_port_urls, data['ports'])
class NetworkTopologyCreateTests(test.TestCase):
    """Tests that the topology page disables its create/launch buttons
    when the corresponding quota is exhausted."""

    def _test_new_button_disabled_when_quota_exceeded(
            self, expected_string, networks_quota=10,
            routers_quota=10, instances_quota=10):
        """Stub quota usage with the given availabilities, load the index
        page and assert the expected (disabled) button markup appears."""
        quota_data = self.quota_usages.first()
        quota_data['networks']['available'] = networks_quota
        quota_data['routers']['available'] = routers_quota
        quota_data['instances']['available'] = instances_quota
        quotas.tenant_quota_usages(
            IsA(http.HttpRequest)) \
            .MultipleTimes().AndReturn(quota_data)
        self.mox.ReplayAll()

        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/network_topology/index.html')
        self.assertContains(res, expected_string, html=True,
                            msg_prefix="The create button is not disabled")

    @test.create_stubs({quotas: ('tenant_quota_usages',)})
    def test_create_network_button_disabled_when_quota_exceeded(self):
        url = reverse('horizon:project:network_topology:createnetwork')
        classes = 'btn btn-default btn-sm ajax-modal'
        link_name = "Create Network (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='networks__action_create'>" \
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(
            expected_string, networks_quota=0)

    @test.create_stubs({quotas: ('tenant_quota_usages',)})
    def test_create_router_button_disabled_when_quota_exceeded(self):
        url = reverse('horizon:project:network_topology:createrouter')
        classes = 'btn btn-default btn-sm ajax-modal'
        link_name = "Create Router (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='Routers__action_create'>" \
            "<span class='fa fa-plus'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(
            expected_string, routers_quota=0)

    @test.create_stubs({quotas: ('tenant_quota_usages',)})
    def test_launch_instance_button_disabled_when_quota_exceeded(self):
        url = reverse('horizon:project:network_topology:launchinstance')
        classes = 'btn btn-default btn-sm btn-launch ajax-modal'
        link_name = "Launch Instance (Quota exceeded)"
        expected_string = "<a href='%s' class='%s disabled' "\
            "id='instances__action_launch'>" \
            "<span class='fa fa-cloud-upload'></span>%s</a>" \
            % (url, classes, link_name)
        self._test_new_button_disabled_when_quota_exceeded(
            expected_string, instances_quota=0)
| |
import unittest
from urllib import quote_plus as enc
from merlin import Merlin
from merlin.error import ValidationError
from merlin.facet import Facet as F
from merlin.filter import Field, NF
from merlin.sort import Sort as S
from merlin.search import Search
from merlin.geo import Geo
from merlin.group import Group
class MerlinTest(unittest.TestCase):
    """Unit tests for Search URL building: facets, sorting, fields,
    filters, grouping, geo and mode options.

    Expected values are the URL-encoded query strings the Merlin API
    expects; ``enc`` is urllib's quote_plus.  Uses ``assertEqual`` (the
    deprecated ``assertEquals`` alias was removed in Python 3.12).
    """

    def test_simple_search(self):
        s = Search(q="shirt")
        self.assertEqual(s.build(), "products/search?q=shirt")

    def test_simple_search_1(self):
        s = Search(q="", num=10)
        self.assertEqual(s.build(), "products/search?q=&num=10")

    def test_simple_search_2(self):
        s = Search(q="shirt", num=10, start=20)
        self.assertEqual(s.build(), "products/search?q=shirt&start=20&num=10")

    def test_enum_facet(self):
        s = Search(
            q="shirt",
            facets=F.enum("brand", num=10)
        )
        self.assertEqual(
            s.build(),
            "products/search?q=shirt&facet=" + enc("field=brand/type=enum/num=10")
        )

    def test_enum_facet_named(self):
        s = Search(
            q="shirt",
            facets=F.enum("brand", num=10, key='ponies')
        )
        self.assertEqual(
            s.build(),
            "products/search?q=shirt&facet=" + enc("field=brand/type=enum/key=ponies/num=10")
        )

    def test_enum_facet_excluding(self):
        s = Search(
            q="shirt",
            facets=F.enum("brand", num=10, key='ponies', exclude=['foo', 'bar'])
        )
        self.assertEqual(
            s.build(),
            "products/search?q=shirt&facet=" +
            enc("field=brand/type=enum/key=ponies/num=10/ex=foo,bar")
        )

    def test_hist_facet(self):
        s = Search(
            q="shirt",
            facets=F.hist("price", start=10, end=100, gap=5, key='prices')
        )
        self.assertEqual(
            s.build(),
            "products/search?q=shirt&facet=" +
            enc("field=price/type=hist/key=prices/range=[10:100:5]")
        )

    def test_range_facet(self):
        s = Search(
            q="shirt",
            facets=F.range("price", key='prices')
        )
        self.assertEqual(
            s.build(),
            "products/search?q=shirt&facet=" +
            enc("field=price/type=range/key=prices")
        )

    def test_multiple_facets(self):
        # Each facet becomes its own &facet= parameter, in order.
        s = Search(
            q="shirt",
            facets=[
                F.enum('brand', num=10, key='top_brands'),
                F.hist('price', start=0, end=100, gap=10)
            ]
        )
        self.assertEqual(
            s.build(),
            "products/search?q=shirt" +
            '&facet=' + enc("field=brand/type=enum/key=top_brands/num=10") +
            '&facet=' + enc("field=price/type=hist/range=[0:100:10]")
        )

    def test_sorting(self):
        s = Search(
            q="pants",
            sort=[
                S.desc('brand'),
                S.asc('price')
            ]
        )
        self.assertEqual(
            s.build(),
            "products/search?q=pants" +
            '&sort=' + enc("brand:desc,price:asc")
        )

    def test_fields(self):
        s = Search(
            q="socks",
            fields=["one", "two", "three"]
        )
        self.assertEqual(
            s.build(),
            "products/search?q=socks" +
            '&fields=' + enc("one,two,three")
        )

    def test_filters(self):
        s = Search(
            filter=NF.cnf(
                (Field('Color') == 'Red') & (Field('Color') != 'Blue')
            )
        )
        self.assertEqual(
            s.build(),
            "products/search?q=" +
            '&filter=' + enc(r"exp=Color:Red,Color:!Blue/type=cnf")
        )

    def test_filter_tags(self):
        s = Search(
            filter=NF.cnf(
                (Field('Color') == 'Red') & (Field('Color') != 'Blue'),
                tag="redandblue"
            )
        )
        self.assertEqual(
            s.build(),
            "products/search?q=" +
            '&filter=' + enc(r"exp=Color:Red,Color:!Blue/type=cnf/tag=redandblue")
        )

    def test_multi_filters(self):
        s = Search(
            filter=[
                NF.cnf(
                    (Field('Color') == 'Red') & (Field('Color') != 'Blue')
                ),
                NF.dnf(
                    Field('Price').between(0, 100)
                )
            ]
        )
        self.assertEqual(
            s.build(),
            "products/search?q=" +
            '&filter=' + enc(r"exp=Color:Red,Color:!Blue/type=cnf") +
            '&filter=' + enc(r"exp=Price:[0:100]/type=dnf")
        )

    def test_multi_values(self):
        # Tuple values expand to OR'd (|) terms within the expression.
        s = Search(
            filter=NF.cnf(
                (Field('Color') == ('Blue', 'Red')) &
                (Field('Color') != ('Teal', 'Green'))
            )
        )
        self.assertEqual(
            s.build(),
            "products/search?q=" +
            "&filter=" + enc(r"exp=Color:Blue|Color:Red,Color:!Teal,Color:!Green/type=cnf")
        )

    def test_single_filter(self):
        s = Search(
            q='hoodie',
            filter=NF.cnf(Field('price') <= 20)
        )
        self.assertEqual(
            s.build(),
            "products/search?q=hoodie" +
            '&filter=' + enc(r"exp=price:[:20]/type=cnf")
        )

    def test_lt_gt_facet(self):
        # Strict inequalities use open interval brackets ( and ).
        s = Search(
            q='hoodie',
            filter=NF.cnf(
                (Field('price') < 20) & (Field('age') > 10)
            )
        )
        self.assertEqual(
            s.build(),
            "products/search?q=hoodie" +
            '&filter=' + enc(r"exp=price:[:20),age:(10:]/type=cnf")
        )

    def test_group(self):
        s = Search(
            q='hoodie',
            group=Group(field='category', num=10, sort=S.asc('price'))
        )
        self.assertEqual(
            s.build(),
            "products/search?q=hoodie" +
            '&group=' + enc(r"field=category/sort=price:asc/num=10")
        )

    def test_geo(self):
        s = Search(
            q='hoodie',
            geo=Geo(field='geo', pt=(37.774929, -122.419416), dist=35)
        )
        self.assertEqual(
            s.build(),
            "products/search?q=hoodie" +
            '&geo=' + enc(r"field=geo/pt=(37.774929,-122.419416)/d=35.000")
        )

    def test_mode(self):
        make_s = lambda m: Search(q='hoodie', mode=m)
        for m in ('semantic', 'keyword'):
            self.assertEqual(
                make_s(m).build(),
                "products/search?q=hoodie" +
                '&mode=' + enc(m)
            )
        # Any other mode must be rejected at build time.
        with self.assertRaises(ValidationError):
            make_s('foo').build()
            self.fail("Should have failed!")

    def test_needs_num(self):
        # Range endpoints must be numeric, not strings.
        with self.assertRaises(AssertionError):
            Field('price') <= '10'
        with self.assertRaises(AssertionError):
            Field('price').between('a', 10)

    def test_proper_fieldnames(self):
        # Field names must be non-empty strings.
        with self.assertRaises(AssertionError):
            Field('')
        with self.assertRaises(AssertionError):
            Field(123)
class EngineTest(unittest.TestCase):
    """Integration tests that issue real queries to the 'blackbird' dev
    engine.

    NOTE: these require network access and a fixed test corpus (5
    documents with known prices/colors); they are not hermetic unit
    tests.  Uses ``assertEqual``/``assertTrue`` (the deprecated
    ``assertEquals``/``assert_`` aliases were removed in Python 3.12).
    """

    def setUp(self):
        self.engine = Merlin('blackbird', 'dev', 'agathon')

    def test_hosts(self):
        """Host name is derived from (company, environment, instance)."""
        engine = Merlin('blackbird', 'dev', 'agathon')
        self.assertEqual(engine.host, 'search-dev.search.blackbird.am')
        engine = Merlin('blackbird', 'staging', 'agathon')
        self.assertEqual(engine.host, 'search-staging.search.blackbird.am')
        engine = Merlin('blackbird', 'prod', 'agathon')
        self.assertEqual(engine.host, 'search-prod.search.blackbird.am')

    def test_simple_q(self):
        s = Search(q='dress')
        with self.engine(s) as r:
            self.assertEqual(r.hits.numFound, 1)
            self.assertEqual(r.hits[0]['id'], '111f49eacc7dbc9ab2df53f8ce52ec64')

    def test_simple_q_fields(self):
        # Restricting fields should restrict the keys in every hit.
        s = Search(q='dress', fields=['images'])
        with self.engine(s) as r:
            keys_found = set()
            for h in r.hits:
                keys_found.update(h.keys())
            self.assertEqual(len(keys_found), 1)
            self.assertTrue('images' in keys_found,
                            "field 'images' not in returned results")

    def test_price_filter(self):
        s = Search(q='',
                   filter=NF.cnf(Field('price') > 150),
                   fields=['price'])
        with self.engine(s) as r:
            self.assertEqual(r.hits.numFound, 1)
            self.assertEqual(r.hits[0]['price'], '178.0 USD')

    def test_sort(self):
        s = Search(q='',
                   sort=S.asc('price'),
                   fields=['price'])
        with self.engine(s) as r:
            self.assertEqual(r.hits.numFound, 5)
            self.assertEqual(r.hits[0]['price'], '59.0 USD')
            self.assertEqual(r.hits[-1]['price'], '178.0 USD')

    def test_or_search(self):
        s = Search(q='',
                   filter=NF.cnf(
                       Field('colors') == ('Red', 'Blue')
                   ))
        with self.engine(s) as r:
            self.assertEqual(r.hits.numFound, 3)
            for h in r.hits:
                self.assertIn(h['colors'][0], set(['Red', 'Blue', 'red']))

    def test_and_search(self):
        s = Search(q='',
                   filter=NF.cnf(
                       (Field('colors') == 'Red') & (Field('price') < 178)
                   ))
        with self.engine(s) as r:
            self.assertEqual(r.hits.numFound, 1)
            self.assertEqual(r.hits[0]['brand'], 'Raoul')

        # Inclusive bound picks up the 178.0 USD document too.
        s = Search(q='',
                   filter=NF.cnf(
                       (Field('colors') == 'Red') & (Field('price') <= 178)
                   ))
        with self.engine(s) as r:
            self.assertEqual(r.hits.numFound, 2)

    def test_error(self):
        # Requesting an unknown field surfaces as an IOError from the engine.
        s = Search(fields=['ponies'])
        with self.assertRaises(IOError):
            with self.engine(s) as r:
                self.fail("should never get here")

    def test_hist_facet(self):
        s = Search(
            facets=F.hist('price', start=0, end=100, gap=50)
        )
        with self.engine(s) as r:
            res = set(r.facets.histograms['price'].items())
            wanted = set([(('0.0', '50.0'), 0), (('50.0', '100.0'), 4)])
            self.assertEqual(res, wanted)
# Run the full suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
    def test_loadTestsFromTestCase__TestSuite_subclass(self):
        """A TestSuite subclass is rejected with TypeError: it is
        test-related but not a TestCase, a case the loader checks for
        explicitly."""
        class NotATestCase(unittest.TestSuite):
            pass
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromTestCase(NotATestCase)
        except TypeError:
            pass
        else:
            self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
    def test_loadTestsFromModule__no_TestCase_tests(self):
        """A TestCase subclass with no tests still contributes an (empty)
        sub-suite to the module's suite."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
    def test_loadTestsFromModule__not_a_module(self):
        """Non-module objects are (currently) accepted: any object whose
        attributes include TestCase subclasses is searched.

        Undocumented flexibility, but some users rely on it, so it is
        pinned down here.
        """
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        class NotAModule(object):
            test_2 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(NotAModule)
        reference = [unittest.TestSuite([MyTestCase('test')])]
        self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
    def test_loadTestsFromModule__load_tests(self):
        """A module-level load_tests(loader, tests, pattern) hook is
        honored, and can be bypassed with use_load_tests=False."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase

        load_tests_args = []
        def load_tests(loader, tests, pattern):
            self.assertIsInstance(tests, unittest.TestSuite)
            load_tests_args.extend((loader, tests, pattern))
            return tests
        m.load_tests = load_tests

        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, unittest.TestSuite)
        # The hook is called with (loader, default_suite, None).
        self.assertEqual(load_tests_args, [loader, suite, None])

        load_tests_args = []
        # NOTE(review): use_load_tests was deprecated and later removed
        # from loadTestsFromModule in newer CPython -- confirm target version.
        suite = loader.loadTestsFromModule(m, use_load_tests=False)
        self.assertEqual(load_tests_args, [])
    def test_loadTestsFromModule__faulty_load_tests(self):
        """If load_tests raises, the loader captures the error as a single
        synthetic failing test instead of propagating the exception."""
        m = types.ModuleType('m')
        def load_tests(loader, tests, pattern):
            raise TypeError('some failure')
        m.load_tests = load_tests
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, unittest.TestSuite)
        self.assertEqual(suite.countTestCases(), 1)
        # The synthetic test (named after module 'm') re-raises the
        # original error when invoked.
        test = list(suite)[0]
        self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
    def test_loadTestsFromName__empty_name(self):
        """An empty specifier cannot name a module: ValueError expected."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('')
        except ValueError as e:
            # NOTE(review): message text is interpreter-defined -- confirm
            # it is stable across the supported Python versions.
            self.assertEqual(str(e), "Empty module name")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
    def test_loadTestsFromName__malformed_name(self):
        """A syntactically impossible dotted name must be rejected; either
        ValueError or ImportError is acceptable."""
        loader = unittest.TestLoader()
        # XXX Should this raise ValueError or ImportError?
        try:
            loader.loadTestsFromName('abc () //')
        except ValueError:
            pass
        except ImportError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
    def test_loadTestsFromName__unknown_module_name(self):
        """A name that resolves to no importable module raises ImportError."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('sdasfasfasdf')
        except ImportError as e:
            # NOTE(review): exact wording comes from the import system and
            # may vary by Python version -- confirm.
            self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
    def test_loadTestsFromName__unknown_attr_name(self):
        """A resolvable module with a missing attribute raises AttributeError."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('unittest.sdasfasfasdf')
        except AttributeError as e:
            # NOTE(review): AttributeError wording differs across Python
            # versions -- confirm against the targeted interpreter.
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
    def test_loadTestsFromName__relative_unknown_name(self):
        """An unknown attribute relative to an explicitly supplied module
        raises AttributeError."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('sdasfasfasdf', unittest)
        except AttributeError as e:
            # NOTE(review): message wording is interpreter-version specific.
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
    def test_loadTestsFromName__relative_empty_name(self):
        """An empty name relative to a module currently raises
        AttributeError (arguably it should be ValueError)."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('', unittest)
        except AttributeError as e:
            # NOTE(review): the bound exception `e` is unused here.
            pass
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
    def test_loadTestsFromName__relative_malformed_name(self):
        """An impossible attribute name relative to a module must be
        rejected."""
        loader = unittest.TestLoader()
        # XXX Should this raise AttributeError or ValueError?
        # NOTE(review): newer unittest versions report this via synthetic
        # error tests instead of raising -- version-dependent; confirm.
        try:
            loader.loadTestsFromName('abc () //', unittest)
        except ValueError:
            pass
        except AttributeError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
    def test_loadTestsFromName__relative_invalid_testmethod(self):
        """A dotted name addressing a nonexistent method raises
        AttributeError."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        # NOTE(review): newer unittest versions return a synthetic error
        # test instead of raising here -- version-dependent; confirm.
        try:
            loader.loadTestsFromName('testcase_1.testfoo', m)
        except AttributeError as e:
            self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
    def test_loadTestsFromName__module_not_loaded(self):
        """Naming an unimported module imports it as a side effect."""
        # We're going to try to load this module as a side-effect, so it
        # better not be loaded before we try.
        #
        # NOTE(review): relies on 'unittest.test.dummy' being importable,
        # which is not true of every unittest distribution -- confirm.
        module_name = 'unittest.test.dummy'
        sys.modules.pop(module_name, None)
        loader = unittest.TestLoader()
        try:
            suite = loader.loadTestsFromName(module_name)
            self.assertIsInstance(suite, loader.suiteClass)
            self.assertEqual(list(suite), [])
            # module should now be loaded, thanks to loadTestsFromName()
            self.assertIn(module_name, sys.modules)
        finally:
            # Undo the import side effect so later tests start clean.
            if module_name in sys.modules:
                del sys.modules[module_name]
################################################################
### Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError as e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
    def test_loadTestsFromNames__malformed_name(self):
        """A syntactically impossible name in the list must be rejected."""
        loader = unittest.TestLoader()
        # XXX Should this raise ValueError or ImportError?
        # NOTE(review): newer unittest versions report this via synthetic
        # error tests instead of raising -- version-dependent; confirm.
        try:
            loader.loadTestsFromNames(['abc () //'])
        except ValueError:
            pass
        except ImportError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
    def test_loadTestsFromNames__unknown_module_name(self):
        """An unimportable module name in the list raises ImportError."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['sdasfasfasdf'])
        except ImportError as e:
            # NOTE(review): newer loaders may return a synthetic error test
            # instead of raising -- version-dependent; confirm.
            self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
    def test_loadTestsFromNames__unknown_attr_name(self):
        """A resolvable module but missing attribute raises
        AttributeError, even when other names in the list are valid."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
        except AttributeError as e:
            # NOTE(review): message wording is interpreter-version specific.
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
    def test_loadTestsFromNames__unknown_name_relative_1(self):
        """An unknown attribute relative to a supplied module raises
        AttributeError."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
        except AttributeError as e:
            # NOTE(review): message wording is interpreter-version specific.
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
    def test_loadTestsFromNames__unknown_name_relative_2(self):
        """An unknown relative attribute still raises even when other
        names in the list resolve correctly."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
        except AttributeError as e:
            # NOTE(review): message wording is interpreter-version specific.
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
    def test_loadTestsFromNames__relative_empty_name(self):
        """An empty relative name currently raises AttributeError
        (ValueError would arguably be more appropriate)."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames([''], unittest)
        except AttributeError:
            pass
        else:
            self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
    def test_loadTestsFromNames__relative_malformed_name(self):
        """An impossible attribute name relative to a module must be
        rejected."""
        loader = unittest.TestLoader()
        # XXX Should this raise AttributeError or ValueError?
        # NOTE(review): newer unittest versions report this via synthetic
        # error tests instead of raising -- version-dependent; confirm.
        try:
            loader.loadTestsFromNames(['abc () //'], unittest)
        except AttributeError:
            pass
        except ValueError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# #14971: Make sure the dotted name resolution works even if the actual
# function doesn't have the same name as is used to find it.
def test_loadTestsFromName__function_with_different_name_than_method(self):
# lambdas have the name '<lambda>'.
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
test = lambda: 1
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
    def test_loadTestsFromNames__relative_invalid_testmethod(self):
        """A dotted name addressing a nonexistent method raises
        AttributeError."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        # NOTE(review): newer unittest versions return a synthetic error
        # test instead of raising here -- version-dependent; confirm.
        try:
            loader.loadTestsFromNames(['testcase_1.testfoo'], m)
        except AttributeError as e:
            self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
    def test_loadTestsFromNames__module_not_loaded(self):
        """Naming an unimported module imports it as a side effect."""
        # We're going to try to load this module as a side-effect, so it
        # better not be loaded before we try.
        #
        # NOTE(review): relies on 'unittest.test.dummy' being importable,
        # which is not true of every unittest distribution -- confirm.
        module_name = 'unittest.test.dummy'
        sys.modules.pop(module_name, None)
        loader = unittest.TestLoader()
        try:
            suite = loader.loadTestsFromNames([module_name])
            self.assertIsInstance(suite, loader.suiteClass)
            self.assertEqual(list(suite), [unittest.TestSuite()])
            # module should now be loaded, thanks to loadTestsFromName()
            self.assertIn(module_name, sys.modules)
        finally:
            # Undo the import side effect so later tests start clean.
            if module_name in sys.modules:
                del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -((x > y) - (x < y))
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
# Since cmp is now defunct, we simply verify that the results
# occur in the same order as they would with the default sort.
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
class Foo(unittest.TestCase):
def test_2(self): pass
def test_3(self): pass
def test_1(self): pass
test_names = ['test_2', 'test_3', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertIs(loader.suiteClass, unittest.TestSuite)
| |
# Copyright 2009 by Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Command line wrapper for the multiple alignment programme MAFFT.
"""
from __future__ import print_function
import os
from Bio.Application import _Option, _Switch, _Argument, AbstractCommandline
class MafftCommandline(AbstractCommandline):
    """Command line wrapper for the multiple alignment program MAFFT.

    http://align.bmr.kyushu-u.ac.jp/mafft/software/

    Example:
    --------

    >>> from Bio.Align.Applications import MafftCommandline
    >>> mafft_exe = "/opt/local/mafft"
    >>> in_file = "../Doc/examples/opuntia.fasta"
    >>> mafft_cline = MafftCommandline(mafft_exe, input=in_file)
    >>> print(mafft_cline)
    /opt/local/mafft ../Doc/examples/opuntia.fasta

    If the mafft binary is on the path (typically the case on a Unix style
    operating system) then you don't need to supply the executable location:

    >>> from Bio.Align.Applications import MafftCommandline
    >>> in_file = "../Doc/examples/opuntia.fasta"
    >>> mafft_cline = MafftCommandline(input=in_file)
    >>> print(mafft_cline)
    mafft ../Doc/examples/opuntia.fasta

    You would typically run the command line with mafft_cline() or via
    the Python subprocess module, as described in the Biopython tutorial.
    Note that MAFFT will write the alignment to stdout, which you may
    want to save to a file and then parse, e.g.::

        stdout, stderr = mafft_cline()
        with open("aligned.fasta", "w") as handle:
            handle.write(stdout)
        from Bio import AlignIO
        align = AlignIO.read("aligned.fasta", "fasta")

    Alternatively, to parse the output with AlignIO directly you can
    use StringIO to turn the string into a handle::

        stdout, stderr = mafft_cline()
        from StringIO import StringIO
        from Bio import AlignIO
        align = AlignIO.read(StringIO(stdout), "fasta")

    Citations:
    ----------
    Katoh, Toh (BMC Bioinformatics 9:212, 2008) Improved accuracy of
    multiple ncRNA alignment by incorporating structural information into
    a MAFFT-based framework (describes RNA structural alignment methods)

    Katoh, Toh (Briefings in Bioinformatics 9:286-298, 2008) Recent
    developments in the MAFFT multiple sequence alignment program
    (outlines version 6)

    Katoh, Toh (Bioinformatics 23:372-374, 2007) Errata PartTree: an
    algorithm to build an approximate tree from a large number of
    unaligned sequences (describes the PartTree algorithm)

    Katoh, Kuma, Toh, Miyata (Nucleic Acids Res. 33:511-518, 2005) MAFFT
    version 5: improvement in accuracy of multiple sequence alignment
    (describes [ancestral versions of] the G-INS-i, L-INS-i and E-INS-i
    strategies)

    Katoh, Misawa, Kuma, Miyata (Nucleic Acids Res. 30:3059-3066, 2002)

    Last checked against version: MAFFT v6.717b (2009/12/03)
    """
    def __init__(self, cmd="mafft", **kwargs):
        """Initialize the class, defining all supported MAFFT parameters.

        :param cmd: path to (or name of) the mafft executable.
        :param kwargs: any of the declared parameter names, e.g.
            ``input`` (required), ``maxiterate``, ``clustalout``, ...
        """
        BLOSUM_MATRICES = ["30", "45", "62", "80"]
        self.parameters = \
            [
            # **** Algorithm ****
            # Automatically selects an appropriate strategy from L-INS-i, FFT-NS-
            # i and FFT-NS-2, according to data size. Default: off (always FFT-NS-2)
            _Switch(["--auto", "auto"],
                    "Automatically select strategy. Default off."),
            # Distance is calculated based on the number of shared 6mers. Default: on
            _Switch(["--6merpair", "6merpair", "sixmerpair"],
                    "Distance is calculated based on the number of shared "
                    "6mers. Default: on"),
            # All pairwise alignments are computed with the Needleman-Wunsch
            # algorithm. More accurate but slower than --6merpair. Suitable for a
            # set of globally alignable sequences. Applicable to up to ~200
            # sequences. A combination with --maxiterate 1000 is recommended (G-
            # INS-i). Default: off (6mer distance is used)
            _Switch(["--globalpair", "globalpair"],
                    "All pairwise alignments are computed with the "
                    "Needleman-Wunsch algorithm. Default: off"),
            # All pairwise alignments are computed with the Smith-Waterman
            # algorithm. More accurate but slower than --6merpair. Suitable for a
            # set of locally alignable sequences. Applicable to up to ~200
            # sequences. A combination with --maxiterate 1000 is recommended (L-
            # INS-i). Default: off (6mer distance is used)
            _Switch(["--localpair", "localpair"],
                    "All pairwise alignments are computed with the "
                    "Smith-Waterman algorithm. Default: off"),
            # All pairwise alignments are computed with a local algorithm with
            # the generalized affine gap cost (Altschul 1998). More accurate but
            # slower than --6merpair. Suitable when large internal gaps are
            # expected. Applicable to up to ~200 sequences. A combination with --
            # maxiterate 1000 is recommended (E-INS-i). Default: off (6mer
            # distance is used)
            _Switch(["--genafpair", "genafpair"],
                    "All pairwise alignments are computed with a local "
                    "algorithm with the generalized affine gap cost "
                    "(Altschul 1998). Default: off"),
            # All pairwise alignments are computed with FASTA (Pearson and Lipman
            # 1988). FASTA is required. Default: off (6mer distance is used)
            _Switch(["--fastapair", "fastapair"],
                    "All pairwise alignments are computed with FASTA "
                    "(Pearson and Lipman 1988). Default: off"),
            # Weighting factor for the consistency term calculated from pairwise
            # alignments. Valid when either of --globalpair, --localpair, --
            # genafpair, --fastapair or --blastpair is selected. Default: 2.7
            _Option(["--weighti", "weighti"],
                    "Weighting factor for the consistency term calculated "
                    "from pairwise alignments. Default: 2.7",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Guide tree is built number times in the progressive stage. Valid
            # with 6mer distance. Default: 2
            _Option(["--retree", "retree"],
                    "Guide tree is built number times in the progressive "
                    "stage. Valid with 6mer distance. Default: 2",
                    checker_function=lambda x: isinstance(x, int),
                    equate=False),
            # Number cycles of iterative refinement are performed. Default: 0
            _Option(["--maxiterate", "maxiterate"],
                    "Number cycles of iterative refinement are performed. "
                    "Default: 0",
                    checker_function=lambda x: isinstance(x, int),
                    equate=False),
            # Use FFT approximation in group-to-group alignment. Default: on
            _Switch(["--fft", "fft"],
                    "Use FFT approximation in group-to-group alignment. "
                    "Default: on"),
            # Do not use FFT approximation in group-to-group alignment. Default:
            # off
            _Switch(["--nofft", "nofft"],
                    "Do not use FFT approximation in group-to-group "
                    "alignment. Default: off"),
            # Alignment score is not checked in the iterative refinement stage.
            # Default: off (score is checked)
            _Switch(["--noscore", "noscore"],
                    "Alignment score is not checked in the iterative "
                    "refinement stage. Default: off (score is checked)"),
            # Use the Myers-Miller (1988) algorithm. Default: automatically
            # turned on when the alignment length exceeds 10,000 (aa/nt).
            _Switch(["--memsave", "memsave"],
                    "Use the Myers-Miller (1988) algorithm. Default: "
                    "automatically turned on when the alignment length "
                    "exceeds 10,000 (aa/nt)."),
            # Use a fast tree-building method (PartTree, Katoh and Toh 2007) with
            # the 6mer distance. Recommended for a large number (> ~10,000) of
            # sequences are input. Default: off
            _Switch(["--parttree", "parttree"],
                    "Use a fast tree-building method with the 6mer "
                    "distance. Default: off"),
            # The PartTree algorithm is used with distances based on DP. Slightly
            # more accurate and slower than --parttree. Recommended for a large
            # number (> ~10,000) of sequences are input. Default: off
            _Switch(["--dpparttree", "dpparttree"],
                    "The PartTree algorithm is used with distances "
                    "based on DP. Default: off"),
            # The PartTree algorithm is used with distances based on FASTA.
            # Slightly more accurate and slower than --parttree. Recommended for
            # a large number (> ~10,000) of sequences are input. FASTA is
            # required. Default: off
            _Switch(["--fastaparttree", "fastaparttree"],
                    "The PartTree algorithm is used with distances based "
                    "on FASTA. Default: off"),
            # The number of partitions in the PartTree algorithm. Default: 50
            _Option(["--partsize", "partsize"],
                    "The number of partitions in the PartTree algorithm. "
                    "Default: 50",
                    checker_function=lambda x: isinstance(x, int),
                    equate=False),
            # Do not make alignment larger than number sequences. Valid only with
            # the --*parttree options. Default: the number of input sequences
            _Switch(["--groupsize", "groupsize"],
                    "Do not make alignment larger than number sequences. "
                    "Default: the number of input sequences"),
            # Adjust direction according to the first sequence
            # Mafft V6 beta function
            _Switch(["--adjustdirection", "adjustdirection"],
                    "Adjust direction according to the first sequence. "
                    "Default off."),
            # Adjust direction according to the first sequence
            # for highly diverged data; very slow
            # Mafft V6 beta function
            # NOTE: separators added so the concatenated help text reads
            # correctly (previously "...sequence,for ... very slowDefault off.")
            _Switch(["--adjustdirectionaccurately", "adjustdirectionaccurately"],
                    "Adjust direction according to the first sequence, "
                    "for highly diverged data; very slow. "
                    "Default off."),
            # **** Parameter ****
            # Gap opening penalty at group-to-group alignment. Default: 1.53
            _Option(["--op", "op"],
                    "Gap opening penalty at group-to-group alignment. "
                    "Default: 1.53",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Offset value, which works like gap extension penalty, for group-to-
            # group alignment. Default: 0.123
            _Option(["--ep", "ep"],
                    "Offset value, which works like gap extension penalty, "
                    "for group-to- group alignment. Default: 0.123",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Gap opening penalty at local pairwise alignment. Valid when the --
            # localpair or --genafpair option is selected. Default: -2.00
            # (The help text below previously said 0.123, copy-pasted from --ep.)
            _Option(["--lop", "lop"],
                    "Gap opening penalty at local pairwise alignment. "
                    "Default: -2.00",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Offset value at local pairwise alignment. Valid when the --
            # localpair or --genafpair option is selected. Default: 0.1
            _Option(["--lep", "lep"],
                    "Offset value at local pairwise alignment. "
                    "Default: 0.1",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Gap extension penalty at local pairwise alignment. Valid when the -
            # -localpair or --genafpair option is selected. Default: -0.1
            _Option(["--lexp", "lexp"],
                    "Gap extension penalty at local pairwise alignment. "
                    "Default: -0.1",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Gap opening penalty to skip the alignment. Valid when the --
            # genafpair option is selected. Default: -6.00
            _Option(["--LOP", "LOP"],
                    "Gap opening penalty to skip the alignment. "
                    "Default: -6.00",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # Gap extension penalty to skip the alignment. Valid when the --
            # genafpair option is selected. Default: 0.00
            _Option(["--LEXP", "LEXP"],
                    "Gap extension penalty to skip the alignment. "
                    "Default: 0.00",
                    checker_function=lambda x: isinstance(x, float),
                    equate=False),
            # BLOSUM number matrix (Henikoff and Henikoff 1992) is used.
            # number=30, 45, 62 or 80. Default: 62
            _Option(["--bl", "bl"],
                    "BLOSUM number matrix is used. Default: 62",
                    checker_function=lambda x: x in BLOSUM_MATRICES,
                    equate=False),
            # JTT PAM number (Jones et al. 1992) matrix is used. number>0.
            # Default: BLOSUM62
            _Option(["--jtt", "jtt"],
                    "JTT PAM number (Jones et al. 1992) matrix is used. "
                    "number>0. Default: BLOSUM62",
                    equate=False),
            # Transmembrane PAM number (Jones et al. 1994) matrix is used.
            # number>0. Default: BLOSUM62
            _Option(["--tm", "tm"],
                    "Transmembrane PAM number (Jones et al. 1994) "
                    "matrix is used. number>0. Default: BLOSUM62",
                    filename=True,
                    equate=False),
            # Use a user-defined AA scoring matrix. The format of matrixfile is
            # the same to that of BLAST. Ignored when nucleotide sequences are
            # input. Default: BLOSUM62
            _Option(["--aamatrix", "aamatrix"],
                    "Use a user-defined AA scoring matrix. "
                    "Default: BLOSUM62",
                    filename=True,
                    equate=False),
            # Incorporate the AA/nuc composition information into the scoring
            # matrix. Default: off
            _Switch(["--fmodel", "fmodel"],
                    "Incorporate the AA/nuc composition information into "
                    "the scoring matrix (True) or not (False, default)"),
            # **** Output ****
            # Name length for CLUSTAL and PHYLIP format output
            _Option(["--namelength", "namelength"],
                    """Name length in CLUSTAL and PHYLIP output.

                    MAFFT v6.847 (2011) added --namelength for use with
                    the --clustalout option for CLUSTAL output.

                    MAFFT v7.024 (2013) added support for this with the
                    --phylipout option for PHYLIP output (default 10).
                    """,
                    checker_function=lambda x: isinstance(x, int),
                    equate=False),
            # Output format: clustal format. Default: off (fasta format)
            _Switch(["--clustalout", "clustalout"],
                    "Output format: clustal (True) or fasta (False, default)"),
            # Output format: phylip format.
            # Added in beta with v6.847, fixed in v6.850 (2011)
            _Switch(["--phylipout", "phylipout"],
                    "Output format: phylip (True), or fasta (False, default)"),
            # Output order: same as input. Default: on
            _Switch(["--inputorder", "inputorder"],
                    "Output order: same as input (True, default) or alignment "
                    "based (False)"),
            # Output order: aligned. Default: off (inputorder)
            _Switch(["--reorder", "reorder"],
                    "Output order: aligned (True) or in input order (False, "
                    "default)"),
            # Guide tree is output to the input.tree file. Default: off
            _Switch(["--treeout", "treeout"],
                    "Guide tree is output to the input.tree file (True) or "
                    "not (False, default)"),
            # Do not report progress. Default: off
            _Switch(["--quiet", "quiet"],
                    "Do not report progress (True) or not (False, default)."),
            # **** Input ****
            # Assume the sequences are nucleotide. Default: auto
            _Switch(["--nuc", "nuc"],
                    "Assume the sequences are nucleotide (True/False). "
                    "Default: auto"),
            # Assume the sequences are amino acid. Default: auto
            _Switch(["--amino", "amino"],
                    "Assume the sequences are amino acid (True/False). "
                    "Default: auto"),
            # MAFFT has multiple --seed commands where the unaligned input is
            # aligned to the seed alignment. There can be multiple seeds in the
            # form: "mafft --seed align1 --seed align2 [etc] input"
            # Effectively for n number of seed alignments.
            # TODO - Can we use class _ArgumentList here?
            _Option(["--seed", "seed"],
                    "Seed alignments given in alignment_n (fasta format) "
                    "are aligned with sequences in input.",
                    filename=True,
                    equate=False),
            # The input (must be FASTA format)
            _Argument(["input"],
                      "Input file name",
                      filename=True,
                      is_required=True),
            # mafft-profile takes a second alignment input as an argument:
            # mafft-profile align1 align2
            _Argument(["input1"],
                      "Second input file name for the mafft-profile command",
                      filename=True),
            ]
        AbstractCommandline.__init__(self, cmd, **kwargs)
# Run this module's doctests when executed directly.
# NOTE(review): the doctest examples invoke a local MAFFT binary and use
# paths relative to the Biopython source tree — confirm environment first.
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
| |
import base64
import collections
import hashlib
from xmlrpclib import Fault, ServerProxy
# Gravatar XML-RPC entry point; the target account is selected by appending
# "?user=<md5-of-email>" (see User.__init__).
API_ENDPOINT = 'https://secure.gravatar.com/xmlrpc'
def hash_email(email):
    """
    :param string email: email address
    :returns: the hash of ``email``, suitable for embedding in a URL to retrieve its assigned
        image, e.g., ``http://gravatar.com/avatar/<hash>``
    """
    # Gravatar hashes the trimmed, lower-cased address with MD5.
    normalized = email.strip().lower()
    if not isinstance(normalized, bytes):
        # hashlib requires bytes on Python 3; encode text input as UTF-8.
        normalized = normalized.encode('utf-8')
    return hashlib.md5(normalized).hexdigest()
def _check_email_success(response):
for email, success in response.iteritems():
if not success:
raise InvalidEmailError(email)
class Rating:
    """Image content rating constants.

    ``G`` (0), ``PG`` (1), ``R`` (2) and ``X`` (3), matching the integer
    codes used by the Gravatar API.
    """
    G = 0
    PG = 1
    R = 2
    X = 3
class GravatarError(Exception):
    """Base class for all errors raised by this module."""
class SecureError(GravatarError):
    """Gravatar API error mapped from XML-RPC fault code -7."""
class InternalError(GravatarError):
    """Gravatar API error mapped from XML-RPC fault code -8."""
class AuthenticationError(GravatarError):
    """Gravatar API error mapped from XML-RPC fault code -9."""
class ParameterMissingError(GravatarError):
    """Gravatar API error mapped from XML-RPC fault code -10."""
class ParameterIncorrectError(GravatarError):
    """Gravatar API error mapped from XML-RPC fault code -11."""
class MiscError(GravatarError):
    """Gravatar API error mapped from XML-RPC fault code -100."""
class UnknownError(GravatarError):
    """Raised for any XML-RPC fault code not otherwise recognized."""
class InvalidEmailError(GravatarError):
    """Raised when a per-address operation reports failure for that address."""
class InvalidUrlError(GravatarError):
    """Raised when an image URL cannot be saved (see :meth:`User.saveUrl`)."""
class InvalidDataError(GravatarError):
    """Raised when image data cannot be saved (see :meth:`User.saveData`)."""
class InvalidImageIdError(GravatarError):
    """Raised when an image ID is not found in the account (see :meth:`User.deleteImage`)."""
class Image(collections.namedtuple('Image', ['id', 'url', 'rating'])):
    """
    Represents an image in a user account.

    :var id: unique ID used to refer to this image
    :type id: `string`
    :var url: unique URL to retrieve this image, even if it is unassigned
    :type url: `string`
    :var rating: rating for the image
    :type rating: `int` (see :class:`Rating`)
    """
class User(object):
    """
    Represents a user account, accessed through the Gravatar XML-RPC API.
    """
    def __init__(self, email, password=None, apikey=None):
        """
        At least one of ``password`` and ``apikey`` must be specified.

        :param string email: an email address belonging to the account
        :param string password: password for the account
        :param string apikey: API key for your application
        :raises ValueError: if neither ``password`` nor ``apikey`` is given
        """
        # Validate credentials first, so a misconfigured call fails fast
        # before any proxy object is built.
        if password is None and apikey is None:
            raise ValueError("Must specify either 'password' or 'apikey' parameter")
        # The account is addressed by the hash of one of its email addresses.
        self._server = ServerProxy(API_ENDPOINT + '?user=' + hash_email(email))
        self.password = password
        self.apikey = apikey

    def exists(self, *emails):
        """
        :param emails: email addresses to check
        :type emails: vararg list of `string`
        :returns: dictionary where each key is an email address from the passed-in list and each
            value is a boolean of whether that email address belongs to a Gravatar account and has
            an image assigned to it.
        :rtype: {`string`: `boolean`}
        """
        # Map hash -> original address so the response (keyed by hash) can be
        # translated back to the caller's addresses.
        hashes = dict((hash_email(email), email) for email in emails)
        response = self._call('exists', hashes=list(hashes))
        return dict((hashes[h], found == 1) for h, found in response.items())

    def emails(self):
        """
        :returns: dictionary where each key is an email address belonging to the user account and
            each value is the :class:`Image` assigned to it, or ``None`` if no image is assigned
        :rtype: {`string`: :class:`Image`}
        """
        result = {}
        for email, userimage in self._call('addresses').items():
            if userimage['userimage']:
                result[email] = Image(id=userimage['userimage'],
                                      url=userimage['userimage_url'],
                                      rating=userimage['rating'])
            else:
                # An empty image ID means no image is assigned to this address.
                result[email] = None
        return result

    def images(self):
        """
        :returns: images belonging to the user account
        :rtype: list of :class:`Image`
        """
        return [Image(id=image_id, url=url, rating=int(rating))
                for image_id, (rating, url) in self._call('userimages').items()]

    def saveData(self, data, rating):
        """
        Save the data as a new image in the user account.

        :param string data: binary image data to save
        :param rating: rating for the new image
        :type rating: `int` (see :class:`Rating`)
        :returns: ID of new image
        :rtype: `string`
        :raises InvalidDataError: if the server rejects the image data
        """
        image_id = self._call('saveData', data=base64.b64encode(data), rating=rating)
        if not image_id:
            raise InvalidDataError()
        return image_id

    def saveUrl(self, url, rating):
        """
        Read the image pointed to by the URL and save it as a new image in the user account.

        :param string url: URL pointing to an image to save
        :param rating: rating for the new image
        :type rating: `int` (see :class:`Rating`)
        :returns: ID of new image
        :rtype: `string`
        :raises InvalidUrlError: if the server cannot read an image from ``url``
        """
        image_id = self._call('saveUrl', url=url, rating=rating)
        if not image_id:
            # Fixed: previously raised the misspelled name InvalidURLError,
            # which itself crashed with NameError.
            raise InvalidUrlError(url)
        return image_id

    def useImage(self, id, *emails):
        """
        Assign the image identified by an ID to every email address passed in.

        :param string id: ID of image to assign
        :param emails: email addresses to assign the image to
        :type emails: vararg list of `string`
        """
        _check_email_success(self._call('useUserimage', userimage=id, addresses=emails))

    def removeImage(self, *emails):
        """
        For every email address passed in, unassign its image.

        :param emails: email addresses to be unassigned
        :type emails: vararg list of `string`
        """
        _check_email_success(self._call('removeImage', addresses=emails))

    def deleteImage(self, id):
        """
        Delete the image from the user account, and unassign it from any email addresses.

        :param string id: ID of image to delete
        :raises InvalidImageIdError: if no image with that ID exists
        """
        if not self._call('deleteUserimage', userimage=id):
            raise InvalidImageIdError(id)

    def test(self):
        """
        :returns: the server's number of seconds since the current epoch.
        :rtype: `int`
        """
        return self._call('test')['response']

    def _call(self, method, **kwargs):
        """Invoke XML-RPC method ``grav.<method>``, adding the stored
        credentials and translating known fault codes into typed exceptions."""
        if self.password is not None:
            kwargs['password'] = self.password
        if self.apikey is not None:
            kwargs['apikey'] = self.apikey
        try:
            return getattr(self._server.grav, method)(kwargs)
        except Fault as fault:
            error_by_code = {
                -7: SecureError,
                -8: InternalError,
                -9: AuthenticationError,
                -10: ParameterMissingError,
                -11: ParameterIncorrectError,
                -100: MiscError,
            }
            raise error_by_code.get(fault.faultCode, UnknownError)(fault.faultString)
| |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from msrestazure.tools import is_valid_resource_id, resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
from azure.cli.core.util import sdk_no_wait
from knack.util import CLIError
def list_policy_events(
        cmd,
        client,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None,
        policy_set_definition_name=None,
        policy_definition_name=None,
        policy_assignment_name=None,
        from_value=None,
        to_value=None,
        order_by_clause=None,
        select_clause=None,
        top_value=None,
        filter_clause=None,
        apply_clause=None):
    """Query policy events at the narrowest scope implied by the arguments.

    Scope precedence: policy assignment (resource-group or subscription
    level) > policy definition > policy set definition > individual
    resource > resource group > management group > subscription.
    """
    from azure.mgmt.policyinsights.models import QueryOptions
    query_options = QueryOptions(
        top=top_value,
        order_by=order_by_clause,
        select=select_clause,
        from_property=from_value,
        to=to_value,
        filter=filter_clause,
        apply=apply_clause)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    if policy_assignment_name and resource_group_name:
        return client.list_query_results_for_resource_group_level_policy_assignment(
            subscription_id, resource_group_name, policy_assignment_name, query_options)
    if policy_assignment_name:
        return client.list_query_results_for_subscription_level_policy_assignment(
            subscription_id, policy_assignment_name, query_options)
    if policy_definition_name:
        return client.list_query_results_for_policy_definition(
            subscription_id, policy_definition_name, query_options)
    if policy_set_definition_name:
        return client.list_query_results_for_policy_set_definition(
            subscription_id, policy_set_definition_name, query_options)
    if resource:
        if not is_valid_resource_id(resource):
            # Assemble a full resource ID from the individual pieces.
            if resource_type_parent:
                parent = _remove_leading_and_trailing_slash(resource_type_parent)
                resource_type = "{}/{}".format(parent, resource_type)
            resource = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace=namespace,
                type=resource_type,
                name=resource)
        return client.list_query_results_for_resource(resource, query_options)
    if resource_group_name:
        return client.list_query_results_for_resource_group(
            subscription_id, resource_group_name, query_options)
    if management_group_name:
        return client.list_query_results_for_management_group(
            management_group_name, query_options)
    return client.list_query_results_for_subscription(subscription_id, query_options)
def list_policy_states(
        cmd,
        client,
        all_results=False,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None,
        policy_set_definition_name=None,
        policy_definition_name=None,
        policy_assignment_name=None,
        from_value=None,
        to_value=None,
        order_by_clause=None,
        select_clause=None,
        top_value=None,
        filter_clause=None,
        apply_clause=None,
        expand_clause=None):
    """Query policy compliance states at the narrowest scope implied by the arguments.

    ``all_results`` switches from the 'latest' policy-states resource to
    'default' (the full state history).
    """
    from azure.mgmt.policyinsights.models import QueryOptions
    query_options = QueryOptions(
        top=top_value,
        order_by=order_by_clause,
        select=select_clause,
        from_property=from_value,
        to=to_value,
        filter=filter_clause,
        apply=apply_clause,
        expand=expand_clause)

    states_resource = 'default' if all_results is True else 'latest'
    subscription_id = get_subscription_id(cmd.cli_ctx)

    if policy_assignment_name and resource_group_name:
        return client.list_query_results_for_resource_group_level_policy_assignment(
            states_resource, subscription_id, resource_group_name,
            policy_assignment_name, query_options)
    if policy_assignment_name:
        return client.list_query_results_for_subscription_level_policy_assignment(
            states_resource, subscription_id, policy_assignment_name, query_options)
    if policy_definition_name:
        return client.list_query_results_for_policy_definition(
            states_resource, subscription_id, policy_definition_name, query_options)
    if policy_set_definition_name:
        return client.list_query_results_for_policy_set_definition(
            states_resource, subscription_id, policy_set_definition_name, query_options)
    if resource:
        if not is_valid_resource_id(resource):
            # Assemble a full resource ID from the individual pieces.
            if resource_type_parent:
                parent = _remove_leading_and_trailing_slash(resource_type_parent)
                resource_type = "{}/{}".format(parent, resource_type)
            resource = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace=namespace,
                type=resource_type,
                name=resource)
        return client.list_query_results_for_resource(
            states_resource, resource, query_options)
    if resource_group_name:
        return client.list_query_results_for_resource_group(
            states_resource, subscription_id, resource_group_name, query_options)
    if management_group_name:
        return client.list_query_results_for_management_group(
            states_resource, management_group_name, query_options)
    return client.list_query_results_for_subscription(
        states_resource, subscription_id, query_options)
def summarize_policy_states(
        cmd,
        client,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None,
        policy_set_definition_name=None,
        policy_definition_name=None,
        policy_assignment_name=None,
        from_value=None,
        to_value=None,
        top_value=None,
        filter_clause=None):
    """Summarize policy compliance at the narrowest scope implied by the arguments.

    Returns the single summary entry contained in the service response.
    """
    from azure.mgmt.policyinsights.models import QueryOptions
    query_options = QueryOptions(
        top=top_value,
        from_property=from_value,
        to=to_value,
        filter=filter_clause)

    subscription_id = get_subscription_id(cmd.cli_ctx)

    if policy_assignment_name and resource_group_name:
        summary = client.summarize_for_resource_group_level_policy_assignment(
            subscription_id, resource_group_name, policy_assignment_name, query_options)
    elif policy_assignment_name:
        summary = client.summarize_for_subscription_level_policy_assignment(
            subscription_id, policy_assignment_name, query_options)
    elif policy_definition_name:
        summary = client.summarize_for_policy_definition(
            subscription_id, policy_definition_name, query_options)
    elif policy_set_definition_name:
        summary = client.summarize_for_policy_set_definition(
            subscription_id, policy_set_definition_name, query_options)
    elif resource:
        full_resource_id = _build_resource_id(
            subscription_id,
            resource,
            resource_group_name,
            namespace,
            resource_type_parent,
            resource_type)
        summary = client.summarize_for_resource(full_resource_id, query_options)
    elif resource_group_name:
        summary = client.summarize_for_resource_group(
            subscription_id, resource_group_name, query_options)
    elif management_group_name:
        summary = client.summarize_for_management_group(
            management_group_name, query_options)
    else:
        summary = client.summarize_for_subscription(subscription_id, query_options)
    return summary.value[0]
def trigger_policy_scan(
        cmd,
        client,
        resource_group_name=None,
        no_wait=False):
    """Start an on-demand policy compliance evaluation for the subscription,
    or for a single resource group when one is given."""
    subscription_id = get_subscription_id(cmd.cli_ctx)
    operation = client.begin_trigger_subscription_evaluation
    arguments = (subscription_id,)
    if resource_group_name:
        operation = client.begin_trigger_resource_group_evaluation
        arguments = (subscription_id, resource_group_name)
    return sdk_no_wait(no_wait, operation, *arguments)
def get_policy_remediation(
        cmd,
        client,
        remediation_name,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None):
    """Fetch a single remediation at the scope implied by the arguments."""
    return _execute_remediation_operation(
        cmd, client, "get_at_resource",
        management_group_name=management_group_name,
        resource_group_name=resource_group_name,
        resource=resource,
        namespace=namespace,
        resource_type_parent=resource_type_parent,
        resource_type=resource_type,
        remediation_name=remediation_name)
def list_policy_remediations(
        cmd,
        client,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None):
    """List remediations at the scope implied by the arguments."""
    return _execute_remediation_operation(
        cmd, client, "list_for_resource",
        management_group_name=management_group_name,
        resource_group_name=resource_group_name,
        resource=resource,
        namespace=namespace,
        resource_type_parent=resource_type_parent,
        resource_type=resource_type)
def delete_policy_remediation(
        cmd,
        client,
        remediation_name,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None):
    """Delete a remediation at the scope implied by the arguments."""
    return _execute_remediation_operation(
        cmd, client, "delete_at_resource",
        management_group_name=management_group_name,
        resource_group_name=resource_group_name,
        resource=resource,
        namespace=namespace,
        resource_type_parent=resource_type_parent,
        resource_type=resource_type,
        remediation_name=remediation_name)
def cancel_policy_remediation(
        cmd,
        client,
        remediation_name,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None):
    """Cancel an in-progress remediation at the scope implied by the arguments."""
    return _execute_remediation_operation(
        cmd, client, "cancel_at_resource",
        management_group_name=management_group_name,
        resource_group_name=resource_group_name,
        resource=resource,
        namespace=namespace,
        resource_type_parent=resource_type_parent,
        resource_type=resource_type,
        remediation_name=remediation_name)
def list_policy_remediation_deployments(
        cmd,
        client,
        remediation_name,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None):
    """List the deployments created by a remediation at the scope implied by the arguments."""
    return _execute_remediation_operation(
        cmd, client, "list_deployments_at_resource",
        management_group_name=management_group_name,
        resource_group_name=resource_group_name,
        resource=resource,
        namespace=namespace,
        resource_type_parent=resource_type_parent,
        resource_type=resource_type,
        remediation_name=remediation_name)
def create_policy_remediation(
        cmd,
        client,
        remediation_name,
        policy_assignment,
        definition_reference_id=None,
        location_filters=None,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None,
        resource_discovery_mode=None):
    """Create or update a remediation at the scope implied by the arguments.

    ``policy_assignment`` may be a full resource ID or a bare assignment
    name; a bare name is resolved by listing the subscription's policy
    assignments and must match exactly one.
    """
    from azure.mgmt.policyinsights.models import Remediation

    subscription_id = get_subscription_id(cmd.cli_ctx)
    scope = _build_remediation_scope(
        management_group_name,
        subscription_id,
        resource_group_name,
        resource,
        resource_type_parent,
        resource_type,
        namespace)
    remediation = Remediation(policy_definition_reference_id=definition_reference_id)

    # Resolve a bare assignment name into its full resource ID.
    name_only = (not is_valid_resource_id(policy_assignment) and
                 not policy_assignment.lower().startswith(
                     "/providers/microsoft.management/managementgroups/"))
    if name_only:
        from ._client_factory import cf_policy
        assignments = cf_policy(cmd.cli_ctx).policy_assignments.list()
        matches = [assignment.id for assignment in assignments
                   if assignment.name.lower() == policy_assignment.lower()]
        if not matches:
            raise CLIError("No policy assignment with the name '{}' found.".format(policy_assignment))
        if len(matches) > 1:
            raise CLIError("Multiple policy assignment with the name '{}' found. "
                           "Specify the policy assignment ID.".format(policy_assignment))
        policy_assignment = matches[0]
    remediation.policy_assignment_id = policy_assignment

    # Locations in the filter must use their short (space-free) names.
    if location_filters:
        from azure.mgmt.policyinsights.models import RemediationFilters
        remediation.filters = RemediationFilters(
            locations=[location.replace(' ', '') for location in location_filters])

    if resource_discovery_mode:
        remediation.resource_discovery_mode = resource_discovery_mode

    return client.create_or_update_at_resource(
        resource_id=_remove_leading_and_trailing_slash(scope),
        remediation_name=remediation_name,
        parameters=remediation)
def show_policy_metadata(cmd, client, resource_name):  # pylint: disable=unused-argument
    """Fetch a single policy metadata resource by its name."""
    return client.get_resource(resource_name=resource_name)
def list_policy_metadata(cmd, client, top_value=None):  # pylint: disable=unused-argument
    """List policy metadata resources, optionally capped at top_value entries."""
    if top_value is None:
        return list(client.list())
    # Pull whole pages until we have at least top_value rows, then trim.
    from azure.mgmt.policyinsights.models import QueryOptions
    page_iter = client.list(QueryOptions(top=top_value)).by_page()
    results = []
    while len(results) < top_value:
        try:
            results.extend(next(page_iter))
        except StopIteration:
            break
    return results[:top_value]
def _execute_remediation_operation(
        cmd,
        client,
        operation_name,
        management_group_name=None,
        resource_group_name=None,
        resource=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None,
        remediation_name=None):
    """Resolve the remediation scope and dispatch `operation_name` on the client.

    `remediation_name` is forwarded only when given (listing operations omit it).
    """
    subscription_id = get_subscription_id(cmd.cli_ctx)
    scope = _build_remediation_scope(
        management_group_name,
        subscription_id,
        resource_group_name,
        resource,
        resource_type_parent,
        resource_type,
        namespace)

    operation = getattr(client, operation_name)
    call_kwargs = {'resource_id': _remove_leading_and_trailing_slash(scope)}
    if remediation_name is not None:
        call_kwargs['remediation_name'] = remediation_name
    return operation(**call_kwargs)
def _build_resource_id(
        subscription_id,
        resource,
        resource_group_name=None,
        namespace=None,
        resource_type_parent=None,
        resource_type=None):
    """Return `resource` unchanged if it is already a full resource ID,
    otherwise assemble one from the individual components."""
    if is_valid_resource_id(resource):
        return resource

    full_type = resource_type
    if resource_type_parent:
        trimmed_parent = _remove_leading_and_trailing_slash(resource_type_parent)
        full_type = "{}/{}".format(trimmed_parent, resource_type)

    return resource_id(
        subscription=subscription_id,
        resource_group=resource_group_name,
        namespace=namespace,
        type=full_type,
        name=resource)
def _build_remediation_scope(
        management_group=None,
        subscription=None,
        resource_group_name=None,
        resource=None,
        resource_type_parent=None,
        resource_type=None,
        namespace=None):
    """Resolve the remediation scope with precedence:
    management group > individual resource > subscription/resource group."""
    if management_group:
        return "/providers/Microsoft.Management/managementGroups/{}".format(management_group)

    if resource:
        return _build_resource_id(subscription, resource, resource_group_name,
                                  namespace, resource_type_parent, resource_type)

    return resource_id(subscription=subscription, resource_group=resource_group_name)
def _remove_leading_and_trailing_slash(s):
if s:
if s.startswith('/'):
s = s[1:]
if s.endswith('/'):
s = s[:-1]
return s
| |
import pytest
from metaworld.envs.mujoco.env_dict import ALL_V1_ENVIRONMENTS, ALL_V2_ENVIRONMENTS
from metaworld.policies import *
from tests.metaworld.envs.mujoco.sawyer_xyz.utils import trajectory_summary
# V2 policies evaluated on (older) V1 envs, without action noise.
# All of these are marked skip when combined into `test_cases` below.
test_cases_old_nonoise = [
    # This should contain configs where a V2 policy is running in a V1 env.
    # name, policy, action noise pct, success rate
    ['bin-picking-v1', SawyerBinPickingV2Policy(), .0, .50],
    ['handle-press-side-v1', SawyerHandlePressSideV2Policy(), .0, .05],
    ['lever-pull-v1', SawyerLeverPullV2Policy(), .0, .0],
    ['peg-insert-side-v1', SawyerPegInsertionSideV2Policy(), .0, .0],
    ['plate-slide-back-side-v1', SawyerPlateSlideBackSideV2Policy(), .0, 1.],
    ['window-open-v1', SawyerWindowOpenV2Policy(), .0, 0.85],
    ['window-close-v1', SawyerWindowCloseV2Policy(), .0, 0.37],
]
# V2 policies evaluated on (older) V1 envs, with 10% action noise.
# All of these are marked skip when combined into `test_cases` below.
test_cases_old_noisy = [
    # This should contain configs where a V2 policy is running in a V1 env.
    # name, policy, action noise pct, success rate
    ['bin-picking-v1', SawyerBinPickingV2Policy(), .1, .40],
    ['handle-press-side-v1', SawyerHandlePressSideV2Policy(), .1, .77],
    ['lever-pull-v1', SawyerLeverPullV2Policy(), .1, .0],
    ['peg-insert-side-v1', SawyerPegInsertionSideV2Policy(), .1, .0],
    ['plate-slide-back-side-v1', SawyerPlateSlideBackSideV2Policy(), .1, 0.30],
    ['window-open-v1', SawyerWindowOpenV2Policy(), .1, 0.81],
    ['window-close-v1', SawyerWindowCloseV2Policy(), .1, 0.37],
]
# Policies evaluated on their matching (latest) env versions, no action noise.
# All of these are marked skip when combined into `test_cases` below.
test_cases_latest_nonoise = [
    # name, policy, action noise pct, success rate
    ['assembly-v1', SawyerAssemblyV1Policy(), .0, 1.],
    ['assembly-v2', SawyerAssemblyV2Policy(), .0, 1.],
    ['basketball-v1', SawyerBasketballV1Policy(), .0, .98],
    ['basketball-v2', SawyerBasketballV2Policy(), .0, .98],
    ['bin-picking-v2', SawyerBinPickingV2Policy(), .0, .98],
    ['box-close-v1', SawyerBoxCloseV1Policy(), .0, .85],
    ['box-close-v2', SawyerBoxCloseV2Policy(), .0, .90],
    ['button-press-topdown-v1', SawyerButtonPressTopdownV1Policy(), .0, 1.],
    ['button-press-topdown-v2', SawyerButtonPressTopdownV2Policy(), .0, .95],
    ['button-press-topdown-wall-v1', SawyerButtonPressTopdownWallV1Policy(), .0, 1.],
    ['button-press-topdown-wall-v2', SawyerButtonPressTopdownWallV2Policy(), .0, .95],
    ['button-press-v1', SawyerButtonPressV1Policy(), .0, 1.],
    ['button-press-v2', SawyerButtonPressV2Policy(), .0, 1.],
    ['button-press-wall-v1', SawyerButtonPressWallV1Policy(), .0, 1.],
    ['button-press-wall-v2', SawyerButtonPressWallV2Policy(), .0, .93],
    ['coffee-button-v1', SawyerCoffeeButtonV1Policy(), .0, 1.],
    ['coffee-button-v2', SawyerCoffeeButtonV2Policy(), .0, 1.],
    ['coffee-pull-v1', SawyerCoffeePullV1Policy(), .0, .96],
    ['coffee-pull-v2', SawyerCoffeePullV2Policy(), .0, .94],
    ['coffee-push-v1', SawyerCoffeePushV1Policy(), .0, .93],
    ['coffee-push-v2', SawyerCoffeePushV2Policy(), .0, .93],
    ['dial-turn-v1', SawyerDialTurnV1Policy(), .0, 0.96],
    ['dial-turn-v2', SawyerDialTurnV2Policy(), .0, 0.96],
    ['disassemble-v1', SawyerDisassembleV1Policy(), .0, .96],
    ['disassemble-v2', SawyerDisassembleV2Policy(), .0, .92],
    ['door-close-v1', SawyerDoorCloseV1Policy(), .0, .99],
    ['door-close-v2', SawyerDoorCloseV2Policy(), .0, .99],
    ['door-lock-v1', SawyerDoorLockV1Policy(), .0, 1.],
    ['door-lock-v2', SawyerDoorLockV2Policy(), .0, 1.],
    ['door-open-v1', SawyerDoorOpenV1Policy(), .0, .98],
    ['door-open-v2', SawyerDoorOpenV2Policy(), .0, .94],
    ['door-unlock-v1', SawyerDoorUnlockV1Policy(), .0, 1.],
    ['door-unlock-v2', SawyerDoorUnlockV2Policy(), .0, 1.],
    ['drawer-close-v1', SawyerDrawerCloseV1Policy(), .0, .99],
    ['drawer-close-v2', SawyerDrawerCloseV2Policy(), .0, .99],
    ['drawer-open-v1', SawyerDrawerOpenV1Policy(), .0, .99],
    ['drawer-open-v2', SawyerDrawerOpenV2Policy(), .0, .99],
    ['faucet-close-v1', SawyerFaucetCloseV1Policy(), .0, 1.],
    ['faucet-close-v2', SawyerFaucetCloseV2Policy(), .0, 1.],
    ['faucet-open-v1', SawyerFaucetOpenV1Policy(), .0, 1.],
    ['faucet-open-v2', SawyerFaucetOpenV2Policy(), .0, 1.],
    ['hammer-v1', SawyerHammerV1Policy(), .0, 1.],
    ['hammer-v2', SawyerHammerV2Policy(), .0, 1.],
    ['hand-insert-v1', SawyerHandInsertV1Policy(), .0, 0.96],
    ['hand-insert-v2', SawyerHandInsertV2Policy(), .0, 0.96],
    ['handle-press-side-v2', SawyerHandlePressSideV2Policy(), .0, .99],
    ['handle-press-v1', SawyerHandlePressV1Policy(), .0, 1.],
    ['handle-press-v2', SawyerHandlePressV2Policy(), .0, 1.],
    ['handle-pull-v1', SawyerHandlePullV1Policy(), .0, 1.],
    ['handle-pull-v2', SawyerHandlePullV2Policy(), .0, 0.93],
    ['handle-pull-side-v1', SawyerHandlePullSideV1Policy(), .0, .92],
    ['handle-pull-side-v2', SawyerHandlePullSideV2Policy(), .0, 1.],
    ['peg-insert-side-v2', SawyerPegInsertionSideV2Policy(), .0, .89],
    ['lever-pull-v2', SawyerLeverPullV2Policy(), .0, .94],
    ['peg-unplug-side-v1', SawyerPegUnplugSideV1Policy(), .0, .99],
    ['peg-unplug-side-v2', SawyerPegUnplugSideV2Policy(), .0, .99],
    ['pick-out-of-hole-v1', SawyerPickOutOfHoleV1Policy(), .0, 1.],
    ['pick-out-of-hole-v2', SawyerPickOutOfHoleV2Policy(), .0, 1.],
    ['pick-place-v2', SawyerPickPlaceV2Policy(), .0, .95],
    ['pick-place-wall-v2', SawyerPickPlaceWallV2Policy(), .0, .95],
    ['plate-slide-back-side-v2', SawyerPlateSlideBackSideV2Policy(), .0, 1.],
    ['plate-slide-back-v1', SawyerPlateSlideBackV1Policy(), .0, 1.],
    ['plate-slide-back-v2', SawyerPlateSlideBackV2Policy(), .0, 1.],
    ['plate-slide-side-v1', SawyerPlateSlideSideV1Policy(), .0, 1.],
    ['plate-slide-side-v2', SawyerPlateSlideSideV2Policy(), .0, 1.],
    ['plate-slide-v1', SawyerPlateSlideV1Policy(), .0, 1.],
    ['plate-slide-v2', SawyerPlateSlideV2Policy(), .0, 1.],
    ['reach-v2', SawyerReachV2Policy(), .0, .99],
    ['reach-wall-v2', SawyerReachWallV2Policy(), 0.0, .98],
    ['push-back-v1', SawyerPushBackV1Policy(), .0, .97],
    ['push-back-v2', SawyerPushBackV2Policy(), .0, .97],
    ['push-v2', SawyerPushV2Policy(), .0, .97],
    ['push-wall-v2', SawyerPushWallV2Policy(), .0, .97],
    ['shelf-place-v1', SawyerShelfPlaceV1Policy(), .0, .96],
    ['shelf-place-v2', SawyerShelfPlaceV2Policy(), .0, .96],
    ['soccer-v1', SawyerSoccerV1Policy(), .0, .88],
    ['soccer-v2', SawyerSoccerV2Policy(), .0, .88],
    ['stick-pull-v1', SawyerStickPullV1Policy(), .0, 0.95],
    ['stick-pull-v2', SawyerStickPullV2Policy(), .0, 0.96],
    ['stick-push-v1', SawyerStickPushV1Policy(), .0, 0.98],
    ['stick-push-v2', SawyerStickPushV2Policy(), .0, 0.98],
    ['sweep-into-v1', SawyerSweepIntoV1Policy(), .0, 1.],
    ['sweep-into-v2', SawyerSweepIntoV2Policy(), .0, 0.98],
    ['sweep-v1', SawyerSweepV1Policy(), .0, 1.],
    ['sweep-v2', SawyerSweepV2Policy(), .0, 0.99],
    ['window-close-v2', SawyerWindowCloseV2Policy(), 0., .98],
    ['window-open-v2', SawyerWindowOpenV2Policy(), 0., .94],
]
# Policies evaluated on their matching (latest) env versions with 10% action
# noise. These are the only cases actually run (marked 'basic' below).
test_cases_latest_noisy = [
    # name, policy, action noise pct, success rate
    ['assembly-v1', SawyerAssemblyV1Policy(), .1, .69],
    ['assembly-v2', SawyerAssemblyV2Policy(), .1, .70],
    ['basketball-v1', SawyerBasketballV1Policy(), .1, .97],
    ['basketball-v2', SawyerBasketballV2Policy(), .1, .96],
    ['bin-picking-v2', SawyerBinPickingV2Policy(), .1, .96],
    ['box-close-v1', SawyerBoxCloseV1Policy(), .1, .84],
    ['box-close-v2', SawyerBoxCloseV2Policy(), .1, .82],
    ['button-press-topdown-v1', SawyerButtonPressTopdownV1Policy(), .1, .98],
    ['button-press-topdown-v2', SawyerButtonPressTopdownV2Policy(), .1, .93],
    ['button-press-topdown-wall-v1', SawyerButtonPressTopdownWallV1Policy(), .1, .99],
    ['button-press-topdown-wall-v2', SawyerButtonPressTopdownWallV2Policy(), .1, .95],
    ['button-press-v1', SawyerButtonPressV1Policy(), .1, .98],
    ['button-press-v2', SawyerButtonPressV2Policy(), .1, .98],
    ['button-press-wall-v1', SawyerButtonPressWallV1Policy(), .1, .94],
    ['button-press-wall-v2', SawyerButtonPressWallV2Policy(), .1, .92],
    ['coffee-button-v1', SawyerCoffeeButtonV1Policy(), .1, .99],
    ['coffee-button-v2', SawyerCoffeeButtonV2Policy(), .1, .99],
    ['coffee-pull-v1', SawyerCoffeePullV1Policy(), .1, .95],
    ['coffee-pull-v2', SawyerCoffeePullV2Policy(), .1, .82],
    ['coffee-push-v1', SawyerCoffeePushV1Policy(), .1, .86],
    ['coffee-push-v2', SawyerCoffeePushV2Policy(), .1, .88],
    ['dial-turn-v1', SawyerDialTurnV1Policy(), .1, 0.84],
    ['dial-turn-v2', SawyerDialTurnV2Policy(), .1, 0.84],
    ['disassemble-v1', SawyerDisassembleV1Policy(), .1, .91],
    ['disassemble-v2', SawyerDisassembleV2Policy(), .1, .88],
    ['door-close-v1', SawyerDoorCloseV1Policy(), .1, .99],
    ['door-close-v2', SawyerDoorCloseV2Policy(), .1, .97],
    ['door-lock-v1', SawyerDoorLockV1Policy(), .1, 1.],
    ['door-lock-v2', SawyerDoorLockV2Policy(), .1, .96],
    ['door-open-v1', SawyerDoorOpenV1Policy(), .1, .93],
    ['door-open-v2', SawyerDoorOpenV2Policy(), .1, .92],
    ['door-unlock-v1', SawyerDoorUnlockV1Policy(), .1, .96],
    ['door-unlock-v2', SawyerDoorUnlockV2Policy(), .1, .97],
    ['drawer-close-v1', SawyerDrawerCloseV1Policy(), .1, .64],
    ['drawer-close-v2', SawyerDrawerCloseV2Policy(), .1, .99],
    ['drawer-open-v1', SawyerDrawerOpenV1Policy(), .1, .97],
    ['drawer-open-v2', SawyerDrawerOpenV2Policy(), .1, .97],
    ['faucet-close-v1', SawyerFaucetCloseV1Policy(), .1, .93],
    ['faucet-close-v2', SawyerFaucetCloseV2Policy(), .1, 1.],
    ['faucet-open-v1', SawyerFaucetOpenV1Policy(), .1, .99],
    ['faucet-open-v2', SawyerFaucetOpenV2Policy(), .1, .99],
    ['hammer-v1', SawyerHammerV1Policy(), .1, .97],
    ['hammer-v2', SawyerHammerV2Policy(), .1, .96],
    ['hand-insert-v1', SawyerHandInsertV1Policy(), .1, 0.95],
    ['hand-insert-v2', SawyerHandInsertV2Policy(), .1, 0.86],
    ['handle-press-side-v2', SawyerHandlePressSideV2Policy(), .1, .98],
    ['handle-press-v1', SawyerHandlePressV1Policy(), .1, 1.],
    ['handle-press-v2', SawyerHandlePressV2Policy(), .1, 1.],
    ['handle-pull-v1', SawyerHandlePullV1Policy(), .1, 1.],
    ['handle-pull-v2', SawyerHandlePullV2Policy(), .1, .99],
    ['handle-pull-side-v1', SawyerHandlePullSideV1Policy(), .1, .75],
    ['handle-pull-side-v2', SawyerHandlePullSideV2Policy(), .1, .71],
    ['peg-insert-side-v2', SawyerPegInsertionSideV2Policy(), .1, .87],
    ['lever-pull-v2', SawyerLeverPullV2Policy(), .1, .90],
    ['peg-unplug-side-v1', SawyerPegUnplugSideV1Policy(), .1, .97],
    ['peg-unplug-side-v2', SawyerPegUnplugSideV2Policy(), .1, .80],
    ['pick-out-of-hole-v1', SawyerPickOutOfHoleV1Policy(), .1, .87],
    ['pick-out-of-hole-v2', SawyerPickOutOfHoleV2Policy(), .1, .89],
    ['pick-place-v2', SawyerPickPlaceV2Policy(), .1, .83],
    ['pick-place-wall-v2', SawyerPickPlaceWallV2Policy(), .1, .83],
    ['plate-slide-back-side-v2', SawyerPlateSlideBackSideV2Policy(), .1, .95],
    ['plate-slide-back-v1', SawyerPlateSlideBackV1Policy(), .1, .95],
    ['plate-slide-back-v2', SawyerPlateSlideBackV2Policy(), .1, .94],
    ['plate-slide-side-v1', SawyerPlateSlideSideV1Policy(), .1, .76],
    ['plate-slide-side-v2', SawyerPlateSlideSideV2Policy(), .1, .78],
    ['plate-slide-v1', SawyerPlateSlideV1Policy(), .1, .97],
    ['plate-slide-v2', SawyerPlateSlideV2Policy(), .1, .97],
    ['reach-v2', SawyerReachV2Policy(), .1, .98],
    ['reach-wall-v2', SawyerReachWallV2Policy(), .1, .96],
    ['push-back-v1', SawyerPushBackV1Policy(), .1, .90],
    # NOTE(review): this row uses .0 noise inside the *noisy* table — possibly
    # deliberate (policy may not tolerate noise), but worth confirming.
    ['push-back-v2', SawyerPushBackV2Policy(), .0, .91],
    ['push-v2', SawyerPushV2Policy(), .1, .88],
    ['push-wall-v2', SawyerPushWallV2Policy(), .1, .82],
    ['shelf-place-v1', SawyerShelfPlaceV1Policy(), .1, .90],
    ['shelf-place-v2', SawyerShelfPlaceV2Policy(), .1, .89],
    ['soccer-v1', SawyerSoccerV1Policy(), .1, .91],
    ['soccer-v2', SawyerSoccerV2Policy(), .1, .81],
    ['stick-pull-v1', SawyerStickPullV1Policy(), .1, 0.81],
    ['stick-pull-v2', SawyerStickPullV2Policy(), .1, 0.81],
    ['stick-push-v1', SawyerStickPushV1Policy(), .1, 0.95],
    ['stick-push-v2', SawyerStickPushV2Policy(), .1, 0.95],
    ['sweep-into-v1', SawyerSweepIntoV1Policy(), .1, 1.],
    ['sweep-into-v2', SawyerSweepIntoV2Policy(), .1, 0.86],
    ['sweep-v1', SawyerSweepV1Policy(), .1, 1.],
    # NOTE(review): .0 noise inside the *noisy* table — confirm intended.
    ['sweep-v2', SawyerSweepV2Policy(), .0, 0.99],
    ['window-close-v2', SawyerWindowCloseV2Policy(), .1, .95],
    ['window-open-v2', SawyerWindowOpenV2Policy(), .1, .93],
]
# Combine test cases into a single array to pass to parameterized test function.
# Only the latest noisy configurations actually run (marked 'basic'); every
# other table is marked skip.
test_cases = []
for table, mark in (
        (test_cases_old_nonoise, pytest.mark.skip),
        (test_cases_old_noisy, pytest.mark.skip),
        (test_cases_latest_nonoise, pytest.mark.skip),
        (test_cases_latest_noisy, pytest.mark.basic),
):
    test_cases.extend(pytest.param(*row, marks=mark) for row in table)

ALL_ENVS = {**ALL_V1_ENVIRONMENTS, **ALL_V2_ENVIRONMENTS}
@pytest.fixture(scope='function')
def env(request):
    """Instantiate the env named by the (indirect) parameter, configured for
    fully-observable, deterministic scripted-policy evaluation."""
    instance = ALL_ENVS[request.param]()
    instance._partially_observable = False
    instance._freeze_rand_vec = False
    instance._set_task_called = True
    return instance
@pytest.mark.parametrize(
    'env,policy,act_noise_pct,expected_success_rate',
    test_cases,
    indirect=['env']
)
def test_scripted_policy(env, policy, act_noise_pct, expected_success_rate, iters=100):
    """Tests whether a given policy solves an environment in a stateless manner

    Args:
        env (metaworld.envs.MujocoEnv): Environment to test
        policy (metaworld.policies.policy.Policy): Policy that's supposed to
            succeed in env
        act_noise_pct (np.ndarray): Decimal value(s) indicating std deviation of
            the noise as a % of action space
        expected_success_rate (float): Decimal value indicating % of runs that
            must be successful
        iters (int): How many times the policy should be tested
    """
    # Scripted policies must be stateless so runs are independent.
    assert len(vars(policy)) == 0, \
        '{} has state variable(s)'.format(policy.__class__.__name__)

    successes = 0
    for _ in range(iters):
        succeeded = trajectory_summary(env, policy, act_noise_pct, render=False)[0]
        successes += float(succeeded)
        print(successes)
    assert successes >= expected_success_rate * iters
| |
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from src.python.baselines import *
from src.python.preprocess2 import *
from pymongo import MongoClient
from tqdm import tqdm
import tensorflow as tf
### Keras
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Activation
from keras.layers import Conv2D, Conv1D
from keras.layers import Dropout, BatchNormalization
from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Concatenate, Flatten, Reshape
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler
# from keras.losses import hinge, binary_crossentropy
from keras import backend as K
from sklearn.metrics import log_loss
import math
import argparse
# Single global TF session registered with Keras.
sess = tf.Session()
K.set_session(sess)

LR = 0.001  # base learning rate; step_decay() derives the schedule from it
BATCH_SIZE = 16  # reassigned in __main__ depending on --long_exposure
LONG_EXPOSURE = True  # reassigned in __main__ from --long_exposure
def step_decay(epoch):
    """Step learning-rate schedule: halve LR every `epochs_drop` epochs,
    floored at 1e-4."""
    drop = 0.5
    epochs_drop = 10.0
    decayed = LR * math.pow(drop, math.floor((1 + epoch) / epochs_drop))
    return max(0.0001, decayed)
def batch_generator(data, onto, classes, batch_size=BATCH_SIZE, shuffle=True):
    """Yield (ids, (X, Y)) batches from `data` (an indexable of
    (id, sequence, labels) triples).

    NOTE(review): `batch_size` defaults to the value BATCH_SIZE had at import
    time; the later module-level reassignment in __main__ does not change this
    default — confirm that is intended.
    """
    s_cls = set(classes)

    def labels2vec(lbl):
        # Multi-hot target over `classes`, after propagating annotations up
        # the ontology (root excluded).
        y = np.zeros(len(classes))
        for go in onto.propagate(lbl, include_root=False):
            if go not in s_cls:
                continue
            y[classes.index(go)] = 1
        return y

    def pad_seq(seq, max_length=MAX_LENGTH):
        # Center the encoded sequence, padding both sides with PAD tokens.
        delta = max_length - len(seq)
        left = [PAD for _ in range(delta // 2)]
        right = [PAD for _ in range(delta - delta // 2)]
        seq = left + [AA.aa2index[aa] for aa in seq] + right
        return np.asarray(seq)

    def prepare_batch(sequences, labels):
        # b = max(max(map(len, sequences)), 100)
        b = MAX_LENGTH
        Y = np.asarray([labels2vec(lbl) for lbl in labels])
        X = np.asarray([pad_seq(seq, b) for seq in sequences])
        return X, Y

    # Pop shuffled batch start offsets until exhausted.
    indices = list(range(0, len(data), batch_size))
    if shuffle: np.random.shuffle(indices)
    while indices:
        ix = indices.pop()
        batch = data[ix: min(ix + batch_size, len(data))]
        ids, seqs, lbls = zip(*batch)
        yield ids, prepare_batch(seqs, lbls)
class LossHistory(Callback):
    """Keras callback keeping a sliding window (~500) of recent batch losses.

    `train` reads `self.losses` to display a smoothed running loss mean.
    """

    def __init__(self):
        self.losses = []  # most recent batch losses, oldest first

    def on_batch_end(self, batch, logs=None):
        # Fixed the mutable default argument (`logs={}`); Keras always passes
        # a dict, so fall back to an empty one only when called bare.
        logs = logs or {}
        while len(self.losses) > 500:
            self.losses.pop(0)
        self.losses.append(logs.get('loss'))
def train(model, gen_xy, length_xy, epoch, num_epochs,
          history=LossHistory(), lrate=LearningRateScheduler(step_decay)):
    """Run one pass of `model.fit` over every batch from `gen_xy`.

    NOTE(review): `history` and `lrate` are default arguments, evaluated once
    at definition time, so the same instances are shared by every call — this
    keeps the loss display smooth across epochs, but it is the mutable-default
    pattern; confirm it is intentional.
    """
    pbar = tqdm(total=length_xy)
    for _, (X, Y) in gen_xy:
        model.fit(x=X, y=Y,
                  batch_size=BATCH_SIZE,
                  # LONG_EXPOSURE fits each batch for all remaining epochs at
                  # once; otherwise a single epoch step per batch.
                  epochs=max(num_epochs, epoch + 1) if LONG_EXPOSURE else epoch + 1,
                  verbose=0,
                  validation_data=None,
                  initial_epoch=epoch,
                  callbacks=[history, lrate])
        pbar.set_description("Training Loss:%.5f" % np.mean(history.losses))
        pbar.update(len(Y))
    pbar.close()
def zeroone2oneminusone(vec):
    """Map {0, 1} labels to {-1, 1} (elementwise 2*x - 1)."""
    return np.array(vec) * 2 - 1
def oneminusone2zeroone(vec):
    """Map {-1, 1} labels back to {0, 1} (elementwise (x + 1) / 2)."""
    return (np.array(vec) + 1) / 2
def calc_loss(y_true, y_pred):
    """Mean log-loss over rows that have at least one positive label."""
    row_losses = [log_loss(actual, predicted)
                  for actual, predicted in zip(y_true, y_pred)
                  if np.any(actual)]
    return np.mean(row_losses)
def predict(model, gen_xy, length_xy, classes):
    """Run `model.predict` over every batch from `gen_xy`.

    Returns (ids, y_true, y_pred) where the arrays are (length_xy, n_classes);
    rows beyond the number of yielded samples remain zero.
    """
    pbar = tqdm(total=length_xy, desc="Predicting...")
    m, n = length_xy, len(classes)
    ids = list()
    y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
    # BUG FIX: the original indexed rows with the enumerate() batch counter
    # (`y_pred[i:i + k]`), which advances by 1 per batch while each batch
    # fills k rows — consecutive batches overwrote each other whenever the
    # batch size was > 1. Track a running row offset instead.
    offset = 0
    for keys, (X, Y) in gen_xy:
        k = len(Y)
        ids.extend(keys)
        y_hat, y = model.predict(X), Y
        y_pred[offset:offset + k, :] = y_hat
        y_true[offset:offset + k, :] = y
        offset += k
        pbar.update(k)
    pbar.close()
    return ids, y_true, y_pred
def evaluate(y_true, y_pred, classes):
    """Drop all-zero rows, then return (loss, precisions, recalls, f1s)."""
    kept_pred = y_pred[~np.all(y_pred == 0, axis=1)]
    kept_true = y_true[~np.all(y_true == 0, axis=1)]
    prs, rcs, f1s = performance(kept_pred, kept_true, classes)
    return calc_loss(kept_true, kept_pred), prs, rcs, f1s
def Classifier(inp1d, classes):
    """Dense -> BatchNorm -> sigmoid output head, one unit per class."""
    logits = Dense(len(classes))(inp1d)
    normed = BatchNormalization()(logits)
    return Activation('sigmoid')(normed)
def Inception(inpt, tower1=6, tower2=10, tower3=20):
    """Inception-style 1D block: a 1x1 tower plus two 1x1 -> wider-kernel
    towers (kernel sizes `tower1` and `tower2`), concatenated on channels.

    NOTE(review): `tower3` is accepted but never used — no third wide tower
    is built. Kept for signature compatibility; confirm whether it should
    drive an additional tower.
    """
    tower_0 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
    tower_1 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
    tower_1 = Conv1D(64, tower1, padding='same', activation='relu')(tower_1)
    tower_2 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
    tower_2 = Conv1D(64, tower2, padding='same', activation='relu')(tower_2)
    return Concatenate(axis=2)([tower_0, tower_1, tower_2])
def ResInception(inpt0, inpt1, tower1=6, tower2=10):
    """Concatenate Inception features computed over two input tensors."""
    return Concatenate(axis=2)([Inception(inpt0, tower1, tower2),
                                Inception(inpt1, tower1, tower2)])
def Features(inpt):
    """Four valid-padded Conv1D(kernel=15)+Dropout stages (250-100-100-250 filters)."""
    feats = inpt
    for n_filters in (250, 100, 100, 250):
        feats = Conv1D(n_filters, 15, activation='relu', padding='valid')(feats)
        feats = Dropout(0.3)(feats)
    return feats
def ProteinCeption(classes, opt):
    """AA embedding -> three stacked Inception blocks -> global max pool -> sigmoid head."""
    inp = Input(shape=(None,))
    emb = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inp)
    stacked = emb
    for _ in range(3):
        stacked = Inception(stacked)
    out = Classifier(GlobalMaxPooling1D()(stacked), classes)
    model = Model(inputs=[inp], outputs=[out])
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
def DeeperSeq(classes, opt):
    """AA embedding -> Features conv stack -> global max pool -> sigmoid head."""
    inp = Input(shape=(None,))
    emb = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inp)
    pooled = GlobalMaxPooling1D()(Features(emb))
    model = Model(inputs=[inp], outputs=[Classifier(pooled, classes)])
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
def MotifNet(classes, opt):
    """Two-branch model (each with its own embedding): stacked-Inception branch
    plus Features conv branch, concatenated into one sigmoid head."""
    inp = Input(shape=(None,))

    emb1 = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inp)
    branch = emb1
    for _ in range(3):
        branch = Inception(branch)
    inception = GlobalMaxPooling1D()(branch)

    emb2 = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inp)
    deeperseq = GlobalMaxPooling1D()(Features(emb2))

    out = Classifier(Concatenate()([inception, deeperseq]), classes)
    model = Model(inputs=[inp], outputs=[out])
    model.compile(loss='binary_crossentropy', optimizer=opt)
    return model
def add_arguments(parser):
    """Register all CLI options on `parser` (mutated in place).

    Fixed: the original had a stray trailing comma after the first
    add_argument call, turning the first two calls into a tuple expression
    (harmless at runtime, but misleading).
    """
    parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
                        help="Supply the URL of MongoDB")
    parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'],
                        default="F", help="Specify the ontology aspect.")
    parser.add_argument("--init_epoch", type=int, default=0,
                        help="Which epoch to start training the model?")
    parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument("-e", "--eval_every", type=int, default=10,
                        help="How often to evaluate on the validation set.")
    parser.add_argument("--num_epochs", type=int, default=200,
                        help="How many epochs to train the model?")
    parser.add_argument('--long_exposure', action='store_true', default=False,
                        help="Train in LONG_EXPOSURE mode?")
    parser.add_argument("--arch", type=str, choices=['deepseq', 'inception', 'motifnet'],
                        default="inception", help="Specify the model arch.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args()
    asp = args.aspect  # default: Molecular Function

    client = MongoClient(args.mongo_url)
    db = client['prot2vec']

    print("Loading Ontology...")
    onto = get_ontology(asp)
    classes = onto.classes
    classes.remove(onto.root)  # the root term is never a prediction target
    assert onto.root not in classes

    opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8)

    if args.arch == "motifnet":
        model = MotifNet(classes, opt)
    elif args.arch == "inception":
        model = ProteinCeption(classes, opt)
    elif args.arch == "deepseq":
        model = DeeperSeq(classes, opt)
    else:
        # Unreachable in practice: argparse `choices` already restricts --arch.
        print("Unknown arch")
        exit(0)

    # NOTE(review): reassigning BATCH_SIZE here changes the global used by
    # train()/model.fit, but batch_generator's default batch_size was bound
    # to the import-time value (16) — confirm the mismatch is intended.
    if args.long_exposure:
        num_epochs = args.num_epochs // 10
        LONG_EXPOSURE = True
        BATCH_SIZE = 32
    else:
        num_epochs = args.num_epochs
        LONG_EXPOSURE = False
        BATCH_SIZE = 16

    if args.resume:
        model.load_weights(args.resume)
        print("Loaded model from disk")
    model.summary()

    print("Indexing Data...")
    # NOTE(review): t0/t1 are not defined anywhere in this file — presumably
    # supplied by one of the star imports at the top; verify they exist.
    trn_stream, tst_stream = get_training_and_validation_streams(db, t0, t1, asp)
    print("Loading Data...")
    trn_data = load_data(trn_stream)
    tst_data = load_data(tst_stream)

    for epoch in range(args.init_epoch, num_epochs):
        train(model, batch_generator(trn_data, onto, classes), len(trn_data), epoch, num_epochs)
        # Evaluate only every eval_every epochs, plus always on the last epoch.
        if epoch < num_epochs-1 and epoch % args.eval_every != 0:
            continue
        _, y_true, y_pred = predict(model, batch_generator(tst_data, onto, classes), len(tst_data), classes)
        loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes)
        i = np.argmax(f1s)
        f_max = f1s[i]
        print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)"
              % (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i]))
        # Only checkpoint models that clear a minimum F_max bar.
        if f_max < 0.4: continue
        model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max)
        model.save_weights("checkpoints/%s.hdf5" % model_str)
        with open("checkpoints/%s.json" % model_str, "w+") as f:
            f.write(model.to_json())
| |
"""
Unit tests for the param.Time class, time dependent parameters and
time-dependent numbergenerators.
"""
import unittest
import param
import numbergen
import copy
from nose.plugins.skip import SkipTest
import fractions
try:
import gmpy
except ImportError:
import os
if os.getenv('PARAM_TEST_GMPY','0') == '1':
raise ImportError("PARAM_TEST_GMPY=1 but gmpy not available.")
else:
gmpy = None
class TestTimeClass(unittest.TestCase):
    """Tests of param.Time's callable/iterator/context-manager behaviour."""

    def test_time_init(self):
        param.Time()

    def test_time_init_int(self):
        t = param.Time(time_type=int)
        self.assertEqual(t(), 0)

    def test_time_int_iter(self):
        t = param.Time(time_type=int)
        self.assertEqual(next(t), 0)
        self.assertEqual(next(t), 1)

    def test_time_init_timestep(self):
        t = param.Time(time_type=int, timestep=2)
        self.assertEqual(next(t), 0)
        self.assertEqual(next(t), 2)

    def test_time_int_until(self):
        t = param.Time(time_type=int, until=3)
        self.assertEqual(next(t), 0)
        self.assertEqual(next(t), 1)
        self.assertEqual(next(t), 2)
        self.assertEqual(next(t), 3)
        try:
            # Advancing past `until` must raise StopIteration.
            self.assertEqual(next(t), 4)
            raise AssertionError("StopIteration should have been raised")
        except StopIteration:
            pass

    def test_time_int_eq(self):
        t = param.Time(time_type=int)
        s = param.Time(time_type=int)
        t(3); s(3)
        self.assertEqual(t == s, True)

    def test_time_int_context(self):
        # Entering the context snapshots the time; exiting restores it.
        t = param.Time(time_type=int)
        t(3)
        with t:
            self.assertEqual(t(), 3)
            t(5)
            self.assertEqual(t(), 5)
        self.assertEqual(t(), 3)

    def test_time_int_context_iadd(self):
        with param.Time(time_type=int) as t:
            self.assertEqual(t(), 0)
            t += 5
            self.assertEqual(t(), 5)
        self.assertEqual(t(), 0)

    def test_time_int_change_type(self):
        # Calling with a second argument switches the time_type on the fly.
        t = param.Time(time_type=int)
        self.assertEqual(t(), 0)
        t(1, fractions.Fraction)
        self.assertEqual(t(), 1)
        self.assertEqual(t.time_type, fractions.Fraction)

    def test_time_init_gmpy(self):
        if gmpy is None: raise SkipTest
        t = param.Time(time_type=gmpy.mpq)
        self.assertEqual(t(), gmpy.mpq(0))
        t.advance(gmpy.mpq(0.25))
        self.assertEqual(t(), gmpy.mpq(1,4))

    def test_time_init_gmpy_advanced(self):
        if gmpy is None: raise SkipTest
        t = param.Time(time_type=gmpy.mpq,
                       timestep=gmpy.mpq(0.25),
                       until=1.5)
        self.assertEqual(t(), gmpy.mpq(0,1))
        t(0.5)
        self.assertEqual(t(), gmpy.mpq(1,2))
        with t:
            t.advance(0.25)
            self.assertEqual(t(), gmpy.mpq(3,4))
        self.assertEqual(t(), gmpy.mpq(1,2))
        # Iterate from 1/2 in steps of 1/4 up to (and including) until=3/2.
        tvals = [tval for tval in t]
        self.assertEqual(tvals, [gmpy.mpq(1,2),
                                 gmpy.mpq(3,4),
                                 gmpy.mpq(1,1),
                                 gmpy.mpq(5,4),
                                 gmpy.mpq(3,2)])
class TestTimeDependentDynamic(unittest.TestCase):
    """Tests of param.Dynamic parameters and numbergen generators under the
    time_dependent / time_fn machinery. Mutates global param/numbergen state,
    which setUp snapshots and tearDown restores."""

    def setUp(self):
        param.Dynamic.time_dependent=None
        self.time_fn= param.Time(time_type=int)

        class Incrementer(object):
            # Stateful callable returning 0, 1, 2, ... on successive calls.
            def __init__(self):
                self.i = -1
            def __call__(self):
                self.i+=1
                return self.i

        self.Incrementer = Incrementer

        class DynamicClass(param.Parameterized):
            a = param.Number(default = self.Incrementer())

        self.DynamicClass = DynamicClass

        # Snapshot the global state touched by these tests for tearDown.
        self._start_state = copy.copy([param.Dynamic.time_dependent,
                                       numbergen.TimeAware.time_dependent,
                                       param.Dynamic.time_fn,
                                       numbergen.TimeAware.time_fn,
                                       param.random_seed])

    def tearDown(self):
        param.Dynamic.time_dependent = self._start_state[0]
        numbergen.TimeAware.time_dependent = self._start_state[1]
        param.Dynamic.time_fn = self._start_state[2]
        numbergen.TimeAware.time_fn = self._start_state[3]
        param.random_seed = self._start_state[4]

    def test_non_time_dependent(self):
        """
        With param.Dynamic.time_dependent=None every call should
        increment.
        """
        param.Dynamic.time_dependent=None
        param.Dynamic.time_fn = self.time_fn
        dynamic = self.DynamicClass()
        self.assertEqual(dynamic.a, 0)
        self.assertEqual(dynamic.a, 1)
        self.assertEqual(dynamic.a, 2)

    def test_time_fixed(self):
        """
        With param.Dynamic.time_dependent=True the value should only
        increment when the time value changes.
        """
        param.Dynamic.time_dependent=True
        param.Dynamic.time_fn = self.time_fn
        dynamic = self.DynamicClass()
        self.assertEqual(dynamic.a, 0)
        self.assertEqual(dynamic.a, 0)

        self.time_fn += 1
        self.assertEqual(dynamic.a, 1)
        self.assertEqual(dynamic.a, 1)
        # Any change of time — even backwards — triggers exactly one new draw.
        # (param.Dynamic.time_fn is the same Time object as self.time_fn here.)
        param.Dynamic.time_fn -= 5
        self.assertEqual(dynamic.a, 2)
        self.assertEqual(dynamic.a, 2)

    def test_time_dependent(self):
        """
        With param.Dynamic.time_dependent=True and param.Dynamic and
        numbergen.TimeDependent sharing a common time_fn, the value
        should be a function of time.
        """
        param.Dynamic.time_dependent=True
        param.Dynamic.time_fn = self.time_fn
        numbergen.TimeDependent.time_fn = self.time_fn

        class DynamicClass(param.Parameterized):
            b = param.Number(default = numbergen.ScaledTime(factor=2))

        dynamic = DynamicClass()
        self.time_fn(0)
        self.assertEqual(dynamic.b, 0.0)
        self.time_fn += 5
        self.assertEqual(dynamic.b, 10.0)
        self.assertEqual(dynamic.b, 10.0)
        self.time_fn -= 2
        self.assertEqual(dynamic.b, 6.0)
        self.assertEqual(dynamic.b, 6.0)
        self.time_fn -= 3
        self.assertEqual(dynamic.b, 0.0)

    def test_time_dependent_random(self):
        """
        When set to time_dependent=True, random number generators
        should also be a function of time.
        """
        param.Dynamic.time_dependent=True
        numbergen.TimeAware.time_dependent=True
        param.Dynamic.time_fn = self.time_fn
        numbergen.TimeAware.time_fn = self.time_fn
        param.random_seed = 42

        class DynamicClass(param.Parameterized):
            c = param.Number(default = numbergen.UniformRandom(name = 'test1'))
            d = param.Number(default = numbergen.UniformRandom(name = 'test2'))
            e = param.Number(default = numbergen.UniformRandom(name = 'test1'))

        dynamic = DynamicClass()
        # Expected draws for (name, seed, time); same name + time => same value.
        test1_t1 = 0.23589388250988552
        test2_t1 = 0.12576257837158122
        test1_t2 = 0.14117586161849593
        test2_t2 = 0.9134917395930359

        self.time_fn(0)
        self.assertEqual(dynamic.c, test1_t1)
        self.assertEqual(dynamic.c, dynamic.e)
        self.assertNotEqual(dynamic.c, dynamic.d)
        self.assertEqual(dynamic.d, test2_t1)
        self.time_fn(1)
        self.assertEqual(dynamic.c, test1_t2)
        self.assertEqual(dynamic.c, test1_t2)
        self.assertEqual(dynamic.d, test2_t2)
        # Rewinding time reproduces the earlier draws exactly.
        self.time_fn(0)
        self.assertEqual(dynamic.c, test1_t1)
        self.assertEqual(dynamic.d, test2_t1)

    def test_time_hashing_integers(self):
        """
        Check that ints, fractions and strings hash to the same value
        for integer values.
        """
        hashfn = numbergen.Hash("test", input_count=1)
        hash_1 = hashfn(1)
        hash_42 = hashfn(42)
        hash_200001 = hashfn(200001)

        self.assertEqual(hash_1, hashfn(fractions.Fraction(1)))
        self.assertEqual(hash_1, hashfn("1"))
        self.assertEqual(hash_42, hashfn(fractions.Fraction(42)))
        self.assertEqual(hash_42, hashfn("42"))
        self.assertEqual(hash_200001, hashfn(fractions.Fraction(200001)))
        self.assertEqual(hash_200001, hashfn("200001"))

    def test_time_hashing_rationals(self):
        """
        Check that hashes fractions and strings match for some
        reasonable rational numbers.
        """
        hashfn = numbergen.Hash("test", input_count=1)
        pi = "3.141592"
        half = fractions.Fraction(0.5)
        self.assertEqual(hashfn(0.5), hashfn(half))
        self.assertEqual(hashfn(pi), hashfn(fractions.Fraction(pi)))

    def test_time_hashing_integers_gmpy(self):
        """
        Check that hashes for gmpy values at the integers also matches
        those of ints, fractions and strings.
        """
        if gmpy is None: raise SkipTest
        hashfn = numbergen.Hash("test", input_count=1)
        hash_1 = hashfn(1)
        hash_42 = hashfn(42)

        self.assertEqual(hash_1, hashfn(gmpy.mpq(1)))
        self.assertEqual(hash_1, hashfn(1))
        self.assertEqual(hash_42, hashfn(gmpy.mpq(42)))
        self.assertEqual(hash_42, hashfn(42))

    def test_time_hashing_rationals_gmpy(self):
        """
        Check that hashes of fractions and gmpy mpqs match for some
        reasonable rational numbers.
        """
        if gmpy is None: raise SkipTest
        pi = "3.141592"
        hashfn = numbergen.Hash("test", input_count=1)
        self.assertEqual(hashfn(0.5), hashfn(gmpy.mpq(0.5)))
        self.assertEqual(hashfn(pi), hashfn(gmpy.mpq(3.141592)))
# Allow running this test module directly (nose discovers and runs the tests).
if __name__ == "__main__":
    import nose
    nose.runmodule()
| |
#!/usr/bin/env python
__author__ = 'Thomas R. Lennan, Stephen Henrie, Michael Meisinger'
from uuid import uuid4
import bcrypt
#from pyon.core.security.authentication import Authentication
from pyon.public import log, RT, OT, Inconsistent, NotFound, BadRequest, get_ion_ts_millis, get_ion_ts, Unauthorized
from interface.objects import SecurityToken, TokenTypeEnum, Credentials, AuthStatusEnum
from interface.services.core.iidentity_management_service import BaseIdentityManagementService
# Upper bound for token validity, in seconds (one year).
MAX_TOKEN_VALIDITY = 365*24*60*60
class IdentityManagementService(BaseIdentityManagementService):
    """
    Stores identities of users and resources, including bindings of internal
    identities to external identities. Also stores metadata such as a user profile.
    """

    def on_init(self):
        # Shortcut to the resource registry client.
        self.rr = self.clients.resource_registry
        #self.authentication = Authentication()

    # -------------------------------------------------------------------------
    # Actor identity CRUD

    def create_actor_identity(self, actor_identity=None):
        """Persists a new ActorIdentity resource and returns its id.

        Credentials must not be present at creation time. Details of the
        generic IdentityDetails base type and any password reset token are
        cleared before the resource is stored.
        """
        self._validate_resource_obj("actor_identity", actor_identity, RT.ActorIdentity, checks="noid,name")
        if actor_identity.credentials:
            raise BadRequest("Cannot create actor with credentials")
        if actor_identity.details and actor_identity.details.type_ == OT.IdentityDetails:
            # Drop placeholder details of the generic base type
            actor_identity.details = None
        actor_identity.passwd_reset_token = None

        actor_id, _ = self.rr.create(actor_identity)
        return actor_id

    def update_actor_identity(self, actor_identity=None):
        """Updates an existing ActorIdentity, keeping its stored credentials."""
        old_actor = self._validate_resource_obj("actor_identity", actor_identity, RT.ActorIdentity, checks="id,name")

        # Prevent security risk because contained credentials may be manipulated
        actor_identity.credentials = old_actor.credentials

        self.rr.update(actor_identity)

    def read_actor_identity(self, actor_id=''):
        """Returns the ActorIdentity object for the given resource id."""
        actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        return actor_obj

    def delete_actor_identity(self, actor_id=''):
        """Deletes the ActorIdentity resource with the given id."""
        self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        self.rr.delete(actor_id)

    def find_actor_identity_by_name(self, name=''):
        """Return the ActorIdentity object whose name attribute matches the passed value.

        Raises NotFound if no match exists and Inconsistent on multiple matches.
        """
        objects, _ = self.rr.find_resources(RT.ActorIdentity, None, name, id_only=False)
        if not objects:
            raise NotFound("ActorIdentity with name %s does not exist" % name)
        if len(objects) > 1:
            raise Inconsistent("Multiple ActorIdentity objects with name %s exist" % name)
        return objects[0]

    # -------------------------------------------------------------------------
    # Credentials handling

    def register_credentials(self, actor_id='', credentials=None):
        """Attaches a Credentials object to an actor, indexing its username."""
        actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        self._validate_arg_obj("credentials", credentials, OT.Credentials)

        actor_obj.credentials.append(credentials)
        if credentials.username:
            # alt_id enables fast username -> actor lookup
            actor_obj.alt_ids.append("UNAME:" + credentials.username)

        # Lower level RR call to avoid credentials clearing
        self.rr.update(actor_obj)

    def unregister_credentials(self, actor_id='', credentials_name=''):
        """Removes the credentials entry with the given username from an actor."""
        actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        if not credentials_name:
            raise BadRequest("Invalid credentials_name")
        found_cred = -1
        for i, cred in enumerate(actor_obj.credentials):
            if cred.username == credentials_name:
                found_cred = i
                break
        if found_cred != -1:
            del actor_obj.credentials[found_cred]
        else:
            raise NotFound("Credentials not found")

        # Guard the alt_id removal: a missing entry would otherwise raise a
        # bare ValueError after the credentials were already removed.
        alt_id = "UNAME:" + credentials_name
        if alt_id in actor_obj.alt_ids:
            actor_obj.alt_ids.remove(alt_id)

        # Lower level RR call to avoid credentials clearing
        self.rr.update(actor_obj)

    def find_actor_identity_by_username(self, username=''):
        """Returns the actor id registered for the given username."""
        if not username:
            raise BadRequest("Invalid username")
        res_ids, _ = self.rr.find_resources_ext(alt_id_ns="UNAME", alt_id=username, id_only=True)
        if not res_ids:
            raise NotFound("No actor found with username")
        return res_ids[0]

    def set_actor_credentials(self, actor_id='', username='', password=''):
        """Sets (or creates) username/password credentials on an actor."""
        if not username:
            raise BadRequest("Invalid username")
        IdentityUtils.check_password_policy(password)
        actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        cred_obj = None
        for cred in actor_obj.credentials:
            if cred.username == username:
                cred_obj = cred
                break
        if not cred_obj:
            # First credentials for this username: create and index it
            cred_obj = Credentials()
            cred_obj.username = username
            actor_obj.credentials.append(cred_obj)
            actor_obj.alt_ids.append("UNAME:" + username)

        self._generate_password_hash(cred_obj, password)

        # Lower level RR call to avoid credentials clearing
        self.rr.update(actor_obj)

    def set_user_password(self, username='', password=''):
        """Sets the password for an existing username."""
        if not username:
            raise BadRequest("Invalid username")
        IdentityUtils.check_password_policy(password)
        actor_id = self.find_actor_identity_by_username(username)
        actor_obj = self.read_actor_identity(actor_id)

        cred_obj = None
        for cred in actor_obj.credentials:
            if cred.username == username:
                cred_obj = cred
                break
        # If no matching credentials exist, _generate_password_hash raises
        # BadRequest("Invalid cred_obj") below.
        self._generate_password_hash(cred_obj, password)

        # Lower level RR call to avoid credentials clearing
        self.rr.update(actor_obj)

    def _generate_password_hash(self, cred_obj, password):
        """Fills salt/hash fields of cred_obj from the given plaintext password."""
        if not cred_obj or cred_obj.type_ != OT.Credentials:
            raise BadRequest("Invalid cred_obj")
        cred_obj.identity_provider = "SciON"
        cred_obj.authentication_service = "SciON IdM"
        cred_obj.password_salt = bcrypt.gensalt()
        cred_obj.password_hash = bcrypt.hashpw(password, cred_obj.password_salt)

    def check_actor_credentials(self, username='', password=''):
        """Verifies username/password and returns the actor id on success.

        Always persists updated authentication bookkeeping (success/fail
        counters, timestamps); locks the identity after too many failures.
        """
        if not username:
            raise BadRequest("Invalid argument username")
        if not password:
            raise BadRequest("Invalid argument password")

        actor_id = self.find_actor_identity_by_username(username)
        actor_obj = self.read_actor_identity(actor_id)
        try:
            if actor_obj.auth_status != AuthStatusEnum.ENABLED:
                raise NotFound("identity not enabled")
            cred_obj = None
            for cred in actor_obj.credentials:
                if cred.username == username:
                    cred_obj = cred
                    break
            if cred_obj is None:
                # Actor was resolved via alt_id but carries no matching
                # credentials entry - fail like a bad password instead of
                # crashing with AttributeError below.
                raise NotFound("Invalid password")

            # NOTE(security): plain string comparison of hashes is not
            # constant-time; consider hmac.compare_digest or bcrypt's own
            # check to harden against timing attacks.
            if bcrypt.hashpw(password, cred_obj.password_salt) != cred_obj.password_hash:
                # Failed login
                if password:  # Only record fail if password is non-empty and wrong
                    actor_obj.auth_fail_count += 1
                    actor_obj.auth_ts_last_fail = get_ion_ts()
                    max_fail_cnt = IdentityUtils.get_auth_fail_lock_count()
                    if actor_obj.auth_fail_count > max_fail_cnt:
                        actor_obj.auth_status = AuthStatusEnum.LOCKED
                raise NotFound("Invalid password")

            # Success
            actor_obj.auth_count += 1
            actor_obj.auth_fail_count = 0
            actor_obj.auth_ts_last = get_ion_ts()
            return actor_obj._id
        finally:
            # Lower level RR call to avoid credentials clearing
            self.rr.update(actor_obj)

    def set_actor_auth_status(self, actor_id='', status=None):
        """Sets the actor's auth status, returning the previous status.

        Re-enabling an identity resets the failed-login counter.
        """
        actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        if not status:
            raise BadRequest("Invalid argument status")
        prev_status = actor_obj.auth_status
        actor_obj.auth_status = status
        if status == AuthStatusEnum.ENABLED:
            actor_obj.auth_fail_count = 0

        # Lower level RR call to avoid credentials clearing
        self.rr.update(actor_obj)

        return prev_status

    def request_password_reset(self, username=''):
        """Creates a password reset token for the user and returns its string."""
        actor_id = self.find_actor_identity_by_username(username)
        actor = self.rr.read(actor_id)
        # NOTE(review): validity is in seconds, so this token expires after
        # 10 seconds - confirm such a short lifetime is intended.
        actor.passwd_reset_token = self._create_token(actor_id=actor_id, validity=10,
                                                      token_type=TokenTypeEnum.ACTOR_RESET_PASSWD)
        self.rr.update(actor)
        return actor.passwd_reset_token.token_string

    def reset_password(self, username='', token_string='', new_password=''):
        """Sets a new password if the given reset token is valid and current."""
        actor_id = self.find_actor_identity_by_username(username)
        actor = self.rr.read(actor_id)

        if not actor.passwd_reset_token or actor.passwd_reset_token.status != 'OPEN':
            raise Unauthorized("Token status invalid")
        cur_time = get_ion_ts_millis()
        if cur_time >= int(actor.passwd_reset_token.expires):
            raise Unauthorized("Token expired")
        if actor.passwd_reset_token.token_string != token_string:
            raise Unauthorized("Password reset token_string does not match")

        # Update password
        self.set_user_password(username, new_password)

        # Invalidate token after success
        actor = self.rr.read(actor_id)  # Read again, resource was updated in between
        actor.passwd_reset_token = None
        self.rr.update(actor)

    # -------------------------------------------------------------------------
    # Identity details (user profile) handling

    def define_identity_details(self, actor_id='', identity_details=None):
        """Sets/replaces the actor's details object (type must not change)."""
        actor_obj = self._validate_resource_id("actor_id", actor_id, RT.ActorIdentity)
        if not identity_details:
            raise BadRequest("Invalid argument identity_details")
        if actor_obj.details:
            if actor_obj.details.type_ != identity_details.type_:
                raise BadRequest("Type for identity_details does not match")
        actor_obj.details = identity_details

        self.update_actor_identity(actor_obj)

    def read_identity_details(self, actor_id=''):
        """Returns the actor's details object (may be None)."""
        actor_obj = self.read_actor_identity(actor_id)

        return actor_obj.details

    # -------------------------------------------------------------------------
    # Manage tokens - authentication and others
    # TODO: Make more compliant with OAuth2, use HMAC, JWT etc

    def _create_token(self, actor_id='', start_time='', validity=0,
                      token_type=TokenTypeEnum.ACTOR_AUTH):
        """Creates and persists a token of the given type for the actor.

        start_time defaults to the current millisecond timestamp; validity is
        in seconds and bounded by MAX_TOKEN_VALIDITY. The token is stored in
        the object store under "token_<token_string>".
        """
        if not actor_id:
            raise BadRequest("Must provide argument: actor_id")
        actor_obj = self.rr.read(actor_id)
        if actor_obj.type_ != RT.ActorIdentity:
            raise BadRequest("Illegal type for argument actor_id")
        # isinstance is the idiomatic type check (also accepts subclasses)
        if not isinstance(validity, (int, long)):
            raise BadRequest("Illegal type for argument validity")
        if validity <= 0 or validity > MAX_TOKEN_VALIDITY:
            raise BadRequest("Illegal value for argument validity")
        cur_time = get_ion_ts_millis()
        if not start_time:
            start_time = cur_time
        start_time = int(start_time)
        if start_time > cur_time:
            raise BadRequest("Illegal value for start_time: Future values not allowed")
        if (start_time + 1000*validity) < cur_time:
            raise BadRequest("Illegal value for start_time: Already expired")
        expires = str(start_time + 1000*validity)

        token = self._generate_auth_token(actor_id, expires=expires, token_type=token_type)
        token_id = "token_%s" % token.token_string

        self.container.object_store.create(token, token_id)

        return token

    def _generate_auth_token(self, actor_id=None, expires="",
                             token_type=TokenTypeEnum.ACTOR_AUTH):
        """Builds an OPEN SecurityToken with a random token string."""
        token_string = uuid4().hex
        token = SecurityToken(token_type=token_type, token_string=token_string,
                              actor_id=actor_id, expires=expires, status="OPEN")
        return token

    def create_authentication_token(self, actor_id='', start_time='', validity=0):
        """Create an authentication token for provided actor id with a given start time and validity.
        start_time defaults to current time if empty and uses a system timestamp.
        validity is in seconds and must be set.
        """
        return self._create_token(actor_id, start_time, validity).token_string

    def read_authentication_token(self, token_string=''):
        """Returns the token object for given actor authentication token string.
        """
        token_id = "token_%s" % token_string
        token = self.container.object_store.read(token_id)
        if not isinstance(token, SecurityToken):
            raise Inconsistent("Token illegal type")
        return token

    def update_authentication_token(self, token=None):
        """Updates the given token.
        """
        if not isinstance(token, SecurityToken):
            raise BadRequest("Illegal argument type: token")
        if token.token_type != TokenTypeEnum.ACTOR_AUTH:
            raise BadRequest("Argument token: Illegal type")
        cur_time = get_ion_ts_millis()
        token_exp = int(token.expires)
        if token_exp > cur_time + 1000*MAX_TOKEN_VALIDITY:
            raise BadRequest("Argument token: Maximum expiry extended")

        self.container.object_store.update(token)

    def invalidate_authentication_token(self, token_string=''):
        """Invalidates an authentication token, but leaves it in place for auditing purposes.
        """
        token_id = "token_%s" % token_string
        token = self.container.object_store.read(token_id)
        if not isinstance(token, SecurityToken):
            raise Inconsistent("Token illegal type")
        if token.token_type != TokenTypeEnum.ACTOR_AUTH:
            raise BadRequest("Illegal token type")
        token.status = "INVALID"
        self.container.object_store.update(token)
        log.info("Invalidated security auth token: %s", token.token_string)

    def check_authentication_token(self, token_string=''):
        """Checks given token and returns a dict with actor id if valid.
        """
        token_id = "token_%s" % token_string
        token = self.container.object_store.read(token_id)
        if not isinstance(token, SecurityToken):
            raise Inconsistent("Token illegal type")
        if token.token_type != TokenTypeEnum.ACTOR_AUTH:
            raise BadRequest("Illegal token type")
        if token.token_string != token_string:
            raise Inconsistent("Found token's token_string does not match")
        cur_time = get_ion_ts_millis()
        if token.status != "OPEN":
            raise Unauthorized("Token status invalid")
        if cur_time >= int(token.expires):
            raise Unauthorized("Token expired")

        token_info = dict(actor_id=token.actor_id,
                          expiry=token.expires,
                          token=token,
                          token_id=token_id)
        log.info("Authentication token %s resolved to actor %s, expiry %s", token_string, token.actor_id, token.expires)
        return token_info

    def _get_actor_authentication_tokens(self, actor_id):
        # Not yet implemented: requires a token index by actor id.
        actor_tokens = []
        raise NotImplementedError("TODO")
        #return actor_tokens
class IdentityUtils(object):
    """Policy helpers for identity management (password rules, lockout)."""

    @classmethod
    def check_password_policy(cls, password, id_provider=None):
        """Checks if given password passes the established password policy for identity provider.

        Raises BadRequest on a missing/non-string or too short password.
        """
        # TODO: Make configurable
        # isinstance is the idiomatic type check (also accepts str subclasses)
        if not password or not isinstance(password, str):
            raise BadRequest("Invalid type")
        if len(password) < 3:
            raise BadRequest("Password too short")

    @classmethod
    def get_auth_fail_lock_count(cls, id_provider=None):
        """Number of consecutive failed logins after which an identity locks."""
        return 5
| |
#-------------------------------------------------------------------------------
# elftools: elf/structs.py
#
# Encapsulation of Construct structs for parsing an ELF file, adjusted for
# correct endianness and word-size.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
from ..construct import (
UBInt8, UBInt16, UBInt32, UBInt64,
ULInt8, ULInt16, ULInt32, ULInt64,
SBInt32, SLInt32, SBInt64, SLInt64,
Struct, Array, Enum, Padding, BitStruct, BitField, Value, String, CString
)
from ..common.construct_utils import ULEB128
from .enums import *
class ELFStructs(object):
    """ Accessible attributes:

            Elf_{byte|half|word|word64|addr|offset|sword|xword|xsword}:
                Data chunks, as specified by the ELF standard, adjusted for
                correct endianness and word-size.

            Elf_Ehdr:
                ELF file header

            Elf_Phdr:
                Program header

            Elf_Shdr:
                Section header

            Elf_Sym:
                Symbol table entry

            Elf_Rel, Elf_Rela:
                Entries in relocation sections
    """
    def __init__(self, little_endian=True, elfclass=32):
        # Only 32- and 64-bit ELF classes exist.
        assert elfclass == 32 or elfclass == 64
        self.little_endian = little_endian
        self.elfclass = elfclass

    def create_basic_structs(self):
        """ Create word-size related structs and ehdr struct needed for
            initial determining of ELF type.
        """
        if self.little_endian:
            self.Elf_byte = ULInt8
            self.Elf_half = ULInt16
            self.Elf_word = ULInt32
            self.Elf_word64 = ULInt64
            self.Elf_addr = ULInt32 if self.elfclass == 32 else ULInt64
            self.Elf_offset = self.Elf_addr
            self.Elf_sword = SLInt32
            self.Elf_xword = ULInt32 if self.elfclass == 32 else ULInt64
            self.Elf_sxword = SLInt32 if self.elfclass == 32 else SLInt64
        else:
            self.Elf_byte = UBInt8
            self.Elf_half = UBInt16
            self.Elf_word = UBInt32
            self.Elf_word64 = UBInt64
            self.Elf_addr = UBInt32 if self.elfclass == 32 else UBInt64
            self.Elf_offset = self.Elf_addr
            self.Elf_sword = SBInt32
            self.Elf_xword = UBInt32 if self.elfclass == 32 else UBInt64
            self.Elf_sxword = SBInt32 if self.elfclass == 32 else SBInt64
        self._create_ehdr()
        self._create_leb128()
        self._create_ntbs()

    def create_advanced_structs(self, e_type=None, e_machine=None, e_ident_osabi=None):
        """ Create all ELF structs except the ehdr. They may possibly depend
            on provided e_type and/or e_machine parsed from ehdr.
        """
        self._create_phdr(e_machine)
        self._create_shdr(e_machine)
        self._create_chdr()
        self._create_sym()
        self._create_rel()
        self._create_dyn(e_machine, e_ident_osabi)
        self._create_sunw_syminfo()
        self._create_gnu_verneed()
        self._create_gnu_verdef()
        self._create_gnu_versym()
        self._create_gnu_abi()
        self._create_note(e_type)
        self._create_stabs()
        self._create_arm_attributes()
        self._create_elf_hash()
        self._create_gnu_hash()

    #-------------------------------- PRIVATE --------------------------------#

    def _create_ehdr(self):
        """ ELF file header, including the nested e_ident identification. """
        self.Elf_Ehdr = Struct('Elf_Ehdr',
            Struct('e_ident',
                Array(4, self.Elf_byte('EI_MAG')),
                Enum(self.Elf_byte('EI_CLASS'), **ENUM_EI_CLASS),
                Enum(self.Elf_byte('EI_DATA'), **ENUM_EI_DATA),
                Enum(self.Elf_byte('EI_VERSION'), **ENUM_E_VERSION),
                Enum(self.Elf_byte('EI_OSABI'), **ENUM_EI_OSABI),
                self.Elf_byte('EI_ABIVERSION'),
                Padding(7)
            ),
            Enum(self.Elf_half('e_type'), **ENUM_E_TYPE),
            Enum(self.Elf_half('e_machine'), **ENUM_E_MACHINE),
            Enum(self.Elf_word('e_version'), **ENUM_E_VERSION),
            self.Elf_addr('e_entry'),
            self.Elf_offset('e_phoff'),
            self.Elf_offset('e_shoff'),
            self.Elf_word('e_flags'),
            self.Elf_half('e_ehsize'),
            self.Elf_half('e_phentsize'),
            self.Elf_half('e_phnum'),
            self.Elf_half('e_shentsize'),
            self.Elf_half('e_shnum'),
            self.Elf_half('e_shstrndx'),
        )

    def _create_leb128(self):
        """ Unsigned LEB128 variable-length integer. """
        self.Elf_uleb128 = ULEB128

    def _create_ntbs(self):
        """ Null-terminated byte string. """
        self.Elf_ntbs = CString

    def _create_phdr(self, e_machine=None):
        """ Program header. Field order differs between 32- and 64-bit;
            p_type values may be machine-specific.
        """
        p_type_dict = ENUM_P_TYPE_BASE
        if e_machine == 'EM_ARM':
            p_type_dict = ENUM_P_TYPE_ARM
        elif e_machine == 'EM_AARCH64':
            p_type_dict = ENUM_P_TYPE_AARCH64
        elif e_machine == 'EM_MIPS':
            p_type_dict = ENUM_P_TYPE_MIPS

        if self.elfclass == 32:
            self.Elf_Phdr = Struct('Elf_Phdr',
                Enum(self.Elf_word('p_type'), **p_type_dict),
                self.Elf_offset('p_offset'),
                self.Elf_addr('p_vaddr'),
                self.Elf_addr('p_paddr'),
                self.Elf_word('p_filesz'),
                self.Elf_word('p_memsz'),
                self.Elf_word('p_flags'),
                self.Elf_word('p_align'),
            )
        else: # 64
            self.Elf_Phdr = Struct('Elf_Phdr',
                Enum(self.Elf_word('p_type'), **p_type_dict),
                self.Elf_word('p_flags'),
                self.Elf_offset('p_offset'),
                self.Elf_addr('p_vaddr'),
                self.Elf_addr('p_paddr'),
                self.Elf_xword('p_filesz'),
                self.Elf_xword('p_memsz'),
                self.Elf_xword('p_align'),
            )

    def _create_shdr(self, e_machine=None):
        """Section header parsing.
        Depends on e_machine because of machine-specific values in sh_type.
        """
        sh_type_dict = ENUM_SH_TYPE_BASE
        if e_machine == 'EM_ARM':
            sh_type_dict = ENUM_SH_TYPE_ARM
        elif e_machine == 'EM_X86_64':
            sh_type_dict = ENUM_SH_TYPE_AMD64
        elif e_machine == 'EM_MIPS':
            sh_type_dict = ENUM_SH_TYPE_MIPS

        self.Elf_Shdr = Struct('Elf_Shdr',
            self.Elf_word('sh_name'),
            Enum(self.Elf_word('sh_type'), **sh_type_dict),
            self.Elf_xword('sh_flags'),
            self.Elf_addr('sh_addr'),
            self.Elf_offset('sh_offset'),
            self.Elf_xword('sh_size'),
            self.Elf_word('sh_link'),
            self.Elf_word('sh_info'),
            self.Elf_xword('sh_addralign'),
            self.Elf_xword('sh_entsize'),
        )

    def _create_chdr(self):
        # Structure of compressed sections header. It is documented in Oracle
        # "Linker and Libraries Guide", Part IV ELF Application Binary
        # Interface, Chapter 13 Object File Format, Section Compression:
        # https://docs.oracle.com/cd/E53394_01/html/E54813/section_compression.html
        fields = [
            Enum(self.Elf_word('ch_type'), **ENUM_ELFCOMPRESS_TYPE),
            self.Elf_xword('ch_size'),
            self.Elf_xword('ch_addralign'),
        ]
        if self.elfclass == 64:
            # 64-bit layout carries an extra reserved word after ch_type
            fields.insert(1, self.Elf_word('ch_reserved'))
        self.Elf_Chdr = Struct('Elf_Chdr', *fields)

    def _create_rel(self):
        # r_info is also taken apart into r_info_sym and r_info_type.
        # This is done in Value to avoid endianity issues while parsing.
        if self.elfclass == 32:
            r_info_sym = Value('r_info_sym',
                lambda ctx: (ctx['r_info'] >> 8) & 0xFFFFFF)
            r_info_type = Value('r_info_type',
                lambda ctx: ctx['r_info'] & 0xFF)
        else: # 64
            r_info_sym = Value('r_info_sym',
                lambda ctx: (ctx['r_info'] >> 32) & 0xFFFFFFFF)
            r_info_type = Value('r_info_type',
                lambda ctx: ctx['r_info'] & 0xFFFFFFFF)

        self.Elf_Rel = Struct('Elf_Rel',
            self.Elf_addr('r_offset'),
            self.Elf_xword('r_info'),
            r_info_sym,
            r_info_type,
        )
        # Elf_Rela additionally carries an explicit addend.
        self.Elf_Rela = Struct('Elf_Rela',
            self.Elf_addr('r_offset'),
            self.Elf_xword('r_info'),
            r_info_sym,
            r_info_type,
            self.Elf_sxword('r_addend'),
        )

    def _create_dyn(self, e_machine=None, e_ident_osabi=None):
        """ Dynamic section entry; d_tag values can be machine- or
            OSABI-specific.
        """
        d_tag_dict = dict(ENUM_D_TAG_COMMON)
        if e_machine in ENUMMAP_EXTRA_D_TAG_MACHINE:
            d_tag_dict.update(ENUMMAP_EXTRA_D_TAG_MACHINE[e_machine])
        elif e_ident_osabi == 'ELFOSABI_SOLARIS':
            d_tag_dict.update(ENUM_D_TAG_SOLARIS)

        self.Elf_Dyn = Struct('Elf_Dyn',
            Enum(self.Elf_sxword('d_tag'), **d_tag_dict),
            self.Elf_xword('d_val'),
            # d_ptr is a union alias of d_val in the ELF spec
            Value('d_ptr', lambda ctx: ctx['d_val']),
        )

    def _create_sym(self):
        # st_info is hierarchical. To access the type, use
        # container['st_info']['type']
        st_info_struct = BitStruct('st_info',
            Enum(BitField('bind', 4), **ENUM_ST_INFO_BIND),
            Enum(BitField('type', 4), **ENUM_ST_INFO_TYPE))
        # st_other is hierarchical. To access the visibility,
        # use container['st_other']['visibility']
        st_other_struct = BitStruct('st_other',
            Padding(5),
            Enum(BitField('visibility', 3), **ENUM_ST_VISIBILITY))
        # Note: field order differs between the 32- and 64-bit layouts.
        if self.elfclass == 32:
            self.Elf_Sym = Struct('Elf_Sym',
                self.Elf_word('st_name'),
                self.Elf_addr('st_value'),
                self.Elf_word('st_size'),
                st_info_struct,
                st_other_struct,
                Enum(self.Elf_half('st_shndx'), **ENUM_ST_SHNDX),
            )
        else:
            self.Elf_Sym = Struct('Elf_Sym',
                self.Elf_word('st_name'),
                st_info_struct,
                st_other_struct,
                Enum(self.Elf_half('st_shndx'), **ENUM_ST_SHNDX),
                self.Elf_addr('st_value'),
                self.Elf_xword('st_size'),
            )

    def _create_sunw_syminfo(self):
        """ Solaris (SunOS) syminfo table entry. """
        self.Elf_Sunw_Syminfo = Struct('Elf_Sunw_Syminfo',
            Enum(self.Elf_half('si_boundto'), **ENUM_SUNW_SYMINFO_BOUNDTO),
            self.Elf_half('si_flags'),
        )

    def _create_gnu_verneed(self):
        # Structure of "version needed" entries is documented in
        # Oracle "Linker and Libraries Guide", Chapter 13 Object File Format
        self.Elf_Verneed = Struct('Elf_Verneed',
            self.Elf_half('vn_version'),
            self.Elf_half('vn_cnt'),
            self.Elf_word('vn_file'),
            self.Elf_word('vn_aux'),
            self.Elf_word('vn_next'),
        )
        self.Elf_Vernaux = Struct('Elf_Vernaux',
            self.Elf_word('vna_hash'),
            self.Elf_half('vna_flags'),
            self.Elf_half('vna_other'),
            self.Elf_word('vna_name'),
            self.Elf_word('vna_next'),
        )

    def _create_gnu_verdef(self):
        # Structure of "version definition" entries are documented in
        # Oracle "Linker and Libraries Guide", Chapter 13 Object File Format
        self.Elf_Verdef = Struct('Elf_Verdef',
            self.Elf_half('vd_version'),
            self.Elf_half('vd_flags'),
            self.Elf_half('vd_ndx'),
            self.Elf_half('vd_cnt'),
            self.Elf_word('vd_hash'),
            self.Elf_word('vd_aux'),
            self.Elf_word('vd_next'),
        )
        self.Elf_Verdaux = Struct('Elf_Verdaux',
            self.Elf_word('vda_name'),
            self.Elf_word('vda_next'),
        )

    def _create_gnu_versym(self):
        # Structure of "version symbol" entries are documented in
        # Oracle "Linker and Libraries Guide", Chapter 13 Object File Format
        self.Elf_Versym = Struct('Elf_Versym',
            Enum(self.Elf_half('ndx'), **ENUM_VERSYM),
        )

    def _create_gnu_abi(self):
        # Structure of GNU ABI notes is documented in
        # https://code.woboq.org/userspace/glibc/csu/abi-note.S.html
        self.Elf_abi = Struct('Elf_abi',
            Enum(self.Elf_word('abi_os'), **ENUM_NOTE_ABI_TAG_OS),
            self.Elf_word('abi_major'),
            self.Elf_word('abi_minor'),
            self.Elf_word('abi_tiny'),
        )

    def _create_note(self, e_type=None):
        # Structure of "PT_NOTE" section
        self.Elf_Nhdr = Struct('Elf_Nhdr',
            self.Elf_word('n_namesz'),
            self.Elf_word('n_descsz'),
            # Core dumps use a distinct note-type namespace
            Enum(self.Elf_word('n_type'),
                 **(ENUM_NOTE_N_TYPE if e_type != "ET_CORE"
                    else ENUM_CORE_NOTE_N_TYPE)),
        )

        # A process psinfo structure according to
        # http://elixir.free-electrons.com/linux/v2.6.35/source/include/linux/elfcore.h#L84
        if self.elfclass == 32:
            self.Elf_Prpsinfo = Struct('Elf_Prpsinfo',
                self.Elf_byte('pr_state'),
                String('pr_sname', 1),
                self.Elf_byte('pr_zomb'),
                self.Elf_byte('pr_nice'),
                self.Elf_xword('pr_flag'),
                self.Elf_half('pr_uid'),
                self.Elf_half('pr_gid'),
                self.Elf_half('pr_pid'),
                self.Elf_half('pr_ppid'),
                self.Elf_half('pr_pgrp'),
                self.Elf_half('pr_sid'),
                String('pr_fname', 16),
                String('pr_psargs', 80),
            )
        else: # 64
            self.Elf_Prpsinfo = Struct('Elf_Prpsinfo',
                self.Elf_byte('pr_state'),
                String('pr_sname', 1),
                self.Elf_byte('pr_zomb'),
                self.Elf_byte('pr_nice'),
                Padding(4),
                self.Elf_xword('pr_flag'),
                self.Elf_word('pr_uid'),
                self.Elf_word('pr_gid'),
                self.Elf_word('pr_pid'),
                self.Elf_word('pr_ppid'),
                self.Elf_word('pr_pgrp'),
                self.Elf_word('pr_sid'),
                String('pr_fname', 16),
                String('pr_psargs', 80),
            )

        # A PT_NOTE of type NT_FILE matching the definition in
        # https://chromium.googlesource.com/
        # native_client/nacl-binutils/+/upstream/master/binutils/readelf.c
        # Line 15121
        self.Elf_Nt_File = Struct('Elf_Nt_File',
            self.Elf_xword("num_map_entries"),
            self.Elf_xword("page_size"),
            Array(lambda ctx: ctx.num_map_entries,
                  Struct('Elf_Nt_File_Entry',
                         self.Elf_addr('vm_start'),
                         self.Elf_addr('vm_end'),
                         self.Elf_offset('page_offset'))),
            Array(lambda ctx: ctx.num_map_entries,
                  CString('filename')))

    def _create_stabs(self):
        # Structure of one stabs entry, see binutils/bfd/stabs.c
        # Names taken from https://sourceware.org/gdb/current/onlinedocs/stabs.html#Overview
        self.Elf_Stabs = Struct('Elf_Stabs',
            self.Elf_word('n_strx'),
            self.Elf_byte('n_type'),
            self.Elf_byte('n_other'),
            self.Elf_half('n_desc'),
            self.Elf_word('n_value'),
        )

    def _create_arm_attributes(self):
        # Structure of a build attributes subsection header. A subsection is
        # either public to all tools that process the ELF file or private to
        # the vendor's tools.
        self.Elf_Attr_Subsection_Header = Struct('Elf_Attr_Subsection',
                                                 self.Elf_word('length'),
                                                 self.Elf_ntbs('vendor_name',
                                                               encoding='utf-8')
                                                 )

        # Structure of a build attribute tag.
        self.Elf_Attribute_Tag = Struct('Elf_Attribute_Tag',
                                        Enum(self.Elf_uleb128('tag'),
                                             **ENUM_ATTR_TAG_ARM)
                                        )

    def _create_elf_hash(self):
        # Structure of the old SYSV-style hash table header. It is documented
        # in the Oracle "Linker and Libraries Guide", Part IV ELF Application
        # Binary Interface, Chapter 14 Object File Format, Section Hash Table
        # Section:
        # https://docs.oracle.com/cd/E53394_01/html/E54813/chapter6-48031.html
        self.Elf_Hash = Struct('Elf_Hash',
                               self.Elf_word('nbuckets'),
                               self.Elf_word('nchains'),
                               Array(lambda ctx: ctx['nbuckets'], self.Elf_word('buckets')),
                               Array(lambda ctx: ctx['nchains'], self.Elf_word('chains')))

    def _create_gnu_hash(self):
        # Structure of the GNU-style hash table header. Documentation for this
        # table is mostly in the GLIBC source code, a good explanation of the
        # format can be found in this blog post:
        # https://flapenguin.me/2017/05/10/elf-lookup-dt-gnu-hash/
        self.Gnu_Hash = Struct('Gnu_Hash',
                               self.Elf_word('nbuckets'),
                               self.Elf_word('symoffset'),
                               self.Elf_word('bloom_size'),
                               self.Elf_word('bloom_shift'),
                               Array(lambda ctx: ctx['bloom_size'], self.Elf_xword('bloom')),
                               Array(lambda ctx: ctx['nbuckets'], self.Elf_word('buckets')))
| |
# Copyright 2014 eBay Inc.
#
# Author: Ron Rickard <rrickard@ebaysf.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from contextlib import contextmanager
from decimal import Decimal
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_log import log as logging
from oslo_concurrency import lockutils
from designate import backend
from designate import exceptions
from designate import objects
from designate import utils
from designate.central import rpcapi as central_api
from designate.mdns import rpcapi as mdns_api
from designate import service
from designate.context import DesignateContext
from designate.i18n import _LE
from designate.i18n import _LI
from designate.i18n import _LW
from designate.pool_manager import cache
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Per-domain status values handled by the pool manager.
SUCCESS_STATUS = 'SUCCESS'
ERROR_STATUS = 'ERROR'
NO_DOMAIN_STATUS = 'NO_DOMAIN'

# Action names used when recovering/applying domain operations.
CREATE_ACTION = 'CREATE'
DELETE_ACTION = 'DELETE'
UPDATE_ACTION = 'UPDATE'

# NOTE(review): presumably the 100% cap for the consensus threshold
# calculations - confirm at the call sites.
MAXIMUM_THRESHOLD = 100
@contextmanager
def wrap_backend_call():
    """
    Wraps backend calls, ensuring any exception raised is a Backend exception.
    """
    try:
        yield
    except exceptions.Backend:
        # Already the right type; propagate unchanged.
        raise
    except Exception as e:
        # %r keeps the original exception type visible in the message.
        raise exceptions.Backend('Unknown backend failure: %r' % e)
class Service(service.RPCService, service.Service):
"""
Service side of the Pool Manager RPC API.
API version history:
1.0 - Initial version
"""
RPC_API_VERSION = '1.0'
target = messaging.Target(version=RPC_API_VERSION)
    def __init__(self, threads=None):
        """Builds the pool object, cache connection, poll settings and backends."""
        super(Service, self).__init__(threads=threads)

        # Build the Pool (and related) Object from Config
        self.pool = objects.Pool.from_config(CONF)

        # Get a pool manager cache connection.
        self.cache = cache.get_pool_manager_cache(
            CONF['service:pool_manager'].cache_driver)

        # Store some settings for quick access later
        self.threshold = CONF['service:pool_manager'].threshold_percentage
        self.timeout = CONF['service:pool_manager'].poll_timeout
        self.retry_interval = CONF['service:pool_manager'].poll_retry_interval
        self.max_retries = CONF['service:pool_manager'].poll_max_retries
        self.delay = CONF['service:pool_manager'].poll_delay

        # Create the necessary Backend instances for each target
        self._setup_target_backends()
def _setup_target_backends(self):
self.target_backends = {}
for target in self.pool.targets:
# Fetch an instance of the Backend class, passing in the options
# and masters
self.target_backends[target.id] = backend.get_backend(
target.type, target)
LOG.info(_LI('%d targets setup'), len(self.pool.targets))
if not self.target_backends:
raise exceptions.NoPoolTargetsConfigured()
    @property
    def service_name(self):
        """Name under which this service registers/reports itself."""
        return 'pool_manager'
@property
def _rpc_topic(self):
# Modify the default topic so it's pool manager instance specific.
topic = super(Service, self)._rpc_topic
topic = '%s.%s' % (topic, CONF['service:pool_manager'].pool_id)
LOG.info(_LI('Using topic %(topic)s for this pool manager instance.')
% {'topic': topic})
return topic
    def start(self):
        """Start all target backends, the RPC service, and optional timers."""
        for target in self.pool.targets:
            self.target_backends[target.id].start()

        super(Service, self).start()

        if CONF['service:pool_manager'].enable_recovery_timer:
            LOG.info(_LI('Starting periodic recovery timer'))
            # NOTE(review): the recovery interval is passed twice - as the
            # timer interval and again as a positional argument; confirm
            # against tg.add_timer's signature (likely initial_delay).
            self.tg.add_timer(
                CONF['service:pool_manager'].periodic_recovery_interval,
                self.periodic_recovery,
                CONF['service:pool_manager'].periodic_recovery_interval)

        if CONF['service:pool_manager'].enable_sync_timer:
            LOG.info(_LI('Starting periodic synchronization timer'))
            self.tg.add_timer(
                CONF['service:pool_manager'].periodic_sync_interval,
                self.periodic_sync,
                CONF['service:pool_manager'].periodic_sync_interval)
    def stop(self):
        """Stop all target backends, then the RPC service."""
        for target in self.pool.targets:
            self.target_backends[target.id].stop()

        super(Service, self).stop()
    @property
    def central_api(self):
        """Lazily obtained client for the Central service RPC API."""
        return central_api.CentralAPI.get_instance()
    @property
    def mdns_api(self):
        """Lazily obtained client for the mdns service RPC API."""
        return mdns_api.MdnsAPI.get_instance()
    # Periodic Tasks
def periodic_recovery(self):
    """Retry zones whose last create/update/delete ended in ERROR.

    :return: None
    """
    # TODO(kiall): Replace this inter-process-lock with a distributed
    #              lock, likely using the tooz library - see bug 1445127.
    with lockutils.lock('periodic_recovery', external=True, delay=30):
        context = DesignateContext.get_admin_context(all_tenants=True)
        LOG.debug("Starting Periodic Recovery")
        # Retry failed actions in a fixed order: deletes first, then
        # creates, then updates - matching the original recovery flow.
        recovery_plan = (
            (DELETE_ACTION, self.delete_domain),
            (CREATE_ACTION, self.create_domain),
            (UPDATE_ACTION, self.update_domain),
        )
        try:
            for failed_action, handler in recovery_plan:
                for domain in self._get_failed_domains(context,
                                                       failed_action):
                    handler(context, domain)
        except Exception:
            LOG.exception(_LE('An unhandled exception in periodic '
                              'recovery occurred'))
def periodic_sync(self):
    """Push updates for this pool's non-ERROR zones to the targets.

    :return: None
    """
    # TODO(kiall): Replace this inter-process-lock with a distributed
    #              lock, likely using the tooz library - see bug 1445127.
    with lockutils.lock('periodic_sync', external=True, delay=30):
        context = DesignateContext.get_admin_context(all_tenants=True)
        LOG.debug("Starting Periodic Synchronization")
        criterion = {
            'pool_id': CONF['service:pool_manager'].pool_id,
            'status': '!%s' % ERROR_STATUS,
        }
        sync_window = CONF['service:pool_manager'].periodic_sync_seconds
        if sync_window is not None:
            # Generate the current serial, will provide a UTC Unix TS.
            now_serial = utils.increment_serial()
            # Only zones changed within the sync window are considered.
            criterion['serial'] = ">%s" % (now_serial - sync_window)
        domains = self.central_api.find_domains(context, criterion)
        try:
            for domain in domains:
                # TODO(kiall): If the domain was created within the last
                #              periodic_sync_seconds, attempt to recreate
                #              to fill in targets which may have failed.
                self.update_domain(context, domain)
        except Exception:
            LOG.exception(_LE('An unhandled exception in periodic '
                              'synchronization occurred.'))
# Standard Create/Update/Delete Methods
def create_domain(self, context, domain):
    """Create *domain* on all pool targets, then notify the nameservers.

    :param context: Security context information.
    :param domain: Domain to be created
    :return: None
    """
    LOG.info(_LI("Creating new domain %s"), domain.name)
    # Create the domain on each of the Pool Targets
    successes = [
        self._create_domain_on_target(context, target, domain)
        for target in self.pool.targets
    ].count(True)
    if not self._exceed_or_meet_threshold(successes):
        # Not enough targets accepted the create - mark the zone errored.
        LOG.warn(_LW('Consensus not reached for creating domain %(domain)s'
                     ' on pool targets') % {'domain': domain.name})
        self.central_api.update_status(
            context, domain.id, ERROR_STATUS, domain.serial)
        return
    LOG.debug('Consensus reached for creating domain %(domain)s '
              'on pool targets' % {'domain': domain.name})
    # Send a NOTIFY to each also-notifies
    for also_notify in self.pool.also_notifies:
        self._update_domain_on_also_notify(context, also_notify, domain)
    # Send a NOTIFY to each nameserver, seeding a cache entry first so the
    # eventual status report has something to update.
    for nameserver in self.pool.nameservers:
        create_status = self._build_status_object(
            nameserver, domain, CREATE_ACTION)
        self.cache.store(context, create_status)
        self._update_domain_on_nameserver(context, nameserver, domain)
def _create_domain_on_target(self, context, target, domain):
    """Create *domain* on one pool target, swallowing backend errors.

    :param context: Security context information.
    :param target: Target to create Domain on
    :param domain: Domain to be created
    :return: True/False
    """
    LOG.debug("Creating domain %s on target %s", domain.name, target.id)
    try:
        self.target_backends[target.id].create_domain(context, domain)
    except Exception:
        LOG.exception(_LE("Failed to create domain %(domain)s on target "
                          "%(target)s"),
                      {'domain': domain.name, 'target': target.id})
        return False
    else:
        return True
def update_domain(self, context, domain):
    """Update *domain* on all pool targets, then notify the nameservers.

    :param context: Security context information.
    :param domain: Domain to be updated
    :return: None
    """
    LOG.info(_LI("Updating domain %s"), domain.name)
    results = []
    # Update the domain on each of the Pool Targets
    for target in self.pool.targets:
        results.append(
            self._update_domain_on_target(context, target, domain))
    if self._exceed_or_meet_threshold(results.count(True)):
        LOG.debug('Consensus reached for updating domain %(domain)s '
                  'on pool targets' % {'domain': domain.name})
    else:
        # Not enough targets accepted the update - flag the zone as
        # errored and bail out without sending any NOTIFYs.
        LOG.warn(_LW('Consensus not reached for updating domain %(domain)s'
                     ' on pool targets') % {'domain': domain.name})
        self.central_api.update_status(
            context, domain.id, ERROR_STATUS, domain.serial)
        return
    # Send a NOTIFY to each also-notifies
    for also_notify in self.pool.also_notifies:
        self._update_domain_on_also_notify(context, also_notify, domain)
    # Send a NOTIFY to each nameserver
    for nameserver in self.pool.nameservers:
        # See if there is already another update in progress; only seed a
        # fresh status object on a cache miss.
        try:
            update_status = self.cache.retrieve(
                context, nameserver.id, domain.id, UPDATE_ACTION)
        except exceptions.PoolManagerStatusNotFound:
            update_status = self._build_status_object(
                nameserver, domain, UPDATE_ACTION)
            self.cache.store(context, update_status)
        # The NOTIFY / serial poll is issued regardless of cache state.
        self._update_domain_on_nameserver(context, nameserver, domain)
def _update_domain_on_target(self, context, target, domain):
    """Update *domain* on one pool target, swallowing backend errors.

    :param context: Security context information.
    :param target: Target to update Domain on
    :param domain: Domain to be updated
    :return: True/False
    """
    LOG.debug("Updating domain %s on target %s", domain.name, target.id)
    try:
        self.target_backends[target.id].update_domain(context, domain)
    except Exception:
        LOG.exception(_LE("Failed to update domain %(domain)s on target "
                          "%(target)s"),
                      {'domain': domain.name, 'target': target.id})
        return False
    else:
        return True
def _update_domain_on_also_notify(self, context, also_notify, domain):
    """Send a NOTIFY for *domain* to one also-notify destination."""
    LOG.info(_LI('Updating domain %(domain)s on also_notify %(server)s.') %
             {'domain': domain.name,
              'server': self._get_destination(also_notify)})
    # Final argument is a delay of 0; unlike pool nameservers, also-notify
    # destinations are not subsequently polled for the new serial.
    self.mdns_api.notify_zone_changed(
        context, domain, also_notify, self.timeout, self.retry_interval,
        self.max_retries, 0)
def _update_domain_on_nameserver(self, context, nameserver, domain):
    """NOTIFY one pool nameserver, then poll it for the new serial."""
    LOG.info(_LI('Updating domain %(domain)s on nameserver %(server)s.') %
             {'domain': domain.name,
              'server': self._get_destination(nameserver)})
    # Send the NOTIFY immediately (delay of 0)...
    self.mdns_api.notify_zone_changed(
        context, domain, nameserver, self.timeout, self.retry_interval,
        self.max_retries, 0)
    # ...then poll for the serial number, waiting self.delay before the
    # first attempt to give the nameserver time to apply the change.
    self.mdns_api.poll_for_serial_number(
        context, domain, nameserver, self.timeout, self.retry_interval,
        self.max_retries, self.delay)
def delete_domain(self, context, domain):
    """Delete *domain* from all pool targets and report the outcome.

    :param context: Security context information.
    :param domain: Domain to be deleted
    :return: None
    """
    LOG.info(_LI("Deleting domain %s"), domain.name)
    # Delete the domain on each of the Pool Targets
    results = [
        self._delete_domain_on_target(context, target, domain)
        for target in self.pool.targets
    ]
    # TODO(kiall): We should monitor that the Domain is actually deleted
    #              correctly on each of the nameservers, rather than
    #              assuming a successful delete-on-target is OK as we have
    #              in the past.
    if self._exceed_or_meet_threshold(
            results.count(True), MAXIMUM_THRESHOLD):
        LOG.debug('Consensus reached for deleting domain %(domain)s '
                  'on pool targets' % {'domain': domain.name})
        self.central_api.update_status(
            context, domain.id, SUCCESS_STATUS, domain.serial)
    else:
        LOG.warn(_LW('Consensus not reached for deleting domain %(domain)s'
                     ' on pool targets') % {'domain': domain.name})
        self.central_api.update_status(
            context, domain.id, ERROR_STATUS, domain.serial)
def _delete_domain_on_target(self, context, target, domain):
    """Delete *domain* from one pool target, swallowing backend errors.

    :param context: Security context information.
    :param target: Target to delete Domain from
    :param domain: Domain to be deleted
    :return: True/False
    """
    LOG.debug("Deleting domain %s on target %s", domain.name, target.id)
    try:
        self.target_backends[target.id].delete_domain(context, domain)
    except Exception:
        LOG.exception(_LE("Failed to delete domain %(domain)s on target "
                          "%(target)s"),
                      {'domain': domain.name, 'target': target.id})
        return False
    else:
        return True
def update_status(self, context, domain, nameserver, status,
                  actual_serial):
    """
    update_status is called by mdns for creates and updates.
    deletes are handled by the backend entirely and status is determined
    at the time of delete itself.
    :param context: Security context information.
    :param domain: The designate domain object.
    :param nameserver: The nameserver for which a status update is being
                       sent.
    :param status: The status, 'SUCCESS' or 'ERROR'.
    :param actual_serial: The actual serial number received from the name
                          server for the domain.
    :return: None
    """
    LOG.debug("Calling update_status for %s : %s : %s : %s" %
              (domain.name, domain.action, status, actual_serial))
    # A 'NONE' action is bookkept as an update for cache purposes.
    action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
    # Serialize status handling per domain - multiple nameservers may
    # report concurrently.
    with lockutils.lock('update-status-%s' % domain.id):
        try:
            current_status = self.cache.retrieve(
                context, nameserver.id, domain.id, action)
        except exceptions.PoolManagerStatusNotFound:
            # No cached entry yet - seed one with serial 0.
            current_status = self._build_status_object(
                nameserver, domain, action)
            self.cache.store(context, current_status)
        cache_serial = current_status.serial_number
        LOG.debug('For domain %s : %s on nameserver %s the cache serial '
                  'is %s and the actual serial is %s.' %
                  (domain.name, action,
                   self._get_destination(nameserver),
                   cache_serial, actual_serial))
        # Only advance the cached status; never move the serial backwards.
        if actual_serial and cache_serial <= actual_serial:
            current_status.status = status
            current_status.serial_number = actual_serial
            self.cache.store(context, current_status)
        consensus_serial = self._get_consensus_serial(context, domain)
        # If there is a valid consensus serial we can still send a success
        # for that serial.
        # If there is a higher error serial we can also send an error for
        # the error serial.
        if consensus_serial != 0 and cache_serial <= consensus_serial \
                and domain.status != 'ACTIVE':
            LOG.info(_LI('For domain %(domain)s '
                         'the consensus serial is %(consensus_serial)s.') %
                     {'domain': domain.name,
                      'consensus_serial': consensus_serial})
            self.central_api.update_status(
                context, domain.id, SUCCESS_STATUS, consensus_serial)
        if status == ERROR_STATUS:
            error_serial = self._get_error_serial(
                context, domain, consensus_serial)
            if error_serial > consensus_serial or error_serial == 0:
                LOG.warn(_LW('For domain %(domain)s '
                             'the error serial is %(error_serial)s.') %
                         {'domain': domain.name,
                          'error_serial': error_serial})
                self.central_api.update_status(
                    context, domain.id, ERROR_STATUS, error_serial)
        # Once a full (MAXIMUM_THRESHOLD) consensus exists on the domain's
        # current serial, the cached entries have served their purpose.
        if consensus_serial == domain.serial and self._is_consensus(
                context, domain, action, SUCCESS_STATUS,
                MAXIMUM_THRESHOLD):
            self._clear_cache(context, domain, action)
# Utility Methods
def _get_failed_domains(self, context, action):
    """Find this pool's domains stuck in ERROR for the given action."""
    return self.central_api.find_domains(context, {
        'pool_id': CONF['service:pool_manager'].pool_id,
        'action': action,
        'status': 'ERROR'
    })
@staticmethod
def _get_destination(nameserver):
    """Render *nameserver* as a ``host:port`` string for log messages."""
    return '%s:%s' % (nameserver.host, nameserver.port)
@staticmethod
def _percentage(count, total_count):
    """Return *count* as a Decimal percentage of *total_count*."""
    ratio = Decimal(count) / Decimal(total_count)
    return ratio * Decimal(100)
def _exceed_or_meet_threshold(self, count, threshold=None):
    """Check whether *count* of the pool's targets meets *threshold*.

    :param count: Number of successful targets/nameservers.
    :param threshold: Percentage (0-100) to compare against; defaults to
                      the configured threshold_percentage.
    :return: True/False
    """
    # Use an explicit None check: the previous
    # "threshold = threshold or self.threshold" silently replaced a
    # legitimate threshold of 0 with the default, unlike the sibling
    # _is_consensus() which already checks "is None".
    if threshold is None:
        threshold = self.threshold
    return self._percentage(
        count, len(self.pool.targets)) >= Decimal(threshold)
@staticmethod
def _get_sorted_serials(pool_manager_statuses, descending=False):
    """Return the statuses' serial numbers as a sorted list."""
    return sorted(
        (pms.serial_number for pms in pool_manager_statuses),
        reverse=descending)
def _get_serials_ascending(self, pool_manager_statuses):
    """Serial numbers of the given statuses, smallest first."""
    return self._get_sorted_serials(pool_manager_statuses,
                                    descending=False)
def _get_serials_descending(self, pool_manager_statuses):
    """Serial numbers of the given statuses, largest first."""
    return self._get_sorted_serials(pool_manager_statuses,
                                    descending=True)
def _is_consensus(self, context, domain, action, status, threshold=None):
    """True when enough nameservers report *status* for *action*."""
    statuses = self._retrieve_statuses(context, domain, action)
    matching = sum(1 for pms in statuses if pms.status == status)
    if threshold is None:
        threshold = self.threshold
    return self._exceed_or_meet_threshold(matching, threshold)
def _get_consensus_serial(self, context, domain):
    """Highest serial that a threshold of nameservers have reached.

    Returns 0 when no serial has sufficient agreement.
    """
    action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
    pm_statuses = self._retrieve_statuses(context, domain, action)
    # Walk serials from highest to lowest; the first one that enough
    # nameservers have reached (serial_number >= candidate) wins.
    for candidate in self._get_serials_descending(pm_statuses):
        reached = sum(1 for pms in pm_statuses
                      if pms.serial_number >= candidate)
        if self._exceed_or_meet_threshold(reached, self.threshold):
            return candidate
    return 0
def _get_error_serial(self, context, domain, consensus_serial):
    """Smallest reported serial above *consensus_serial*, or 0.

    Only meaningful when the nameservers have reached an ERROR consensus
    for the domain's current action; otherwise 0 is returned.
    """
    action = UPDATE_ACTION if domain.action == 'NONE' else domain.action
    if not self._is_consensus(context, domain, action, ERROR_STATUS):
        return 0
    pm_statuses = self._retrieve_statuses(context, domain, action)
    for serial in self._get_serials_ascending(pm_statuses):
        if serial > consensus_serial:
            return serial
    return 0
# When we hear back from the nameserver, the serial_number is set to the
# value the nameserver reports for the zone.
@staticmethod
def _build_status_object(nameserver, domain, action):
    """Return a fresh, unreported PoolManagerStatus for this triple."""
    return objects.PoolManagerStatus(
        nameserver_id=nameserver.id,
        domain_id=domain.id,
        status=None,
        serial_number=0,
        action=action)
# Methods for manipulating the cache.
def _clear_cache(self, context, domain, action=None):
    """Drop cached statuses for *domain*, optionally for one action only."""
    LOG.debug('Clearing cache for domain %s with action %s.' %
              (domain.name, action))
    if action:
        actions = [action]
    else:
        actions = [CREATE_ACTION, UPDATE_ACTION, DELETE_ACTION]
    stale_statuses = [
        self._build_status_object(nameserver, domain, act)
        for nameserver in self.pool.nameservers
        for act in actions
    ]
    for pool_manager_status in stale_statuses:
        # Ignore any not found errors while clearing the cache
        try:
            self.cache.clear(context, pool_manager_status)
        except exceptions.PoolManagerStatusNotFound:
            pass
def _retrieve_from_mdns(self, context, nameserver, domain, action):
    """Fetch *domain*'s status from mdns for one nameserver and cache it.

    :return: A PoolManagerStatus (also stored in the cache), or None when
             the mdns RPC call failed.
    """
    try:
        (status, actual_serial, retries) = \
            self.mdns_api.get_serial_number(
                context, domain, nameserver, self.timeout,
                self.retry_interval, self.max_retries, self.delay)
    except messaging.MessagingException as msg_ex:
        # RPC failure - callers treat None as "no data for this server".
        LOG.debug('Could not retrieve status and serial for domain %s on '
                  'nameserver %s with action %s (%s: %s)' %
                  (domain.name, self._get_destination(nameserver), action,
                   type(msg_ex), str(msg_ex)))
        return None
    pool_manager_status = self._build_status_object(
        nameserver, domain, action)
    if status == NO_DOMAIN_STATUS:
        # The nameserver does not know the zone; interpretation depends on
        # what action we were performing.
        if action == CREATE_ACTION:
            pool_manager_status.status = 'ERROR'
        elif action == DELETE_ACTION:
            pool_manager_status.status = 'SUCCESS'
        # TODO(Ron): Handle this case properly.
        elif action == UPDATE_ACTION:
            pool_manager_status.status = 'ERROR'
    else:
        pool_manager_status.status = status
    # Normalize "no serial reported" to 0.
    pool_manager_status.serial_number = actual_serial \
        if actual_serial is not None else 0
    LOG.debug('Retrieved status %s and serial %s for domain %s '
              'on nameserver %s with action %s from mdns.' %
              (pool_manager_status.status,
               pool_manager_status.serial_number,
               domain.name, self._get_destination(nameserver), action))
    self.cache.store(context, pool_manager_status)
    return pool_manager_status
def _retrieve_statuses(self, context, domain, action):
    """Collect per-nameserver statuses, preferring cache over mdns.

    :return: List of PoolManagerStatus objects; nameservers whose status
             could not be fetched from mdns are omitted.
    """
    pool_manager_statuses = []
    for nameserver in self.pool.nameservers:
        try:
            pool_manager_status = self.cache.retrieve(
                context, nameserver.id, domain.id, action)
            LOG.debug('Cache hit! Retrieved status %s and serial %s '
                      'for domain %s on nameserver %s with action %s from '
                      'the cache.' %
                      (pool_manager_status.status,
                       pool_manager_status.serial_number,
                       domain.name,
                       self._get_destination(nameserver), action))
        except exceptions.PoolManagerStatusNotFound:
            # Fall back to asking mdns directly (which re-populates the
            # cache); this may return None on RPC failure.
            LOG.debug('Cache miss! Did not retrieve status and serial '
                      'for domain %s on nameserver %s with action %s from '
                      'the cache. Getting it from the server.' %
                      (domain.name,
                       self._get_destination(nameserver),
                       action))
            pool_manager_status = self._retrieve_from_mdns(
                context, nameserver, domain, action)
        if pool_manager_status is not None:
            pool_manager_statuses.append(pool_manager_status)
    return pool_manager_statuses
| |
#!/usr/bin/env python
"""Helper functions used by client building/repacking process."""
import io
import logging
import os
import shutil
import struct
import tempfile
from typing import Optional, Sequence, Text, Tuple
from grr_response_client_builder import build
from grr_response_core import config
from grr_response_core import version
from grr_response_core.config import contexts
from grr_response_core.lib import config_validator_base
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
# pylint: disable=unused-import
# Pull in local config validators.
from grr_response_core.lib.local import plugins
from grr_response_core.lib.rdfvalues import client as rdf_client
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.util.compat import yaml
# pylint: enable=unused-import
# pylint: disable=g-import-not-at-top,unused-import
# This is a workaround so we don't need to maintain the whole PyInstaller
# codebase as a full-fledged dependency.
try:
# pytype: disable=import-error
from PyInstaller import __main__ as PyInstallerMain
# pytype: enable=import-error
except ImportError:
# We ignore this failure since most people running the code don't build their
# own clients and printing an error message causes confusion. Those building
# their own clients will need PyInstaller installed.
pass
# pylint: enable=g-import-not-at-top,unused-import
Context = Sequence[Text]
def GenerateDirectory(input_dir: Optional[Text] = None,
                      output_dir: Optional[Text] = None,
                      replacements: Optional[Sequence[Tuple[Text,
                                                            Text]]] = None,
                      context: Optional[Context] = None) -> None:
  """Copies a directory, rewriting file names according to spec.

  Every file under input_dir is rendered into output_dir via GenerateFile,
  with each (old, new) pair in `replacements` applied to the output path.
  """
  if context is None:
    raise ValueError("context must be provided")
  input_dir = utils.NormalizePath(input_dir)
  output_dir = utils.NormalizePath(output_dir)
  for root, _, filenames in os.walk(input_dir):
    for name in filenames:
      src_path = utils.JoinPath(root, name)
      dst_path = src_path.replace(input_dir, output_dir)
      for old, new in (replacements or []):
        dst_path = dst_path.replace(old, new)
      utils.EnsureDirExists(os.path.dirname(dst_path))
      GenerateFile(src_path, dst_path, context=context)
def GenerateFile(input_filename: Optional[Text] = None,
                 output_filename: Optional[Text] = None,
                 context: Optional[Context] = None) -> None:
  """Generates a file from a template, interpolating config values.

  Args:
    input_filename: Template path; defaults to output_filename + ".in".
    output_filename: Destination path; a trailing ".in" suffix is stripped.
    context: Config context used for interpolation. Required.

  Raises:
    ValueError: If context is not provided.
  """
  if context is None:
    raise ValueError("context must be provided.")
  if input_filename is None:
    input_filename = output_filename + ".in"
  # endswith() is the idiomatic spelling of the previous
  # output_filename[-3:] == ".in" slice comparison.
  if output_filename.endswith(".in"):
    output_filename = output_filename[:-3]
  logging.debug("Generating file %s from %s", output_filename, input_filename)
  with io.open(input_filename, "r") as fd:
    data = fd.read()
  with io.open(output_filename, "w") as fd:
    fd.write(config.CONFIG.InterpolateValue(data, context=context))
def CleanDirectory(directory: Text):
  """Ensures `directory` exists and is empty."""
  logging.info("Clearing directory %s", directory)
  try:
    shutil.rmtree(directory)
  except OSError:
    pass  # e.g. the directory did not exist - nothing to clear.
  utils.EnsureDirExists(directory)
def MakeBuildDirectory(context=None):
  """Prepares (empties) the PyInstaller build and work directories."""
  if context is None:
    raise ValueError("context can't be None")
  for option in ("PyInstaller.build_dir", "PyInstaller.workpath_dir"):
    CleanDirectory(config.CONFIG.Get(option, context=context))
def BuildWithPyInstaller(context=None):
  """Use pyinstaller to build a client package.

  Args:
    context: Config context used to resolve PyInstaller options. Required.

  Returns:
    Path to the produced onedir output directory.

  Raises:
    ValueError: If context is not provided.
  """
  if context is None:
    raise ValueError("context has to be specified")
  CleanDirectory(config.CONFIG.Get("PyInstaller.distpath", context=context))
  logging.info("Copying pyinstaller support files")
  build_dir = config.CONFIG.Get("PyInstaller.build_dir", context=context)
  # Materialize the spec, version and icon files PyInstaller expects.
  spec_file = os.path.join(build_dir, "grr.spec")
  with io.open(spec_file, "w") as fd:
    fd.write(config.CONFIG.Get("PyInstaller.spec", context=context))
  with io.open(os.path.join(build_dir, "version.txt"), "w") as fd:
    fd.write(config.CONFIG.Get("PyInstaller.version", context=context))
  shutil.copy(
      src=config.CONFIG.Get("PyInstaller.icon_path", context=context),
      dst=os.path.join(build_dir, "grr.ico"))
  # We expect the onedir (a one-folder bundle containing an executable) output
  # at this location.
  output_dir = os.path.join(
      config.CONFIG.Get("PyInstaller.distpath", context=context), "grr-client")
  args = [
      "--distpath",
      config.CONFIG.Get("PyInstaller.distpath", context=context),
      "--workpath",
      config.CONFIG.Get("PyInstaller.workpath_dir", context=context),
      spec_file,
  ]
  logging.info("Running pyinstaller: %s", args)
  PyInstallerMain.run(pyi_args=args)
  # Clear out some crud that pyinstaller includes, replacing each directory
  # with an empty placeholder of the same name.
  for path in ["tcl", "tk", "pytz"]:
    dir_path = os.path.join(output_dir, path)
    try:
      shutil.rmtree(dir_path)
    except OSError:
      logging.error("Unable to remove directory: %s", dir_path)
    try:
      os.mkdir(dir_path)
    except OSError:
      logging.error("Unable to create directory: %s", dir_path)
    file_path = os.path.join(dir_path, path)
    try:
      # Create an empty file so the directories get put in the installers.
      with io.open(file_path, "wb"):
        pass
    except IOError:
      logging.error("Unable to create file: %s", file_path)
  # Ship version metadata and the build spec alongside the binaries.
  version_ini = config.CONFIG.Get(
      "ClientBuilder.version_ini_path", default=version.VersionPath())
  shutil.copy(version_ini, os.path.join(output_dir, "version.ini"))
  build_yaml_path = os.path.join(output_dir, "build.yaml")
  with io.open(build_yaml_path, mode="w", encoding="utf-8") as fd:
    WriteBuildYaml(fd, context=context)
  return output_dir
def WriteBuildYaml(fd, build_timestamp=True, context=None):
  """Write build spec to fd.

  Args:
    fd: Text file-like object to which the YAML document is written.
    build_timestamp: If True, include Client.build_time in the spec.
    context: Config context used to resolve option values. Required.

  Raises:
    ValueError: If context is not provided.
    RuntimeError: If the produced spec is missing required keys or
      contains None values.
  """
  if context is None:
    raise ValueError("context has to be specified")
  output = {
      "Client.build_environment":
          rdf_client.Uname.FromCurrentSystem().signature(),
      "Template.build_type":
          config.CONFIG.Get("ClientBuilder.build_type", context=context),
      "Template.version_major":
          config.CONFIG.Get("Source.version_major", context=context),
      "Template.version_minor":
          config.CONFIG.Get("Source.version_minor", context=context),
      "Template.version_revision":
          config.CONFIG.Get("Source.version_revision", context=context),
      "Template.version_release":
          config.CONFIG.Get("Source.version_release", context=context),
      "Template.arch":
          config.CONFIG.Get("Client.arch", context=context)
  }
  yaml_keys = set(build.REQUIRED_BUILD_YAML_KEYS)
  if build_timestamp:
    output["Client.build_time"] = rdfvalue.RDFDatetime.Now()
  else:
    # No timestamp requested, so it is not a required key either.
    yaml_keys.remove("Client.build_time")
  # Stringify all values before adding the context (which stays a list).
  for key, value in output.items():
    output[key] = str(value)
  output["Template.build_context"] = context
  # Sanity-check the spec against the required key set.
  output_keys = set(output.keys())
  if output_keys != yaml_keys:
    raise RuntimeError("Bad build.yaml: expected %s, got %s" %
                       (yaml_keys, output_keys))
  for k, v in output.items():
    if v is None:
      raise RuntimeError("Bad build.yaml: expected %s to be not None" % k)
  fd.write(yaml.Dump(output))
def ValidateEndConfig(config_obj, errors_fatal=True, context=None):
  """Given a generated client config, attempt to check for common errors.

  Returns the list of error strings when errors_fatal is False; otherwise
  raises RuntimeError if any error was found.
  """
  if context is None:
    raise ValueError("context can't be None")
  errors = []
  if not config.CONFIG["Client.fleetspeak_enabled"]:
    server_urls = config_obj.Get("Client.server_urls", context=context)
    if not server_urls:
      errors.append("Empty Client.server_urls")
    for url in server_urls:
      if not url.startswith("http"):
        errors.append("Bad Client.server_urls specified %s" % url)
    certificate = config_obj.GetRaw(
        "CA.certificate", default=None, context=context)
    if certificate is None or not certificate.startswith("-----BEGIN CERTIF"):
      errors.append("CA certificate missing from config.")
  signing_key = config_obj.GetRaw(
      "Client.executable_signing_public_key", default=None, context=context)
  if signing_key is None:
    errors.append("Missing Client.executable_signing_public_key.")
  elif not signing_key.startswith("-----BEGIN PUBLIC"):
    errors.append(
        "Invalid Client.executable_signing_public_key: %s" % signing_key)
  else:
    # Parse the key to make sure it is structurally valid.
    rdf_crypto.RSAPublicKey.FromHumanReadable(signing_key)
  for bad_opt in ["Client.private_key"]:
    if config_obj.Get(bad_opt, context=context, default=""):
      errors.append("Client cert in conf, this should be empty at deployment"
                    " %s" % bad_opt)
  if errors_fatal and errors:
    for error in errors:
      logging.error("Build Config Error: %s", error)
    raise RuntimeError("Bad configuration generated. Terminating.")
  return errors
# Config options that have to make it to a deployable binary.
# Only options belonging to these sections are copied into the client's
# embedded configuration (see GetClientConfig).
_CONFIG_SECTIONS = [
    "CA", "Client", "ClientRepacker", "Logging", "Config", "Nanny", "Osquery",
    "Installer", "Template"
]
# Config options that should never make it to a deployable binary,
# even when their section is listed above.
_SKIP_OPTION_LIST = ["Client.private_key"]
def GetClientConfig(context, validate=True, deploy_timestamp=True):
  """Generates the client config file for inclusion in deployable binaries.

  Args:
    context: Config context used to resolve option values.
    validate: If True, run ValidateEndConfig (and any configured private
      validator) on the generated config.
    deploy_timestamp: If True, record Client.deploy_time in the config.

  Returns:
    The generated client configuration as a string.
  """
  with utils.TempDirectory() as tmp_dir:
    # Make sure we write the file in yaml format.
    filename = os.path.join(
        tmp_dir,
        config.CONFIG.Get("ClientBuilder.config_filename", context=context))
    new_config = config.CONFIG.MakeNewConfig()
    new_config.Initialize(reset=True, data="")
    new_config.SetWriteBack(filename)
    # Only copy certain sections to the client. We enumerate all
    # defined options and then resolve those from the config in the
    # client's context. The result is the raw option as if the
    # client read our config file.
    client_context = context[:]
    # The build context must not leak into the client's own context.
    while contexts.CLIENT_BUILD_CONTEXT in client_context:
      client_context.remove(contexts.CLIENT_BUILD_CONTEXT)
    for descriptor in sorted(config.CONFIG.type_infos, key=lambda x: x.name):
      if descriptor.name in _SKIP_OPTION_LIST:
        continue
      if descriptor.section in _CONFIG_SECTIONS:
        value = config.CONFIG.GetRaw(
            descriptor.name, context=client_context, default=None)
        if value is not None:
          logging.debug("Copying config option to client: %s", descriptor.name)
          new_config.SetRaw(descriptor.name, value)
    if deploy_timestamp:
      deploy_time_string = str(rdfvalue.RDFDatetime.Now())
      new_config.Set("Client.deploy_time", deploy_time_string)
    new_config.Write()
    if validate:
      ValidateEndConfig(new_config, context=context)
    # Optionally run a deployment-specific validator class configured via
    # ClientBuilder.private_config_validator_class.
    private_validator = config.CONFIG.Get(
        "ClientBuilder.private_config_validator_class", context=context)
    if private_validator:
      try:
        validator = config_validator_base.PrivateConfigValidator.classes[
            private_validator]()
      except KeyError:
        logging.error("Couldn't find config validator class %s",
                      private_validator)
        raise
      validator.ValidateEndConfig(new_config, context)
    return io.open(filename, "r").read()
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None, signer=None):
  """Read a file from a ZipFile and write it to a new ZipFile.

  When `signer` is given the payload is passed through signer.SignBuffer
  before being written; `to_name` defaults to the source name.
  """
  payload = from_zip.read(from_name)
  if signer:
    logging.debug("Signing %s", from_name)
    payload = signer.SignBuffer(payload)
  to_zip.writestr(from_name if to_name is None else to_name, payload)
def CreateNewZipWithSignedLibs(z_in,
                               z_out,
                               ignore_files=None,
                               signer=None,
                               skip_signing_files=None):
  """Copies files from one zip to another, signing all qualifying files.

  Args:
    z_in: Source ZipFile.
    z_out: Destination ZipFile.
    ignore_files: Names to drop entirely from the output.
    signer: Object providing SignFiles (batch) or SignFile (per-file);
      when None, nothing is signed.
    skip_signing_files: Names that are copied but never signed.
  """
  ignore_files = ignore_files or []
  skip_signing_files = skip_signing_files or []
  extensions_to_sign = [".sys", ".exe", ".dll", ".pyd"]
  to_sign = []
  for template_file in z_in.namelist():
    if template_file not in ignore_files:
      extension = os.path.splitext(template_file)[1].lower()
      if (signer and template_file not in skip_signing_files and
          extension in extensions_to_sign):
        to_sign.append(template_file)
      else:
        # Non-signable entries are streamed straight through.
        CopyFileInZip(z_in, template_file, z_out)
  # Signing happens on temp files on disk; remember name -> temp path.
  temp_files = {}
  for filename in to_sign:
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, "wb") as temp_fd:
      temp_fd.write(z_in.read(filename))
    temp_files[filename] = path
  try:
    signer.SignFiles(temp_files.values())
  except AttributeError:
    # Duck-typing fallback: this signer apparently has no batch SignFiles,
    # so sign each file individually.
    for f in temp_files.values():
      signer.SignFile(f)
  # Re-insert the signed payloads under their original archive names.
  for filename, tempfile_path in temp_files.items():
    with io.open(tempfile_path, "rb") as fd:
      z_out.writestr(filename, fd.read())
def SetPeSubsystem(fd, console=True):
  """Patches the PE optional header's Subsystem field in place.

  Writes subsystem 3 (console/CUI) when `console` is true, otherwise 2
  (GUI). The file position of `fd` is restored before returning.
  """
  saved_pos = fd.tell()
  fd.seek(0x3c)  # _IMAGE_DOS_HEADER.e_lfanew
  (e_lfanew,) = struct.unpack("<I", fd.read(4))
  # _IMAGE_NT_HEADERS.OptionalHeader.Subsystem ( 0x18 + 0x44)
  fd.seek(e_lfanew + 0x5c)
  fd.write(b"\x03" if console else b"\x02")
  fd.seek(saved_pos)
| |
from bson import ObjectId
import jsonschema
import numpy
from girder.exceptions import ValidationException
from girder.models.file import File
from girder.models.model_base import Model
from girder.models.upload import Upload
from girder.utility.acl_mixin import AccessControlMixin
from .image import Image
from .segmentation_helpers import ScikitSegmentationHelper
from .study import Study
from .user import User
class Annotation(AccessControlMixin, Model):
def initialize(self):
    """Configure the 'annotation' collection: name, indexes, ACL routing."""
    self.name = 'annotation'
    self.ensureIndices(['studyId', 'imageId', 'userId'])
    # TODO: resourceColl should be ['study', 'isic_archive'], but upstream
    # support is unclear
    self.resourceColl = 'folder'
    self.resourceParent = 'studyId'
def createAnnotation(self, study, image, user):
    """Create and persist a new, not-yet-started annotation document."""
    newAnnotation = {
        'studyId': study['_id'],
        'imageId': image['_id'],
        'userId': user['_id'],
        'startTime': None,
        'stopTime': None,
        'status': None,
        'log': [],
        'responses': {},
        'markups': {},
    }
    return self.save(newAnnotation)
def getState(self, annotation):
    """COMPLETE once a stopTime has been recorded, otherwise ACTIVE."""
    if annotation['stopTime'] is not None:
        return Study().State.COMPLETE
    return Study().State.ACTIVE
def _superpixelsToMasks(self, superpixelValues, image):
    """Convert per-superpixel feature values into boolean pixel masks.

    :param superpixelValues: Sequence of feature values indexed by
        superpixel number; 0.5 marks "possible", 1.0 marks "definite".
    :param image: Image document whose superpixel labeling is used.
    :return: (possibleMask, definiteMask) boolean arrays with the same
        shape as the image's superpixel label data.
    """
    possibleSuperpixelNums = numpy.array([
        superpixelNum
        for superpixelNum, featureValue
        in enumerate(superpixelValues)
        if featureValue == 0.5
    ])
    definiteSuperpixelNums = numpy.array([
        superpixelNum
        for superpixelNum, featureValue
        in enumerate(superpixelValues)
        if featureValue == 1.0
    ])
    superpixelsLabelData = Image().superpixelsData(image)
    # numpy.isin supersedes the in1d + reshape combination and already
    # yields a boolean array of the label data's shape, making the old
    # astype(numpy.bool_) calls redundant.
    possibleMask = numpy.isin(superpixelsLabelData, possibleSuperpixelNums)
    definiteMask = numpy.isin(superpixelsLabelData, definiteSuperpixelNums)
    return possibleMask, definiteMask
def _superpixelsToMaskMarkup(self, superpixelValues, image):
    """Render superpixel values as a uint8 markup mask (0 / 128 / 255)."""
    possible, definite = self._superpixelsToMasks(superpixelValues, image)
    markup = numpy.zeros(possible.shape, dtype=numpy.uint8)
    # "Possible" pixels get 128; "definite" pixels override with 255.
    markup[possible] = 128
    markup[definite] = 255
    return markup
def saveSuperpixelMarkup(self, annotation, featureId, superpixelValues):
    """Encode superpixel values as a mask PNG, upload it, and record it.

    The uploaded file is attached to the annotation (owned by the
    annotating user) and referenced from annotation['markups'][featureId],
    together with whether any pixel was actually marked.
    """
    image = Image().load(annotation['imageId'], force=True, exc=True)
    annotator = User().load(annotation['userId'], force=True, exc=True)
    markupMask = self._superpixelsToMaskMarkup(superpixelValues, image)
    markupMaskEncodedStream = ScikitSegmentationHelper.writeImage(
        markupMask, 'png')
    markupFile = Upload().uploadFromFile(
        obj=markupMaskEncodedStream,
        size=len(markupMaskEncodedStream.getvalue()),
        name='annotation_%s_%s.png' % (
            annotation['_id'],
            # Rename features to ensure the file is downloadable on Windows
            featureId.replace(' : ', ' ; ').replace('/', ',')
        ),
        # TODO: change this once a bug in upstream Girder is fixed
        parentType='annotation',
        parent=annotation,
        attachParent=True,
        user=annotator,
        mimeType='image/png'
    )
    # Keep the raw superpixel values on the file document as well.
    markupFile['superpixels'] = superpixelValues
    # TODO: remove this once a bug in upstream Girder is fixed
    markupFile['attachedToType'] = ['annotation', 'isic_archive']
    markupFile = File().save(markupFile)
    annotation['markups'][featureId] = {
        'fileId': markupFile['_id'],
        'present': bool(markupMask.any())
    }
    return Annotation().save(annotation)
def getMarkupFile(self, annotation, featureId, includeSuperpixels=False):
    """Load the markup file for a feature, or None when none exists."""
    markup = annotation['markups'].get(featureId)
    if markup is None:
        return None
    return File().load(
        markup['fileId'],
        force=True,
        exc=True,
        fields={'superpixels': includeSuperpixels}
    )
def renderMarkup(self, annotation, featureId):
    """Render a feature's markup as a colored overlay on the image data.

    Returns the image's pixel data with "possible" markup pixels painted
    yellow and "definite" markup pixels painted blue. If no markup file
    exists for the feature, an all-zero mask is used, which matches no
    pixels, so the image data is returned unmodified.
    """
    POSSIBLE_OVERLAY_COLOR = numpy.array([250, 250, 0])
    DEFINITE_OVERLAY_COLOR = numpy.array([0, 0, 255])
    image = Image().load(annotation['imageId'], force=True, exc=True)
    renderData = Image().imageData(image)
    # Use self (this is the Annotation model), consistent with remove().
    markupFile = self.getMarkupFile(annotation, featureId)
    if markupFile:
        markupMask = Image()._decodeDataFromFile(markupFile)
    else:
        # No stored markup: build an empty mask of the image's dimensions.
        # (The image was already loaded above; the former redundant
        # Image().load here has been removed.)
        markupMask = numpy.zeros(
            (
                image['meta']['acquisition']['pixelsY'],
                image['meta']['acquisition']['pixelsX']
            ),
            dtype=numpy.uint8
        )
    # Markup masks encode "possible" as 128 and "definite" as 255.
    possibleMask = markupMask == 128
    definiteMask = markupMask == 255
    renderData[possibleMask] = POSSIBLE_OVERLAY_COLOR
    renderData[definiteMask] = DEFINITE_OVERLAY_COLOR
    return renderData
def filter(self, annotation, user=None, additionalKeys=None):
    """Filter an annotation document for API output.

    Always emits the identity/summary fields; once the annotation is
    complete, additionally emits its responses, markup presence flags,
    and log.

    NOTE(review): 'additionalKeys' is accepted for interface compatibility
    but is not currently used by this implementation.
    """
    # Compute the state once via self (this is the Annotation model):
    # it is needed both for the output and to decide whether the
    # completion-only fields are included. The original computed it
    # twice through fresh Annotation() instances.
    state = self.getState(annotation)
    output = {
        '_id': annotation['_id'],
        '_modelType': 'annotation',
        'studyId': annotation['studyId'],
        'image': Image().filterSummary(
            Image().load(annotation['imageId'], force=True, exc=True),
            user),
        'user': User().filterSummary(
            user=User().load(annotation['userId'], force=True, exc=True),
            accessorUser=user),
        'state': state
    }
    if state == Study().State.COMPLETE:
        output.update({
            'status': annotation['status'],
            'startTime': annotation['startTime'],
            'stopTime': annotation['stopTime'],
            'responses': annotation['responses'],
            'markups': {
                featureId: markup['present']
                for featureId, markup
                in annotation['markups'].items()
            },
            'log': annotation.get('log', [])
        })
    return output
def filterSummary(self, annotation, user=None):
    """Return a compact summary of an annotation for listing endpoints."""
    summary = {
        '_id': annotation['_id'],
        'studyId': annotation['studyId'],
        'userId': annotation['userId'],
        'imageId': annotation['imageId'],
    }
    # State is derived rather than stored on the document.
    summary['state'] = self.getState(annotation)
    return summary
def remove(self, annotation, **kwargs):
    """Delete an annotation along with all of its stored markup files."""
    # Snapshot the feature ids before touching the files.
    for markupFeatureId in list(annotation['markups'].keys()):
        File().remove(self.getMarkupFile(annotation, markupFeatureId))
    return super(Annotation, self).remove(annotation)
def validate(self, doc): # noqa C901
    """Validate an annotation document before it is saved.

    Always verifies that the reference fields are ObjectIds and that the
    referenced study exists. Once an annotation is complete (it has a
    'stopTime'), the whole document is additionally validated against a
    JSON schema built from the owning study's questions and features.

    :param doc: the annotation document to validate.
    :returns: the (unmodified) document, if valid.
    :raises ValidationException: if the document is invalid.
    """
    # Reference fields must be ObjectIds even on incomplete documents.
    for field in ['studyId', 'userId', 'imageId']:
        if not isinstance(doc.get(field), ObjectId):
            raise ValidationException(f'Annotation field "{field}" must be an ObjectId')
    # The study is needed below to build the schema; exc=False so a
    # missing study yields our own error message instead of Girder's.
    study = Study().load(doc['studyId'], force=True, exc=False)
    if not study:
        raise ValidationException(
            'Annotation field "studyId" must reference an existing Study.')
    # If annotation is complete
    if doc.get('stopTime'):
        schema = {
            # '$schema': 'http://json-schema.org/draft-07/schema#',
            'title': 'annotation',
            'type': 'object',
            'properties': {
                '_id': {
                    # TODO
                },
                'studyId': {
                    # TODO
                },
                'imageId': {
                    # TODO
                },
                'userId': {
                    # TODO
                },
                'startTime': {
                    # TODO
                },
                'stopTime': {
                    # TODO
                },
                'status': {
                    'type': 'string',
                    'enum': ['ok', 'phi', 'quality', 'zoom', 'inappropriate', 'other']
                },
                # Responses are keyed by the study's question ids; only
                # the study's declared choices are accepted as values.
                'responses': {
                    'type': 'object',
                    'properties': {
                        question['id']: {
                            'type': 'string',
                            # TODO: Support non-'select' question types
                            'enum': question['choices']
                        }
                        for question in study['meta']['questions']
                    },
                    'additionalProperties': False
                },
                # Markups are keyed by the study's feature ids; each entry
                # must carry exactly a file reference and a presence flag.
                'markups': {
                    'type': 'object',
                    'properties': {
                        feature['id']: {
                            'type': 'object',
                            'properties': {
                                'fileId': {
                                    # TODO
                                },
                                'present': {
                                    'type': 'boolean'
                                }
                            },
                            'required': ['fileId', 'present'],
                            'additionalProperties': False
                        }
                        for feature in study['meta']['features']
                    },
                    'additionalProperties': False
                },
                'log': {
                    # TODO
                }
            },
            'required': [
                '_id', 'studyId', 'imageId', 'userId', 'startTime', 'stopTime', 'status',
                'responses', 'markups', 'log'
            ],
            # A complete annotation may not carry unknown top-level fields.
            'additionalProperties': False
        }
        try:
            jsonschema.validate(doc, schema)
        except jsonschema.ValidationError as e:
            raise ValidationException(f'Invalid annotation: {str(e)}')
    return doc
| |
# -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Async DNS server
Future plans:
- Better config file format maybe
- Make sure to differentiate between different classes
- notice truncation bit
Important: No additional processing is done on some of the record types.
This violates the most basic RFC and is just plain annoying
for resolvers to deal with. Fix it.
@author: Jp Calderone
"""
import time
from twisted.internet import protocol
from twisted.names import dns, resolve
from twisted.python import log
class DNSServerFactory(protocol.ServerFactory):
    """
    Server factory and tracker for L{DNSProtocol} connections. This
    class also provides records for responses to DNS queries.

    NOTE(review): this module uses Python 2-only tuple-parameter
    unpacking (see gotResolverResponse); it will not import on Python 3.

    @ivar connections: A list of all the connected L{DNSProtocol}
    instances using this object as their controller.
    @type connections: C{list} of L{DNSProtocol}
    """

    protocol = dns.DNSProtocol
    # The last cache passed to __init__, if any; positive results are
    # written into it in gotResolverResponse.
    cache = None

    def __init__(self, authorities = None, caches = None, clients = None, verbose = 0):
        """
        Build the resolver chain in priority order: authoritative data
        first, then caches, then client (recursive) resolvers.
        """
        resolvers = []
        if authorities is not None:
            resolvers.extend(authorities)
        if caches is not None:
            resolvers.extend(caches)
        if clients is not None:
            resolvers.extend(clients)
        # Recursion is only advertised when at least one client resolver
        # was supplied ("not not" coerces to a boolean).
        self.canRecurse = not not clients
        self.resolver = resolve.ResolverChain(resolvers)
        self.verbose = verbose
        if caches:
            # Results are cached in the last (outermost) cache.
            self.cache = caches[-1]
        self.connections = []

    def buildProtocol(self, addr):
        """
        Create a protocol instance wired back to this factory.
        """
        p = self.protocol(self)
        p.factory = self
        return p

    def connectionMade(self, protocol):
        """
        Track a newly connected L{DNSProtocol}.
        """
        self.connections.append(protocol)

    def connectionLost(self, protocol):
        """
        Stop tracking a no-longer connected L{DNSProtocol}.
        """
        self.connections.remove(protocol)

    def sendReply(self, protocol, message, address):
        """
        Write a response message out via the given protocol, logging the
        answer/authority/additional sections when verbosity is high.

        @param address: the UDP return address, or None for stream
        transports (the protocol then writes without an address).
        """
        if self.verbose > 1:
            s = ' '.join([str(a.payload) for a in message.answers])
            auth = ' '.join([str(a.payload) for a in message.authority])
            add = ' '.join([str(a.payload) for a in message.additional])
            if not s:
                log.msg("Replying with no answers")
            else:
                log.msg("Answers are " + s)
                log.msg("Authority is " + auth)
                log.msg("Additional is " + add)
        if address is None:
            protocol.writeMessage(message)
        else:
            protocol.writeMessage(message, address)
        if self.verbose > 1:
            # timeReceived was stamped on the message in messageReceived.
            log.msg("Processed query in %0.3f seconds" % (time.time() - message.timeReceived))

    def gotResolverResponse(self, (ans, auth, add), protocol, message, address):
        """
        Resolver success callback: fill in the response sections, mark the
        answer authoritative if any record is, reply, and cache the result.

        NOTE: tuple parameter unpacking in the signature is Python 2-only.
        """
        message.rCode = dns.OK
        message.answers = ans
        for x in ans:
            if x.isAuthoritative():
                message.auth = 1
                break
        message.authority = auth
        message.additional = add
        self.sendReply(protocol, message, address)
        l = len(ans) + len(auth) + len(add)
        if self.verbose:
            log.msg("Lookup found %d record%s" % (l, l != 1 and "s" or ""))
        # Only cache non-empty results, and only for the first query.
        if self.cache and l:
            self.cache.cacheResult(
                message.queries[0], (ans, auth, add)
            )

    def gotResolverError(self, failure, protocol, message, address):
        """
        Resolver failure errback: map domain errors to ENAME, everything
        else to ESERVER (and log it), then reply.
        """
        if failure.check(dns.DomainError, dns.AuthoritativeDomainError):
            message.rCode = dns.ENAME
        else:
            message.rCode = dns.ESERVER
            log.err(failure)
        self.sendReply(protocol, message, address)
        if self.verbose:
            log.msg("Lookup failed")

    def handleQuery(self, message, protocol, address):
        """
        Resolve a standard query and reply via the callbacks above.
        """
        # Discard all but the first query!  HOO-AAH HOOOOO-AAAAH
        # (no other servers implement multi-query messages, so we won't either)
        query = message.queries[0]
        return self.resolver.query(query).addCallback(
            self.gotResolverResponse, protocol, message, address
        ).addErrback(
            self.gotResolverError, protocol, message, address
        )

    def handleInverseQuery(self, message, protocol, address):
        """
        Inverse queries are not implemented; reply ENOTIMP.
        """
        message.rCode = dns.ENOTIMP
        self.sendReply(protocol, message, address)
        if self.verbose:
            log.msg("Inverse query from %r" % (address,))

    def handleStatus(self, message, protocol, address):
        """
        Status requests are not implemented; reply ENOTIMP.
        """
        message.rCode = dns.ENOTIMP
        self.sendReply(protocol, message, address)
        if self.verbose:
            log.msg("Status request from %r" % (address,))

    def handleNotify(self, message, protocol, address):
        """
        NOTIFY messages are not implemented; reply ENOTIMP.
        """
        message.rCode = dns.ENOTIMP
        self.sendReply(protocol, message, address)
        if self.verbose:
            log.msg("Notify message from %r" % (address,))

    def handleOther(self, message, protocol, address):
        """
        Any unrecognized opcode gets an ENOTIMP reply.
        """
        message.rCode = dns.ENOTIMP
        self.sendReply(protocol, message, address)
        if self.verbose:
            log.msg("Unknown op code (%d) from %r" % (message.opCode, address))

    def messageReceived(self, message, proto, address = None):
        """
        Entry point for an incoming DNS message: stamp the receive time,
        optionally log the query, apply the allowQuery policy, then
        dispatch on the message's opcode.

        @param address: the sender's address for datagram transports;
        None for stream transports.
        """
        message.timeReceived = time.time()
        if self.verbose:
            if self.verbose > 1:
                s = ' '.join([str(q) for q in message.queries])
            elif self.verbose > 0:
                # At verbosity 1, log only the query types.
                s = ' '.join([dns.QUERY_TYPES.get(q.type, 'UNKNOWN') for q in message.queries])
            if not len(s):
                log.msg("Empty query from %r" % ((address or proto.transport.getPeer()),))
            else:
                log.msg("%s query from %r" % (s, address or proto.transport.getPeer()))
        # Advertise recursion availability and mark this as a response.
        message.recAv = self.canRecurse
        message.answer = 1
        if not self.allowQuery(message, proto, address):
            message.rCode = dns.EREFUSED
            self.sendReply(proto, message, address)
        elif message.opCode == dns.OP_QUERY:
            self.handleQuery(message, proto, address)
        elif message.opCode == dns.OP_INVERSE:
            self.handleInverseQuery(message, proto, address)
        elif message.opCode == dns.OP_STATUS:
            self.handleStatus(message, proto, address)
        elif message.opCode == dns.OP_NOTIFY:
            self.handleNotify(message, proto, address)
        else:
            self.handleOther(message, proto, address)

    def allowQuery(self, message, protocol, address):
        """
        Policy hook: decide whether to serve this message.
        The default accepts anything except empty queries.
        """
        # Allow anything but empty queries
        return len(message.queries)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ReplicationNetworksOperations(object):
    """ReplicationNetworksOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.recoveryservicessiterecovery.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: auto-generated code (AutoRest); edits may be lost on regeneration.
    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_replication_fabrics(
        self,
        fabric_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkCollection"]
        """Gets the list of networks under a fabric.

        Lists the networks available for a fabric.

        :param fabric_name: Fabric name.
        :type fabric_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.NetworkCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Builds the HTTP request for a page: the metadata URL for the
        # first page, or the service-provided next_link verbatim afterward.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_replication_fabrics.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
                    'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string; send it as-is.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserializes one page into (next_link, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetches one page and raises on any non-200 response.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_replication_fabrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationNetworks'}  # type: ignore

    def get(
        self,
        fabric_name,  # type: str
        network_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Network"
        """Gets a network with specified server id and network name.

        Gets the details of a network.

        :param fabric_name: Server Id.
        :type fabric_name: str
        :param network_name: Primary network name.
        :type network_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Network, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicessiterecovery.models.Network
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Network"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
            'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'fabricName': self._serialize.url("fabric_name", fabric_name, 'str'),
            'networkName': self._serialize.url("network_name", network_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is an expected success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Network', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationNetworks/{networkName}'}  # type: ignore

    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.NetworkCollection"]
        """Gets the list of networks. View-only API.

        Lists the networks available in a vault.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.NetworkCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.NetworkCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-06-01"
        accept = "application/json"

        # Builds the HTTP request for a page (same pattern as
        # list_by_replication_fabrics, without the fabric path segment).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceName': self._serialize.url("self._config.resource_name", self._config.resource_name, 'str'),
                    'resourceGroupName': self._serialize.url("self._config.resource_group_name", self._config.resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds its query string; send it as-is.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserializes one page into (next_link, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetches one page and raises on any non-200 response.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationNetworks'}  # type: ignore
| |
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# Copyright (c) 2010 Google
# Copyright (c) 2008 rPath, Inc.
# Copyright (c) 2009 The Echo Nest Corporation
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Parts of this code were copied or derived from sample code supplied by AWS.
# The following notice applies to that code.
#
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006 Amazon Digital Services, Inc. or its
# affiliates.
"""
Handles basic connections to AWS
"""
from __future__ import with_statement
import base64
import errno
import httplib
import os
import Queue
import random
import re
import socket
import sys
import time
import urllib
import urlparse
import xml.sax
import copy
import auth
import auth_handler
import boto
import boto.utils
import boto.handler
import boto.cacerts
from boto import config, UserAgent
from boto.exception import AWSConnectionError, BotoClientError
from boto.exception import BotoServerError
from boto.provider import Provider
from boto.resultset import ResultSet
HAVE_HTTPS_CONNECTION = False
try:
import ssl
from boto import https_connection
# Google App Engine runs on Python 2.5 so doesn't have ssl.SSLError.
if hasattr(ssl, 'SSLError'):
HAVE_HTTPS_CONNECTION = True
except ImportError:
pass
try:
import threading
except ImportError:
import dummy_threading as threading
ON_APP_ENGINE = all(key in os.environ for key in (
'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
PORTS_BY_SECURITY = {True: 443,
False: 80}
DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__ )), "cacerts.txt")
class HostConnectionPool(object):
    """
    A pool of connections for one remote (host,is_secure) endpoint.

    Connections enter the pool via put() while their response body may
    still be unread, so each entry waits in the queue until it is ready
    for another request; get() hands back the first ready one.  Entries
    are stored as (connection, time) pairs, where the time is when the
    connection was (re)queued; pairs older than
    ConnectionPool.STALE_DURATION are discarded rather than reused, so we
    never hand out a connection AWS has probably already closed.

    Thread Safety:
        This class is used only from ConnectionPool while its mutex
        is held.
    """

    def __init__(self):
        # Ordered list of (connection, queued_at_timestamp) pairs.
        self.queue = []

    def size(self):
        """
        Return the number of connections pooled for this host, including
        ones that are still busy and not yet returnable by get().
        """
        return len(self.queue)

    def put(self, conn):
        """
        Add a connection to the pool, stamped with the current time.
        """
        self.queue.append((conn, time.time()))

    def get(self):
        """
        Return the next connection in this pool that is ready to be
        reused, or None if there aren't any.
        """
        # Drop ready connections that have gone stale.
        self.clean()
        # Walk the queue at most once: hand back the first ready
        # connection; re-queue (with a fresh timestamp) any that are
        # still being read from, assuming the reader is making progress.
        remaining = len(self.queue)
        while remaining:
            remaining -= 1
            conn, _ = self.queue.pop(0)
            if self._conn_ready(conn):
                return conn
            self.put(conn)
        return None

    def _conn_ready(self, conn):
        """
        Return True when the connection can accept another request.

        httplib attaches the response object to the connection once the
        headers are read and keeps it there until reading finishes; even
        afterwards the (closed) response may linger.  Peeking at the
        private attribute is ugly, but the state isn't exposed publicly.
        """
        if ON_APP_ENGINE:
            # App Engine's urlfetch-backed HTTPConnection has no
            # _HTTPConnection__response attribute and can't be probed,
            # so connection reuse is simply disabled there.
            return False
        response = getattr(conn, '_HTTPConnection__response', None)
        if response is None:
            return True
        return response.isclosed()

    def clean(self):
        """
        Drop stale entries from the front of the queue.  The connections
        themselves are not closed here -- a reader may still hold one.
        """
        while self.queue and self._pair_stale(self.queue[0]):
            self.queue.pop(0)

    def _pair_stale(self, pair):
        """
        Return True when the (connection, time) pair is too old to reuse.
        """
        _conn, queued_at = pair
        return (time.time() - queued_at) > ConnectionPool.STALE_DURATION
class ConnectionPool(object):
    """
    A thread-safe pool of HTTP connections, keyed by (host, is_secure),
    that expires entries after a fixed period.  Expiring early avoids
    waiting on connections that AWS has already timed out on its end.
    """

    # Minimum number of seconds between sweeps of the per-host pools.
    CLEAN_INTERVAL = 5.0

    # Seconds after which a pooled connection is considered stale and is
    # discarded instead of reused.  AWS observed (July 2011) to time
    # connections out after ~3 minutes; 60s keeps us safely under that.
    STALE_DURATION = 60.0

    def __init__(self):
        # (host, is_secure) -> HostConnectionPool; empty pools are removed.
        self.host_to_pool = {}
        # Timestamp of the most recent sweep.
        self.last_clean_time = 0.0
        self.mutex = threading.Lock()
        # Allow the boto config file to override the staleness window.
        ConnectionPool.STALE_DURATION = \
            config.getfloat('Boto', 'connection_stale_duration',
                            ConnectionPool.STALE_DURATION)

    def __getstate__(self):
        # Pickle without the (unpicklable) lock and without live
        # connections; an unpickled pool starts empty.
        state = self.__dict__.copy()
        state['host_to_pool'] = {}
        del state['mutex']
        return state

    def __setstate__(self, dct):
        self.__init__()

    def size(self):
        """
        Return the total number of pooled connections across all hosts.
        """
        total = 0
        for pool in self.host_to_pool.values():
            total += pool.size()
        return total

    def get_http_connection(self, host, is_secure):
        """
        Return a reusable connection for the named host, or None if there
        isn't one.  The caller is responsible for close()ing it when done.
        """
        self.clean()
        with self.mutex:
            pool = self.host_to_pool.get((host, is_secure))
            if pool is None:
                return None
            return pool.get()

    def put_http_connection(self, host, is_secure, conn):
        """
        Make a connection available for reuse by the named host.
        """
        with self.mutex:
            key = (host, is_secure)
            pool = self.host_to_pool.get(key)
            if pool is None:
                pool = self.host_to_pool[key] = HostConnectionPool()
            pool.put(conn)

    def clean(self):
        """
        Sweep stale connections out of every per-host pool and discard
        pools that end up empty.  Pools clean themselves on each get();
        this sweep also covers pools nothing is fetching from anymore.
        Rate-limited to once per CLEAN_INTERVAL.
        """
        with self.mutex:
            now = time.time()
            if now <= self.last_clean_time + self.CLEAN_INTERVAL:
                return
            emptied = []
            for key, pool in self.host_to_pool.items():
                pool.clean()
                if pool.size() == 0:
                    emptied.append(key)
            for key in emptied:
                del self.host_to_pool[key]
            self.last_clean_time = now
class HTTPRequest(object):

    def __init__(self, method, protocol, host, port, path, auth_path,
                 params, headers, body):
        """Represents an HTTP request.

        :type method: string
        :param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.

        :type protocol: string
        :param protocol: The http protocol used, 'http' or 'https'.

        :type host: string
        :param host: Host to which the request is addressed. eg. abc.com

        :type port: int
        :param port: port on which the request is being sent. Zero means
            unset, in which case default port will be chosen.

        :type path: string
        :param path: URL path that is being accessed.

        :type auth_path: string
        :param auth_path: The part of the URL path used when creating the
            authentication string (defaults to ``path`` when None).

        :type params: dict
        :param params: HTTP url query parameters, with key as name of the
            param, and value as value of param.

        :type headers: dict
        :param headers: HTTP headers, with key as name of the header and
            value as value of header.

        :type body: string
        :param body: Body of the HTTP request. If not present, will be
            None or empty string ('').
        """
        self.method = method
        self.protocol = protocol
        self.host = host
        self.port = port
        self.path = path
        # The auth path falls back to the request path when not supplied.
        self.auth_path = path if auth_path is None else auth_path
        self.params = params
        # Chunked Transfer-Encoding should act only on PUT requests:
        # strip the header (on a copy, to leave the caller's dict intact)
        # for any other method.
        chunked = bool(headers) and \
            headers.get('Transfer-Encoding') == 'chunked'
        if chunked and method != 'PUT':
            self.headers = headers.copy()
            del self.headers['Transfer-Encoding']
        else:
            self.headers = headers
        self.body = body

    def __str__(self):
        return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
                 'params(%s) headers(%s) body(%s)') % (self.method,
                self.protocol, self.host, self.port, self.path, self.params,
                self.headers, self.body))

    def authorize(self, connection, **kwargs):
        """
        Sign this request using the connection's auth handler, after
        percent-encoding any unicode header values as UTF-8.
        """
        for key in self.headers:
            val = self.headers[key]
            if isinstance(val, unicode):
                self.headers[key] = urllib.quote_plus(val.encode('utf-8'))
        connection._auth_handler.add_auth(self, **kwargs)
        self.headers['User-Agent'] = UserAgent
        # add_auth usually sets Content-Length for POSTs already; this is
        # a fallback for any other request that isn't chunked.
        if 'Content-Length' not in self.headers:
            if self.headers.get('Transfer-Encoding') != 'chunked':
                self.headers['Content-Length'] = str(len(self.body))
class HTTPResponse(httplib.HTTPResponse):

    def __init__(self, *args, **kwargs):
        httplib.HTTPResponse.__init__(self, *args, **kwargs)
        # Holds the full body after the first no-arg read().
        self._cached_response = ''

    def read(self, amt=None):
        """Read the response.

        This method does not have the same behavior as
        httplib.HTTPResponse.read: when called with no ``amt`` argument,
        the whole body is cached and subsequent no-arg calls **return the
        cached response** instead of ''.
        """
        # Sized reads keep the stock streaming behavior.
        if amt is not None:
            return httplib.HTTPResponse.read(self, amt)
        # Many places in boto call response.read() (possibly more than
        # once) and expect the body each time, so cache it on the first
        # full read and serve the cache afterwards.
        if not self._cached_response:
            self._cached_response = httplib.HTTPResponse.read(self)
        return self._cached_response
class AWSAuthConnection(object):
    # Base HTTP connection class shared by all boto service connections.
    # Owns proxy handling, the connection pool, certificate-validation
    # settings and the retry machinery (_mexe).
    def __init__(self, host, aws_access_key_id=None,
                 aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, path='/',
                 provider='aws', security_token=None,
                 suppress_consec_slashes=True,
                 validate_certs=True):
        """
        :type host: str
        :param host: The host to make the connection to
        :keyword str aws_access_key_id: Your AWS Access Key ID (provided by
            Amazon). If none is specified, the value in your
            ``AWS_ACCESS_KEY_ID`` environmental variable is used.
        :keyword str aws_secret_access_key: Your AWS Secret Access Key
            (provided by Amazon). If none is specified, the value in your
            ``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
        :type is_secure: boolean
        :param is_secure: Whether the connection is over SSL
        :type https_connection_factory: list or tuple
        :param https_connection_factory: A pair of an HTTP connection
            factory and the exceptions to catch. The factory should have
            a similar interface to L{httplib.HTTPSConnection}.
        :param str proxy: Address/hostname for a proxy server
        :type proxy_port: int
        :param proxy_port: The port to use when connecting over a proxy
        :type proxy_user: str
        :param proxy_user: The username to connect with on the proxy
        :type proxy_pass: str
        :param proxy_pass: The password to use when connection over a proxy.
        :type port: int
        :param port: The port to use to connect
        :type suppress_consec_slashes: bool
        :param suppress_consec_slashes: If provided, controls whether
            consecutive slashes will be suppressed in key paths.
        :type validate_certs: bool
        :param validate_certs: Controls whether SSL certificates
            will be validated or not. Defaults to True.
        """
        self.suppress_consec_slashes = suppress_consec_slashes
        self.num_retries = 6
        # Override passed-in is_secure setting if value was defined in config.
        if config.has_option('Boto', 'is_secure'):
            is_secure = config.getboolean('Boto', 'is_secure')
        self.is_secure = is_secure
        # Whether or not to validate server certificates.
        # The default is now to validate certificates. This can be
        # overridden in the boto config file or by passing an
        # explicit validate_certs parameter to the class constructor.
        self.https_validate_certificates = config.getbool(
            'Boto', 'https_validate_certificates',
            validate_certs)
        if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
            raise BotoClientError(
                "SSL server certificate validation is enabled in boto "
                "configuration, but Python dependencies required to "
                "support this feature are not available. Certificate "
                "validation is only supported when running under Python "
                "2.6 or later.")
        self.ca_certificates_file = config.get_value(
            'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
        # Resolve proxy settings before any connection state is built.
        self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
        # define exceptions from httplib that we want to catch and retry
        self.http_exceptions = (httplib.HTTPException, socket.error,
                                socket.gaierror, httplib.BadStatusLine)
        # define subclasses of the above that are not retryable.
        self.http_unretryable_exceptions = []
        if HAVE_HTTPS_CONNECTION:
            self.http_unretryable_exceptions.append(
                https_connection.InvalidCertificateException)
        # define values in socket exceptions we don't want to catch
        self.socket_exception_values = (errno.EINTR,)
        if https_connection_factory is not None:
            self.https_connection_factory = https_connection_factory[0]
            self.http_exceptions += https_connection_factory[1]
        else:
            self.https_connection_factory = None
        if (is_secure):
            self.protocol = 'https'
        else:
            self.protocol = 'http'
        self.host = host
        self.path = path
        # if the value passed in for debug
        if not isinstance(debug, (int, long)):
            debug = 0
        self.debug = config.getint('Boto', 'debug', debug)
        if port:
            self.port = port
        else:
            self.port = PORTS_BY_SECURITY[is_secure]
        # Timeout used to tell httplib how long to wait for socket timeouts.
        # Default is to leave timeout unchanged, which will in turn result in
        # the socket's default global timeout being used. To specify a
        # timeout, set http_socket_timeout in Boto config. Regardless,
        # timeouts will only be applied if Python is 2.6 or greater.
        self.http_connection_kwargs = {}
        if (sys.version_info[0], sys.version_info[1]) >= (2, 6):
            if config.has_option('Boto', 'http_socket_timeout'):
                timeout = config.getint('Boto', 'http_socket_timeout')
                self.http_connection_kwargs['timeout'] = timeout
        if isinstance(provider, Provider):
            # Allow overriding Provider
            self.provider = provider
        else:
            self._provider_type = provider
            self.provider = Provider(self._provider_type,
                                     aws_access_key_id,
                                     aws_secret_access_key,
                                     security_token)
        # allow config file to override default host
        if self.provider.host:
            self.host = self.provider.host
        self._pool = ConnectionPool()
        # Cached (host, is_secure) key for the ``connection`` property.
        self._connection = (self.server_name(), self.is_secure)
        self._last_rs = None
        self._auth_handler = auth.get_auth_handler(
              host, config, self.provider, self._required_auth_capability())
        # Subclasses may declare AuthServiceName to force a SigV4
        # service name different from the one derived from the endpoint.
        if getattr(self, 'AuthServiceName', None) is not None:
            self.auth_service_name = self.AuthServiceName
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.host)
def _required_auth_capability(self):
return []
def _get_auth_service_name(self):
return getattr(self._auth_handler, 'service_name')
# For Sigv4, the auth_service_name/auth_region_name properties allow
# the service_name/region_name to be explicitly set instead of being
# derived from the endpoint url.
def _set_auth_service_name(self, value):
self._auth_handler.service_name = value
auth_service_name = property(_get_auth_service_name, _set_auth_service_name)
def _get_auth_region_name(self):
return getattr(self._auth_handler, 'region_name')
def _set_auth_region_name(self, value):
self._auth_handler.region_name = value
auth_region_name = property(_get_auth_region_name, _set_auth_region_name)
    def connection(self):
        # Resolve the current (host, is_secure) pair through the pool so
        # pooled sockets are reused rather than holding one connection.
        return self.get_http_connection(*self._connection)
    connection = property(connection)
    def aws_access_key_id(self):
        # Read-only view of the provider's access key.
        return self.provider.access_key
    aws_access_key_id = property(aws_access_key_id)
    # Aliases kept for Google Storage callers and generic code.
    gs_access_key_id = aws_access_key_id
    access_key = aws_access_key_id
    def aws_secret_access_key(self):
        # Read-only view of the provider's secret key.
        return self.provider.secret_key
    aws_secret_access_key = property(aws_secret_access_key)
    # Aliases kept for Google Storage callers and generic code.
    gs_secret_access_key = aws_secret_access_key
    secret_key = aws_secret_access_key
def get_path(self, path='/'):
# The default behavior is to suppress consecutive slashes for reasons
# discussed at
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
if not self.suppress_consec_slashes:
return self.path + re.sub('^/*', "", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
path = path[:pos]
else:
params = None
if path[-1] == '/':
need_trailing = True
else:
need_trailing = False
path_elements = self.path.split('/')
path_elements.extend(path.split('/'))
path_elements = [p for p in path_elements if p]
path = '/' + '/'.join(path_elements)
if path[-1] != '/' and need_trailing:
path += '/'
if params:
path = path + params
return path
def server_name(self, port=None):
if not port:
port = self.port
if port == 80:
signature_host = self.host
else:
# This unfortunate little hack can be attributed to
# a difference in the 2.6 version of httplib. In old
# versions, it would append ":443" to the hostname sent
# in the Host header and so we needed to make sure we
# did the same when calculating the V2 signature. In 2.6
# (and higher!)
# it no longer does that. Hence, this kludge.
if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
sys.version[:3] in ('2.6', '2.7')) and port == 443:
signature_host = self.host
else:
signature_host = '%s:%d' % (self.host, port)
return signature_host
    def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
        """Resolve proxy settings from arguments, the http_proxy
        environment variable, and the Boto config file (in that order of
        precedence), then set self.use_proxy."""
        self.proxy = proxy
        self.proxy_port = proxy_port
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        if 'http_proxy' in os.environ and not self.proxy:
            # Parse an http_proxy value of the form
            # [http://][user:pass@]host[:port]
            pattern = re.compile(
                '(?:http://)?' \
                '(?:(?P<user>\w+):(?P<pass>.*)@)?' \
                '(?P<host>[\w\-\.]+)' \
                '(?::(?P<port>\d+))?'
            )
            match = pattern.match(os.environ['http_proxy'])
            if match:
                self.proxy = match.group('host')
                self.proxy_port = match.group('port')
                self.proxy_user = match.group('user')
                self.proxy_pass = match.group('pass')
        else:
            # Fall back to the Boto config file for any piece not given.
            if not self.proxy:
                self.proxy = config.get_value('Boto', 'proxy', None)
            if not self.proxy_port:
                self.proxy_port = config.get_value('Boto', 'proxy_port', None)
            if not self.proxy_user:
                self.proxy_user = config.get_value('Boto', 'proxy_user', None)
            if not self.proxy_pass:
                self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
        if not self.proxy_port and self.proxy:
            print "http_proxy environment variable does not specify " \
                "a port, using default"
            self.proxy_port = self.port
        # NOTE(review): '!= None' works but 'is not None' is idiomatic.
        self.use_proxy = (self.proxy != None)
def get_http_connection(self, host, is_secure):
conn = self._pool.get_http_connection(host, is_secure)
if conn is not None:
return conn
else:
return self.new_http_connection(host, is_secure)
    def new_http_connection(self, host, is_secure):
        """Open a fresh HTTP(S) connection to *host*, honoring proxy and
        certificate-validation settings, and attach our caching
        HTTPResponse class to it."""
        # For plain HTTP through a proxy, the TCP connection goes to the
        # proxy itself; HTTPS uses a CONNECT tunnel via proxy_ssl().
        if self.use_proxy and not is_secure:
            host = '%s:%d' % (self.proxy, int(self.proxy_port))
        if host is None:
            host = self.server_name()
        if is_secure:
            boto.log.debug(
                'establishing HTTPS connection: host=%s, kwargs=%s',
                host, self.http_connection_kwargs)
            if self.use_proxy:
                connection = self.proxy_ssl(host, is_secure and 443 or 80)
            elif self.https_connection_factory:
                connection = self.https_connection_factory(host)
            elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
                connection = https_connection.CertValidatingHTTPSConnection(
                    host, ca_certs=self.ca_certificates_file,
                    **self.http_connection_kwargs)
            else:
                connection = httplib.HTTPSConnection(host,
                                                     **self.http_connection_kwargs)
        else:
            boto.log.debug('establishing HTTP connection: kwargs=%s' %
                           self.http_connection_kwargs)
            if self.https_connection_factory:
                # even though the factory says https, this is too handy
                # to not be able to allow overriding for http also.
                connection = self.https_connection_factory(host,
                                                           **self.http_connection_kwargs)
            else:
                connection = httplib.HTTPConnection(host,
                                                    **self.http_connection_kwargs)
        if self.debug > 1:
            connection.set_debuglevel(self.debug)
        # self.connection must be maintained for backwards-compatibility
        # however, it must be dynamically pulled from the connection pool
        # set a private variable which will enable that
        if host.split(':')[0] == self.host and is_secure == self.is_secure:
            self._connection = (host, is_secure)
        # Set the response class of the http connection to use our custom
        # class.
        connection.response_class = HTTPResponse
        return connection
    def put_http_connection(self, host, is_secure, connection):
        # Return a connection to the pool so later requests can reuse it.
        self._pool.put_http_connection(host, is_secure, connection)
def proxy_ssl(self, host=None, port=None):
if host and port:
host = '%s:%d' % (host, port)
else:
host = '%s:%d' % (self.host, self.port)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.proxy, int(self.proxy_port)))
except:
raise
boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
sock.sendall("User-Agent: %s\r\n" % UserAgent)
if self.proxy_user and self.proxy_pass:
for k, v in self.get_proxy_auth_header().items():
sock.sendall("%s: %s\r\n" % (k, v))
# See discussion about this config option at
# https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
sock.sendall("\r\n")
else:
sock.sendall("\r\n")
resp = httplib.HTTPResponse(sock, strict=True, debuglevel=self.debug)
resp.begin()
if resp.status != 200:
# Fake a socket error, use a code that make it obvious it hasn't
# been generated by the socket library
raise socket.error(-71,
"Error talking to HTTP proxy %s:%s: %s (%s)" %
(self.proxy, self.proxy_port,
resp.status, resp.reason))
# We can safely close the response, it duped the original socket
resp.close()
h = httplib.HTTPConnection(host)
if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
boto.log.debug("wrapping ssl socket for proxied connection; "
"CA certificate file=%s",
self.ca_certificates_file)
key_file = self.http_connection_kwargs.get('key_file', None)
cert_file = self.http_connection_kwargs.get('cert_file', None)
sslSock = ssl.wrap_socket(sock, keyfile=key_file,
certfile=cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certificates_file)
cert = sslSock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not https_connection.ValidateCertificateHostname(cert, hostname):
raise https_connection.InvalidCertificateException(
hostname, cert, 'hostname mismatch')
else:
# Fallback for old Python without ssl.wrap_socket
if hasattr(httplib, 'ssl'):
sslSock = httplib.ssl.SSLSocket(sock)
else:
sslSock = socket.ssl(sock, None, None)
sslSock = httplib.FakeSocket(sock, sslSock)
# This is a bit unclean
h.sock = sslSock
return h
def prefix_proxy_to_path(self, path, host=None):
path = self.protocol + '://' + (host or self.server_name()) + path
return path
def get_proxy_auth_header(self):
auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
return {'Proxy-Authorization': 'Basic %s' % auth}
    def _mexe(self, request, sender=None, override_num_retries=None,
              retry_handler=None):
        """
        mexe - Multi-execute inside a loop, retrying multiple times to handle
               transient Internet errors by simply trying again.
        Also handles redirects.
        This code was inspired by the S3Utils classes posted to the boto-users
        Google group by Larry Bates. Thanks!
        """
        boto.log.debug('Method: %s' % request.method)
        boto.log.debug('Path: %s' % request.path)
        boto.log.debug('Data: %s' % request.body)
        boto.log.debug('Headers: %s' % request.headers)
        boto.log.debug('Host: %s' % request.host)
        response = None
        body = None
        e = None
        if override_num_retries is None:
            num_retries = config.getint('Boto', 'num_retries', self.num_retries)
        else:
            num_retries = override_num_retries
        i = 0
        connection = self.get_http_connection(request.host, self.is_secure)
        while i <= num_retries:
            # Use binary exponential backoff to desynchronize client requests.
            next_sleep = random.random() * (2 ** i)
            try:
                # we now re-sign each request before it is retried
                boto.log.debug('Token: %s' % self.provider.security_token)
                request.authorize(connection=self)
                if callable(sender):
                    # Caller-supplied sender (e.g. for streaming uploads).
                    response = sender(connection, request.method, request.path,
                                      request.body, request.headers)
                else:
                    connection.request(request.method, request.path,
                                       request.body, request.headers)
                    response = connection.getresponse()
                location = response.getheader('location')
                # -- gross hack --
                # httplib gets confused with chunked responses to HEAD requests
                # so I have to fake it out
                if request.method == 'HEAD' and getattr(response,
                                                        'chunked', False):
                    response.chunked = 0
                if callable(retry_handler):
                    # Give the caller a chance to force a retry (and to
                    # rewrite the attempt counter and sleep interval).
                    status = retry_handler(response, i, next_sleep)
                    if status:
                        msg, i, next_sleep = status
                        if msg:
                            boto.log.debug(msg)
                        time.sleep(next_sleep)
                        continue
                if response.status == 500 or response.status == 503:
                    # Server-side transient error: drain the body so the
                    # connection can be reused, then retry.
                    msg = 'Received %d response. ' % response.status
                    msg += 'Retrying in %3.1f seconds' % next_sleep
                    boto.log.debug(msg)
                    body = response.read()
                elif response.status < 300 or response.status >= 400 or \
                        not location:
                    # Terminal response (success or non-redirect error):
                    # return the connection to the pool and hand back.
                    self.put_http_connection(request.host, self.is_secure,
                                             connection)
                    return response
                else:
                    # 3xx with a Location header: follow the redirect.
                    scheme, request.host, request.path, \
                        params, query, fragment = urlparse.urlparse(location)
                    if query:
                        request.path += '?' + query
                    msg = 'Redirecting: %s' % scheme + '://'
                    msg += request.host + request.path
                    boto.log.debug(msg)
                    connection = self.get_http_connection(request.host,
                                                          scheme == 'https')
                    response = None
                    continue
            except self.http_exceptions, e:
                for unretryable in self.http_unretryable_exceptions:
                    if isinstance(e, unretryable):
                        boto.log.debug(
                            'encountered unretryable %s exception, re-raising' %
                            e.__class__.__name__)
                        raise e
                boto.log.debug('encountered %s exception, reconnecting' % \
                    e.__class__.__name__)
                connection = self.new_http_connection(request.host,
                                                      self.is_secure)
            time.sleep(next_sleep)
            i += 1
        # If we made it here, it's because we have exhausted our retries
        # and still haven't succeeded. So, if we have a response object,
        # use it to raise an exception.
        # Otherwise, raise the exception that must have already happened.
        if response:
            raise BotoServerError(response.status, response.reason, body)
        elif e:
            raise e
        else:
            msg = 'Please report this exception as a Boto Issue!'
            raise BotoClientError(msg)
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
if params == None:
params = {}
else:
params = params.copy()
if headers == None:
headers = {}
else:
headers = headers.copy()
host = host or self.host
if self.use_proxy:
if not auth_path:
auth_path = path
path = self.prefix_proxy_to_path(path, host)
if self.proxy_user and self.proxy_pass and not self.is_secure:
# If is_secure, we don't have to set the proxy authentication
# header here, we did that in the CONNECT to the proxy.
headers.update(self.get_proxy_auth_header())
return HTTPRequest(method, self.protocol, host, self.port,
path, auth_path, params, headers, data)
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
params=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries)
    def close(self):
        """(Optional) Close any open HTTP connections. This is non-destructive,
        and making a new request will open a connection again."""
        boto.log.debug('closing all HTTP connections')
        # Drop the cached (host, is_secure) key; retained only for code
        # that still reads self._connection directly.
        self._connection = None # compat field
class AWSQueryConnection(AWSAuthConnection):
    # Base class for services that speak the AWS "Query" protocol:
    # parameters travel in the query string (with Action/Version added
    # per call) and responses come back as XML.
    # Subclasses override APIVersion with their service's version string.
    APIVersion = ''
    # Exception type raised on error responses; subclasses may override.
    ResponseError = BotoServerError
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host=None, debug=0,
                 https_connection_factory=None, path='/', security_token=None,
                 validate_certs=True):
        """Delegate to AWSAuthConnection. Note *host* is accepted late in
        the signature here but is the first positional argument of the
        base class."""
        AWSAuthConnection.__init__(self, host, aws_access_key_id,
                                   aws_secret_access_key,
                                   is_secure, port, proxy,
                                   proxy_port, proxy_user, proxy_pass,
                                   debug, https_connection_factory, path,
                                   security_token=security_token,
                                   validate_certs=validate_certs)
    def _required_auth_capability(self):
        # No specific signing capability required; auth.get_auth_handler
        # picks a suitable default for the provider.
        return []
    def get_utf8_value(self, value):
        # Delegates to boto.utils; encodes unicode values to UTF-8 bytes.
        return boto.utils.get_utf8_value(value)
def make_request(self, action, params=None, path='/', verb='GET'):
http_request = self.build_base_http_request(verb, path, None,
params, {}, '',
self.server_name())
if action:
http_request.params['Action'] = action
if self.APIVersion:
http_request.params['Version'] = self.APIVersion
return self._mexe(http_request)
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
def build_complex_list_params(self, params, items, label, names):
"""Serialize a list of structures.
For example::
items = [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')]
label = 'ParamName.member'
names = ('One', 'Two', 'Three')
self.build_complex_list_params(params, items, label, names)
would result in the params dict being updated with these params::
ParamName.member.1.One = foo
ParamName.member.1.Two = bar
ParamName.member.1.Three = baz
ParamName.member.2.One = foo2
ParamName.member.2.Two = bar2
ParamName.member.2.Three = baz2
:type params: dict
:param params: The params dict. The complex list params
will be added to this dict.
:type items: list of tuples
:param items: The list to serialize.
:type label: string
:param label: The prefix to apply to the parameter.
:type names: tuple of strings
:param names: The names associated with each tuple element.
"""
for i, item in enumerate(items, 1):
current_prefix = '%s.%s' % (label, i)
for key, value in zip(names, item):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
# generics
def get_list(self, action, params, markers, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet(markers)
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_object(self, action, params, cls, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
obj = cls(parent)
h = boto.handler.XmlHandler(obj, parent)
xml.sax.parseString(body, h)
return obj
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_status(self, action, params, path='/', parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet()
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs.status
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from eemeter.features import (
compute_occupancy_feature,
compute_temperature_features,
compute_temperature_bin_features,
compute_time_features,
compute_usage_per_day_feature,
estimate_hour_of_week_occupancy,
get_missing_hours_of_week_warning,
fit_temperature_bins,
merge_features,
)
from eemeter.segmentation import segment_time_series
def test_compute_temperature_features_no_freq_index(
    il_electricity_cdd_hdd_billing_monthly
):
    """Temperature data whose index lacks a freq is rejected."""
    dataset = il_electricity_cdd_hdd_billing_monthly
    temperatures = dataset["temperature_data"]
    temperatures.index.freq = None
    with pytest.raises(ValueError):
        compute_temperature_features(dataset["meter_data"].index, temperatures)
def test_compute_temperature_features_no_meter_data_tz(
    il_electricity_cdd_hdd_billing_monthly
):
    """A timezone-naive meter index is rejected."""
    dataset = il_electricity_cdd_hdd_billing_monthly
    readings = dataset["meter_data"]
    readings.index = readings.index.tz_localize(None)
    with pytest.raises(ValueError):
        compute_temperature_features(readings.index, dataset["temperature_data"])
def test_compute_temperature_features_no_temp_data_tz(
    il_electricity_cdd_hdd_billing_monthly
):
    """A timezone-naive temperature series is rejected."""
    dataset = il_electricity_cdd_hdd_billing_monthly
    naive_temperatures = dataset["temperature_data"].tz_localize(None)
    with pytest.raises(ValueError):
        compute_temperature_features(
            dataset["meter_data"].index, naive_temperatures
        )
def test_compute_temperature_features_hourly_temp_mean(il_electricity_cdd_hdd_hourly):
    """Default features on hourly data: hour counts plus temperature_mean."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")  # covers both hdd and cdd
    features = compute_temperature_features(
        dataset["meter_data"][window].index, dataset["temperature_data"][window]
    )
    assert sorted(features.columns) == [
        "n_hours_dropped",
        "n_hours_kept",
        "temperature_mean",
    ]
    assert features.shape == (2952, 3)
    assert round(features.temperature_mean.mean()) == 62.0
def test_compute_temperature_features_hourly_hourly_degree_days(
    il_electricity_cdd_hdd_hourly, snapshot
):
    """Hourly degree-day features over a slice with both HDD and CDD."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")
    features = compute_temperature_features(
        dataset["meter_data"][window].index,
        dataset["temperature_data"][window],
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
    )
    assert sorted(features.columns) == [
        "cdd_65",
        "cdd_66",
        "hdd_60",
        "hdd_61",
        "n_hours_dropped",
        "n_hours_kept",
    ]
    assert features.shape == (2952, 6)
    snapshot.assert_match(
        [
            round(features[column].mean(), 2)
            for column in (
                "hdd_60",
                "hdd_61",
                "cdd_65",
                "cdd_66",
                "n_hours_kept",
                "n_hours_dropped",
            )
        ],
        "values",
    )
def test_compute_temperature_features_hourly_hourly_degree_days_use_mean_false(
    il_electricity_cdd_hdd_hourly, snapshot
):
    """Same as the hourly degree-day test, but with per-period totals."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")
    features = compute_temperature_features(
        dataset["meter_data"][window].index,
        dataset["temperature_data"][window],
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
        use_mean_daily_values=False,
    )
    assert features.shape == (2952, 6)
    assert sorted(features.columns) == [
        "cdd_65",
        "cdd_66",
        "hdd_60",
        "hdd_61",
        "n_hours_dropped",
        "n_hours_kept",
    ]
    snapshot.assert_match(
        [
            round(features[column].mean(), 2)
            for column in (
                "hdd_60",
                "hdd_61",
                "cdd_65",
                "cdd_66",
                "n_hours_kept",
                "n_hours_dropped",
            )
        ],
        "values",
    )
def test_compute_temperature_features_hourly_daily_degree_days_fail(
    il_electricity_cdd_hdd_hourly
):
    """The 'daily' degree-day method is invalid for an hourly meter index."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")
    with pytest.raises(ValueError):
        compute_temperature_features(
            dataset["meter_data"][window].index,
            dataset["temperature_data"][window],
            heating_balance_points=[60, 61],
            cooling_balance_points=[65, 66],
            degree_day_method="daily",
        )
def test_compute_temperature_features_hourly_daily_missing_explicit_freq(
    il_electricity_cdd_hdd_hourly
):
    """Degree-day computation requires an explicit freq on the meter index."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")
    readings = dataset["meter_data"][window]
    readings.index.freq = None
    with pytest.raises(ValueError):
        compute_temperature_features(
            readings.index,
            dataset["temperature_data"][window],
            heating_balance_points=[60, 61],
            cooling_balance_points=[65, 66],
            degree_day_method="daily",
        )
def test_compute_temperature_features_hourly_bad_degree_days(
    il_electricity_cdd_hdd_hourly
):
    """An unrecognized degree_day_method is rejected (hourly data)."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")
    with pytest.raises(ValueError):
        compute_temperature_features(
            dataset["meter_data"][window].index,
            dataset["temperature_data"][window],
            heating_balance_points=[60, 61],
            cooling_balance_points=[65, 66],
            degree_day_method="UNKNOWN",
        )
def test_compute_temperature_features_hourly_data_quality(
    il_electricity_cdd_hdd_hourly
):
    """data_quality=True yields null/not-null counts instead of the mean."""
    dataset = il_electricity_cdd_hdd_hourly
    window = slice("2016-03-01", "2016-07-01")
    features = compute_temperature_features(
        dataset["meter_data"][window].index,
        dataset["temperature_data"][window],
        temperature_mean=False,
        data_quality=True,
    )
    assert features.shape == (2952, 4)
    assert sorted(features.columns) == [
        "n_hours_dropped",
        "n_hours_kept",
        "temperature_not_null",
        "temperature_null",
    ]
    assert round(features.temperature_not_null.mean(), 2) == 1.0
    assert round(features.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_daily_temp_mean(il_electricity_cdd_hdd_daily):
    """Default features on daily data: day counts plus temperature_mean."""
    dataset = il_electricity_cdd_hdd_daily
    features = compute_temperature_features(
        dataset["meter_data"].index, dataset["temperature_data"]
    )
    assert features.shape == (810, 3)
    assert sorted(features.columns) == [
        "n_days_dropped",
        "n_days_kept",
        "temperature_mean",
    ]
    assert round(features.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_daily_daily_degree_days(
    il_electricity_cdd_hdd_daily, snapshot
):
    """Daily degree-day features computed with the 'daily' method."""
    dataset = il_electricity_cdd_hdd_daily
    features = compute_temperature_features(
        dataset["meter_data"].index,
        dataset["temperature_data"],
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="daily",
    )
    assert features.shape == (810, 6)
    assert sorted(features.columns) == [
        "cdd_65",
        "cdd_66",
        "hdd_60",
        "hdd_61",
        "n_days_dropped",
        "n_days_kept",
    ]
    snapshot.assert_match(
        [
            round(features[column].mean(), 2)
            for column in (
                "hdd_60",
                "hdd_61",
                "cdd_65",
                "cdd_66",
                "n_days_kept",
                "n_days_dropped",
            )
        ],
        "values",
    )
def test_compute_temperature_features_daily_daily_degree_days_use_mean_false(
    il_electricity_cdd_hdd_daily, snapshot
):
    """Daily 'daily'-method degree days as per-period totals."""
    dataset = il_electricity_cdd_hdd_daily
    features = compute_temperature_features(
        dataset["meter_data"].index,
        dataset["temperature_data"],
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="daily",
        use_mean_daily_values=False,
    )
    assert features.shape == (810, 6)
    assert sorted(features.columns) == [
        "cdd_65",
        "cdd_66",
        "hdd_60",
        "hdd_61",
        "n_days_dropped",
        "n_days_kept",
    ]
    snapshot.assert_match(
        [
            round(features[column].mean(), 2)
            for column in (
                "hdd_60",
                "hdd_61",
                "cdd_65",
                "cdd_66",
                "n_days_kept",
                "n_days_dropped",
            )
        ],
        "values",
    )
def test_compute_temperature_features_daily_hourly_degree_days(
    il_electricity_cdd_hdd_daily, snapshot
):
    """Daily meter data with degree days computed by the 'hourly' method."""
    dataset = il_electricity_cdd_hdd_daily
    features = compute_temperature_features(
        dataset["meter_data"].index,
        dataset["temperature_data"],
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
    )
    assert features.shape == (810, 6)
    assert sorted(features.columns) == [
        "cdd_65",
        "cdd_66",
        "hdd_60",
        "hdd_61",
        "n_hours_dropped",
        "n_hours_kept",
    ]
    snapshot.assert_match(
        [
            round(features[column].mean(), 2)
            for column in (
                "hdd_60",
                "hdd_61",
                "cdd_65",
                "cdd_66",
                "n_hours_kept",
                "n_hours_dropped",
            )
        ],
        "values",
    )
def test_compute_temperature_features_daily_hourly_degree_days_use_mean_false(
    il_electricity_cdd_hdd_daily, snapshot
):
    """Hourly-method degree days on daily data with use_mean_daily_values=False."""
    meter_data = il_electricity_cdd_hdd_daily["meter_data"]
    temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
        use_mean_daily_values=False,
    )
    assert df.shape == (810, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_hours_dropped", "n_hours_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_hours_kept", "n_hours_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_daily_bad_degree_days(
    il_electricity_cdd_hdd_daily
):
    """An unrecognized degree_day_method raises ValueError."""
    dataset = il_electricity_cdd_hdd_daily
    with pytest.raises(ValueError):
        compute_temperature_features(
            dataset["meter_data"].index,
            dataset["temperature_data"],
            heating_balance_points=[60, 61],
            cooling_balance_points=[65, 66],
            degree_day_method="UNKNOWN",
        )
def test_compute_temperature_features_daily_data_quality(il_electricity_cdd_hdd_daily):
    """data_quality=True adds temperature null/not-null counts for daily data."""
    meter_data = il_electricity_cdd_hdd_daily["meter_data"]
    temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
    df = compute_temperature_features(
        meter_data.index, temperature_data, temperature_mean=False, data_quality=True
    )
    assert df.shape == (810, 4)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "n_days_dropped", "n_days_kept", "temperature_not_null", "temperature_null"
    ]
    assert round(df.temperature_not_null.mean(), 2) == 23.99
    assert round(df.temperature_null.mean(), 2) == 0.00
def test_compute_temperature_features_billing_monthly_temp_mean(
    il_electricity_cdd_hdd_billing_monthly
):
    """Default features for billing-monthly data include mean temperature."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    df = compute_temperature_features(meter_data.index, temperature_data)
    assert df.shape == (27, 3)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == ["n_days_dropped", "n_days_kept", "temperature_mean"]
    assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_billing_monthly_daily_degree_days(
    il_electricity_cdd_hdd_billing_monthly, snapshot
):
    """Daily-method degree days computed over billing-monthly meter data."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="daily",
    )
    assert df.shape == (27, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_days_dropped", "n_days_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_days_kept", "n_days_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_billing_monthly_daily_degree_days_use_mean_false(
    il_electricity_cdd_hdd_billing_monthly, snapshot
):
    """Daily-method degree days on billing data with use_mean_daily_values=False."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="daily",
        use_mean_daily_values=False,
    )
    assert df.shape == (27, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_days_dropped", "n_days_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_days_kept", "n_days_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_billing_monthly_hourly_degree_days(
    il_electricity_cdd_hdd_billing_monthly, snapshot
):
    """Hourly-method degree days computed over billing-monthly meter data."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
    )
    assert df.shape == (27, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_hours_dropped", "n_hours_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_hours_kept", "n_hours_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_billing_monthly_hourly_degree_days_use_mean_false(
    il_electricity_cdd_hdd_billing_monthly, snapshot
):
    """Hourly-method degree days on billing data with use_mean_daily_values=False."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
        use_mean_daily_values=False,
    )
    assert df.shape == (27, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_hours_dropped", "n_hours_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_hours_kept", "n_hours_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_billing_monthly_bad_degree_day_method(
    il_electricity_cdd_hdd_billing_monthly
):
    """An unrecognized degree_day_method raises ValueError."""
    dataset = il_electricity_cdd_hdd_billing_monthly
    with pytest.raises(ValueError):
        compute_temperature_features(
            dataset["meter_data"].index,
            dataset["temperature_data"],
            heating_balance_points=[60, 61],
            cooling_balance_points=[65, 66],
            degree_day_method="UNKNOWN",
        )
def test_compute_temperature_features_billing_monthly_data_quality(
    il_electricity_cdd_hdd_billing_monthly
):
    """data_quality=True adds temperature null/not-null counts for billing data."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index, temperature_data, temperature_mean=False, data_quality=True
    )
    assert df.shape == (27, 4)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "n_days_dropped", "n_days_kept", "temperature_not_null", "temperature_null"
    ]
    assert round(df.temperature_not_null.mean(), 2) == 729.23
    assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_billing_bimonthly_temp_mean(
    il_electricity_cdd_hdd_billing_bimonthly
):
    """Default features for billing-bimonthly data include mean temperature."""
    meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
    df = compute_temperature_features(meter_data.index, temperature_data)
    assert df.shape == (14, 3)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == ["n_days_dropped", "n_days_kept", "temperature_mean"]
    assert round(df.temperature_mean.mean()) == 55.0
def test_compute_temperature_features_billing_bimonthly_daily_degree_days(
    il_electricity_cdd_hdd_billing_bimonthly, snapshot
):
    """Daily-method degree days computed over billing-bimonthly meter data."""
    meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="daily",
    )
    assert df.shape == (14, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_days_dropped", "n_days_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_days_kept", "n_days_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_billing_bimonthly_hourly_degree_days(
    il_electricity_cdd_hdd_billing_bimonthly, snapshot
):
    """Hourly-method degree days computed over billing-bimonthly meter data."""
    meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[60, 61],
        cooling_balance_points=[65, 66],
        temperature_mean=False,
        degree_day_method="hourly",
    )
    assert df.shape == (14, 6)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "cdd_65", "cdd_66", "hdd_60", "hdd_61", "n_hours_dropped", "n_hours_kept"
    ]
    snapshot.assert_match(
        [
            round(df[col].mean(), 2)
            for col in ("hdd_60", "hdd_61", "cdd_65", "cdd_66", "n_hours_kept", "n_hours_dropped")
        ],
        "values",
    )
def test_compute_temperature_features_billing_bimonthly_bad_degree_days(
    il_electricity_cdd_hdd_billing_bimonthly
):
    """An unrecognized degree_day_method raises ValueError."""
    dataset = il_electricity_cdd_hdd_billing_bimonthly
    with pytest.raises(ValueError):
        compute_temperature_features(
            dataset["meter_data"].index,
            dataset["temperature_data"],
            heating_balance_points=[60, 61],
            cooling_balance_points=[65, 66],
            degree_day_method="UNKNOWN",
        )
def test_compute_temperature_features_billing_bimonthly_data_quality(
    il_electricity_cdd_hdd_billing_bimonthly
):
    """data_quality=True adds temperature null/not-null counts for bimonthly data."""
    meter_data = il_electricity_cdd_hdd_billing_bimonthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_bimonthly["temperature_data"]
    df = compute_temperature_features(
        meter_data.index, temperature_data, temperature_mean=False, data_quality=True
    )
    assert df.shape == (14, 4)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == [
        "n_days_dropped", "n_days_kept", "temperature_not_null", "temperature_null"
    ]
    assert round(df.temperature_not_null.mean(), 2) == 1478.77
    assert round(df.temperature_null.mean(), 2) == 0.0
def test_compute_temperature_features_shorter_temperature_data(
    il_electricity_cdd_hdd_daily
):
    """Truncated temperature data still yields a full-length feature frame."""
    meter_data = il_electricity_cdd_hdd_daily["meter_data"]
    temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
    # drop some data
    temperature_data = temperature_data[:-200]
    df = compute_temperature_features(meter_data.index, temperature_data)
    assert df.shape == (810, 3)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == ["n_days_dropped", "n_days_kept", "temperature_mean"]
    assert round(df.temperature_mean.sum()) == 43958.0
def test_compute_temperature_features_shorter_meter_data(il_electricity_cdd_hdd_daily):
    """Truncated meter data shortens the frame and NaNs the trailing row."""
    meter_data = il_electricity_cdd_hdd_daily["meter_data"]
    temperature_data = il_electricity_cdd_hdd_daily["temperature_data"]
    # drop some data
    meter_data = meter_data[:-10]
    df = compute_temperature_features(meter_data.index, temperature_data)
    assert df.shape == (800, 3)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == ["n_days_dropped", "n_days_kept", "temperature_mean"]
    assert round(df.temperature_mean.sum()) == 43904.0
    # ensure last row is NaN'ed
    assert pd.isnull(df.iloc[-1].n_days_kept)
def test_compute_temperature_features_with_duplicated_index(
    il_electricity_cdd_hdd_billing_monthly
):
    """A duplicated meter trace index raises a descriptive ValueError."""
    meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
    # these are specifically formed to give a less readable error if
    # duplicates are not caught
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    meter_data = pd.concat([meter_data, meter_data]).sort_index()
    temperature_data = temperature_data.iloc[8000:]
    with pytest.raises(ValueError) as excinfo:
        compute_temperature_features(meter_data.index, temperature_data)
    assert str(excinfo.value) == "Duplicates found in input meter trace index."
def test_compute_temperature_features_empty_temperature_data():
    """Empty temperature data yields an empty but well-formed feature frame."""
    index = pd.DatetimeIndex([], tz="UTC", name="dt", freq="H")
    temperature_data = pd.Series({"value": []}, index=index).astype(float)
    result_index = temperature_data.resample("D").sum().index
    meter_data_hack = pd.DataFrame({"value": 0}, index=result_index)
    df = compute_temperature_features(
        meter_data_hack.index,
        temperature_data,
        heating_balance_points=[65],
        cooling_balance_points=[65],
        degree_day_method="daily",
        use_mean_daily_values=False,
    )
    assert df.shape == (0, 3)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == ["n_days_dropped", "n_days_kept", "temperature_mean"]
    assert round(df.temperature_mean.sum()) == 0
def test_compute_temperature_features_empty_meter_data():
    """Empty meter data yields an empty but well-formed feature frame."""
    index = pd.DatetimeIndex([], tz="UTC", name="dt", freq="H")
    temperature_data = pd.Series({"value": 0}, index=index)
    result_index = temperature_data.resample("D").sum().index
    meter_data_hack = pd.DataFrame({"value": []}, index=result_index)
    meter_data_hack.index.freq = None
    df = compute_temperature_features(
        meter_data_hack.index,
        temperature_data,
        heating_balance_points=[65],
        cooling_balance_points=[65],
        degree_day_method="daily",
        use_mean_daily_values=False,
    )
    assert df.shape == (0, 3)
    # sorted() already returns a list; the redundant list() wrapper is dropped.
    assert sorted(df.columns) == ["n_days_dropped", "n_days_kept", "temperature_mean"]
    assert round(df.temperature_mean.sum()) == 0
def test_merge_features():
    """Merging a series and frames aligns all columns on the shared index."""
    index = pd.date_range("2017-01-01", periods=100, freq="H", tz="UTC")
    features = merge_features(
        [
            pd.Series(1, index=index, name="a"),
            pd.DataFrame({"b": 2}, index=index),
            pd.DataFrame({"c": 3, "d": 4}, index=index),
        ]
    )
    assert list(features.columns) == ["a", "b", "c", "d"]
    assert features.shape == (100, 4)
    assert features.sum().sum() == 1000
    # Each constant column sums to its value times the 100-row index length.
    for name, expected in zip("abcd", (100, 200, 300, 400)):
        assert features[name].sum() == expected
    assert features.index[0] == index[0]
    assert features.index[-1] == index[-1]
def test_merge_features_empty_raises():
    """Merging an empty feature list raises ValueError."""
    with pytest.raises(ValueError):
        # Return value was previously bound to an unused local; dropped.
        merge_features([])
@pytest.fixture
def meter_data_hourly():
    """Hourly meter data fixture: 100 hours of constant unit usage."""
    idx = pd.date_range("2017-01-01", periods=100, freq="H", tz="UTC")
    return pd.DataFrame({"value": 1}, index=idx)
def test_compute_usage_per_day_feature_hourly(meter_data_hourly):
    """Hourly usage aggregates to 24 units per day under the default name."""
    result = compute_usage_per_day_feature(meter_data_hourly)
    assert result.name == "usage_per_day"
    assert result["2017-01-01T00:00:00Z"] == 24
    assert result.sum() == 2376.0
def test_compute_usage_per_day_feature_hourly_series_name(meter_data_hourly):
    """The output series can be renamed via series_name."""
    result = compute_usage_per_day_feature(meter_data_hourly, series_name="meter_value")
    assert result.name == "meter_value"
@pytest.fixture
def meter_data_daily():
    """Daily meter data fixture: 100 days of constant unit usage."""
    idx = pd.date_range("2017-01-01", periods=100, freq="D", tz="UTC")
    return pd.DataFrame({"value": 1}, index=idx)
def test_compute_usage_per_day_feature_daily(meter_data_daily):
    """Daily usage passes through as one unit per day."""
    result = compute_usage_per_day_feature(meter_data_daily)
    assert result["2017-01-01T00:00:00Z"] == 1
    assert result.sum() == 99.0
@pytest.fixture
def meter_data_billing():
    """Billing-period (month-start) meter data fixture: 100 periods of unit usage."""
    idx = pd.date_range("2017-01-01", periods=100, freq="MS", tz="UTC")
    return pd.DataFrame({"value": 1}, index=idx)
def test_compute_usage_per_day_feature_billing(meter_data_billing):
    """Billing usage is spread across the number of days in each period."""
    result = compute_usage_per_day_feature(meter_data_billing)
    assert result["2017-01-01T00:00:00Z"] == 1.0 / 31
    assert result.sum().round(3) == 3.257
@pytest.fixture
def complete_hour_of_week_feature():
    """Hour-of-week feature covering a full 168-hour week."""
    index = pd.date_range("2017-01-01", periods=168, freq="H", tz="UTC")
    return compute_time_features(index, hour_of_week=True).hour_of_week
def test_get_missing_hours_of_week_warning_ok(complete_hour_of_week_feature):
    """A complete week produces no missing-hours warning."""
    assert get_missing_hours_of_week_warning(complete_hour_of_week_feature) is None
@pytest.fixture
def partial_hour_of_week_feature():
    """Hour-of-week feature covering only the first 84 hours of the week."""
    index = pd.date_range("2017-01-01", periods=84, freq="H", tz="UTC")
    return compute_time_features(index, hour_of_week=True).hour_of_week
def test_get_missing_hours_of_week_warning_triggered(partial_hour_of_week_feature):
    """An incomplete week warns about the absent hours of week."""
    warning = get_missing_hours_of_week_warning(partial_hour_of_week_feature)
    assert warning.qualified_name is not None
    assert warning.description is not None
    assert warning.data["missing_hours_of_week"] == list(range(60, 144))
def test_compute_time_features_bad_freq():
    """A non-hourly index is rejected with ValueError."""
    daily_index = pd.date_range("2017-01-01", periods=168, freq="D", tz="UTC")
    with pytest.raises(ValueError):
        compute_time_features(daily_index)
def test_compute_time_features_all():
    """All three time features are computed for a full week of hourly data."""
    index = pd.date_range("2017-01-01", periods=168, freq="H", tz="UTC")
    features = compute_time_features(index)
    assert list(features.columns) == ["day_of_week", "hour_of_day", "hour_of_week"]
    assert features.shape == (168, 3)
    assert features.sum().sum() == 16464.0
    # The feature columns are categoricals, so summing them directly raises.
    for column in ("day_of_week", "hour_of_day", "hour_of_week"):
        with pytest.raises(TypeError):
            features[column].sum()
    assert features.day_of_week.astype("float").sum() == sum(range(7)) * 24
    assert features.hour_of_day.astype("float").sum() == sum(range(24)) * 7
    assert features.hour_of_week.astype("float").sum() == sum(range(168))
    assert features.index[0] == index[0]
    assert features.index[-1] == index[-1]
def test_compute_time_features_none():
    """Requesting no time features at all raises ValueError."""
    index = pd.date_range("2017-01-01", periods=168, freq="H", tz="UTC")
    with pytest.raises(ValueError):
        compute_time_features(
            index, day_of_week=False, hour_of_day=False, hour_of_week=False
        )
@pytest.fixture
def occupancy_precursor(il_electricity_cdd_hdd_hourly):
    """Merged meter/temperature/time features used by the occupancy tests."""
    meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
    temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"]
    temperature_features = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[50],
        cooling_balance_points=[65],
        degree_day_method="hourly",
    )
    time_features = compute_time_features(meter_data.index)
    return merge_features(
        [meter_data.value.to_frame("meter_value"), temperature_features, time_features]
    )
def test_estimate_hour_of_week_occupancy_no_segmentation(occupancy_precursor):
    """Unsegmented occupancy estimation yields a single all-zero column."""
    occupancy = estimate_hour_of_week_occupancy(occupancy_precursor)
    assert list(occupancy.columns) == ["occupancy"]
    assert occupancy.shape == (168, 1)
    assert occupancy.sum().sum() == 0
@pytest.fixture
def one_month_segmentation(occupancy_precursor):
    """One-month segmentation of the occupancy precursor index."""
    return segment_time_series(occupancy_precursor.index, segment_type="one_month")
def test_estimate_hour_of_week_occupancy_one_month_segmentation(
    occupancy_precursor, one_month_segmentation
):
    """Month-segmented estimation yields one occupancy column per month."""
    occupancy = estimate_hour_of_week_occupancy(
        occupancy_precursor, segmentation=one_month_segmentation
    )
    months = "jan feb mar apr may jun jul aug sep oct nov dec".split()
    assert list(occupancy.columns) == months
    assert occupancy.shape == (168, 12)
    assert occupancy.sum().sum() == 84.0
@pytest.fixture
def temperature_means():
    """2000 hourly rows cycling through five fixed temperature values."""
    index = pd.date_range("2017-01-01", periods=2000, freq="H", tz="UTC")
    values = [10, 35, 55, 80, 100] * 400
    return pd.DataFrame({"temperature_mean": values}, index=index)
def test_fit_temperature_bins_no_segmentation(temperature_means):
    """Unsegmented bin fitting keeps four of the candidate endpoints."""
    result = fit_temperature_bins(
        temperature_means, segmentation=None, occupancy_lookup=None
    )
    assert list(result.columns) == ["keep_bin_endpoint"]
    assert result.shape == (6, 1)
    assert result.sum().sum() == 4
@pytest.fixture
def occupancy_lookup_no_segmentation(occupancy_precursor):
    """Unsegmented occupancy estimate used by the bin-fitting tests."""
    return estimate_hour_of_week_occupancy(occupancy_precursor)
def test_fit_temperature_bins_no_segmentation_with_occupancy(
    temperature_means, occupancy_lookup_no_segmentation
):
    """With an occupancy lookup, occupied and unoccupied bins are fit separately."""
    occupied_bins, unoccupied_bins = fit_temperature_bins(
        temperature_means,
        segmentation=None,
        occupancy_lookup=occupancy_lookup_no_segmentation,
    )
    for frame, expected_total in ((occupied_bins, 0), (unoccupied_bins, 4)):
        assert list(frame.columns) == ["keep_bin_endpoint"]
        assert frame.shape == (6, 1)
        assert frame.sum().sum() == expected_total
def test_fit_temperature_bins_one_month_segmentation(
    temperature_means, one_month_segmentation
):
    """Month-segmented bin fitting produces one column per month."""
    result = fit_temperature_bins(temperature_means, segmentation=one_month_segmentation)
    months = "jan feb mar apr may jun jul aug sep oct nov dec".split()
    assert list(result.columns) == months
    assert result.shape == (6, 12)
    assert result.sum().sum() == 12
@pytest.fixture
def occupancy_lookup_one_month_segmentation(
    occupancy_precursor, one_month_segmentation
):
    """Month-segmented occupancy estimate used by the bin-fitting tests."""
    return estimate_hour_of_week_occupancy(
        occupancy_precursor, segmentation=one_month_segmentation
    )
def test_fit_temperature_bins_with_occupancy_lookup(
    temperature_means, one_month_segmentation, occupancy_lookup_one_month_segmentation
):
    """Segmented + occupancy-aware fitting yields per-month occupied/unoccupied bins."""
    occupied_bins, unoccupied_bins = fit_temperature_bins(
        temperature_means,
        segmentation=one_month_segmentation,
        occupancy_lookup=occupancy_lookup_one_month_segmentation,
    )
    months = "jan feb mar apr may jun jul aug sep oct nov dec".split()
    assert list(occupied_bins.columns) == months
    assert occupied_bins.shape == (6, 12)
    assert occupied_bins.sum().sum() == 0
    assert list(unoccupied_bins.columns) == months
    assert unoccupied_bins.shape == (6, 12)
    assert unoccupied_bins.sum().sum() == 12
def test_fit_temperature_bins_empty(temperature_means):
    """Bin fitting on an empty frame keeps no endpoints."""
    result = fit_temperature_bins(temperature_means.iloc[:0])
    assert list(result.columns) == ["keep_bin_endpoint"]
    assert result.shape == (6, 1)
    assert result.sum().sum() == 0
def test_compute_temperature_bin_features(temperature_means):
    """Two endpoints partition temperatures into three bin columns."""
    bin_features = compute_temperature_bin_features(
        temperature_means.temperature_mean, [25, 75]
    )
    assert list(bin_features.columns) == ["bin_0", "bin_1", "bin_2"]
    assert bin_features.shape == (2000, 3)
    assert bin_features.sum().sum() == 112000.0
@pytest.fixture
def even_occupancy():
    """Occupancy lookup marking even hours of week as occupied."""
    flags = [hour % 2 == 0 for hour in range(168)]
    return pd.Series(flags, index=pd.Categorical(range(168)))
def test_compute_occupancy_feature(even_occupancy):
    """Occupancy lookup is broadcast across 1000 hourly periods."""
    index = pd.date_range("2017-01-01", periods=1000, freq="H", tz="UTC")
    hour_of_week = compute_time_features(index, hour_of_week=True).hour_of_week
    occupancy = compute_occupancy_feature(hour_of_week, even_occupancy)
    assert occupancy.name == "occupancy"
    assert occupancy.shape == (1000,)
    assert occupancy.sum().sum() == 500
def test_compute_occupancy_feature_with_nans(even_occupancy):
    """If there are less than 168 periods, the NaN at the end causes problems"""
    index = pd.date_range("2017-01-01", periods=100, freq="H", tz="UTC")
    time_features = compute_time_features(index, hour_of_week=True)
    hour_of_week = time_features.hour_of_week
    # Bug fix: use positional .iloc rather than `hour_of_week[-1]` -- integer-key
    # fallback indexing on a non-integer-indexed Series is deprecated and removed
    # in modern pandas, where the old spelling would fail instead of setting the
    # last element. (The sibling test below already uses .iloc.)
    hour_of_week.iloc[-1] = np.nan
    # comment out line below to see the error from not dropping na when
    # calculationg _add_weights when there are less than 168 periods.
    # TODO (ssuffian): Refactor so get_missing_hours_warnings propogates.
    # right now, it will error if the dropna below isn't used.
    hour_of_week.dropna(inplace=True)
    occupancy = compute_occupancy_feature(hour_of_week, even_occupancy)
@pytest.fixture
def occupancy_precursor_only_nan(il_electricity_cdd_hdd_hourly):
    """Occupancy precursor whose truncated window ends with a NaN meter value."""
    meter_data = il_electricity_cdd_hdd_hourly["meter_data"]
    meter_data = meter_data[datetime(2017, 1, 4) : datetime(2017, 6, 1)]
    # Simulates a segment where there is only a single nan value
    meter_data.iloc[-1] = np.nan
    temperature_data = il_electricity_cdd_hdd_hourly["temperature_data"]
    temperature_features = compute_temperature_features(
        meter_data.index,
        temperature_data,
        heating_balance_points=[50],
        cooling_balance_points=[65],
        degree_day_method="hourly",
    )
    time_features = compute_time_features(meter_data.index)
    return merge_features(
        [meter_data.value.to_frame("meter_value"), temperature_features, time_features]
    )
@pytest.fixture
def segmentation_only_nan(occupancy_precursor_only_nan):
    """Three-month weighted segmentation over the NaN-containing precursor."""
    return segment_time_series(
        occupancy_precursor_only_nan.index, segment_type="three_month_weighted"
    )
def test_estimate_hour_of_week_occupancy_segmentation_only_nan(
    occupancy_precursor_only_nan, segmentation_only_nan
):
    """Estimation tolerates a segment containing only a NaN meter value."""
    # Smoke test: the call must simply not raise; the unused result binding
    # from the original was dropped.
    estimate_hour_of_week_occupancy(
        occupancy_precursor_only_nan, segmentation=segmentation_only_nan
    )
def test_compute_occupancy_feature_hour_of_week_has_nan(even_occupancy):
    """A NaN hour-of-week entry is tolerated by compute_occupancy_feature."""
    index = pd.date_range("2017-01-01", periods=72, freq="H", tz="UTC")
    hour_of_week = compute_time_features(index, hour_of_week=True).hour_of_week
    hour_of_week.iloc[-1] = np.nan
    occupancy = compute_occupancy_feature(hour_of_week, even_occupancy)
    assert occupancy.name == "occupancy"
    assert occupancy.shape == (72,)
    assert occupancy.sum() == 36
| |
__author__ = 'Ian S. Evans'
__version__ = '0.0.4'
from json import dumps, JSONEncoder, loads
from collections import UserList
MIMETYPE = "application/vnd.collection+json"
class Comparable(object):
    """
    An object that needs to be comparable.
    Stolen shamelessly from Ricardo Kirkner's bindings
    See https://github.com/ricardokirkner/collection-json.python
    """

    def __init__(self, *args, **kwargs):
        super(Comparable, self).__init__()

    def __eq__(self, other):
        # Equal iff the concrete types and the instance dicts both match.
        return type(self) == type(other) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # Defined as the exact negation of __eq__.
        return not (type(self) == type(other) and self.__dict__ == other.__dict__)
class CollectionField(object):
    """
    Descriptor that validates assignments to a named attribute.

    :param cls: Required class of assigned values.
    :param truthy: When True, assigned values must not evaluate to False
        (this also forces nullable off).
    :param nullable: When True (and not truthy), None may be assigned/deleted.
    """

    def __init__(self, cls, truthy=False, nullable=True):
        # have to double on type call to catch meta classes
        if not isinstance(cls, type):
            raise TypeError("Parameter 'cls' must be a class. type(type(cls)) -> {cls}".format(cls=str(type(cls))))
        self.cls = cls
        self.truthy = truthy
        # A truthy field can never hold None, so nullable is forced off.
        self.nullable = False if truthy else nullable

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor itself.
        if instance is None:
            return self
        return instance.__dict__.get(self.get_own_name(owner))

    def __set__(self, instance, value):
        if self.truthy and not value:
            raise ValueError("Value must be truthy (cannot evaluate to False.)")
        if value is None:
            if not self.nullable:
                raise ValueError("Value cannot be None.")
        elif not isinstance(value, self.cls):
            raise TypeError("Value must be an instance of {cls}.".format(cls=self.cls.__name__))
        instance.__dict__[self.get_own_name(type(instance))] = value

    def __delete__(self, instance):
        if not self.nullable:
            raise ValueError("{name} cannot be deleted.".format(name=self.get_own_name(type(instance))))
        del instance.__dict__[self.get_own_name(type(instance))]

    def get_own_name(self, owner):
        # Discover the attribute name this descriptor is bound to on `owner`.
        for attr in dir(owner):
            if getattr(owner, attr) is self:
                return attr
class CollectionArrayField(CollectionField):
    """
    Descriptor for sequence-valued fields whose elements must all be
    instances of `contains`.
    """

    def __init__(self, cls, contains=object, truthy=False, nullable=True):
        super(CollectionArrayField, self).__init__(cls, truthy=truthy, nullable=nullable)
        if not isinstance(contains, type):
            raise TypeError("Parameter 'contains' must be a class.")
        self.contains = contains

    def __set__(self, instance, value):
        if (not value) and self.truthy:
            raise ValueError("Value must be truthy (cannot evaluate to False.)")
        if value is None:
            if not self.nullable:
                raise ValueError("Value cannot be None.")
        else:
            if not isinstance(value, self.cls):
                raise TypeError("Value must be an instance of {cls}.".format(cls=self.cls.__name__))
            # Bug fix: the element check previously ran even when value was
            # None, iterating None and raising TypeError -- so a nullable
            # array field could never actually be set to None.
            if not all(isinstance(i, self.contains) for i in value):
                raise TypeError("Value must contain instances of {cls}".format(cls=self.contains.__name__))
        instance.__dict__[self.get_own_name(type(instance))] = value
class RequiresProperties(object):
    """
    Abstract class for classes that require certain properties to exist and be of certain types.
    """
    # TODO: delete me once descriptors prove bug-free
    # Maps attribute name -> {"type": required class, "truthy": bool}.
    __should__ = {}

    def __setattr__(self, key, value):
        spec = self.__should__.get(key)
        if spec is not None:
            if not isinstance(value, spec["type"]):
                raise TypeError(
                    "The value of {k} must be a {type}.".format(
                        cls=self.__class__.__name__, k=key, type=spec["type"].__name__
                    )
                )
            if spec["truthy"] and not value:
                raise TypeError(
                    "The value of {k} cannot evaluate to False.".format(cls=self.__class__.__name__, k=key)
                )
        super(RequiresProperties, self).__setattr__(key, value)
class Serializable(object):
    """
    An object that needs to be JSON serializable.
    """

    class Encoder(JSONEncoder):
        # Delegates Serializable instances to get_serializable(); everything
        # else falls back to the standard JSON encoder.
        def default(self, o):
            if isinstance(o, Serializable):
                return o.get_serializable()
            return JSONEncoder.default(self, o)

    def __init__(self, *args, **kwargs):
        super(Serializable, self).__init__()

    def __repr__(self):
        pairs = ("{k}={v}".format(k=k, v=repr(v)) for k, v in self.__dict__.items())
        return "<{classname} {value}>".format(classname=self.__class__.__name__, value=" ".join(pairs))

    def __str__(self):
        return dumps(self, cls=self.Encoder)

    def get_serializable(self):
        """Return a dict of truthy attributes, recursing into Serializable values."""
        serializable = {}
        for key, val in self.__dict__.items():
            if val:
                serializable[key] = val.get_serializable() if isinstance(val, Serializable) else val
        return serializable
class Array(Serializable, Comparable, UserList):
    """
    A serializable, comparable list-like object that contains objects of a certain type.
    See: http://amundsen.com/media-types/collection/format/#arrays
    """

    def __init__(self, iterable=(), cls=object, *args, **kwargs):
        """
        :param iterable: Initial items; non-`cls` items (e.g. parsed JSON
            dicts) are coerced via ``cls(**item)``.
        :param cls: Required class of the contained items.
        """
        # Bug fix: the cooperative super() call previously received spurious
        # extra `self` and `iterable` arguments (silently swallowed by
        # Serializable.__init__'s *args); pass only the genuine leftovers.
        super(Array, self).__init__(*args, **kwargs)
        self.required_class = cls
        for item in iterable:
            if isinstance(item, cls):
                self.data.append(item)
            else:
                self.data.append(cls(**item))

    def __add__(self, other):
        # Concatenation is only defined between Arrays of the same item type.
        if type(self) is type(other):
            if self.required_class == other.required_class:
                merged = self.data + other.data
                return Array(merged, self.required_class)
            else:
                raise TypeError(
                    "unsupported operand type(s) for +: 'Array[{self_type}]' and 'Array[{other_type}]'".format(
                        self_type=self.required_class.__name__, other_type=other.required_class.__name__
                    )
                )
        else:
            raise TypeError(
                "unsupported operand type(s) for +: 'Array' and '{other_type}'".format(other_type=type(other).__name__)
            )

    def __sub__(self, other):
        # Difference keeps the items of self that do not appear in other.
        if type(self) is type(other):
            if self.required_class == other.required_class:
                modified = []
                for self_item in self.data:
                    if self_item not in other.data:
                        modified.append(self_item)
                return Array(modified, self.required_class)
            else:
                raise TypeError(
                    "unsupported operand type(s) for -: 'Array[{self_type}] and Array[{other_type}]'".format(
                        self_type=self.required_class.__name__, other_type=other.required_class.__name__
                    )
                )
        else:
            raise TypeError(
                "unsupported operand type(s) for -: 'Array' and '{other_type}'".format(other_type=type(other).__name__)
            )

    def __eq__(self, other):
        if type(self) == type(other) and \
                self.required_class == other.required_class and \
                self.data == other.data:
            return True
        return False

    def __ne__(self, other):
        if type(self) != type(other) or \
                self.required_class != other.required_class or \
                self.data != other.data:
            return True
        return False

    def __repr__(self):
        return UserList.__repr__(self)

    def append(self, item):
        """Append `item`, enforcing the required item class."""
        if isinstance(item, self.required_class):
            super(Array, self).append(item)
        else:
            raise TypeError("item must be an instance of {type}".format(type=self.required_class.__name__))

    def get(self, **kwargs):
        """
        Find the first contained object that matches certain criteria
        :param kwargs: Keyword arguments for property name:value pairs to match
        :returns: object The first contained object found to match all the criteria, None if no match.
        """
        for obj in self.data:
            matches = all(v == obj.__dict__.get(k) for k, v in kwargs.items())
            if matches:
                return obj
        return None

    def get_serializable(self):
        """Return a plain list, recursing into Serializable items."""
        data = []
        for item in self.data:
            if isinstance(item, Serializable):
                data.append(item.get_serializable())
            else:
                data.append(item)
        return data

    def search(self, operator, *args, **kwargs):
        """
        Search for all contained objects that match certain criteria
        :param operator: Which logical operation to apply to search criteria (e.g. "and", "or")
        :param args: Arguments for property names to match (regardless of value)
        :param kwargs: Keyword arguments for property name:value pairs to match
        :returns: tuple All of the objects that match the criteria
        """
        operations = {
            "and": all,
            "or": any
        }
        normalized = str(operator).lower()
        if normalized in operations:
            # Bug fix: the lookup previously used the raw `operator` key while
            # the membership test used the lowercased form, so e.g. "AND"
            # passed the check but then raised KeyError.
            op = operations[normalized]
            results = []
            for obj in self.data:
                # Bug fix: name checks and name:value checks were previously
                # reduced separately and joined with a hard-coded `or`, so an
                # "and" search matched objects satisfying only one side (and
                # matched everything when `args` was empty). All criteria are
                # now combined under the requested operator.
                checks = [k in obj.__dict__ for k in args]
                checks += [v == obj.__dict__.get(k) for k, v in kwargs.items()]
                if op(checks):
                    results.append(obj)
            return tuple(results)
class Data(Serializable, Comparable):
    """
    A dict-like object that contains some objects representing information about another object.
    Usually contained in an Array.
    See: http://amundsen.com/media-types/collection/format/#arrays-data
    """
    # Standard fields; presumably validated on assignment by the
    # CollectionField descriptor (truthy=True suggests `name` must be a
    # non-empty str) -- see the descriptor's definition to confirm.
    name = CollectionField(str, truthy=True)
    prompt = CollectionField(str)
    value = CollectionField(object)
    '''
    __should__ = {"name": {"type": str, "truthy": True}}
    '''
    def __init__(self, name=None, prompt=None, value=None, **kwargs):
        """
        :param name: name of the datum (standard field)
        :param prompt: display prompt for the datum (standard field)
        :param value: the datum's value (standard field)
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Data, self).__init__()
        self.name = name
        self.prompt = prompt
        self.value = value
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class Error(Serializable, Comparable):
    """
    A dict-like object containing error information.
    See: http://amundsen.com/media-types/collection/format/#objects-error
    """
    # All three standard error fields are optional strings.
    code = CollectionField(str)
    message = CollectionField(str)
    title = CollectionField(str)
    def __init__(self, code=None, message=None, title=None, **kwargs):
        """
        :param code: machine-readable error code (standard field)
        :param message: human-readable error message (standard field)
        :param title: short error title (standard field)
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Error, self).__init__()
        self.code = code
        self.message = message
        self.title = title
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class Link(Serializable, Comparable):
    """
    A dict-like object containing information representing something as related to something else.
    Usually contained in an Array.
    See: http://amundsen.com/media-types/collection/format/#arrays-links
    """
    # `href` and `rel` are required by the standard (truthy=True);
    # presumably enforced by the CollectionField descriptor on assignment.
    href = CollectionField(str, truthy=True)
    rel = CollectionField(str, truthy=True)
    name = CollectionField(str)
    prompt = CollectionField(str)
    render = CollectionField(str)
    '''
    __should__ = {
        "href": {"type": str, "truthy": True},
        "rel": {"type": str, "truthy": True}
    }
    '''
    def __init__(self, href=None, rel=None, name=None, prompt=None, render=None, **kwargs):
        """
        :param href: target address of the link (should be a non-empty str)
        :param rel: relation of the link to its context (should be a non-empty str)
        :param name: name for the link (standard field)
        :param prompt: display prompt for the link (standard field)
        :param render: rendering hint, e.g. "image" (standard field)
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Link, self).__init__()
        self.href = href
        self.rel = rel
        self.name = name
        self.prompt = prompt
        self.render = render
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class Query(Serializable, Comparable):
    """
    A dict-like object containing a form template related to the type of objects in the collection.
    Usually contained in an Array.
    See: http://amundsen.com/media-types/collection/format/#arrays-queries
    """
    # `href` and `rel` are required by the standard (truthy=True).
    href = CollectionField(str, truthy=True)
    rel = CollectionField(str, truthy=True)
    name = CollectionField(str)
    prompt = CollectionField(str)
    '''
    __should__ = {
        "href": {"type": str, "truthy": True},
        "rel": {"type": str, "truthy": True}
    }
    '''
    def __init__(self, href=None, rel=None, data=None, name=None, prompt=None, **kwargs):
        """
        :param href: address the query is sent to (should be a non-empty str)
        :param rel: relation of the query to its context (should be a non-empty str)
        :param data: Array of Data describing the query parameters, or an
            iterable convertible to one
        :param name: name for the query (standard field)
        :param prompt: display prompt for the query (standard field)
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Query, self).__init__()
        self.href = href
        self.rel = rel
        self.name = name
        self.prompt = prompt
        # NOTE(review): unlike Item/Template, `data` defaults to None here,
        # so Array(None, cls=Data) is constructed by default -- confirm Array
        # tolerates a None initial list.
        if not isinstance(data, Array):
            data = Array(data, cls=Data)
        self.data = data
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class Item(Serializable, Comparable):
    """
    A dict-like object containing information representing something.
    http://amundsen.com/media-types/collection/format/#arrays-items
    """
    # `href` is required by the standard (truthy=True); `data` and `links`
    # are homogeneous Arrays of Data and Link respectively.
    href = CollectionField(str, truthy=True)
    data = CollectionArrayField(Array, contains=Data)
    links = CollectionArrayField(Array, contains=Link)
    '''
    __should__ = {"href": {"type": str, "truthy": True}}
    '''
    def __init__(self, href=None, data=(), links=(), **kwargs):
        """
        :param href: address of the item (should be a non-empty str)
        :param data: Array of Data, or an iterable convertible to one
        :param links: Array of Link, or an iterable convertible to one
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Item, self).__init__()
        self.href = href
        if not isinstance(data, Array):
            data = Array(data, cls=Data)
        self.data = data
        if not isinstance(links, Array):
            links = Array(links, cls=Link)
        self.links = links
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class Template(Serializable, Comparable):
    """
    A dict-like object containing a template for objects in the containing collection.
    See: http://amundsen.com/media-types/collection/format/#objects-template
    """
    # The template body is a homogeneous Array of Data entries.
    data = CollectionArrayField(Array, contains=Data)
    '''
    __should__ = {"data": {"type": (list, UserList), "truthy": False}}
    '''
    def __init__(self, data=(), **kwargs):
        """
        :param data: Array of Data, or an iterable convertible to one
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Template, self).__init__()
        if not isinstance(data, Array):
            data = Array(data, cls=Data)
        self.data = data
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class Collection(Serializable, Comparable):
    """
    A dict-like object that contains a collection of information.
    See: http://amundsen.com/media-types/collection/format/#objects-collection
    """
    __mimetype = MIMETYPE
    # Standard collection+json fields; `href` and `version` are required
    # (truthy=True), the rest are typed containers/objects.
    href = CollectionField(str, truthy=True)
    version = CollectionField(str, truthy=True)
    error = CollectionField(Error)
    template = CollectionField(Template)
    items = CollectionArrayField(Array, contains=Item)
    links = CollectionArrayField(Array, contains=Link)
    queries = CollectionArrayField(Array, contains=Query)
    '''
    __should__ = {
        "href": {"type": str, "truthy": True},
        "version": {"type": str, "truthy": True}
    }
    '''
    @property
    def mimetype(self):
        """The collection+json MIME type (read-only)."""
        return self.__mimetype
    def __init__(self, href=None, version="1.0", error=None, items=(),
                 links=(), queries=(), template=None, **kwargs):
        """
        :param href: address of this collection (should be a non-empty str)
        :param version: collection+json version, "1.0" by default
        :param error: Error instance, or a dict of Error kwargs
        :param items: Array of Item, or an iterable convertible to one
        :param links: Array of Link, or an iterable convertible to one
        :param queries: Array of Query, or an iterable convertible to one
        :param template: Template instance, or a dict of Template kwargs
        :param kwargs: any additional, non-standard attributes to set
        """
        super(Collection, self).__init__()
        # Process like normal, apply restrictions to properties
        # from the standard, allow non-standard properties
        self.href = href
        self.version = version
        if error:
            if not isinstance(error, Error):
                error = Error(**error)  # let the class raise exceptions if something's amiss
            self.error = error
        if template:
            if not isinstance(template, Template):
                template = Template(**template)
            self.template = template
        if items:
            if not isinstance(items, Array):
                items = Array(items, cls=Item)
            self.items = items
        if links:
            if not isinstance(links, Array):
                links = Array(links, cls=Link)
            self.links = links
        if queries:
            if not isinstance(queries, Array):
                queries = Array(queries, cls=Query)
            self.queries = queries
        for k, v in kwargs.items():
            # let the user set whatever non-standard data
            # no warranty, express or implied that non-standard
            # data will behave correctly or as expected
            self.__setattr__(k, v)
    def __setattr__(self, key, value):
        # Let folks supply dicts or lists when setting collection attributes.
        # Bug fix: assigning None to "error"/"template" used to be exploded
        # into Error(**None)/Template(**None) and raise TypeError; None now
        # passes through untouched so these fields can be cleared.
        if key == "error":
            if value is not None and not isinstance(value, Error):
                value = Error(**value)
        elif key == "template":
            if value is not None and not isinstance(value, Template):
                value = Template(**value)
        elif key == "items":
            if not isinstance(value, Array):
                value = Array(value, cls=Item)
        elif key == "links":
            if not isinstance(value, Array):
                value = Array(value, cls=Link)
        elif key == "queries":
            if not isinstance(value, Array):
                value = Array(value, cls=Query)
        super(Collection, self).__setattr__(key, value)
    def get_serializable(self):
        """Wrap the parent's serializable form under the standard top-level
        "collection" key."""
        return {"collection": super(Collection, self).get_serializable()}
| |
#!/usr/bin/env python
import unittest
from src import draw_figure
from src.draw_figure import Pose
from functools import partial
from copy import copy
import math
class Speed():
    """Simple mutable container for a linear and an angular speed."""
    def __init__(self, linear, angular):
        self.linear = linear
        self.angular = angular
    def __str__(self):
        # Two-decimal rendering for readable test failure messages.
        return "Speed (linear: %.2f angular: %.2f)" % (self.linear, self.angular)
    def __repr__(self):
        return self.__str__()
class MockDrawer():
    """In-memory stand-in for the real drawer: integrates speed into pose
    once per step and records every pose visited while the pen is down."""
    def __init__(self, start_x, start_y, start_theta, slowdown):
        self.pose = Pose(start_x, start_y, start_theta)
        self.speed = Speed(0, 0)
        self.drawing = False
        self.slowdown = float(slowdown)
        self.drawn_points = []
    def step(self):
        """Advance the simulation by one tick (1/slowdown of a time unit)."""
        if self.drawing:
            # Store a copy of the pose, not a reference (the pose mutates).
            self.drawn_points.append(copy(self.pose))
        # First spin, then move forward.
        # All speeds are divided by a slowdown factor (representing a high
        # refresh rate).
        factor = self.slowdown
        self.pose.theta += self.speed.angular / factor
        self.pose.x += math.cos(self.pose.theta) * self.speed.linear / factor
        self.pose.y += math.sin(self.pose.theta) * self.speed.linear / factor
    def draw(self, on):
        """Lower (True) or raise (False) the pen."""
        self.drawing = on
    def __str__(self):
        return "Drawer (%s, %s, drawing: %s)" % (self.pose, self.speed, self.drawing)
    def __repr__(self):
        return self.__str__()
def point_between_points(p, p1, p2, cross_delta, dist_delta):
    """Return True if p lies on the segment p1->p2, within tolerances.

    cross_delta bounds the cross product used for the collinearity test;
    dist_delta bounds how close p may be to an endpoint to still count.
    """
    # To determine if p is in the line described by p1 and p2, we can simply
    # check if the cross product between p1->p and p1->p2 is zero, that is,
    # if the angle between the vectors is zero (assuming p1 != p2).
    ax, ay = p.x - p1.x, p.y - p1.y
    bx, by = p2.x - p1.x, p2.y - p1.y
    if abs(ax * by - ay * bx) > cross_delta:
        return False
    # p is on the (infinite) line; now check that it actually sits between
    # the two endpoints, or very close to one of them.
    def _near_endpoint(coord, c1, c2):
        return abs(coord - c1) < dist_delta or abs(coord - c2) < dist_delta
    if abs(ax) >= abs(ay):  # p1->p is mostly horizontal: compare x-coordinates
        if ax > 0:
            return (p1.x <= p.x <= p2.x) or _near_endpoint(p.x, p1.x, p2.x)
        return (p2.x <= p.x <= p1.x) or _near_endpoint(p.x, p1.x, p2.x)
    # p1->p is mostly vertical: compare y-coordinates
    if ay > 0:
        return (p1.y <= p.y <= p2.y) or _near_endpoint(p.y, p1.y, p2.y)
    return (p2.y <= p.y <= p1.y) or _near_endpoint(p.y, p1.y, p2.y)
def get_points_in_line(p1, p2, total_points):
    """Return `total_points` poses evenly spaced on the segment p1->p2,
    including both endpoints.

    :param p1: starting pose (only x/y are used)
    :param p2: ending pose (only x/y are used)
    :param total_points: how many points to generate (>= 1)
    """
    points = []
    theta = draw_figure._angle_between_points(p1, p2)
    distance = math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)
    if total_points == 1:
        # Bug fix: a single requested point used to divide by zero below
        # (total_points - 1); return just the starting point instead.
        return [Pose(p1.x, p1.y)]
    for i in range(total_points):
        x = (p1.x + math.cos(theta) * i * distance / (total_points - 1))
        y = (p1.y + math.sin(theta) * i * distance / (total_points - 1))
        points.append(Pose(x, y))
    return points
def get_figure_key_points(f, inter_vertexes_points):
    """Sample `inter_vertexes_points` points along every edge of the closed
    figure f (each vertex connects to the next, last wraps to first) and
    return them as one flat list."""
    key_points = []
    vertex_count = len(f)
    for idx in range(vertex_count):
        start = f[idx]
        end = f[(idx + 1) % vertex_count]
        key_points.extend(get_points_in_line(start, end, inter_vertexes_points))
    return key_points
def points_match_figure(figure, points, inter_vertexes_points, cross_delta, dist_delta):
    """Return True when `points` faithfully trace the closed figure.

    Two conditions must both hold:
    a) every key point of the figure has at least one point close to it;
    b) every point lies between two consecutive vertexes of the figure.
    """
    # a) all key points of the figure must have a nearby drawn point
    for key_point in get_figure_key_points(figure, inter_vertexes_points):
        if not any(draw_figure._are_points_equal(p, key_point, dist_delta)
                   for p in points):
            return False
    # b) every drawn point must lie on some edge of the figure
    edge_count = len(figure)
    for p in points:
        on_some_edge = any(
            point_between_points(p, figure[n], figure[(n + 1) % edge_count],
                                 cross_delta, dist_delta)
            for n in range(edge_count)
        )
        if not on_some_edge:
            return False
    return True
class TestMockDrawer(unittest.TestCase):
    """Sanity-check MockDrawer's pose integration and pen recording.

    Each test runs `slowdown` simulation steps, which together advance the
    drawer by exactly one time unit at the configured speeds.
    """
    slowdown = 100
    def setUp(self):
        # Fresh drawer at the origin, heading 0, for every test.
        self.d = MockDrawer(0, 0, 0, self.slowdown)
    # Simple cases
    def test_no_movement(self):
        self._advance_drawer()
        self._assert_drawer_pose_equal_to(Pose(0, 0, 0))
    def test_move_forward(self):
        self.d.speed.linear = 1
        self._advance_drawer()
        self._assert_drawer_pose_equal_to(Pose(1, 0, 0))
    def test_spin(self):
        self.d.speed.angular = 1
        self._advance_drawer()
        self._assert_drawer_pose_equal_to(Pose(0, 0, 1))
    # Complex cases
    def test_rotate(self):
        # Start at (1, 0) with an angle of pi/2
        self.d.pose.x = 1
        self.d.pose.theta = math.pi/2
        # These speeds will cause the object to describe a movement of a quarter of a circle
        self.d.speed.linear = math.pi/2
        self.d.speed.angular = math.pi/2
        self._advance_drawer()
        self._assert_drawer_pose_equal_to(Pose(0, 1, math.pi), delta=0.01)
    def test_advance_and_return(self):
        self.d.speed.linear = 1
        self._advance_drawer()
        self.d.speed.linear = -1
        self._advance_drawer()
        self._assert_drawer_pose_equal_to(Pose(0, 0 ,0))
    # Drawing
    def test_move_forward_no_draw(self):
        self.d.speed.linear = 1
        self._advance_drawer()
        self.assertEqual(len(self.d.drawn_points), 0)
    def test_simple_draw(self):
        # Advance 1 along the x-axis
        self.d.pose.theta = 0
        self.d.speed.linear = self.slowdown
        self.d.draw(True)
        # Two steps: the first one will draw on the starting position, and the second one
        # after having moved 1 unit
        self.d.step()
        self.d.step()
        self.assertAlmostEqual(self.d.drawn_points[0].x, 0)
        self.assertAlmostEqual(self.d.drawn_points[0].y, 0)
        self.assertAlmostEqual(self.d.drawn_points[1].x, 1)
        self.assertAlmostEqual(self.d.drawn_points[1].y, 0)
    def test_move_forward_draw(self):
        self.d.speed.linear = 1
        self.d.draw(True)
        self._advance_drawer()
        # One recorded pose per step; x grows monotonically from 0 to ~1.
        self.assertEqual(len(self.d.drawn_points), self.slowdown)
        for p in self.d.drawn_points:
            self.assertTrue(0.0 <= p.x <= 1.01)
            self.assertEqual(p.y, 0)
            self.assertEqual(p.theta, 0)
    # Helpers
    def _advance_drawer(self):
        # Run one full time unit's worth of simulation ticks.
        for n in range(self.slowdown):
            self.d.step()
    def _assert_drawer_pose_equal_to(self, p, delta=None):
        self.assertAlmostEqual(self.d.pose.x, p.x, delta=delta)
        self.assertAlmostEqual(self.d.pose.y, p.y, delta=delta)
        self.assertAlmostEqual(self.d.pose.theta, p.theta, delta=delta)
class TestPointBetweenPoints(unittest.TestCase):
    """Exercise point_between_points on collinear, off-line, endpoint and
    beyond-the-segment cases, for both axis orientations."""
    cross_delta = 0.01
    dist_delta = 0.01
    # Simple cases
    def test_point_at_start(self):
        self.assertTrue(self._point_between_points_deltas(Pose(0, 0), Pose(0, 0), Pose(0, 1)))
    def test_point_at_end(self):
        self.assertTrue(self._point_between_points_deltas(Pose(0, 1), Pose(0, 0), Pose(0, 1)))
    def test_point_in_middle_x(self):
        self.assertTrue(self._point_between_points_deltas(Pose(0, 0.5), Pose(0, 0), Pose(0, 1)))
    def test_point_in_middle_x_reverse(self):
        self.assertTrue(self._point_between_points_deltas(Pose(0, 0.5), Pose(0, 1), Pose(0, 0)))
    def test_point_in_middle_y(self):
        self.assertTrue(self._point_between_points_deltas(Pose(0.5, 0), Pose(0, 0), Pose(1, 0)))
    def test_point_in_middle_y_reverse(self):
        self.assertTrue(self._point_between_points_deltas(Pose(0.5, 0), Pose(1, 0), Pose(0, 0)))
    def test_point_outside(self):
        self.assertFalse(self._point_between_points_deltas(Pose(2, 0), Pose(0, 0), Pose(0, 1)))
    # Complex cases: the point is ON the infinite line but OUTSIDE the segment
    def test_point_in_line_not_between_smaller_x(self):
        self.assertFalse(self._point_between_points_deltas(Pose(0, -1), Pose(0, 0), Pose(0, 1)))
    def test_point_in_line_not_between_larger_x(self):
        self.assertFalse(self._point_between_points_deltas(Pose(0, 2), Pose(0, 0), Pose(0, 1)))
    def test_point_in_line_not_between_smaller_y(self):
        self.assertFalse(self._point_between_points_deltas(Pose(-1, 0), Pose(0, 0), Pose(1, 0)))
    def test_point_in_line_not_between_larger_y(self):
        self.assertFalse(self._point_between_points_deltas(Pose(2, 0), Pose(0, 0), Pose(1, 0)))
    # Helpers
    def _point_between_points_deltas(self, p, p1, p2):
        # Bind the class-level tolerances so every test uses the same deltas.
        return point_between_points(p, p1, p2, self.cross_delta, self.dist_delta)
class TestGetPointsInLine(unittest.TestCase):
    """Verify that get_points_in_line produces the requested number of
    points, all lying on the segment, endpoints included."""
    total_points = 10
    cross_delta = 0.01
    dist_delta = 0.01
    # Simple cases
    def test_amount_of_points(self):
        self.assertEqual(len(get_points_in_line(Pose(0, 0), Pose(1, 0), self.total_points)), self.total_points)
    def test_points_in_horizontal_line(self):
        self._test_points_in_line(Pose(0, 0), Pose(1, 0))
    def test_points_in_vertical_line(self):
        self._test_points_in_line(Pose(0, 0) ,Pose(0, 1))
    def test_points_in_diagonal_line(self):
        self._test_points_in_line(Pose(0, 0), Pose(1, 1))
    # Extra behavior
    def test_line_vertexes_in_points(self):
        # Both segment endpoints must be among the generated points.
        p1 = Pose(0, 0)
        p2 = Pose(1, 0)
        points = get_points_in_line(p1, p2, self.total_points)
        self.assertTrue(any([draw_figure._are_points_equal(p, p1, self.dist_delta) for p in points]))
        self.assertTrue(any([draw_figure._are_points_equal(p, p2, self.dist_delta) for p in points]))
    # Helpers
    def _test_points_in_line(self, p1, p2):
        # Every generated point must lie on the p1->p2 segment.
        for p in get_points_in_line(p1, p2, self.total_points):
            self.assertTrue(point_between_points(p, p1, p2, self.cross_delta, self.dist_delta))
class TestGetFigureKeyPoints(unittest.TestCase):
    """Verify that get_figure_key_points samples every edge of a closed
    figure, including all of its vertexes."""
    inter_vertexes_points = 10
    cross_delta = 0.01
    dist_delta = 0.01
    # Simple cases
    def test_amount_of_key_points(self):
        # One batch of sampled points per edge (len(f) edges in a closed figure).
        f = [Pose(0, 0), Pose(1, 0), Pose(0.5, 1)]
        self.assertEqual(len(get_figure_key_points(f, self.inter_vertexes_points)), len(f) * self.inter_vertexes_points)
    def test_line_key_points(self):
        self._test_key_points_in_at_least_one_outline_line([Pose(0, 0), Pose(1, 0)])
    def test_triangle_key_points(self):
        self._test_key_points_in_at_least_one_outline_line([Pose(0, 0), Pose(1, 0), Pose(0.5, 1)])
    def test_square_key_points(self):
        self._test_key_points_in_at_least_one_outline_line([Pose(0, 0), Pose(1, 0), Pose(1, 1), Pose(0, 1)])
    # Extra behavior
    def test_figure_vertexes_in_key_points(self):
        # Every vertex of the figure must appear among the key points.
        f = [Pose(0, 0), Pose(1, 0), Pose(0.5, 1)]
        points = get_figure_key_points(f, self.inter_vertexes_points)
        for vertex in f:
            self.assertTrue(any([draw_figure._are_points_equal(p, vertex, self.dist_delta) for p in points]))
    # Helpers
    def _test_key_points_in_at_least_one_outline_line(self, f):
        # Each key point must lie on at least one edge of the closed figure.
        for p in get_figure_key_points(f, self.inter_vertexes_points):
            self.assertTrue(any([point_between_points(p, f[n], f[(n + 1) % len(f)], self.cross_delta, self.dist_delta) for n in range(len(f))]))
class TestPointsMatchFigure(unittest.TestCase):
    """Verify points_match_figure: matching requires dense coverage of every
    edge AND no stray points off the figure's outline."""
    inter_vertexes_points = 10
    cross_delta = 0.01
    dist_delta = 0.01
    # Simple cases
    def test_line_vertexes_no_match(self):
        # Just the two endpoints is not dense enough to cover the key points.
        f = [Pose(0, 0), Pose(0, 1)]
        points = f
        self.assertFalse(self._points_match_figure_deltas(f, points))
    def test_points_in_line(self):
        f = [Pose(0, 0), Pose(0, 1)]
        points = [Pose(0, y / float(self.inter_vertexes_points * 10)) for y in range(self.inter_vertexes_points * 10 + 1)] # +1 to also generate a point at the end
        self.assertTrue(self._points_match_figure_deltas(f, points))
    def test_points_in_line_and_outlier(self):
        f = [Pose(0, 0), Pose(0, 1)]
        points = [Pose(0, y / float(self.inter_vertexes_points * 10)) for y in range(self.inter_vertexes_points * 10 + 1)] # +1 to also generate a point at the end
        # Matching points
        self.assertTrue(self._points_match_figure_deltas(f, points))
        points += [Pose(1, 1)]
        # Mismatch after adding outlier
        self.assertFalse(self._points_match_figure_deltas(f, points))
    # Complex cases
    def test_progressive_points_in_square(self):
        # The figure only matches once ALL four sides are covered.
        f = [Pose(0, 0), Pose(0, 1), Pose(1, 1), Pose(1, 0)]
        points = []
        # No points - figure mismatch
        self.assertFalse(self._points_match_figure_deltas(f, points))
        # First segment - figure mismatch
        points += [Pose(0, y / float(self.inter_vertexes_points * 10)) for y in range(self.inter_vertexes_points * 10 + 1)] # +1 to also generate a point at the end
        self.assertFalse(self._points_match_figure_deltas(f, points))
        # Second segment - figure mismatch
        points += [Pose(x / float(self.inter_vertexes_points * 10), 1) for x in range(self.inter_vertexes_points * 10 + 1)] # +1 to also generate a point at the end
        self.assertFalse(self._points_match_figure_deltas(f, points))
        # Third segment - figure mismatch
        points += [Pose(1, 1 - y / float(self.inter_vertexes_points * 10)) for y in range(self.inter_vertexes_points * 10 + 1)] # +1 to also generate a point at the end
        self.assertFalse(self._points_match_figure_deltas(f, points))
        # Fourth (last) segment - match
        points += [Pose(1 - x / float(self.inter_vertexes_points * 10), 0) for x in range(self.inter_vertexes_points * 10 + 1)] # +1 to also generate a point at the end
        self.assertTrue(self._points_match_figure_deltas(f, points))
    # Helpers
    def _points_match_figure_deltas(self, f, points):
        # Bind the class-level sampling density and tolerances.
        return points_match_figure(f, points, self.inter_vertexes_points, self.cross_delta, self.dist_delta)
class TestDrawFigure(unittest.TestCase):
    """End-to-end tests for draw_figure.draw, driving a MockDrawer through
    the dependency-injection dict and checking the traced points."""
    inter_vertexes_points = 10
    # Looser tolerances than the unit tests: the simulated drawer overshoots.
    cross_delta = 0.1
    dist_delta = 0.15
    def setUp(self):
        self.d = MockDrawer(0, 0, 0, 10)
        # draw() receives its collaborators as callables; bind them to the
        # mock drawer via partial so the production code stays unaware of it.
        self.deps = {
            "log" : self._log,
            "pause" : self._pause,
            "abort" : self._abort,
            "step" : partial(self._step, drawer=self.d),
            "curr_pose" : partial(self._curr_pose, drawer=self.d),
            "move" : partial(self._move, drawer=self.d),
            "pen" : partial(self._pen, drawer=self.d)
        }
    def test_draw_incomplete_triangle(self):
        f = [Pose(0, 0), Pose(1, 0), Pose(0.5, 1)]
        # draw only goes from point to point, and doesn't go from the last point
        # to the first one: if this is not done manually, the drawn figure will
        # not match
        draw_figure.draw(f, self.deps)
        self.assertFalse(self._points_match_figure_deltas(f, self.d.drawn_points))
    def test_draw_triangle(self):
        f = [Pose(0, 0), Pose(1, 0), Pose(0.5, 1)]
        draw_figure.draw(self._get_complete_figure(f), self.deps)
        self.assertTrue(self._points_match_figure_deltas(f, self.d.drawn_points))
    def test_draw_square(self):
        f = [Pose(0, 0), Pose(1, 0), Pose(1, 1), Pose(0, 1)]
        draw_figure.draw(self._get_complete_figure(f), self.deps)
        self.assertTrue(self._points_match_figure_deltas(f, self.d.drawn_points))
    def test_draw_triangle_not_square(self):
        # Negative control: a traced triangle must not match a square.
        f = [Pose(0, 0), Pose(1, 0), Pose(0.5, 1)]
        draw_figure.draw(self._get_complete_figure(f), self.deps)
        self.assertFalse(self._points_match_figure_deltas([Pose(0, 0), Pose(1, 0), Pose(1, 1), Pose(0, 1)], self.d.drawn_points))
    # Helpers
    def _get_complete_figure(self, f):
        # We add the first point to the figure, to create the segment f[-1] -> f[0]
        return f + [f[0]]
    def _points_match_figure_deltas(self, f, points):
        return points_match_figure(f, points, self.inter_vertexes_points, self.cross_delta, self.dist_delta)
    # Mocked dependencies
    def _log(self, *args):
        pass
    def _pause(self):
        return False
    def _abort(self):
        return False
    def _step(self, drawer):
        drawer.step()
    def _curr_pose(self, drawer):
        return drawer.pose
    def _move(self, linear_speed, angular_speed, drawer):
        drawer.speed.linear = linear_speed
        drawer.speed.angular = angular_speed
    def _pen(self, on, drawer):
        drawer.draw(on)
| |
#!/usr/bin/env python3
import time
import argparse
import configparser
import logging
import getpass
import requests
import serial
import sys
NETATMO_URL_AUTH = "https://api.netatmo.com/oauth2/token"
NETATMO_URL_STATION = "https://api.netatmo.com/api/getstationsdata"
NETATMO_URL_TIMEOUT = 20
config = None
config_filename = "config.cfg"
serialport = None
def open_serialport():
    """Open the serial device named in the [output] config section and
    store the handle in the module-global `serialport`."""
    global config, serialport
    assert config['output']['device'] != ''
    assert config['output']['baudrate'] != ''
    device = config['output']['device']
    baudrate = int(config['output']['baudrate'])
    logging.debug("Open serial device = {0} at {1} bps".format(device,
                                                               config['output']['baudrate']))
    serialport = serial.Serial(port=device, baudrate=baudrate)
def close_serialport():
    """Close the module-global serial port; safe to call when it was
    never opened (or already closed)."""
    global serialport
    logging.debug("Closing serial device")
    if serialport is not None:  # idiomatic identity test instead of `!= None`
        serialport.close()
        # Forget the handle so a repeated close is a harmless no-op.
        serialport = None
def display_temperatures_serialport(temps):
    """Render indoor/outdoor temperatures on the 2-line serial LCD.

    :param temps: dict that may contain 'indoor' and/or 'outdoor' numeric
        values; a missing or malformed entry is rendered as '--.-'.
    """
    global serialport
    try:
        text_1 = "In: {0:5.1f} C".format(temps['indoor'])
    except (KeyError, TypeError, ValueError):
        # Bug fix: a bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; only formatting/lookup failures should fall back.
        text_1 = "In: --.- C"
    try:
        text_2 = "Out: {0:5.1f} C".format(temps['outdoor'])
    except (KeyError, TypeError, ValueError):
        text_2 = "Out: --.- C"
    serialport.write(b'\xfe\x01') # Clear display
    serialport.write(text_1.encode('ascii', 'replace'))
    serialport.write(b'\xfe\xc0') # Goto line 2 position 0
    serialport.write(text_2.encode('ascii', 'replace'))
def access_token_expired():
    """Return True when the stored OAuth access token is absent or past
    its expiry timestamp (seconds since the epoch, kept as a string)."""
    global config
    expires = config['auth']['expires']
    # '' and '-1' are the "never authenticated" defaults: treat as expired.
    if expires in ('', '-1'):
        return True
    return int(expires) < int(time.time())
def get_temperatures():
    """Query the Netatmo station API and extract the temperatures.

    :returns: dict with optional 'indoor' and 'outdoor' keys; either may be
        missing when the request fails or the station lacks the module.
    """
    global config
    assert config['auth']['access_token'] != ''
    res_temp = {}
    params_data = {'access_token': config['auth']['access_token']}
    logging.info("Sending getstationsdata")
    try:
        req = requests.get(NETATMO_URL_STATION, params = params_data, timeout=NETATMO_URL_TIMEOUT)
    except Exception as e:
        logging.warning("Exception during getstationsdata: {0}".format(e))
        return res_temp
    logging.debug("getstationsdata: status_code = {0}, content = {1}".format(req.status_code, req.text))
    if req.status_code == requests.codes.ok:
        json_content = req.json()
        try:
            res_temp['indoor'] = json_content['body']['devices'][0]['dashboard_data']['Temperature']
            logging.info("Indoor temperature = {0}".format(res_temp['indoor']))
        except (KeyError, IndexError, TypeError):
            # Bug fix: bare `except:` narrowed to the lookup failures that a
            # missing/partial JSON payload can actually produce.
            logging.info("Indoor temperature not found")
        try:
            # The outdoor sensor is the module of type NAModule1, if present.
            outdoor_tmp_found = False
            for module in json_content['body']['devices'][0]['modules']:
                if module['type'] == 'NAModule1':
                    res_temp['outdoor'] = module['dashboard_data']['Temperature']
                    outdoor_tmp_found = True
                    break
            if outdoor_tmp_found:
                logging.info("Outdoor temperature = {0}".format(res_temp['outdoor']))
            else:
                logging.info("Outdoor temperature not found")
        except (KeyError, IndexError, TypeError):
            logging.info("Outdoor temperature not found")
    return res_temp
def prompt_username_password():
    """Interactively ask for Netatmo credentials; the password is read
    without echoing it to the terminal."""
    return input("Username: "), getpass.getpass(prompt="Password: ")
def auth_with_password(username, password):
    """Authenticate against the Netatmo OAuth2 endpoint with credentials.

    On success the access/refresh tokens and the absolute expiry time are
    stored in the global `config` (the caller persists them to disk).

    :returns: True on success, False on any failure (details are logged).
    """
    global config
    assert config['auth']['client_id'] != ''
    assert config['auth']['client_secret'] != ''
    assert username != ''
    assert password != ''
    post_data = {
        'client_id': config['auth']['client_id'],
        'client_secret': config['auth']['client_secret'],
        'grant_type': 'password',
        'username': username,
        'password': password,
        'scope': 'read_station',
    }
    logging.info("Sending authentication request (with password)")
    try:
        response = requests.post(NETATMO_URL_AUTH, data = post_data, timeout=NETATMO_URL_TIMEOUT)
    except Exception as e:
        logging.warning("Exception during authentication (password): {0}".format(e))
        return False
    logging.debug("Authentication (password): status_code = {0}, content = {1}".format(response.status_code, response.text))
    if response.status_code != requests.codes.ok:
        logging.error("Authentication error (password): status_code = {0}, content = {1}".format(response.status_code, response.text))
        return False
    payload = response.json()
    at = payload['access_token']
    rt = payload['refresh_token']
    expin = payload['expires_in']
    logging.debug("Authentication decoded (password): access_token = {0}, refresh_token = {1}, expires_in = {2}".format(at, rt, expin))
    config['auth']['access_token'] = at
    config['auth']['refresh_token'] = rt
    # Convert the relative expires_in to an absolute epoch timestamp.
    config['auth']['expires'] = str(expin + int(time.time()))
    return True
def refresh_access_token():
    """Exchange the stored refresh token for a fresh access token.

    On success the new tokens and absolute expiry time are stored in the
    global `config` (the caller persists them to disk).

    :returns: True on success, False on any failure (details are logged).
    """
    global config
    assert config['auth']['client_id'] != ''
    assert config['auth']['client_secret'] != ''
    assert config['auth']['refresh_token'] != ''
    post_data = {
        'client_id': config['auth']['client_id'],
        'client_secret': config['auth']['client_secret'],
        'grant_type': 'refresh_token',
        'refresh_token': config['auth']['refresh_token'],
    }
    logging.info("Sending refresh token request")
    try:
        response = requests.post(NETATMO_URL_AUTH, data = post_data, timeout=NETATMO_URL_TIMEOUT)
    except Exception as e:
        logging.warning("Exception during authentication (refresh): {0}".format(e))
        return False
    logging.debug("Result: status_code = {0}, content = {1}".format(response.status_code, response.text))
    if response.status_code != requests.codes.ok:
        logging.error("Authentication error: status_code = {0}, content = {1}".format(response.status_code, response.text))
        return False
    payload = response.json()
    at = payload['access_token']
    rt = payload['refresh_token']
    expin = payload['expires_in']
    logging.debug("Authentication decoded (refresh): access_token = {0}, refresh_token = {1}, expires_in = {2}".format(at, rt, expin))
    config['auth']['access_token'] = at
    config['auth']['refresh_token'] = rt
    # Convert the relative expires_in to an absolute epoch timestamp.
    config['auth']['expires'] = str(expin + int(time.time()))
    return True
def parse_args():
    """Build the command-line interface and parse sys.argv.

    :returns: argparse.Namespace with `verbose`, `config` and `loop`.
    """
    arg_parser = argparse.ArgumentParser(
        description="Retrieves the temperatures and displays it on LCD")
    arg_parser.add_argument("-v", "--verbose", action="store_true",
                            help="increase output verbosity")
    arg_parser.add_argument("-c", "--config", default="config.cfg",
                            help="name of the config file to use")
    arg_parser.add_argument("-l", "--loop", action="store_true",
                            help="retrieve and display regularly instead of just once")
    return arg_parser.parse_args()
def read_config():
    """Load `config_filename` into the global `config`, seeding every
    section with defaults first so later lookups never fail."""
    global config, config_filename
    config = configparser.ConfigParser()
    # Default config: empty credentials force the interactive password flow.
    config.read_dict({
        'auth': {'client_id': '',
                 'client_secret': '',
                 'access_token': '',
                 'refresh_token': '',
                 'expires': '-1'},
        'output': {'device': '/dev/ttyUSB0',
                   'baudrate': '9600'},
        'general': {'polling_interval': '600'},
    })
    logging.info("Reading config from {0}".format(config_filename))
    config.read(config_filename)
def write_config():
    """Persist the global `config` (including refreshed tokens) back to
    `config_filename`."""
    global config, config_filename
    logging.info("Writing config to {0}".format(config_filename))
    with open(config_filename, 'w') as config_file:
        config.write(config_file)
def main():
    """Entry point: authenticate, then fetch and display temperatures once
    (or forever with --loop, sleeping `polling_interval` between cycles)."""
    global config, config_filename
    args = parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    config_filename = args.config
    read_config()
    # Write back immediately so a fresh install gets a template config file.
    write_config()
    if config['auth']['access_token'] == '':
        logging.info("No access_token found in configuration file, authenticate with password")
        username, password = prompt_username_password()
        if auth_with_password(username, password):
            write_config()
        else:
            sys.exit(1)
    while(True):
        auth = True
        temps = {}
        # Refresh the token when needed and persist the new one on success.
        if (access_token_expired()):
            auth = refresh_access_token()
            if auth:
                write_config()
        if auth:
            temps = get_temperatures()
        # Open/close per cycle so a transient serial error doesn't wedge us;
        # an empty temps dict renders as '--.-' on the display.
        open_serialport()
        display_temperatures_serialport(temps)
        close_serialport()
        if not args.loop:
            break
        time.sleep(int(config['general']['polling_interval']))
if __name__ == "__main__":
main()
| |
# pylint: disable=pointless-string-statement
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import datetime
import mock
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_db import exception as exc
from oslo_utils import timeutils
import testscenarios
from neutron.db import agents_db
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.objects import agent as agent_obj
from neutron.objects import base
from neutron.tests.unit import testlib_api
# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
load_tests = testscenarios.load_tests_apply_scenarios
TEST_RESOURCE_VERSIONS = {"A": "1.0"}
AGENT_STATUS = {'agent_type': 'Open vSwitch agent',
'binary': 'neutron-openvswitch-agent',
'host': 'overcloud-notcompute',
'topic': 'N/A',
'resource_versions': TEST_RESOURCE_VERSIONS}
TEST_TIME = '2016-02-26T17:08:06.116'
class FakePlugin(base_plugin.NeutronDbPluginV2, agents_db.AgentDbMixin):
    """A fake plugin class containing all DB methods.

    Combines the base V2 DB plugin with the agents mixin so the tests can
    exercise the agent DB API without loading a real plugin.
    """
class TestAgentsDbBase(testlib_api.SqlTestCase):
    """Shared fixture: admin context, FakePlugin, and helpers that build
    Agent objects with controllable liveness."""
    def setUp(self):
        super(TestAgentsDbBase, self).setUp()
        self.context = context.get_admin_context()
        self.plugin = FakePlugin()
    def _get_agents(self, hosts, agent_type):
        # One unsaved Agent object per host, all timestamps "now" (alive).
        return [
            agent_obj.Agent(
                context=self.context,
                binary='foo-agent',
                host=host,
                agent_type=agent_type,
                topic='foo_topic',
                configurations="{}",
                created_at=timeutils.utcnow(),
                started_at=timeutils.utcnow(),
                heartbeat_timestamp=timeutils.utcnow())
            for host in hosts
        ]
    def _create_and_save_agents(self, hosts, agent_type, down_agents_count=0,
                                down_but_version_considered=0):
        agents = self._get_agents(hosts, agent_type)
        # bring down the specified agents
        for agent in agents[:down_agents_count]:
            agent['heartbeat_timestamp'] -= datetime.timedelta(minutes=60)
        # bring down just enough so their version is still considered
        for agent in agents[down_agents_count:(
                down_but_version_considered + down_agents_count)]:
            agent['heartbeat_timestamp'] -= datetime.timedelta(
                seconds=(cfg.CONF.agent_down_time + 1))
        for agent in agents:
            agent.create()
        return agents
class TestAgentsDbMixin(TestAgentsDbBase):
    def setUp(self):
        super(TestAgentsDbMixin, self).setUp()
        # Fresh copy per test so individual tests may mutate the status dict.
        self.agent_status = dict(AGENT_STATUS)
    def test_get_enabled_agent_on_host_found(self):
        # An alive, enabled L3 agent on the host is returned as-is.
        agents = self._create_and_save_agents(['foo_host'],
                                              constants.AGENT_TYPE_L3)
        expected = self.plugin.get_enabled_agent_on_host(
            self.context, constants.AGENT_TYPE_L3, 'foo_host')
        self.assertEqual(expected, agents[0])
    def test_get_enabled_agent_on_host_not_found(self):
        # No agent on the host: returns None and logs a debug message.
        with mock.patch.object(agents_db.LOG, 'debug') as mock_log:
            agent = self.plugin.get_enabled_agent_on_host(
                self.context, constants.AGENT_TYPE_L3, 'foo_agent')
        self.assertIsNone(agent)
        self.assertTrue(mock_log.called)
    def _assert_ref_fields_are_equal(self, reference, result):
        """Compare (key, value) pairs of a reference dict with the result

        Note: the result MAY have additional keys
        """
        for field, value in reference.items():
            # Third argument names the offending field in the failure message.
            self.assertEqual(value, result[field], field)
    def test_create_or_update_agent_new_entry(self):
        # First report from an unknown agent creates a matching DB row.
        self.plugin.create_or_update_agent(self.context, self.agent_status)
        agent = self.plugin.get_agents(self.context)[0]
        self._assert_ref_fields_are_equal(self.agent_status, agent)
def test_create_or_update_agent_existing_entry(self):
self.plugin.create_or_update_agent(self.context, self.agent_status)
self.plugin.create_or_update_agent(self.context, self.agent_status)
self.plugin.create_or_update_agent(self.context, self.agent_status)
agents = self.plugin.get_agents(self.context)
self.assertEqual(len(agents), 1)
agent = agents[0]
self._assert_ref_fields_are_equal(self.agent_status, agent)
def test_create_or_update_agent_logs_heartbeat(self):
status = self.agent_status.copy()
status['configurations'] = {'log_agent_heartbeats': True}
with mock.patch.object(agents_db.LOG, 'info') as info:
self.plugin.create_or_update_agent(self.context, status)
self.assertTrue(info.called)
status['configurations'] = {'log_agent_heartbeats': False}
info.reset_mock()
self.plugin.create_or_update_agent(self.context, status)
self.assertFalse(info.called)
def test_create_or_update_agent_concurrent_insert(self):
# NOTE(rpodolyaka): emulate violation of the unique constraint caused
# by a concurrent insert. Ensure we make another
# attempt on fail
mock.patch(
'neutron.objects.base.NeutronDbObject.modify_fields_from_db'
).start()
with mock.patch('neutron.objects.db.api.create_object') as add_mock:
add_mock.side_effect = [
exc.DBDuplicateEntry(),
mock.Mock()
]
self.plugin.create_or_update_agent(self.context, self.agent_status)
self.assertEqual(add_mock.call_count, 2,
"Agent entry creation hasn't been retried")
def test_create_or_update_agent_disable_new_agents(self):
cfg.CONF.set_override('enable_new_agents', False)
self.plugin.create_or_update_agent(self.context, self.agent_status)
agent = self.plugin.get_agents(self.context)[0]
self.assertFalse(agent['admin_state_up'])
def test_agent_health_check(self):
agents = [{'agent_type': "DHCP Agent",
'heartbeat_timestamp': '2015-05-06 22:40:40.432295',
'host': 'some.node',
'alive': True}]
with mock.patch.object(self.plugin, 'get_agents',
return_value=agents),\
mock.patch.object(agents_db.LOG, 'warning') as warn,\
mock.patch.object(agents_db.LOG, 'debug') as debug:
self.plugin.agent_health_check()
self.assertTrue(debug.called)
self.assertFalse(warn.called)
agents[0]['alive'] = False
self.plugin.agent_health_check()
warn.assert_called_once_with(
mock.ANY,
{'count': 1, 'total': 1,
'data': " Type Last heartbeat host\n"
" DHCP Agent 2015-05-06 22:40:40.432295 some.node"}
)
def test__get_dict(self):
db_obj = mock.Mock(conf1='{"test": "1234"}')
conf1 = self.plugin._get_dict(db_obj, 'conf1')
self.assertIn('test', conf1)
self.assertEqual("1234", conf1['test'])
def test__get_dict_missing(self):
with mock.patch.object(agents_db.LOG, 'warning') as warn:
db_obj = mock.Mock(spec=['agent_type', 'host'])
self.plugin._get_dict(db_obj, 'missing_conf')
self.assertTrue(warn.called)
def test__get_dict_ignore_missing(self):
with mock.patch.object(agents_db.LOG, 'warning') as warn:
db_obj = mock.Mock(spec=['agent_type', 'host'])
missing_conf = self.plugin._get_dict(db_obj, 'missing_conf',
ignore_missing=True)
self.assertEqual({}, missing_conf)
warn.assert_not_called()
def test__get_dict_broken(self):
with mock.patch.object(agents_db.LOG, 'warning') as warn:
db_obj = mock.Mock(conf1='{"test": BROKEN')
conf1 = self.plugin._get_dict(db_obj, 'conf1', ignore_missing=True)
self.assertEqual({}, conf1)
self.assertTrue(warn.called)
def get_configurations_dict(self):
db_obj = mock.Mock(configurations='{"cfg1": "val1"}')
cfg = self.plugin.get_configuration_dict(db_obj)
self.assertIn('cfg', cfg)
def test_get_agents_resource_versions(self):
tracker = mock.Mock()
self._create_and_save_agents(
['host-%d' % i for i in range(5)],
constants.AGENT_TYPE_L3,
down_agents_count=3,
down_but_version_considered=2)
self.plugin.get_agents_resource_versions(tracker)
self.assertEqual(tracker.set_versions.call_count, 2)
class TestAgentsDbGetAgents(TestAgentsDbBase):
    """Scenario tests for get_agents() filtering on the 'alive' field."""

    # Each scenario creates ``agents`` agents with ``down_agents`` of them
    # dead, queries with the given ``agents_alive`` filter string and
    # expects ``expected_agents`` results; expected_agents=None means the
    # filter value is invalid and the query must raise.
    scenarios = [
        ('Get all agents', dict(agents=5, down_agents=2,
                                agents_alive=None,
                                expected_agents=5)),

        ('Get alive agents (True)', dict(agents=5, down_agents=2,
                                         agents_alive='True',
                                         expected_agents=3)),
        ('Get down agents (False)', dict(agents=5, down_agents=2,
                                         agents_alive='False',
                                         expected_agents=2)),
        ('Get alive agents (true)', dict(agents=5, down_agents=2,
                                         agents_alive='true',
                                         expected_agents=3)),
        ('Get down agents (false)', dict(agents=5, down_agents=2,
                                         agents_alive='false',
                                         expected_agents=2)),
        ('Get agents invalid alive filter', dict(agents=5, down_agents=2,
                                                 agents_alive='invalid',
                                                 expected_agents=None)),
    ]

    def setUp(self):
        # ensure that the first scenario will execute with nosetests
        if not hasattr(self, 'agents'):
            self.__dict__.update(self.scenarios[0][1])
        super(TestAgentsDbGetAgents, self).setUp()

    def test_get_agents(self):
        hosts = ['host-%s' % i for i in range(self.agents)]
        self._create_and_save_agents(hosts, constants.AGENT_TYPE_L3,
                                     down_agents_count=self.down_agents)
        if self.agents_alive == 'invalid':
            self.assertRaises(n_exc.InvalidInput, self.plugin.get_agents,
                              self.context,
                              filters={'alive': [self.agents_alive]})
        else:
            # Only apply the 'alive' filter when a value was given;
            # otherwise fetch all agents.
            returned_agents = self.plugin.get_agents(
                self.context, filters={'alive': [self.agents_alive]}
                if self.agents_alive else None)
            self.assertEqual(self.expected_agents, len(returned_agents))
            if self.agents_alive:
                # Both 'True' and 'true' spellings mean alive.
                alive = (self.agents_alive == 'True' or
                         self.agents_alive == 'true')
                for agent in returned_agents:
                    self.assertEqual(alive, agent['alive'])
class TestAgentExtRpcCallback(TestAgentsDbBase):
    """Tests for the agent state-report RPC callback (resource versions)."""

    def setUp(self):
        super(TestAgentExtRpcCallback, self).setUp()
        self.callback = agents_db.AgentExtRpcCallback(self.plugin)
        self.callback.server_versions_rpc = mock.Mock()
        self.versions_rpc = self.callback.server_versions_rpc
        # NOTE(review): START_TIME is pushed to the minimum representable
        # datetime, presumably so no report is treated as stale — confirm
        # against AgentExtRpcCallback.report_state.
        self.callback.START_TIME = datetime.datetime(datetime.MINYEAR, 1, 1)
        self.update_versions = mock.patch(
            'neutron.api.rpc.callbacks.version_manager.'
            'update_versions').start()
        self.agent_state = {'agent_state': dict(AGENT_STATUS)}

    def test_create_or_update_agent_updates_version_manager(self):
        self.callback.report_state(self.context, agent_state=self.agent_state,
                                   time=TEST_TIME)
        self.update_versions.assert_called_once_with(
            mock.ANY, TEST_RESOURCE_VERSIONS)

    def test_create_or_update_agent_updates_other_servers(self):
        callback = self.callback
        callback.report_state(self.context, agent_state=self.agent_state,
                              time=TEST_TIME)
        report_agent_resource_versions = (
            self.versions_rpc.report_agent_resource_versions)
        report_agent_resource_versions.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY, TEST_RESOURCE_VERSIONS)

    def test_no_version_updates_on_further_state_reports(self):
        # First report (with resource_versions) goes through normally.
        self.test_create_or_update_agent_updates_version_manager()
        # agents include resource_versions only in the first report after
        # start so versions should not be updated on the second report
        second_agent_state = copy.deepcopy(self.agent_state)
        second_agent_state['agent_state'].pop('resource_versions')
        self.update_versions.reset_mock()
        report_agent_resource_versions = (
            self.versions_rpc.report_agent_resource_versions)
        report_agent_resource_versions.reset_mock()
        self.callback.report_state(self.context,
                                   agent_state=second_agent_state,
                                   time=TEST_TIME)
        self.assertFalse(self.update_versions.called)
        self.assertFalse(report_agent_resource_versions.called)

    def test_version_updates_on_agent_revival(self):
        self.test_create_or_update_agent_updates_version_manager()
        second_agent_state = copy.deepcopy(self.agent_state)
        second_agent_state['agent_state'].pop('resource_versions')
        # Kill the agent so the next report is treated as a revival.
        self._take_down_agent()
        self.update_versions.reset_mock()
        report_agent_resource_versions = (
            self.versions_rpc.report_agent_resource_versions)
        report_agent_resource_versions.reset_mock()
        # agent didn't include resource_versions in report but server will
        # take them from db for the revived agent
        self.callback.report_state(self.context,
                                   agent_state=second_agent_state,
                                   time=TEST_TIME)
        self.update_versions.assert_called_once_with(
            mock.ANY, TEST_RESOURCE_VERSIONS)
        report_agent_resource_versions.assert_called_once_with(
            mock.ANY, mock.ANY, mock.ANY, TEST_RESOURCE_VERSIONS)

    def _take_down_agent(self):
        # Age the first agent's heartbeat by an hour inside a
        # subtransaction so it is considered dead afterwards.
        with self.context.session.begin(subtransactions=True):
            pager = base.Pager(limit=1)
            agent_objs = agent_obj.Agent.get_objects(self.context,
                                                     _pager=pager)
            agent_objs[0].heartbeat_timestamp = (
                agent_objs[0].heartbeat_timestamp - datetime.timedelta(
                    hours=1))
            agent_objs[0].update()
| |
#!/usr/bin/env python3
"""Data Science Toolkit client for Python.
This module provides client library functions and CLI for the Data Science
Toolkit. The module was forked from the original dstk.py library provided with
the Data Science Toolkit.
See http://www.datasciencetoolkit.org/developerdocs#python for more details.
"""
import csv
import io
import json
import mimetypes
import os
import re
import requests
import sys
# Default server; can be overridden per-instance or via DSTK_API_BASE.
API_BASE = 'http://www.datasciencetoolkit.org'
# Minimum server API version this client requires.
API_VERSION = 50

# This is the main interface class. You can see an example of it in use
# below, implementing a command-line tool, but you basically just instantiate
# dstk = DSTK()
# and then call the method you want
# coordinates = dstk.ip2coordinates('12.34.56.78')
# The full documentation is at http://www.datasciencetoolkit.org/developerdocs
class DSTK(object):
  """Client class for dstk api.

  All endpoint methods POST to the server, parse the JSON response and
  raise Exception if the server reports an error. The duplicated
  POST/parse/error-check code has been factored into the private helpers
  _post and _post_json; the public interface is unchanged.
  """

  def __init__(self, api_base=None, check_version=True):
    """Constructor for the dstk api client.

    Args:
      api_base: str, base url for the dstk server.
      check_version: bool, whether to check the server api version on startup.
    """
    if api_base is None:
      api_base = os.getenv('DSTK_API_BASE', API_BASE)
    self.api_base = api_base
    if check_version:
      self.check_version()

  def check_version(self):
    """Check the server api version.

    Raises:
      Exception: if the server is unreachable, does not answer with DSTK
        version info, or runs an API version older than API_VERSION.
    """
    api_url = '%s/info' % self.api_base
    try:
      response = requests.get(api_url)
      response_data = response.json()
      server_api_version = response_data['version']
    except (requests.RequestException, ValueError, KeyError):
      # Narrowed from a bare "except:": connection failures, non-JSON
      # responses and missing keys all mean "not a usable DSTK server",
      # but KeyboardInterrupt/SystemExit are no longer swallowed.
      raise Exception(
          'The server at %s does not seem to be running DSTK, '
          'or version information could not be found.' % self.api_base)
    if server_api_version < API_VERSION:
      raise Exception(
          'DSTK: Version %s found at %s but %s is required' % (
              server_api_version, api_url, API_VERSION))

  def _post(self, endpoint, api_body):
    """POST a raw body to *endpoint* and return the parsed JSON response.

    Raises:
      Exception: if the server response contains an 'error' entry.
    """
    api_url = '%s/%s' % (self.api_base, endpoint)
    response = requests.post(api_url, data=api_body)
    response_data = response.json()
    if 'error' in response_data:
      raise Exception(response_data['error'])
    return response_data

  def _post_json(self, endpoint, data):
    """POST a JSON-serialized payload to *endpoint*."""
    return self._post(endpoint, json.dumps(data))

  def ip2coordinates(self, ips):
    """Look up lat/lon info for one IP address or a list of them."""
    if not isinstance(ips, (list, tuple)):
      ips = [ips]
    return self._post_json('ip2coordinates', ips)

  def street2coordinates(self, addresses):
    """Geocode one postal address or a list of them."""
    if not isinstance(addresses, (list, tuple)):
      addresses = [addresses]
    return self._post_json('street2coordinates', addresses)

  def coordinates2politics(self, coordinates):
    """Return political areas containing each (lat, lon) pair."""
    return self._post_json('coordinates2politics', coordinates)

  def text2places(self, text):
    """Extract place mentions (with coordinates) from unstructured text."""
    return self._post('text2places', text)

  def file2text(self, file_name, file_object):
    """Extract text from a document or image file (OCR for images).

    Returns the raw text body of the response, not JSON.
    """
    api_url = '%s/file2text' % self.api_base
    content_type = guess_content_type(file_name)
    files = {'file': ('inputfile', file_object, content_type)}
    response = requests.post(api_url, files=files)
    return response.text

  def text2sentences(self, text):
    """Return the parts of the text that look like proper sentences."""
    return self._post('text2sentences', text)

  def html2text(self, html):
    """Return a plain-text rendering of an HTML document."""
    return self._post('html2text', html)

  def html2story(self, html):
    """Return the boilerplate-free story text of an HTML document."""
    return self._post('html2story', html)

  def text2people(self, text):
    """Extract people mentions (with inferred gender) from text."""
    return self._post('text2people', text)

  def text2times(self, text):
    """Extract dates and times mentioned in text."""
    return self._post('text2times', text)

  def text2sentiment(self, text):
    """Estimate positive/negative sentiment of the given text."""
    return self._post('text2sentiment', text)

  def coordinates2statistics(self, coordinates):
    """Return population/climate/etc statistics for each (lat, lon) pair."""
    return self._post_json('coordinates2statistics', coordinates)
def guess_content_type(filename):
  """Best-effort MIME type for *filename*, defaulting to a binary type."""
  guessed, _ = mimetypes.guess_type(filename)
  if guessed is None:
    return 'application/octet-stream'
  return guessed
# End of the interface. The rest of this file is an example implementation of a
# command line client.
def ip2coordinates_cli(dstk, options, inputs, output):
  """Resolve IP addresses found in *inputs* and write their info as CSV.

  Fixed to write CSV rows to the *output* stream instead of ignoring the
  parameter and always writing to sys.stdout (the other CLI handlers use
  *output*).
  """
  writer = csv.writer(output)
  input_ips = []
  for input_line in inputs:
    ip_match = re.match(
        r'[12]?\d?\d\.[12]?\d?\d\.[12]?\d?\d\.[12]?\d?\d', input_line)
    if ip_match is not None:
      input_ips.append(ip_match.group(0))
    else:
      print('No match')
  result = dstk.ip2coordinates(input_ips)
  if options['showHeaders']:
    # Emit one header row based on the first entry that has data.
    for ip, info in result.items():
      if info is None:
        continue
      row = ['ip_address']
      for key in info:
        row.append(str(key))
      writer.writerow(row)
      break
  for ip, info in result.items():
    if info is None:
      info = {}
    row = [ip]
    for value in info.values():
      row.append(str(value))
    writer.writerow(row)
  return
def street2coordinates_cli(dstk, options, inputs, output):
  """Geocode the addresses in *inputs* and write the results as CSV.

  Fixed to write CSV rows to the *output* stream instead of ignoring the
  parameter and always writing to sys.stdout.
  """
  writer = csv.writer(output)
  result = dstk.street2coordinates(inputs)
  if options['showHeaders']:
    # Emit one header row based on the first entry that has data.
    for address, info in result.items():
      if info is None:
        continue
      row = ['address']
      for key in info:
        row.append(str(key))
      writer.writerow(row)
      break
  for address, info in result.items():
    if info is None:
      info = {}
    row = [address]
    for value in info.values():
      row.append(str(value))
    writer.writerow(row)
  return
def coordinates2politics_cli(dstk, options, inputs, output):
  """Write the political areas for each "lat,lon" input as CSV rows."""
  writer = csv.writer(output)
  coordinates_list = []
  for an_input in inputs:
    pair = an_input.split(',')
    if len(pair) != 2:
      output.write(
          'You must enter coordinates as a series of comma-separated pairs, eg 37.76,-122.42')
      exit(-1)
    coordinates_list.append([pair[0], pair[1]])
  result = dstk.coordinates2politics(coordinates_list)
  if options['showHeaders']:
    writer.writerow(
        ['latitude', 'longitude', 'name', 'code', 'type', 'friendly_type'])
  # One CSV row per (location, political area) combination.
  for info in result:
    location = info['location']
    for politic in info['politics']:
      writer.writerow([
          location['latitude'],
          location['longitude'],
          politic['name'],
          politic['code'],
          politic['type'],
          politic['friendly_type'],
      ])
  return
def file2text_cli(dstk, options, inputs, output):
  """Extract and print the text of each input file, recursing into dirs."""
  for file_name in inputs:
    if os.path.isdir(file_name):
      # Recurse with the directory's children as the new input list.
      full_children = [os.path.join(file_name, child)
                       for child in os.listdir(file_name)]
      file2text_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      if options['showHeaders']:
        output.write('--File--: ' + file_name + "\n")
      print(dstk.file2text(file_name, file_object))
  return
def text2places_cli(dstk, options, inputs, output):
  """Write places found in files, directories or stdin text as CSV rows."""
  writer = csv.writer(output)
  if options['showHeaders']:
    writer.writerow([
        'latitude', 'longitude', 'name', 'type', 'start_index', 'end_index',
        'matched_string', 'file_name'])
    # Only the outermost invocation should emit the header.
    options['showHeaders'] = False
  if options['from_stdin']:
    text2places_format(dstk.text2places("\n".join(inputs)), 'stdin', writer)
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      full_children = [os.path.join(file_name, child)
                       for child in os.listdir(file_name)]
      text2places_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      text2places_format(dstk.text2places(file_object), file_name, writer)
  return
def text2places_format(result, file_name, writer):
  """Emit one CSV row per place record in *result*."""
  for info in result:
    writer.writerow([
        info['latitude'],
        info['longitude'],
        info['name'],
        info['type'],
        info['start_index'],
        info['end_index'],
        info['matched_string'],
        file_name,
    ])
  return
def html2text_cli(dstk, options, inputs, output):
  """Print the plain-text rendering of each HTML input."""
  if options['from_stdin']:
    print(dstk.html2text("\n".join(inputs))['text'])
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      # Recurse with the directory's children as the new input list.
      full_children = [os.path.join(file_name, child)
                       for child in os.listdir(file_name)]
      html2text_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      if options['showHeaders']:
        output.write('--File--: ' + file_name + "\n")
      print(dstk.html2text(file_object)['text'])
  return
def text2sentences_cli(dstk, options, inputs, output):
  """Print the sentence-like parts of each input."""
  if options['from_stdin']:
    print(dstk.text2sentences("\n".join(inputs))['sentences'])
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      # Recurse with the directory's children as the new input list.
      full_children = [os.path.join(file_name, child)
                       for child in os.listdir(file_name)]
      text2sentences_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      if options['showHeaders']:
        output.write('--File--: ' + file_name + "\n")
      print(dstk.text2sentences(file_object)['sentences'])
  return
def html2story_cli(dstk, options, inputs, output):
  """Print the boilerplate-free story text of each HTML input."""
  if options['from_stdin']:
    print(dstk.html2story("\n".join(inputs))['story'])
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      # Recurse with the directory's children as the new input list.
      full_children = [os.path.join(file_name, child)
                       for child in os.listdir(file_name)]
      html2story_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      if options['showHeaders']:
        output.write('--File--: ' + file_name + "\n")
      print(dstk.html2story(file_object)['story'])
  return
def text2people_cli(dstk, options, inputs, output):
  """Write people mentions found in the inputs as CSV rows.

  Fixes: the directory branch previously recursed into text2places_cli
  (the wrong command handler), and CSV rows went to sys.stdout instead of
  the *output* stream.
  """
  writer = csv.writer(output)
  if options['showHeaders']:
    row = [
        'matched_string', 'first_name', 'surnames', 'title', 'gender',
        'start_index', 'end_index', 'file_name']
    writer.writerow(row)
    # Only the outermost invocation should emit the header.
    options['showHeaders'] = False
  if options['from_stdin']:
    result = dstk.text2people("\n".join(inputs))
    text2people_format(result, 'stdin', writer)
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      full_children = []
      for child in os.listdir(file_name):
        full_children.append(os.path.join(file_name, child))
      # Recurse with this command's own handler (was text2places_cli).
      text2people_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      result = dstk.text2people(file_object)
      text2people_format(result, file_name, writer)
  return
def text2people_format(result, file_name, writer):
  """Emit one CSV row per person record in *result*."""
  for info in result:
    writer.writerow([
        info['matched_string'],
        info['first_name'],
        info['surnames'],
        info['title'],
        info['gender'],
        str(info['start_index']),
        str(info['end_index']),
        file_name,
    ])
  return
def text2times_cli(dstk, options, inputs, output):
  """Write times/dates found in the inputs as CSV rows.

  Fixed to write CSV rows to the *output* stream instead of ignoring the
  parameter and always writing to sys.stdout.
  """
  writer = csv.writer(output)
  if options['showHeaders']:
    row = [
        'matched_string', 'time_string', 'time_seconds', 'is_relative',
        'start_index', 'end_index', 'file_name']
    writer.writerow(row)
    # Only the outermost invocation should emit the header.
    options['showHeaders'] = False
  if options['from_stdin']:
    result = dstk.text2times("\n".join(inputs))
    text2times_format(result, 'stdin', writer)
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      full_children = []
      for child in os.listdir(file_name):
        full_children.append(os.path.join(file_name, child))
      text2times_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      result = dstk.text2times(file_object)
      text2times_format(result, file_name, writer)
  return
def text2times_format(result, file_name, writer):
  """Emit one CSV row per time/date record in *result*."""
  for info in result:
    writer.writerow([
        info['matched_string'],
        info['time_string'],
        info['time_seconds'],
        info['is_relative'],
        str(info['start_index']),
        str(info['end_index']),
        file_name,
    ])
  return
def text2sentiment_cli(dstk, options, inputs, output):
  """Write a sentiment score for each line of the inputs as CSV rows.

  Fixes: CSV now goes to the *output* stream rather than sys.stdout; the
  file branch previously called .split() directly on the file object
  (AttributeError for local files) and reported 'stdin' instead of the
  real file name.
  """
  writer = csv.writer(output)
  if options['showHeaders']:
    writer.writerow(['sentiment', 'sentence', 'file_name'])
    # Only the outermost invocation should emit the header.
    options['showHeaders'] = False
  if options['from_stdin']:
    for sentence in inputs:
      result = dstk.text2sentiment(sentence)
      text2sentiment_format(result, sentence, 'stdin', writer)
    return
  for file_name in inputs:
    if os.path.isdir(file_name):
      full_children = []
      for child in os.listdir(file_name):
        full_children.append(os.path.join(file_name, child))
      text2sentiment_cli(dstk, options, full_children, output)
    else:
      file_object = get_file_or_url_object(file_name)
      contents = file_object.read()
      if isinstance(contents, bytes):
        # Local files are opened in binary mode; decode before splitting.
        contents = contents.decode('utf-8')
      for sentence in contents.split("\n"):
        result = dstk.text2sentiment(sentence)
        text2sentiment_format(result, sentence, file_name, writer)
  return
def text2sentiment_format(result, sentence, file_name, writer):
  """Emit one CSV row: score, trimmed sentence, originating file."""
  row = [result['score'], sentence.strip(), file_name]
  writer.writerow(row)
  return
def coordinates2statistics_cli(dstk, options, inputs, output):
  """Write statistics for each "lat,lon" input as CSV rows."""
  writer = csv.writer(output)
  coordinates_list = []
  for an_input in inputs:
    pair = an_input.split(',')
    if len(pair) != 2:
      output.write(
          'You must enter coordinates as a series of comma-separated pairs, eg 37.76,-122.42')
      exit(-1)
    coordinates_list.append([pair[0], pair[1]])
  results = dstk.coordinates2statistics(coordinates_list)
  if options['showHeaders']:
    writer.writerow(
        ['latitude', 'longitude', 'statistic', 'value', 'description'])
  # One CSV row per (location, statistic) combination.
  for result in results:
    location = result['location']
    for statistic, info in result['statistics'].items():
      writer.writerow([
          location['latitude'],
          location['longitude'],
          statistic,
          info['value'],
          info['description'],
      ])
  return
def get_file_or_url_object(file_name):
  """Return a binary file-like object for a local path or an http(s) URL.

  Fixed: the URL branch previously rebuilt the body from iter_lines(),
  which strips newline characters and so corrupted the downloaded
  content; it now wraps the raw response body.
  """
  if file_name.startswith('http://') or file_name.startswith('https://'):
    response = requests.get(file_name)
    file_object = io.BytesIO(response.content)
  else:
    file_object = open(file_name, 'rb')
  return file_object
def print_usage(message=''):
  """Print *message* plus the CLI usage help, then terminate the process."""
  usage_lines = (
      "Usage:",
      "python dstk.py <command> [-a/--api_base 'http://yourhost.com'] [-h/--show_headers] <inputs>",
      "Where <command> is one of:",
      " ip2coordinates (lat/lons for IP addresses)",
      " street2coordinates (lat/lons for postal addresses)",
      " coordinates2politics (country/state/county/constituency/etc for lat/lon)",
      " text2places (lat/lons for places mentioned in unstructured text)",
      " file2text (PDF/Excel/Word to text, and OCR on PNG/Jpeg/Tiff images)",
      " text2sentences (parts of the text that look like proper sentences)",
      " html2text (text version of the HTML document)",
      " html2story (text version of the HTML with no boilerplate)",
      " text2people (gender for people mentioned in unstructured text)",
      " text2times (times and dates mentioned in unstructured text)",
      " text2sentiment (estimates the positive or negative sentiment of each line of text)",
      " coordinates2statistics (population/climate/elevation/etc for lat/lon)",
      "If no inputs are specified, then standard input will be read and used",
      "See http://www.datasciencetoolkit.org/developerdocs for more details",
      "Examples:",
      "python dstk.py ip2coordinates 67.169.73.113",
      "python dstk.py street2coordinates \"2543 Graystone Place, Simi Valley, CA 93065\"",
      "python dstk.py file2text scanned.jpg",
  )
  print(message)
  print("\n".join(usage_lines))
  exit(-1)
def main():
  """Command-line entry point: parse sys.argv and dispatch to a handler."""
  # Map each CLI command name to its handler function.
  commands = {
    'ip2coordinates': {'handler': ip2coordinates_cli},
    'street2coordinates': {'handler': street2coordinates_cli},
    'coordinates2politics': {'handler': coordinates2politics_cli},
    'text2places': {'handler': text2places_cli},
    'file2text': {'handler': file2text_cli},
    'text2sentences': {'handler': text2sentences_cli},
    'html2text': {'handler': html2text_cli},
    'html2story': {'handler': html2story_cli},
    'text2people': {'handler': text2people_cli},
    'text2times': {'handler': text2times_cli},
    'text2sentiment': {'handler': text2sentiment_cli},
    'coordinates2statistics': {'handler': coordinates2statistics_cli},
  }
  # Recognized long option names; the values are unused, only membership
  # is checked below.
  switches = {
    'api_base': True,
    'show_headers': True
  }
  command = None
  options = {'showHeaders': False}
  inputs = []
  # Set when the previous flag consumed the following argv item.
  ignore_next = False
  for index, arg in enumerate(sys.argv[1:]):
    if ignore_next:
      ignore_next = False
      continue
    if arg[0] == '-' and len(arg) > 1:
      if len(arg) == 2:
        # Short options: map the single letter onto a long name.
        # NOTE(review): an unrecognized letter leaves `option` unset (or
        # stale from a previous iteration); it is only caught by the
        # `option not in switches` check below — verify this path.
        letter = arg[1]
        if letter == 'a':
          option = 'api_base'
        elif letter == 'h':
          option = 'show_headers'
      else:
        # Long options: strip the leading "--".
        option = arg[2:]
      if option not in switches:
        print_usage('Unknown option "'+arg+'"')
      if option == 'api_base':
        # `index` is relative to sys.argv[1:], so the option's value is
        # sys.argv[index+2].
        if (index+2) >= len(sys.argv):
          print_usage('Missing argument for option "'+arg+'"')
        options['apiBase'] = sys.argv[index+2]
        ignore_next = True
      elif option == 'show_headers':
        options['showHeaders'] = True
    else:
      # First bare argument is the command; the rest are its inputs.
      if command is None:
        command = arg
        if command not in commands:
          print_usage('Unknown command "'+arg+'"')
      else:
        inputs.append(arg)
  if command is None:
    print_usage('No command specified')
  if len(inputs) < 1:
    # No positional inputs: read them from standard input instead.
    options['from_stdin'] = True
    inputs = sys.stdin.readlines()
  else:
    options['from_stdin'] = False
  command_info = commands[command]
  # apiBase may be absent; DSTK then falls back to its own default.
  dstk = DSTK(api_base=options.get('apiBase'))
  command_info['handler'](dstk, options, inputs, sys.stdout)
# Allow use both as an importable library and as a command-line script.
if __name__ == '__main__':
  main()
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from . import base
from . import sql_connection
from . import plate as plate_module
from . import process as process_module
from . import composition as composition_module
# Uppercase alphabet used by Well.well_id to translate 1-based row
# numbers into letters (1 -> 'A').
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class Container(base.LabControlObject):
    """Container object

    Base class for the physical containers (tubes and wells); the shared
    state lives in the labcontrol.container table, while each subclass has
    its own table keyed by container_id.

    Attributes
    ----------
    id
    remaining_volume
    notes
    latest_process
    """

    @staticmethod
    def factory(container_id):
        """Initializes the correct container subclass

        Parameters
        ----------
        container_id : int
            The container id

        Returns
        -------
        An instance of a subclass of Container
        """
        factory_classes = {'tube': Tube, 'well': Well}
        with sql_connection.TRN as TRN:
            # The container_type description tells us which subclass to use.
            sql = """SELECT description
                     FROM labcontrol.container_type
                        JOIN labcontrol.container USING (container_type_id)
                     WHERE container_id = %s"""
            TRN.add(sql, [container_id])
            c_type = TRN.execute_fetchlast()
            constructor = factory_classes[c_type]
            # Translate the generic container id into the subclass-specific
            # id (e.g. tube_id or well_id) the constructor expects.
            sql = """SELECT {}
                     FROM {}
                     WHERE container_id = %s""".format(
                        constructor._id_column, constructor._table)
            TRN.add(sql, [container_id])
            subclass_id = TRN.execute_fetchlast()
            instance = constructor(subclass_id)
        return instance

    @classmethod
    def _common_creation_steps(cls, process, remaining_volume):
        # Insert the shared labcontrol.container row for a new container and
        # return its id; subclasses then insert their own row pointing at it.
        with sql_connection.TRN as TRN:
            sql = """SELECT container_type_id
                     FROM labcontrol.container_type
                     WHERE description = %s"""
            TRN.add(sql, [cls._container_type])
            ct_id = TRN.execute_fetchlast()

            sql = """INSERT INTO labcontrol.container
                        (container_type_id, latest_upstream_process_id,
                         remaining_volume)
                     VALUES (%s, %s, %s)
                     RETURNING container_id"""
            TRN.add(sql, [ct_id, process.process_id, remaining_volume])
            container_id = TRN.execute_fetchlast()
        return container_id

    def _get_container_attr(self, attr):
        """Returns the value of the given container attribute

        Parameters
        ----------
        attr : str
            The attribute to retrieve

        Returns
        -------
        Object
            The attribute
        """
        with sql_connection.TRN as TRN:
            # Join through the subclass table to find this instance's
            # container row.
            sql = """SELECT {}
                     FROM labcontrol.container
                        JOIN {} USING (container_id)
                     WHERE {} = %s""".format(attr, self._table,
                                             self._id_column)
            TRN.add(sql, [self.id])
            return TRN.execute_fetchlast()

    @property
    def remaining_volume(self):
        """The remaining volume of the container"""
        return self._get_container_attr('remaining_volume')

    @property
    def notes(self):
        """The container notes"""
        return self._get_container_attr('notes')

    @property
    def latest_process(self):
        """The latest process applied to the container"""
        return process_module.Process.factory(
            self._get_container_attr('latest_upstream_process_id'))

    @property
    def container_id(self):
        # The generic labcontrol.container id (distinct from self.id, which
        # is the subclass-specific id).
        return self._get_container_attr('container_id')

    @property
    def composition(self):
        """Returns the composition that the container is holding"""
        with sql_connection.TRN as TRN:
            sql = """SELECT composition_id
                     FROM labcontrol.composition
                        JOIN {} USING (container_id)
                     WHERE {} = %s""".format(self._table, self._id_column)
            TRN.add(sql, [self.id])
            comp_id = TRN.execute_fetchlast()
            # Let the composition factory pick the right subclass.
            comp = composition_module.Composition.factory(comp_id)
        return comp
class Tube(Container):
    """Tube object

    Attributes
    ----------
    external_id
    discarded

    See Also
    --------
    Container
    """
    _table = "labcontrol.tube"
    _id_column = "tube_id"
    _container_type = "tube"

    @classmethod
    def create(cls, process, external_id, volume):
        """Creates a new tube

        Parameters
        ----------
        process : labcontrol.db.process.Process
            The process that created this reagent
        external_id : str
            The external id of the tube
        volume : float
            The initial volume of the tube

        Returns
        -------
        labcontrol.db.container.Tube
        """
        with sql_connection.TRN as TRN:
            # First create the shared container row, then the tube row
            # pointing at it.
            container_id = cls._common_creation_steps(process, volume)
            sql = """INSERT INTO labcontrol.tube (container_id, external_id)
                     VALUES (%s, %s)
                     RETURNING tube_id"""
            TRN.add(sql, [container_id, external_id])
            tube_id = TRN.execute_fetchlast()
        return cls(tube_id)

    @property
    def external_id(self):
        """The tube external identifier"""
        return self._get_attr('external_id')

    @property
    def discarded(self):
        """Whether the tube is discarded or not"""
        return self._get_attr('discarded')

    def discard(self):
        """Discard the tube

        Raises
        ------
        ValueError
            If the tube was already discarded
        """
        # Discarding is one-way; refuse a second discard.
        if self.discarded:
            raise ValueError("Can't discard tube %s: it's already discarded."
                             % self.id)
        self._set_attr('discarded', True)
class Well(Container):
    """Well object

    Attributes
    ----------
    plate
    row
    column

    See Also
    --------
    Container
    """
    _table = "labcontrol.well"
    _id_column = "well_id"
    _container_type = 'well'

    @classmethod
    def create(cls, plate, process, volume, row, col):
        """Creates a new well

        Parameters
        ----------
        plate: labcontrol.db.Plate
            The plate to which this well belongs to
        process: labcontrol.db.Process
            The process that generated this well
        volume : float
            The initial volume of the well
        row : int
            The row number of the well
        col : int
            The column number of the well

        Returns
        -------
        labcontrol.db.Well
        """
        with sql_connection.TRN as TRN:
            # First create the shared container row, then the well row
            # pointing at it.
            container_id = cls._common_creation_steps(process, volume)
            sql = """INSERT INTO labcontrol.well
                        (container_id, plate_id, row_num, col_num)
                     VALUES (%s, %s, %s, %s)
                     RETURNING well_id"""
            TRN.add(sql, [container_id, plate.id, row, col])
            well_id = TRN.execute_fetchlast()
        return cls(well_id)

    @property
    def plate(self):
        """The plate the well belongs to"""
        return plate_module.Plate(self._get_attr('plate_id'))

    @property
    def row(self):
        """The well row"""
        return self._get_attr('row_num')

    @property
    def column(self):
        """The well column"""
        return self._get_attr('col_num')

    @property
    def well_id(self):
        """The well id in the "A1","H12" form"""
        row = self.row
        col = self.column
        # Adapted from https://stackoverflow.com/a/19169180/3746629
        # Converts the 1-based row number to a base-26 letter string
        # (1 -> A, 26 -> Z, 27 -> AA); result[:0] prepends each new letter.
        result = []
        while row:
            row, rem = divmod(row-1, 26)
            result[:0] = LETTERS[rem]
        return ''.join(result) + str(col)
| |
"""
flask_httpauth
==================
This module provides Basic and Digest HTTP authentication for Flask routes.
:copyright: (C) 2014 by Miguel Grinberg.
:license: MIT, see LICENSE for more details.
"""
import hmac
from base64 import b64decode
from functools import wraps
from hashlib import md5
from random import Random, SystemRandom
from flask import request, make_response, session, g, Response, current_app
from werkzeug.datastructures import Authorization
class HTTPAuth(object):
    """Base class for the HTTP authentication schemes.

    Holds the pieces shared by Basic/Digest/token authentication: the
    scheme name, the realm, an optional custom credential header, and
    the user-registered callbacks (password lookup, role lookup, error
    handler).
    """
    def __init__(self, scheme=None, realm=None, header=None):
        # header is the name of the header carrying the credentials;
        # None means the standard "Authorization" header.
        self.scheme = scheme
        self.realm = realm or "Authentication Required"
        self.header = header
        self.get_password_callback = None
        self.get_user_roles_callback = None
        self.auth_error_callback = None
        def default_get_password(username):
            # no password source configured: authentication always fails
            return None
        def default_auth_error(status):
            return "Unauthorized Access", status
        self.get_password(default_get_password)
        self.error_handler(default_auth_error)
    def is_compatible_auth(self, headers):
        """Return True if this auth object can handle the given headers."""
        if self.header is None or self.header == 'Authorization':
            try:
                scheme, _ = request.headers.get('Authorization', '').split(
                    None, 1)
            except ValueError:
                # malformed Authorization header
                return False
            return scheme == self.scheme
        else:
            # custom header: compatible whenever the header is present
            return self.header in headers
    def get_password(self, f):
        """Decorator registering the password-lookup callback."""
        self.get_password_callback = f
        return f
    def get_user_roles(self, f):
        """Decorator registering the role-lookup callback."""
        self.get_user_roles_callback = f
        return f
    def error_handler(self, f):
        """Decorator registering the error handler.

        The handler's result is converted to a response; when the
        handler did not set a status code itself, 401 is used, and a
        WWW-Authenticate challenge is added if missing.
        """
        @wraps(f)
        def decorated(*args, **kwargs):
            res = self.ensure_sync(f)(*args, **kwargs)
            check_status_code = not isinstance(res, (tuple, Response))
            res = make_response(res)
            if check_status_code and res.status_code == 200:
                # if user didn't set status code, use 401
                res.status_code = 401
            if 'WWW-Authenticate' not in res.headers.keys():
                res.headers['WWW-Authenticate'] = self.authenticate_header()
            return res
        self.auth_error_callback = decorated
        return decorated
    def authenticate_header(self):
        """Value used for the WWW-Authenticate challenge header."""
        return '{0} realm="{1}"'.format(self.scheme, self.realm)
    def get_auth(self):
        """Extract the credentials from the current request.

        Returns a werkzeug ``Authorization`` object, or None when no
        usable credentials of the expected scheme are present.
        """
        auth = None
        if self.header is None or self.header == 'Authorization':
            auth = request.authorization
            if auth is None and 'Authorization' in request.headers:
                # Flask/Werkzeug do not recognize any authentication types
                # other than Basic or Digest, so here we parse the header by
                # hand
                try:
                    auth_type, token = request.headers['Authorization'].split(
                        None, 1)
                    auth = Authorization(auth_type, {'token': token})
                except (ValueError, KeyError):
                    # The Authorization header is either empty or has no token
                    pass
        elif self.header in request.headers:
            # using a custom header, so the entire value of the header is
            # assumed to be a token
            auth = Authorization(self.scheme,
                                 {'token': request.headers[self.header]})
        # if the auth type does not match, we act as if there is no auth
        # this is better than failing directly, as it allows the callback
        # to handle special cases, like supporting multiple auth types
        if auth is not None and auth.type.lower() != self.scheme.lower():
            auth = None
        return auth
    def get_auth_password(self, auth):
        """Look up the stored password for the credentials' username."""
        password = None
        if auth and auth.username:
            password = self.ensure_sync(self.get_password_callback)(
                auth.username)
        return password
    def authorize(self, role, user, auth):
        """Return a truthy value when *user* satisfies *role*.

        ``role`` may be None (no restriction), a single role, a list of
        alternative roles, or a nested list meaning "all of these roles".
        """
        if role is None:
            return True
        if isinstance(role, (list, tuple)):
            roles = role
        else:
            roles = [role]
        if user is True:
            user = auth
        if self.get_user_roles_callback is None:  # pragma: no cover
            raise ValueError('get_user_roles callback is not defined')
        user_roles = self.ensure_sync(self.get_user_roles_callback)(user)
        if user_roles is None:
            user_roles = {}
        elif not isinstance(user_roles, (list, tuple)):
            user_roles = {user_roles}
        else:
            user_roles = set(user_roles)
        for role in roles:
            if isinstance(role, (list, tuple)):
                # nested list: the user must have every role in it
                role = set(role)
                if role & user_roles == role:
                    return True
            elif role in user_roles:
                return True
    def login_required(self, f=None, role=None, optional=None):
        """Decorator protecting a view with this authentication object.

        Usable bare (``@auth.login_required``) or with arguments
        (``@auth.login_required(role=..., optional=...)``).
        """
        if f is not None and \
                (role is not None or optional is not None):  # pragma: no cover
            raise ValueError(
                'role and optional are the only supported arguments')
        def login_required_internal(f):
            @wraps(f)
            def decorated(*args, **kwargs):
                auth = self.get_auth()
                # Flask normally handles OPTIONS requests on its own, but in
                # the case it is configured to forward those to the
                # application, we need to ignore authentication headers and
                # let the request through to avoid unwanted interactions with
                # CORS.
                if request.method != 'OPTIONS':  # pragma: no cover
                    password = self.get_auth_password(auth)
                    status = None
                    user = self.authenticate(auth, password)
                    if user in (False, None):
                        status = 401
                    elif not self.authorize(role, user, auth):
                        status = 403
                    if not optional and status:
                        # error handlers may take the status or no argument
                        try:
                            return self.auth_error_callback(status)
                        except TypeError:
                            return self.auth_error_callback()
                    g.flask_httpauth_user = user if user is not True \
                        else auth.username if auth else None
                return self.ensure_sync(f)(*args, **kwargs)
            return decorated
        if f:
            return login_required_internal(f)
        return login_required_internal
    def username(self):
        """Username from the current request's credentials ('' if none)."""
        auth = self.get_auth()
        if not auth:
            return ""
        return auth.username
    def current_user(self):
        """The user stored by login_required for the current request."""
        if hasattr(g, 'flask_httpauth_user'):
            return g.flask_httpauth_user
    def ensure_sync(self, f):
        """Adapt *f* for async views on Flask 2+; no-op on older Flask."""
        try:
            return current_app.ensure_sync(f)
        except AttributeError:  # pragma: no cover
            return f
class HTTPBasicAuth(HTTPAuth):
    """HTTP Basic authentication for Flask routes."""

    def __init__(self, scheme=None, realm=None):
        super(HTTPBasicAuth, self).__init__(scheme or 'Basic', realm)
        self.hash_password_callback = None
        self.verify_password_callback = None

    def hash_password(self, f):
        """Decorator registering a callback that hashes a password."""
        self.hash_password_callback = f
        return f

    def verify_password(self, f):
        """Decorator registering a username/password verifier."""
        self.verify_password_callback = f
        return f

    def get_auth(self):
        """Parse Basic-style credentials out of the request headers.

        More flexible than Werkzeug's parser, as it also accepts other
        schemes besides "Basic".
        """
        header_name = self.header or 'Authorization'
        raw_value = request.headers.get(header_name)
        if raw_value is None:
            return None
        encoded = raw_value.encode('utf-8')
        try:
            scheme, credentials = encoded.split(b' ', 1)
            username, password = b64decode(credentials).split(b':', 1)
        except (ValueError, TypeError):
            return None
        try:
            username = username.decode('utf-8')
            password = password.decode('utf-8')
        except UnicodeDecodeError:
            # credentials are not valid UTF-8
            username = None
            password = None
        return Authorization(
            scheme, {'username': username, 'password': password})

    def authenticate(self, auth, stored_password):
        """Check the client credentials against the stored password."""
        if auth:
            username = auth.username
            client_password = auth.password
        else:
            username = ""
            client_password = ""
        if self.verify_password_callback:
            return self.ensure_sync(self.verify_password_callback)(
                username, client_password)
        if not auth:
            return
        if self.hash_password_callback:
            # the hash callback may take (password) or (username, password)
            try:
                client_password = self.ensure_sync(
                    self.hash_password_callback)(client_password)
            except TypeError:
                client_password = self.ensure_sync(
                    self.hash_password_callback)(username, client_password)
        if client_password is None or stored_password is None:
            return None
        matched = hmac.compare_digest(client_password, stored_password)
        return auth.username if matched else None
class HTTPDigestAuth(HTTPAuth):
    """HTTP Digest authentication backed by the Flask session.

    When ``use_ha1_pw`` is True the password callback is expected to
    return the precomputed HA1 hash instead of the plain password.
    """
    def __init__(self, scheme=None, realm=None, use_ha1_pw=False):
        super(HTTPDigestAuth, self).__init__(scheme or 'Digest', realm)
        self.use_ha1_pw = use_ha1_pw
        self.random = SystemRandom()
        try:
            self.random.random()
        except NotImplementedError:  # pragma: no cover
            # no OS entropy source available: fall back to pseudo-random
            self.random = Random()
        self.generate_nonce_callback = None
        self.verify_nonce_callback = None
        self.generate_opaque_callback = None
        self.verify_opaque_callback = None
        def _generate_random():
            return md5(str(self.random.random()).encode('utf-8')).hexdigest()
        def default_generate_nonce():
            # default nonce/opaque handling stores the values in the session
            session["auth_nonce"] = _generate_random()
            return session["auth_nonce"]
        def default_verify_nonce(nonce):
            session_nonce = session.get("auth_nonce")
            if nonce is None or session_nonce is None:
                return False
            # constant-time comparison
            return hmac.compare_digest(nonce, session_nonce)
        def default_generate_opaque():
            session["auth_opaque"] = _generate_random()
            return session["auth_opaque"]
        def default_verify_opaque(opaque):
            session_opaque = session.get("auth_opaque")
            if opaque is None or session_opaque is None:  # pragma: no cover
                return False
            return hmac.compare_digest(opaque, session_opaque)
        self.generate_nonce(default_generate_nonce)
        self.generate_opaque(default_generate_opaque)
        self.verify_nonce(default_verify_nonce)
        self.verify_opaque(default_verify_opaque)
    def generate_nonce(self, f):
        """Decorator registering a custom nonce generator."""
        self.generate_nonce_callback = f
        return f
    def verify_nonce(self, f):
        """Decorator registering a custom nonce verifier."""
        self.verify_nonce_callback = f
        return f
    def generate_opaque(self, f):
        """Decorator registering a custom opaque generator."""
        self.generate_opaque_callback = f
        return f
    def verify_opaque(self, f):
        """Decorator registering a custom opaque verifier."""
        self.verify_opaque_callback = f
        return f
    def get_nonce(self):
        """Produce a fresh nonce via the registered callback."""
        return self.generate_nonce_callback()
    def get_opaque(self):
        """Produce a fresh opaque value via the registered callback."""
        return self.generate_opaque_callback()
    def generate_ha1(self, username, password):
        """Compute the HA1 hash: md5("username:realm:password")."""
        a1 = username + ":" + self.realm + ":" + password
        a1 = a1.encode('utf-8')
        return md5(a1).hexdigest()
    def authenticate_header(self):
        """WWW-Authenticate challenge including nonce and opaque."""
        nonce = self.get_nonce()
        opaque = self.get_opaque()
        return '{0} realm="{1}",nonce="{2}",opaque="{3}"'.format(
            self.scheme, self.realm, nonce,
            opaque)
    def authenticate(self, auth, stored_password_or_ha1):
        """Validate the client's digest response.

        Recomputes response = md5(HA1:nonce:HA2) and compares it in
        constant time against the value sent by the client.
        """
        if not auth or not auth.username or not auth.realm or not auth.uri \
                or not auth.nonce or not auth.response \
                or not stored_password_or_ha1:
            return False
        if not(self.verify_nonce_callback(auth.nonce)) or \
                not(self.verify_opaque_callback(auth.opaque)):
            return False
        if self.use_ha1_pw:
            ha1 = stored_password_or_ha1
        else:
            a1 = auth.username + ":" + auth.realm + ":" + \
                stored_password_or_ha1
            ha1 = md5(a1.encode('utf-8')).hexdigest()
        a2 = request.method + ":" + auth.uri
        ha2 = md5(a2.encode('utf-8')).hexdigest()
        a3 = ha1 + ":" + auth.nonce + ":" + ha2
        response = md5(a3.encode('utf-8')).hexdigest()
        return hmac.compare_digest(response, auth.response)
class HTTPTokenAuth(HTTPAuth):
    """Token-based (e.g. Bearer) authentication for Flask routes."""

    def __init__(self, scheme='Bearer', realm=None, header=None):
        super(HTTPTokenAuth, self).__init__(scheme, realm, header)
        self.verify_token_callback = None

    def verify_token(self, f):
        """Decorator registering the token-verification callback."""
        self.verify_token_callback = f
        return f

    def authenticate(self, auth, stored_password):
        """Validate the token through the registered callback."""
        token = auth['token'] if auth else ""
        if self.verify_token_callback:
            return self.ensure_sync(self.verify_token_callback)(token)
class MultiAuth(object):
    """Dispatch between several auth objects based on request headers."""

    def __init__(self, main_auth, *args):
        self.main_auth = main_auth
        self.additional_auth = args

    def login_required(self, f=None, role=None, optional=None):
        """Protect a view, choosing whichever auth matches the request."""
        if f is not None and \
                (role is not None or optional is not None):  # pragma: no cover
            raise ValueError(
                'role and optional are the only supported arguments')

        def login_required_internal(f):
            @wraps(f)
            def decorated(*args, **kwargs):
                # prefer the main auth; fall back to the first additional
                # auth object compatible with the incoming headers
                selected = self.main_auth
                if not selected.is_compatible_auth(request.headers):
                    compatible = (a for a in self.additional_auth
                                  if a.is_compatible_auth(request.headers))
                    selected = next(compatible, selected)
                protected = selected.login_required(
                    role=role, optional=optional)
                return protected(f)(*args, **kwargs)
            return decorated

        if f:
            return login_required_internal(f)
        return login_required_internal

    def current_user(self):
        """The user stored by the selected auth for this request."""
        if hasattr(g, 'flask_httpauth_user'):  # pragma: no cover
            return g.flask_httpauth_user
| |
import sys,os, pickle, numpy, pylab, operator, itertools
import cv2
from shutil import copy as copyfile
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
from DataParseApp import dataparseDialog
from sklearn.decomposition import NMF
# make the project's ui folder and sibling JCAP repositories importable
projectpath=os.path.split(os.path.abspath(__file__))[0]
sys.path.append(os.path.join(projectpath,'ui'))
pythoncodepath=os.path.split(projectpath)[0]
jcapdataprocesspath=os.path.join(pythoncodepath, 'JCAPDataProcess')
sys.path.append(jcapdataprocesspath)
from VisualizeDataApp import visdataDialog
sys.path.append(os.path.join(jcapdataprocesspath,'AuxPrograms'))
from fcns_ui import *
from fcns_io import *
platemapvisprocesspath=os.path.join(pythoncodepath, 'JCAPPlatemapVisualize')
sys.path.append(platemapvisprocesspath)
from plate_image_align_Dialog import plateimagealignDialog
import numpy as np
###############UPDATE THIS TO BE THE FOLDER CONTAINING parameters.py
#paramsfolder=r'K:\users\hte\Raman\39664\20170607analysis_tippy-top'
paramsfolder=r'K:\users\hte\Raman\33444\20170608analysis'
#if not paramsfolder is None:
# parameters.py supplies plateidstr, pathd, sample_list, dx_smp, dy_smp,
# default_sample_blob_dict, udi_ternary_projection_inds, show_help_messages
# (see the commented example below)
sys.path.append(paramsfolder)
from parameters import *
#else:
# plateidstr='3344'
#
# pathd={'ramanfile':r'K:\users\hte\Raman\33444\HSS_33444_map-1-_CRR-EM-copy.txt'}
# pathd['mainfolder']=os.path.split(pathd['ramanfile'])[0]
# pathd['savefolder']=os.path.join(pathd['mainfolder'], '20170607analysis')
# pathd['infopck']=pathd['ramanfile'][:-4]+'__info.pck'
# pathd['allspectra']=os.path.join(pathd['savefolder'],'allspectra.npy')
# pathd['nmfdata']=os.path.join(pathd['savefolder'],'nmf4.pck')
# pathd['edges']=os.path.join(pathd['savefolder'],'edges.png')
# pathd['mapfill']=os.path.join(pathd['savefolder'],'blobmap.png')
# pathd['blobd']=os.path.join(pathd['savefolder'],'blobd.pck')
# pathd['alignedsamples']=os.path.join(pathd['savefolder'],'alignedsamples.png')
# pathd['alignedsamplestxt']=os.path.join(pathd['savefolder'],'alignedsamples.txt')
# pathd['spectrafolder']=os.path.join(pathd['savefolder'],'sample_spectra')
# pathd['map']=os.path.join(pathd['spectrafolder'],'raman_sample_index_map.map')
# pathd['samplepixels']=os.path.join(pathd['spectrafolder'],'samplepixels.png')
# pathd['udibasepath']=os.path.join(pathd['savefolder'],'ave_rmn_')
#
# udi_ternary_projection_inds=[0, 1, 2]#only used for the all.udi file
#
# sample_list=[1850,1851,1852,1853,1854,1855,1905,1906,1907,1908,1909,1910,1911,1912,1913,1914,1915,1916,1917,1918,1919,1969,1970,1971,1972,1973,1974,1975,1976,1977,1978,1979,1980,1981,1982,1983,2033,2034,2035,2036,2037,2038,2039,2040,2041,2042,2043,2044,2045,2046,2047,2097,2098,2099,2100,2101,2102,2103,2104,2105,2106,2107,2108,2109,2110,2111]
# dx_smp=1.
# dy_smp=1.
#
# default_sample_blob_dict=dict({}, \
# smp_is_square=0, smp_width=1., bcknd_is_square=0, bcknd_min_width=1.3, bcknd_max_width=1.4, removedups=1\
# )
#
# show_help_messages=True
# resolve the platemap file for this plate id (helper from fcns_io)
platemappath=getplatemappath_plateid(plateidstr)
# sanity-check the input folder and create the output folders if needed
if not os.path.isdir(pathd['mainfolder']):
    print 'NOT A VALID FOLDER'
if not os.path.isdir(pathd['savefolder']):
    os.mkdir(pathd['savefolder'])
if not os.path.isdir(pathd['spectrafolder']):
    os.mkdir(pathd['spectrafolder'])
class MainMenu(QMainWindow):
    """Top-level Qt window owning the parse and plate-alignment dialogs."""
    def __init__(self, previousmm, execute=True, **kwargs):
        # previousmm is unused here; kept for call compatibility with
        # other JCAP main-menu classes
        super(MainMenu, self).__init__(None)
        self.parseui=dataparseDialog(self, title='Visualize ANA, EXP, RUN data', **kwargs)
        self.alignui=plateimagealignDialog(self, manual_image_init_bool=False)
        if execute:
            self.parseui.exec_()
def doNMF(datan,n_components=4):
    """Factor the spectra matrix with non-negative matrix factorization.

    Returns the per-spectrum weights and the component spectra.
    (Originally from Mitsu; PCA would be a faster alternative.)
    """
    model = NMF(n_components=n_components, init='nndsvd')
    weights = model.fit_transform(datan)
    components = model.components_
    return weights, components
def rgb_comp(arr2d, affine=True):
    """Convert an (N, 4) array of CMYK values in [0, 1] to (N, 3) RGB.

    Parameters
    ----------
    arr2d : array-like, shape (N, 4)
        Rows of (cyan, magenta, yellow, black) values.
    affine : bool
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    numpy.ndarray, shape (N, 3)
    """
    cmyk = numpy.asarray(arr2d, dtype=float)
    k = cmyk[:, 3:4]
    # cmy = cmy*(1-k)+k, then rgb = 1-cmy; vectorized over all rows
    # instead of the previous per-row Python loop
    return 1. - (cmyk[:, :3] * (1. - k) + k)
def imGen(data_decomp_all,ramaninfod,cmykindeces=[3, 2, 1, 0]):
    """Build an RGB image from NMF weights interpreted as CMYK channels.

    Parameters
    ----------
    data_decomp_all : numpy.ndarray, shape (n_spectra, n_components)
        Per-spectrum NMF weights.
    ramaninfod : dict
        Must provide 'xshape' and 'yshape' for the map grid.
    cmykindeces : list of int
        Which component columns to use as C, M, Y, K (kept as a list
        default for backward compatibility; it is never mutated).

    Returns
    -------
    numpy.ndarray, shape (xshape, yshape, 3)
    """
    # numpy.array(...) makes an explicit copy.  The previous code used
    # copy.copy(), but this file never imports the ``copy`` module (only
    # ``from shutil import copy as copyfile``), so that call relied on a
    # star import happening to provide the name.
    cmykvals = numpy.array(data_decomp_all[:, cmykindeces])
    # normalize each channel to its maximum before the CMYK->RGB mapping
    cmykvals /= cmykvals.max(axis=0)[numpy.newaxis, :]
    img = numpy.reshape(rgb_comp(cmykvals),
                        (ramaninfod['xshape'], ramaninfod['yshape'], 3))
    return img
def findEdges(img_gray):
    """Canny edge detection with automatic thresholds.

    The thresholds are placed a fixed fraction around the median
    intensity (automatic thresholding from one of the cv2 tutorials).
    """
    sigma = 0.33
    median_val = np.median(img_gray)
    low = int(max(0, (1.0 - sigma) * median_val))
    high = int(min(255, (1.0 + sigma) * median_val))
    return cv2.Canny(np.uint8(img_gray), low, high)
def findContours(edges):
    """Find external contours in an edge image and derive blob data.

    Returns
    -------
    tuple
        (edge image with contours drawn, filled contour mask, contours,
         list of centroid x values, list of centroid y values)
    """
    # search only the most external convex hulls so that most of the not
    # fully closed samples are detected as well
    im2, contours, hierarchy = cv2.findContours(
        edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
    iWithContour = cv2.drawContours(edges, contours, -1, (255, 20, 100), 5)
    mapimage = np.zeros_like(edges)
    # fill each contour in the mask image
    for i in range(len(contours)):
        cv2.drawContours(mapimage, contours, i, color=255, thickness=-1)
    # compute the centroid of each contour from its image moments
    x = []
    y = []
    for c in contours:
        M = cv2.moments(c)
        try:
            x.append(M['m10'] / (M['m00']))
            y.append(M['m01'] / (M['m00']))
        except ZeroDivisionError:
            # a degenerate contour has zero area; nudge the divisor so a
            # centroid is still produced (yields good results but should
            # be done with caution).  Previously a bare ``except:`` which
            # would also have swallowed unrelated errors.
            x.append(M['m10'] / (M['m00'] + 1e-23))
            y.append(M['m01'] / (M['m00'] + 1e-23))
    return iWithContour, mapimage, contours, x, y
# build the Qt application and the (hidden) main window with its dialogs
mainapp=QApplication(sys.argv)
form=MainMenu(None, execute=False)
#form.show()
#form.setFocus()
#mainapp.exec_()
parseui=form.parseui
alignui=form.alignui
# point the parse dialog at the raw Raman file and its cached info pickle
parseui.rawpathLineEdit.setText(pathd['ramanfile'])
parseui.infopathLineEdit.setText(pathd['infopck'])
parseui.getinfo(ramaninfop=pathd['infopck'], ramanfp=pathd['ramanfile'])#opens or creates
# load the full spectra array from cache, or parse it and cache it
if os.path.isfile(pathd['allspectra']):
    with open(pathd['allspectra'], mode='rb') as f:
        fullramandataarray=numpy.load(f)
elif 1:
    fullramandataarray=parseui.readfullramanarray(pathd['ramanfile'])#opens or creates
    with open(pathd['allspectra'], mode='wb') as f:
        numpy.save(f, fullramandataarray)
ramaninfod=parseui.ramaninfod
#parseui.exec_()
#ramaninfod['number of spectra']
#ramaninfod['xdata']
#ramaninfod['ydata']
#ramaninfod['Wavenumbers_str']
#ramaninfod['Spectrum 0 index']
ramaninfod['xdata']/=1000.
ramaninfod['ydata']/=1000.#convert to mm
# grid geometry of the map (assumes a regular x/y raster - TODO confirm)
ramaninfod['xshape']= len(np.unique(ramaninfod['xdata']))
ramaninfod['yshape']= len(np.unique(ramaninfod['ydata']))
ramaninfod['dx']= (ramaninfod['xdata'].max()-ramaninfod['xdata'].min())/(ramaninfod['xshape']-1)
ramaninfod['dy']= (ramaninfod['ydata'].max()-ramaninfod['ydata'].min())/(ramaninfod['yshape']-1)
# expected number of map pixels per sample in x, in y, and in total
nx=dx_smp/ramaninfod['dx']
ny=dy_smp/ramaninfod['dy']
ntot=nx*ny
# reshape a flat per-spectrum array into image orientation
ramanreshape=lambda arr: np.reshape(arr, (ramaninfod['xshape'], ramaninfod['yshape'])).T[::-1, ::-1]
ramannewshape=(ramaninfod['yshape'], ramaninfod['xshape'])
image_of_x=ramanreshape(ramaninfod['xdata'])
image_of_y=ramanreshape(ramaninfod['ydata'])
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].min(), ramaninfod['ydata'].max()]
#extent=[ramaninfod['xdata'].max(), ramaninfod['xdata'].min(), ramaninfod['ydata'].max(), ramaninfod['ydata'].min()]
# plotting extent in motor coordinates (left, right, bottom, top)
extent=[image_of_x[0, 0], image_of_x[-1, -1], image_of_y[0, 0], image_of_y[-1, -1]]
def ramanimshow(im, **kwargs):
    """imshow preset for Raman maps: lower origin, no smoothing, shared extent."""
    plt.imshow(im, extent=extent, origin='lower', aspect=1,
               interpolation='none', **kwargs)
# load the cached NMF decomposition, or compute and cache it
if os.path.isfile(pathd['nmfdata']):
    with open(pathd['nmfdata'], mode='rb') as f:
        tempd=pickle.load(f)
    data_decomp_all,data_components_all,rgbimagedata=[tempd[k] for k in 'data_decomp_all,data_components_all,rgbimagedata'.split(',')]
else:
    data_decomp_all,data_components_all = doNMF(fullramandataarray,4)
    #rgbimagedata=imGen(data_decomp_all,ramaninfod)
    # map the first 3 NMF components directly onto R, G, B channels,
    # each normalized to its own maximum
    rgbimagedata=np.zeros(ramannewshape+(3,), dtype='float32')
    for i, arr in enumerate(data_decomp_all[:, :3].T):
        rgbimagedata[:, :, i]=np.array([ramanreshape(arr/arr.max())])
    tempd={}
    tempd['data_decomp_all']=data_decomp_all
    tempd['data_components_all']=data_components_all
    tempd['rgbimagedata']=rgbimagedata
    with open(pathd['nmfdata'], mode='wb') as f:
        tempd=pickle.dump(tempd, f)#NOTE(review): pickle.dump returns None, so this rebinds tempd to None; harmless here but likely unintended
# detect sample blobs from the NMF channel images, or load the cached result
if 1 and os.path.isfile(pathd['blobd']):
    with open(pathd['blobd'], mode='rb') as f:
        blobd=pickle.load(f)
else:
    # union of Canny edges over every NMF component image
    edges = np.zeros(ramannewshape, dtype='uint8')
    plt.clf()
    for i in range(data_decomp_all.shape[1]):
        arr=np.uint8(ramanreshape(data_decomp_all[:,i])/data_decomp_all[:,i].max()*254)
        edgetemp=findEdges(arr)
#        plt.imshow(edgetemp)
#        plt.show()
        edges[np.where(edgetemp>0)] = 244
    ramanimshow(edges)
    plt.savefig(pathd['edges'])
    plt.clf()
    im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)
    image_of_inds=ramanreshape(numpy.arange(ramaninfod['number of spectra']))
    mapfill = np.zeros(ramannewshape, dtype='uint8')
    blobd={}
    # one filled mask per contour, then pixel indices and centroid per mask
    l_mask=[cv2.drawContours(np.zeros(ramannewshape, dtype='uint8'), contours, i, color=1, thickness=-1) for i in range(len(contours))]
    l_imageinds=[numpy.where(mask==1) for mask in l_mask]
    l_xycen=np.array([[image_of_x[imageinds].mean(), image_of_y[imageinds].mean()] for imageinds in l_imageinds])
    # pairs of blobs whose centers are much closer than one sample spacing
    indstomerge=sorted([(count2+count+1, count) for count, xy0 in enumerate(l_xycen) for count2, xy1 in enumerate(l_xycen[count+1:]) if ((xy0-xy1)**2).sum()<(dx_smp**2+dy_smp**2)/10.])[::-1]
    #indstomerge has highest index first so merge going down
    for indhigh, indlow in indstomerge:
        imageinds=l_imageinds.pop(indhigh)
        mask=l_mask.pop(indhigh)
        l_mask[indlow][imageinds]=1#update only the masks and then update everything else afterwards
    l_imageinds=[numpy.where(mask==1) for mask in l_mask]
    l_xycen=np.array([[image_of_x[imageinds].mean(), image_of_y[imageinds].mean()] for imageinds in l_imageinds])
    for imageinds, mask in zip(l_imageinds, l_mask):
        indsinblob=sorted(list(image_of_inds[imageinds]))
        # blob extent relative to the nominal sample size; reject blobs
        # that are far too small or too large to be a sample
        relx=(image_of_x[imageinds].max()-image_of_x[imageinds].min())/dx_smp
        rely=(image_of_y[imageinds].max()-image_of_y[imageinds].min())/dy_smp
        if relx<0.5 or relx>1.4 or rely<0.5 or rely>1.4 or len(indsinblob)<ntot*0.5 or len(indsinblob)>ntot*1.5:
            print 'skipped blob that was %.2f, %.2f of expected size with %d pixels' %(relx, rely, len(indsinblob))
            continue
        if numpy.any(mapfill[imageinds]==1):
            print 'overlapping blobs detected'
        xc=image_of_x[imageinds].mean()
        yc=image_of_y[imageinds].mean()
        mapfill[imageinds]=1
        blobd[(xc, yc)]=indsinblob
    plt.clf()
    ramanimshow(mapfill)
    plt.savefig(pathd['mapfill'])
    if show_help_messages:
        messageDialog(form, 'The auto detected and cleaned up blobs will be shown.\nThis is an image using the Raman motor coordinates').exec_()
    plt.show()
    with open(pathd['blobd'], mode='wb') as f:
        pickle.dump(blobd, f)
# hand the detected blobs to the alignment UI and load the platemap
alignui.knownblobsdict=blobd
alignui.openAddFile(p=platemappath)
alignui.image=rgbimagedata
alignui.motimage_extent=extent #left,right,bottom,top in mm
alignui.reloadimagewithextent()
#alignui.plotw_motimage.axes.imshow(alignui.image, origin='lower', interpolation='none', aspect=1, extent=alignui.motimage_extent)
# mark the blob centers on the motor-coordinate image
xarr, yarr=np.array(blobd.keys()).T
alignui.plotw_motimage.axes.plot(xarr, yarr, 'wx', ms=4)
alignui.plotw_motimage.fig.canvas.draw()
if show_help_messages:
    messageDialog(form, 'NMF analysis done and now plotting NMF image\nwith identified samples marked +. User can choose sample_no and \nright click to add calibration points.\nDo this for at least 1 sample marked with +.').exec_()
alignui.exec_()
# push the requested sample list from parameters.py into the alignment UI
alignui.sampleLineEdit.setText(','.join(['%d' %smp for smp in sample_list]))
alignui.addValuesSample()
if show_help_messages:
    messageDialog(form, 'sample_no for export have been added. Check that \nthere are no NaN and if there are manually add calibration points\nas necessary and then remove+re-add the NaN samples.').exec_()
alignui.exec_()
alignui.plotw_motimage.fig.savefig(pathd['alignedsamples'])
with open(pathd['alignedsamplestxt'], mode='w') as f:
    f.write(str(alignui.browser.toPlainText()))
# generate the sample->pixel map file (coordinates converted to mm first)
alignui.openpckinfo(p=pathd['infopck'])
alignui.infox/=1000.
alignui.infoy/=1000.
alignui.perform_genmapfile(p=pathd['map'], **default_sample_blob_dict)
# pixel classes for the overview image: 0 unassigned, 1 background, 2 sample
mapfill2=np.zeros(ramaninfod['number of spectra'], dtype='uint8')
for smp, inds in alignui.smp_inds_list__map:
    mapfill2[inds]=2 if smp>0 else 1
mapfill2=ramanreshape(mapfill2)
plt.clf()
ramanimshow(mapfill2, vmin=0, vmax=2, cmap='gnuplot')
plt.savefig(pathd['samplepixels'])
if show_help_messages:
    messageDialog(form, 'The NMF-identified samples use custom blob shapes and\nthe rest of the requested samples use default sample shape, resulting\nin the following map of pixels that will be exported.').exec_()
plt.show()
# extract the per-sample spectra and save the averaged .rmn files
parseui.savepathLineEdit.setText(pathd['spectrafolder'])
parseui.match(copypath=pathd['map'])
parseui.extract()
parseui.saveave()
#parseui.readresultsfolder()
if show_help_messages:
    messageDialog(form, 'The .rmn files have now been saved, so you can use\nthis next dialog to visualize data or close it to generate\nthe .udi files and open in JCAPDataProcess Visualizer').exec_()
parseui.exec_()
#only initialize visdataDialog so only created when necessary
visui=visdataDialog(form, title='Visualize ANA, EXP, RUN data')
visui.openontheflyfolder(folderpath=pathd['spectrafolder'], plateidstr=plateidstr)
visui.BatchComboBox.setCurrentIndex(2)
visui.runbatchprocess()
# export one .udi with all samples, then one per 3-element subsystem that
# actually contains samples (all excluded elements at zero composition)
savep=pathd['udibasepath']+'all.udi'
visui.get_xy_plate_info_browsersamples(saveudibool=True, ternary_el_inds_for_udi_export=udi_ternary_projection_inds, savep=savep)
for i, indstup in enumerate(itertools.combinations(range(len(visui.ellabels)), 3)):
    excludeinds=[ind for ind in range(len(visui.ellabels)) if not ind in indstup]
    inds_where_excluded_els_all_zero=numpy.where(visui.fomplotd['comps'][:, excludeinds].max(axis=1)==0)[0]
    if len(inds_where_excluded_els_all_zero)==0:
        continue
    smplist=[visui.fomplotd['sample_no'][fomplotind] for fomplotind in inds_where_excluded_els_all_zero]
    visui.remallsamples()
    visui.addrem_select_fomplotdinds(remove=False, smplist=smplist)
    savep=''.join([pathd['udibasepath']]+[visui.ellabels[ind] for ind in indstup]+['.udi'])
    visui.get_xy_plate_info_browsersamples(saveudibool=True, ternary_el_inds_for_udi_export=indstup, savep=savep)
if show_help_messages:
    messageDialog(form, 'udi files now saved and JCAPDataProcess\nVisualizer will be opened for your use.').exec_()
visui.exec_()
if show_help_messages:
    messageDialog(form, 'There is nothing more to do and continuing will raise an error.').exec_()
errorattheend  # deliberate NameError to halt the script here (see message above)
| |
'''
@author: Dallas Fraser
@author: 2019-03-31
@organization: MLSB API
@summary: Tests the importing of team csv
'''
from sqlalchemy import func
from datetime import date
from base64 import b64encode
from api.model import Team
from api.advanced.import_team import parse_lines, BACKGROUND, HEADERS,\
INVALID_ROW, extract_player_information,\
extract_players,\
extract_column_indices_lookup,\
extract_background, TeamList
from api.test.BaseTest import TestSetup, ADMIN, PASSWORD
from api.errors import InvalidField, SponsorDoesNotExist, LeagueDoesNotExist
from api.test.importer.testImportMockSession import TestImportMockSession
# Basic-auth header with admin credentials, attached to test requests
headers = {
    'Authorization': 'Basic %s' % b64encode(bytes(ADMIN + ':' +
                                                  PASSWORD, "utf-8")
                                            ).decode("ascii")
}
# the current year always has league data; 100 never will
VALID_YEAR = date.today().year
INVALID_YEAR = 100
class TestTeamImportParseLines(TestSetup):
    """Tests for parse_lines: background fields, header row, player rows,
    custom delimiters and warning generation."""
    def testParseLines(self):
        """Test a valid file in the standard format"""
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        lines = ["{}:,{},".format(BACKGROUND['sponsor_name'], sponsor),
                 "{}:,{},".format(BACKGROUND['team_color'], color),
                 "{}:,{},".format(BACKGROUND['captain_name'], captain),
                 "{}:,{},".format(BACKGROUND['league_name'], league),
                 "{},{},{}".format(HEADERS['name'],
                                   HEADERS['email'],
                                   HEADERS['gender']),
                 "Test Captain,testcaptainimport@mlsb.ca,M",
                 "Test Girl,testgirlimport@mlsb.ca,F",
                 "Test Boy,testboyimport@mlsb.ca,M"]
        # parse the lines
        result = parse_lines(lines)
        # expecting no warnings
        self.assertEqual(result['warnings'], [], "Expected no warnings")
        # check background
        expected_background = {'sponsor': sponsor,
                               'color': color,
                               'captain': captain,
                               'league': league}
        error = "Failed parsing background"
        self.output(result['background'])
        self.output(expected_background)
        self.assertEqual(result['background'], expected_background, error)
        # check header
        expected_header = [HEADERS['name'],
                           HEADERS['email'],
                           HEADERS['gender']]
        error = "Failed parsing header"
        self.output(result['header'])
        self.output(expected_header)
        self.assertEqual(result['header'], expected_header, error)
        # check the players
        expected_players = [player.split(",") for player in lines[-3:]]
        self.assertEqual(result['players'],
                         expected_players,
                         "Players not returned")
    def testParseLinesOrder(self):
        """Test that the order of a valid file does not matter"""
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        # background lines deliberately interleaved with player rows
        lines = [
            "{},{},{}".format(HEADERS['name'],
                              HEADERS['email'],
                              HEADERS['gender']),
            "{}:,{},".format(BACKGROUND['league_name'], league),
            "Test Captain,testcaptainimport@mlsb.ca,M",
            "{}:,{},".format(BACKGROUND['captain_name'], captain),
            "Test Girl,testgirlimport@mlsb.ca,F",
            "{}:,{},".format(BACKGROUND['team_color'], color),
            "Test Boy,testboyimport@mlsb.ca,M",
            "{}:,{},".format(BACKGROUND['sponsor_name'], sponsor)
        ]
        # parse the lines
        result = parse_lines(lines)
        # expecting no warnings
        self.assertEqual(result['warnings'], [], "Expected no warnings")
        # check background
        expected_background = {'sponsor': sponsor,
                               'color': color,
                               'captain': captain,
                               'league': league}
        error = "Failed parsing background"
        self.output(result['background'])
        self.output(expected_background)
        self.assertEqual(result['background'], expected_background, error)
        # check header
        expected_header = [HEADERS['name'],
                           HEADERS['email'],
                           HEADERS['gender']]
        error = "Failed parsing header"
        self.output(result['header'])
        self.output(expected_header)
        self.assertEqual(result['header'], expected_header, error)
        # check the players keep their original relative order
        expected_players = [["Test Captain", "testcaptainimport@mlsb.ca", "M"],
                            ["Test Girl", "testgirlimport@mlsb.ca", "F"],
                            ["Test Boy", "testboyimport@mlsb.ca", "M"]]
        self.assertEqual(result['players'],
                         expected_players,
                         "Players not returned")
    def testParseLinesDelimiter(self):
        """Test using a different delimiter"""
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        lines = ["{}:|{}|".format(BACKGROUND['sponsor_name'], sponsor),
                 "{}:|{}|".format(BACKGROUND['team_color'], color),
                 "{}:|{}|".format(BACKGROUND['captain_name'], captain),
                 "{}:|{}|".format(BACKGROUND['league_name'], league),
                 "{}|{}|{}".format(HEADERS['name'],
                                   HEADERS['email'],
                                   HEADERS['gender']),
                 "Test Captain|testcaptainimport@mlsb.ca|M",
                 "Test Girl|testgirlimport@mlsb.ca|F",
                 "Test Boy|testboyimport@mlsb.ca|M"]
        # parse the lines using "|" instead of the default ","
        result = parse_lines(lines, delimiter="|")
        # expecting no warnings
        self.assertEqual(result['warnings'], [], "Expected no warnings")
        # check background
        expected_background = {'sponsor': sponsor,
                               'color': color,
                               'captain': captain,
                               'league': league}
        error = "Failed parsing background"
        self.output(result['background'])
        self.output(expected_background)
        self.assertEqual(result['background'], expected_background, error)
        # check header
        expected_header = [HEADERS['name'],
                           HEADERS['email'],
                           HEADERS['gender']]
        error = "Failed parsing header"
        self.output(result['header'])
        self.output(expected_header)
        self.assertEqual(result['header'], expected_header, error)
        # check the players
        expected_players = [player.split("|") for player in lines[-3:]]
        self.assertEqual(result['players'],
                         expected_players,
                         "Players not returned")
    def testParseLinesWarnings(self):
        """Test that unparseable rows produce warnings but do not stop parsing"""
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        lines = ["{}:,{},".format(BACKGROUND['sponsor_name'], sponsor),
                 "{}:,{},".format(BACKGROUND['team_color'], color),
                 "WARNING,WARNING",
                 "{}:,{},".format(BACKGROUND['captain_name'], captain),
                 "{}:,{},".format(BACKGROUND['league_name'], league),
                 "{},{},{}".format(HEADERS['name'],
                                   HEADERS['email'],
                                   HEADERS['gender']),
                 "WARNING,WARNING",
                 "Test Captain,testcaptainimport@mlsb.ca,M",
                 "Test Girl,testgirlimport@mlsb.ca,F",
                 "WARNING,WARNING",
                 "Test Boy,testboyimport@mlsb.ca,M"]
        # parse the lines
        result = parse_lines(lines)
        # check that there are three warnings, one per invalid row
        expected_warnings = [INVALID_ROW.format("WARNING,WARNING"),
                             INVALID_ROW.format("WARNING,WARNING"),
                             INVALID_ROW.format("WARNING,WARNING")]
        self.output(result['warnings'])
        self.output(expected_warnings)
        self.assertEqual(result['warnings'],
                         expected_warnings,
                         "Warnings were not returned")
        # check background
        expected_background = {'sponsor': sponsor,
                               'color': color,
                               'captain': captain,
                               'league': league}
        error = "Failed parsing background"
        self.output(result['background'])
        self.output(expected_background)
        self.assertEqual(result['background'], expected_background, error)
        # check header
        expected_header = [HEADERS['name'],
                           HEADERS['email'],
                           HEADERS['gender']]
        error = "Failed parsing header"
        self.output(result['header'])
        self.output(expected_header)
        self.assertEqual(result['header'], expected_header, error)
        # check the players
        expected_players = [["Test Captain", "testcaptainimport@mlsb.ca", "M"],
                            ["Test Girl", "testgirlimport@mlsb.ca", "F"],
                            ["Test Boy", "testboyimport@mlsb.ca", "M"]]
        self.assertEqual(result['players'],
                         expected_players,
                         "Players not returned")
class TestTeamImportExtracingFunction(TestSetup):
    """Tests for the helpers that extract player data from parsed rows."""

    def testExtractPlayerInformation(self):
        """Test extracting one player's info for new and existing players."""
        # the test data
        name = "Test Import Parse PlayerCaptain"
        email = "testImportParsePlayer@mlsb.ca"
        gender = "M"
        info = [name, email, gender]
        # parse the information using the lookup
        lookup = {"email": 1, "name": 0, "gender": 2}
        result = extract_player_information(info, lookup)
        # expecting the player to not be found but data parsed
        self.assertEqual(result['player_id'],
                         None,
                         "Player id set for non-existent player")
        self.assertEqual(result['name'],
                         name,
                         "Player name was not extracted")
        self.assertEqual(result['email'],
                         email,
                         "Player email was not extracted")
        self.assertEqual(result['gender'],
                         gender,
                         "Player gender was not extracted")
        # now again with player in database
        player = self.add_player(name, email, gender, "", True)
        result = extract_player_information(info, lookup)
        # expecting the player to be found and the data parsed
        self.assertEqual(result['player_id'],
                         player['player_id'],
                         "Player id not set for existing player")
        self.assertEqual(result['name'],
                         name,
                         "Player name was not extracted")
        self.assertEqual(result['email'],
                         email,
                         "Player email was not extracted")
        self.assertEqual(result['gender'],
                         gender,
                         "Player gender was not extracted")

    def testExtractPlayers(self):
        """Test extracting a list of players"""
        # player data to extract
        player_one = {'name': "p1",
                      'email': "testImportPlayersOne@mlsb.ca",
                      'gender': "M"}
        player_two = {'name': "p2",
                      'email': "testImportPlayersTwo@mlsb.ca",
                      'gender': "F"}
        players = [[player_one['email'],
                    player_one['name'],
                    player_one['gender']],
                   [player_two['email'],
                    player_two['name'],
                    player_two['gender']]]
        # extract the two players
        lookup = {"email": 0, "name": 1, "gender": 2}
        result = extract_players(players, lookup)
        # should have two players
        self.assertEqual(len(result['player_info']),
                         2,
                         "Some player was not extracted")
        # should have no warnings
        self.assertEqual(len(result['warnings']),
                         0,
                         "Unexpected warning when extracting players")
        # check each extracted player against the data it came from
        for pos, expected in enumerate([player_one, player_two]):
            extracted = result['player_info'][pos]
            self.assertEqual(extracted['player_id'],
                             None,
                             "Player id set for non-existent player")
            self.assertEqual(extracted['name'],
                             expected['name'],
                             "Player name was not extracted")
            self.assertEqual(extracted['email'],
                             expected['email'],
                             "Player email was not extracted")
            # BUG fix: this assertion previously re-checked the name,
            # leaving gender unverified
            self.assertEqual(extracted['gender'],
                             expected['gender'],
                             "Player gender was not extracted")

    def testExtractPlayersWarnings(self):
        """Test extract list of players that have warnings"""
        # player data to extract: first row has a template "ex." name,
        # second row has an extra column - both are invalid
        player_one = {'name': "ex. p1",
                      'email': "testImportPlayersOne@mlsb.ca",
                      'gender': "M"}
        player_two = {'name': "p2",
                      'email': "testImportPlayersTwo@mlsb.ca",
                      'gender': "F"}
        players = [[player_one['email'],
                    player_one['name'],
                    player_one['gender']],
                   [player_two['email'],
                    player_two['name'],
                    player_two['gender'],
                    "Extra Row"]]
        # extract the two players
        lookup = {"email": 0, "name": 1, "gender": 2}
        result = extract_players(players, lookup)
        # neither invalid row should yield a player
        self.assertEqual(len(result['player_info']),
                         0,
                         "Players extracted from invalid rows")
        # each invalid row should yield a warning
        self.assertEqual(len(result['warnings']),
                         2,
                         "Warnings not given for invalid rows")

    def testExtractColumnIndicesLookup(self):
        """Test extracting the lookup for fields to columns indices"""
        # simple working example - header matching is case-insensitive
        header = ["Email", "name", "GeNdEr"]
        lookup = extract_column_indices_lookup(header)
        self.assertEqual(0, lookup['email'], "Did not extract email header")
        self.assertEqual(1, lookup['name'], "Did not extract name header")
        self.assertEqual(2, lookup['gender'], "Did not extract gender header")
        # a header missing any one required column should raise InvalidField
        bad_headers = [["Email", "name"],
                       ["Email", "gender"],
                       ["name", "gender"]]
        for header in bad_headers:
            with self.assertRaises(InvalidField):
                extract_column_indices_lookup(header)
class TestTeamImportExtractBackground(TestSetup):
    """Tests for extracting the background (sponsor, color, captain, league)."""

    def testExtractBackgroundErrors(self):
        """Test that errors are raised for incomplete background """
        # some data to use throughout the test
        sponsor = "TTIEB Non-existent sponsor"
        color = "Some Color"
        captain = "TTIEB Non-existent player"
        league = "TTIEB Non-existent league"
        # missing background values
        with self.assertRaises(InvalidField):
            extract_background({})
        # template "ex." values for any single field should be rejected.
        # BUG fix: the sponsor case previously duplicated the color case
        # ('color': "ex. color") and never exercised an example sponsor.
        bad_backgrounds = [
            {'sponsor': sponsor, 'color': color, 'captain': captain,
             'league': "ex. League Example"},
            {'sponsor': sponsor, 'color': color, 'captain': "ex. captain",
             'league': league},
            {'sponsor': sponsor, 'color': "ex. color", 'captain': captain,
             'league': league},
            {'sponsor': "ex. sponsor", 'color': color, 'captain': captain,
             'league': league},
        ]
        for background in bad_backgrounds:
            with self.assertRaises(InvalidField):
                extract_background(background)

    def testExtractBackgroundCantFindSponsor(self):
        """Test extract background when cant find sponsor"""
        # the league exists but the sponsor does not
        league = "TTIEB Non-existent league"
        self.add_league(league)
        sponsor = "TTIEB Non-existent sponsor"
        color = "Some Color"
        captain = "TTIEB Non-existent player"
        background = {'sponsor': sponsor,
                      'color': color,
                      'captain': captain,
                      'league': league}
        with self.assertRaises(SponsorDoesNotExist):
            extract_background(background)

    def testExtractBackgroundCantFindLeague(self):
        """ Test extract background when cant find league"""
        # the sponsor exists but the league does not
        league = "TTIEB Non-existent league"
        sponsor = "TTIEB Non-existent sponsor"
        self.add_sponsor(sponsor)
        color = "Some Color"
        captain = "TTIEB Non-existent player"
        background = {'sponsor': sponsor,
                      'color': color,
                      'captain': captain,
                      'league': league}
        with self.assertRaises(LeagueDoesNotExist):
            extract_background(background)

    def testExtractBackgroundNewTeam(self):
        """Test extract background for a new team"""
        # sponsor and league exist but no team does yet
        league = "TTIEB Non-existent league"
        sponsor = "TTIEB Non-existent sponsor"
        self.add_sponsor(sponsor)
        self.add_league(league)
        color = "Some Color"
        captain = "TTIEB Non-existent player"
        background = {'sponsor': sponsor,
                      'color': color,
                      'captain': captain,
                      'league': league}
        # extract the background
        result = extract_background(background)
        # make sure the values match what was given
        self.assertEqual(result['sponsor']['sponsor_name'],
                         sponsor,
                         "Extracted wrong sponsor")
        self.assertEqual(result['league']['league_name'],
                         league,
                         "Extracted wrong league")
        self.assertEqual(result['team']['color'],
                         color,
                         "Extracted wrong color")
        self.assertEqual(result['captain']['player_name'],
                         captain,
                         "Extract wrong captain")

    def testExtractBackgroundExistingTeam(self):
        """Test extract background for an existing team"""
        # sponsor, league and a matching team already exist
        league_name = "TTIEB Non-existent league"
        sponsor_name = "TTIEB Non-existent sponsor"
        color = "Some Color"
        sponsor = self.add_sponsor(sponsor_name)
        league = self.add_league(league_name)
        team = self.add_team(color, sponsor, league, date.today().year)
        captain = "TTIEB Non-existent player"
        background = {'sponsor': sponsor_name,
                      'color': color,
                      'captain': captain,
                      'league': league_name}
        # extract the background
        result = extract_background(background)
        # make sure the values match what was given
        self.assertEqual(result['sponsor']['sponsor_name'],
                         sponsor_name,
                         "Extracted wrong sponsor")
        self.assertEqual(result['league']['league_name'],
                         league_name,
                         "Extracted wrong league")
        self.assertEqual(result['team']['color'],
                         color,
                         "Extracted wrong color")
        # the pre-existing team should be reused, not a new one created
        self.assertEqual(result['team']['team_id'],
                         team["team_id"],
                         "Extracted wrong existing team")
        self.assertEqual(result['captain']['player_name'],
                         captain,
                         "Extract wrong captain")
class TestTeamImportAddTeam(TestSetup):
    """Tests for the end-to-end team import (TeamList.add_team_functional)."""

    def testAddTeamAlreadyExists(self):
        """Import a team whose team record already exists in the league."""
        # the testing lines
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        lines = ["{}:,{},".format(BACKGROUND['sponsor_name'], sponsor),
                 "{}:,{},".format(BACKGROUND['team_color'], color),
                 "{}:,{},".format(BACKGROUND['captain_name'], captain),
                 "{}:,{},".format(BACKGROUND['league_name'], league),
                 "{},{},{}".format(HEADERS['name'],
                                   HEADERS['email'],
                                   HEADERS['gender']),
                 "Test Captain,testcaptainimport@mlsb.ca,M",
                 "Test Girl,testgirlimport@mlsb.ca,F",
                 "Test Boy,testboyimport@mlsb.ca,M"]
        # add the background objects (including the team) ahead of the import
        sponsor = self.add_sponsor(sponsor)
        league = self.add_league(league)
        team = self.add_team(color, sponsor, league, date.today().year)
        # import the test team
        importer = TeamList(lines, session=TestImportMockSession(self))
        importer.add_team_functional()
        self.assertEqual(importer.warnings, [], "Importing team gave warnings")
        # the players should be attached to the pre-existing team
        team = Team.query.get(team['team_id'])
        self.assertEqual(len(team.players),
                         3,
                         "Importing team players were not created")

    def testAddTeam(self):
        """Import a brand-new team (team record is created by the import)."""
        # the testing lines
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        lines = ["{}:,{},".format(BACKGROUND['sponsor_name'], sponsor),
                 "{}:,{},".format(BACKGROUND['team_color'], color),
                 "{}:,{},".format(BACKGROUND['captain_name'], captain),
                 "{}:,{},".format(BACKGROUND['league_name'], league),
                 "{},{},{}".format(HEADERS['name'],
                                   HEADERS['email'],
                                   HEADERS['gender']),
                 "Test Captain,testcaptainimport@mlsb.ca,M",
                 "Test Girl,testgirlimport@mlsb.ca,F",
                 "Test Boy,testboyimport@mlsb.ca,M"]
        # add the needed background (sponsor and league only; no team)
        sponsor = self.add_sponsor(sponsor)
        league = self.add_league(league)
        # import the test team
        importer = TeamList(lines, session=TestImportMockSession(self))
        importer.add_team_functional()
        self.assertEqual(importer.warnings, [], "Importing team gave warnings")
        # look the created team up by color/sponsor/year
        teams = (Team.query
                 .filter(func.lower(Team.color) == func.lower(color))
                 .filter(Team.sponsor_id == sponsor['sponsor_id'])
                 .filter(Team.year == date.today().year)).all()
        self.assertTrue(len(teams) > 0, "Import team was not created")
        team = teams[0]
        self.assertEqual(len(team.players),
                         3,
                         "Importing team players were not created")

    def testAddTeamPlayerAlreadyExists(self):
        """Import a team where one player already exists"""
        # the testing lines
        sponsor = "Test Import Sponsor"
        color = "Blue"
        captain = "Test Captain"
        league = "Test Import League"
        player_email = "testgirlimport@mlsb.ca"
        player_name = "Test Girl"
        player_gender = "F"
        lines = ["{}:,{},".format(BACKGROUND['sponsor_name'], sponsor),
                 "{}:,{},".format(BACKGROUND['team_color'], color),
                 "{}:,{},".format(BACKGROUND['captain_name'], captain),
                 "{}:,{},".format(BACKGROUND['league_name'], league),
                 "{},{},{}".format(HEADERS['name'],
                                   HEADERS['email'],
                                   HEADERS['gender']),
                 "Test Captain,testcaptainimport@mlsb.ca,M",
                 "{},{},{}".format(player_name, player_email, player_gender)]
        # add the needed background, including one pre-existing player
        sponsor = self.add_sponsor(sponsor)
        league = self.add_league(league)
        player = self.add_player(player_name,
                                 player_email,
                                 gender=player_gender)
        # import the test team
        importer = TeamList(lines, session=TestImportMockSession(self))
        importer.add_team_functional()
        self.assertEqual(importer.warnings, [], "Importing team gave warnings")
        teams = (Team.query
                 .filter(func.lower(Team.color) == func.lower(color))
                 .filter(Team.sponsor_id == sponsor['sponsor_id'])
                 .filter(Team.year == date.today().year)).all()
        self.assertTrue(len(teams) > 0, "Import team was not created")
        team = teams[0]
        self.assertEqual(len(team.players),
                         2,
                         "Importing team players were not created")
        # NOTE(review): relies on Player.id matching the 'player_id' value
        # returned by add_player - confirm against the model definition
        player_ids = [p.id for p in team.players]
        self.assertTrue(player['player_id'] in player_ids,
                        "Import team existing player not added")
| |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
import pandas as pd
# this is similar to ContrastResults after t_test, copied and adjusted
class PredictionResults(object):
    """
    Results class for predictions.

    Parameters
    ----------
    predicted_mean : ndarray
        The array containing the prediction means.
    var_pred_mean : ndarray
        The array of the variance of the prediction means.
    var_resid : ndarray
        The array of residual variances.
    df : int
        The degree of freedom used if dist is 't'.
    dist : {'norm', 't', object}
        Either a string for the normal or t distribution or another object
        that exposes a `ppf` method.
    row_labels : list[str]
        Row labels used in summary frame.
    """

    def __init__(self, predicted_mean, var_pred_mean, var_resid,
                 df=None, dist=None, row_labels=None):
        self.predicted_mean = predicted_mean
        self.var_pred_mean = var_pred_mean
        self.df = df
        self.var_resid = var_resid
        self.row_labels = row_labels
        # resolve which distribution supplies the critical values
        if dist == 't':
            self.dist = stats.t
            self.dist_args = (self.df,)
        elif dist is None or dist == 'norm':
            self.dist = stats.norm
            self.dist_args = ()
        else:
            # any object exposing a ``ppf`` method
            self.dist = dist
            self.dist_args = ()

    @property
    def se_obs(self):
        # standard error for a new observation: mean variance plus residual
        return np.sqrt(self.var_pred_mean + self.var_resid)

    @property
    def se_mean(self):
        # standard error of the predicted mean only
        return np.sqrt(self.var_pred_mean)

    def conf_int(self, obs=False, alpha=0.05):
        """
        Returns the confidence interval of the predicted values.

        This is currently only available for t and z tests.

        Parameters
        ----------
        obs : bool, optional
            If True, use the standard error for a new observation
            (prediction interval); otherwise for the mean prediction.
        alpha : float, optional
            The significance level for the confidence interval.
            ie., The default `alpha` = .05 returns a 95% confidence interval.

        Returns
        -------
        ci : ndarray, (k_constraints, 2)
            The array has the lower and the upper limit of the confidence
            interval in the columns.
        """
        std_err = self.se_obs if obs else self.se_mean
        crit = self.dist.ppf(1 - alpha / 2., *self.dist_args)
        half_width = crit * std_err
        return np.column_stack((self.predicted_mean - half_width,
                                self.predicted_mean + half_width))

    def summary_frame(self, alpha=0.05):
        """Return a DataFrame with mean/observation predictions and CIs."""
        # TODO: finish and cleanup
        ci_obs = self.conf_int(alpha=alpha, obs=True)  # need to split
        ci_mean = self.conf_int(alpha=alpha, obs=False)
        to_include = {
            'mean': self.predicted_mean,
            'mean_se': self.se_mean,
            'mean_ci_lower': ci_mean[:, 0],
            'mean_ci_upper': ci_mean[:, 1],
            'obs_ci_lower': ci_obs[:, 0],
            'obs_ci_upper': ci_obs[:, 1],
        }
        self.table = to_include
        # pandas dict does not handle 2d_array
        # data = np.column_stack(list(to_include.values()))
        # names = ....
        return pd.DataFrame(to_include, index=self.row_labels,
                            columns=to_include.keys())
def get_prediction(self, exog=None, transform=True, weights=None,
                   row_labels=None, pred_kwds=None):
    """
    Compute prediction results.

    Intended to be attached to a fitted results instance: ``self`` is a
    results object exposing ``model``, ``params``, ``cov_params``, ``scale``,
    ``cov_type``, ``use_t`` and ``df_resid``.

    Parameters
    ----------
    exog : array_like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    weights : array_like, optional
        Weights interpreted as in WLS, used for the variance of the predicted
        residual.
    row_labels : list
        A list of row labels to use.  If not provided, read from `exog`
        if available.
    pred_kwds : dict, optional
        Some models can take additional keyword arguments, see the predict
        method of the model for the details.

    Returns
    -------
    linear_model.PredictionResults
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and summary
        tables for the prediction of the mean and of new observations.
    """
    # prepare exog and row_labels, based on base Results.predict
    if transform and hasattr(self.model, 'formula') and exog is not None:
        from patsy import dmatrix
        if isinstance(exog, pd.Series):
            # GH-6509: dmatrix mishandles a bare Series; wrap it in a frame
            exog = pd.DataFrame(exog)
        exog = dmatrix(self.model.data.design_info, exog)
    if exog is not None:
        if row_labels is None:
            # reuse the pandas index as row labels when one is present
            row_labels = getattr(exog, 'index', None)
        if callable(row_labels):
            # plain ndarrays have no ``index``; getattr may have picked up
            # an unrelated method instead of an index object
            row_labels = None
        exog = np.asarray(exog)
        if exog.ndim == 1:
            # Params informs whether a row or column vector
            if self.params.shape[0] > 1:
                exog = exog[None, :]
            else:
                exog = exog[:, None]
        exog = np.atleast_2d(exog)  # needed in count model shape[1]
    else:
        # no exog given: predict in-sample using the model's own data
        exog = self.model.exog
        if weights is None:
            weights = getattr(self.model, 'weights', None)
        if row_labels is None:
            row_labels = getattr(self.model.data, 'row_labels', None)
    # need to handle other arrays, TODO: is delegating to model possible ?
    if weights is not None:
        weights = np.asarray(weights)
        # NOTE(review): this raises when weights.shape[0] equals the number
        # of *columns* of exog (shape[1]); a mismatch check against the
        # number of rows (shape[0]) looks intended - confirm upstream.
        if (weights.size > 1 and
                (weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
            raise ValueError('weights has wrong shape')
    if pred_kwds is None:
        pred_kwds = {}
    predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
    covb = self.cov_params()
    # variance of the mean prediction: diag(exog @ covb @ exog.T)
    var_pred_mean = (exog * np.dot(covb, exog.T).T).sum(1)
    var_resid = self.scale  # self.mse_resid / weights
    # TODO: check that we have correct scale, Refactor scale #???
    # special case for now:
    if self.cov_type == 'fixed scale':
        var_resid = self.cov_kwds['scale']
    if weights is not None:
        var_resid /= weights
    dist = ['norm', 't'][self.use_t]
    return PredictionResults(predicted_mean, var_pred_mean, var_resid,
                             df=self.df_resid, dist=dist,
                             row_labels=row_labels)
| |
import time
from datetime import date, timedelta
import pytest
from django.conf import settings
from django.contrib.auth.tokens import (
PasswordResetTokenGenerator, default_token_generator,
)
from django.core import mail as djmail
from django.test import TestCase
from django_otp.oath import TOTP
from django_otp.plugins.otp_totp.models import TOTPDevice
from u2flib_server.jsapi import JSONDict
from pretix.base.models import U2FDevice, User
class LoginFormTest(TestCase):
    """Tests for the control-panel login and logout views."""

    def setUp(self):
        self.user = User.objects.create_user('dummy@dummy.dummy', 'dummy')

    def _login(self, password, url='/control/login'):
        # Submit the login form for the dummy user with the given password.
        return self.client.post(url, {
            'email': 'dummy@dummy.dummy',
            'password': password,
        })

    def test_wrong_credentials(self):
        self.assertEqual(self._login('foo').status_code, 200)

    def test_correct_credentials(self):
        self.assertEqual(self._login('dummy').status_code, 302)

    def test_inactive_account(self):
        self.user.is_active = False
        self.user.save()
        # correct password, but deactivated users must not get in
        self.assertEqual(self._login('dummy').status_code, 200)

    def test_redirect(self):
        response = self._login('dummy', url='/control/login?next=/control/events/')
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/events/', response['Location'])

    def test_redirect_to_2fa(self):
        self.user.require_2fa = True
        self.user.save()
        response = self._login('dummy', url='/control/login?next=/control/events/')
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/login/2fa?next=/control/events/', response['Location'])
        assert self.client.session['pretix_auth_2fa_user'] == self.user.pk
        assert 'pretix_auth_2fa_time' in self.client.session

    def test_logged_in(self):
        response = self._login('dummy', url='/control/login?next=/control/events/')
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/events/', response['Location'])
        # an already-authenticated user is redirected away from the login page
        response = self.client.get('/control/login')
        self.assertEqual(response.status_code, 302)
        response = self.client.get('/control/login?next=/control/events/')
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/events/', response['Location'])

    def test_logout(self):
        self.assertEqual(self._login('dummy').status_code, 302)
        response = self.client.get('/control/logout')
        self.assertEqual(response.status_code, 302)
        # after logout the login page is reachable again
        response = self.client.get('/control/login')
        self.assertEqual(response.status_code, 200)
class RegistrationFormTest(TestCase):
    """Tests for registration-form validation of e-mail and password rules."""

    def _register(self, password, password_repeat):
        # Submit the registration form for the dummy address.
        return self.client.post('/control/register', {
            'email': 'dummy@dummy.dummy',
            'password': password,
            'password_repeat': password_repeat
        })

    def test_different_passwords(self):
        self.assertEqual(self._register('foo', 'foobar').status_code, 200)

    def test_user_attribute_similarity_passwords(self):
        # password too similar to the e-mail address
        self.assertEqual(self._register('dummydummy', 'dummydummy').status_code, 200)

    def test_short_passwords(self):
        self.assertEqual(self._register('foobar', 'foobar').status_code, 200)

    def test_common_passwords(self):
        for pw in ('password', 'football', 'jennifer'):
            self.assertEqual(self._register(pw, pw).status_code, 200)

    def test_numeric_passwords(self):
        for pw in ('12345678', '23423523452345235'):
            self.assertEqual(self._register(pw, pw).status_code, 200)

    def test_empty_passwords(self):
        self.assertEqual(self._register('', '').status_code, 200)
        self.assertEqual(self._register('foobarbar', '').status_code, 200)

    def test_email_duplicate(self):
        self.user = User.objects.create_user('dummy@dummy.dummy', 'dummy')
        self.assertEqual(self._register('foobarbar', 'foobarbar').status_code, 200)

    def test_success(self):
        self.assertEqual(self._register('foobarbar', 'foobarbar').status_code, 302)
@pytest.fixture
def class_monkeypatch(request, monkeypatch):
    """Expose pytest's ``monkeypatch`` fixture on the requesting class.

    Lets unittest-style TestCase classes (applied via
    ``@pytest.mark.usefixtures``) reach it as ``self.monkeypatch``.
    """
    request.cls.monkeypatch = monkeypatch
@pytest.mark.usefixtures("class_monkeypatch")
class Login2FAFormTest(TestCase):
    """Tests for the second-factor (TOTP / U2F) step of the login flow."""

    def setUp(self):
        self.user = User.objects.create_user('dummy@dummy.dummy', 'dummy', require_2fa=True)
        # pre-populate a valid first-factor 2FA session
        session = self.client.session
        session['pretix_auth_2fa_user'] = self.user.pk
        session['pretix_auth_2fa_time'] = str(int(time.time()))
        session.save()

    def test_invalid_session(self):
        """A 2FA session referring to an unknown user bounces back to login."""
        session = self.client.session
        session['pretix_auth_2fa_user'] = self.user.pk + 12
        session['pretix_auth_2fa_time'] = str(int(time.time()))
        session.save()
        response = self.client.get('/control/login/2fa')
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/login', response['Location'])

    def test_expired_session(self):
        """A 2FA session older than the allowed window bounces back to login."""
        # BUG fix: previously this also used an invalid user id (pk + 12),
        # so the expiry check was never actually exercised; use the real
        # user with an hour-old timestamp instead.
        session = self.client.session
        session['pretix_auth_2fa_user'] = self.user.pk
        session['pretix_auth_2fa_time'] = str(int(time.time()) - 3600)
        session.save()
        response = self.client.get('/control/login/2fa')
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/login', response['Location'])

    def test_totp_invalid(self):
        """A wrong TOTP token redirects back to the 2FA form."""
        response = self.client.get('/control/login/2fa')
        assert 'token' in response.rendered_content
        d = TOTPDevice.objects.create(user=self.user, name='test')
        totp = TOTP(d.bin_key, d.step, d.t0, d.digits, d.drift)
        totp.time = time.time()
        # offset the current token so it cannot verify
        # (the pointless ``.format(d.pk)`` on a placeholder-free URL is gone)
        response = self.client.post('/control/login/2fa', {
            'token': str(totp.token() + 2)
        })
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/login/2fa', response['Location'])

    def test_totp_valid(self):
        """A correct TOTP token completes login and honours ``?next=``."""
        response = self.client.get('/control/login/2fa')
        assert 'token' in response.rendered_content
        d = TOTPDevice.objects.create(user=self.user, name='test')
        totp = TOTP(d.bin_key, d.step, d.t0, d.digits, d.drift)
        totp.time = time.time()
        response = self.client.post('/control/login/2fa?next=/control/events/', {
            'token': str(totp.token())
        })
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/events/', response['Location'])

    def test_u2f_invalid(self):
        """A failing U2F verification redirects back to the 2FA form."""
        def fail(*args, **kwargs):
            raise Exception("Failed")
        m = self.monkeypatch
        m.setattr("u2flib_server.u2f.verify_authenticate", fail)
        m.setattr("u2flib_server.u2f.start_authenticate",
                  lambda *args, **kwargs: JSONDict({'authenticateRequests': []}))
        U2FDevice.objects.create(user=self.user, name='test', json_data="{}")
        response = self.client.get('/control/login/2fa')
        assert 'token' in response.rendered_content
        response = self.client.post('/control/login/2fa', {
            'token': '{"response": "true"}'
        })
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/login/2fa', response['Location'])
        m.undo()

    def test_u2f_valid(self):
        """A successful U2F verification completes the login."""
        m = self.monkeypatch
        m.setattr("u2flib_server.u2f.verify_authenticate", lambda *args, **kwargs: True)
        m.setattr("u2flib_server.u2f.start_authenticate",
                  lambda *args, **kwargs: JSONDict({'authenticateRequests': []}))
        U2FDevice.objects.create(user=self.user, name='test', json_data="{}")
        response = self.client.get('/control/login/2fa')
        assert 'token' in response.rendered_content
        response = self.client.post('/control/login/2fa', {
            'token': '{"response": "true"}'
        })
        self.assertEqual(response.status_code, 302)
        self.assertIn('/control/', response['Location'])
        m.undo()
class PasswordRecoveryFormTest(TestCase):
def setUp(self):
super().setUp()
self.user = User.objects.create_user('demo@demo.dummy', 'demo')
def test_unknown(self):
response = self.client.post('/control/forgot', {
'email': 'dummy@dummy.dummy',
})
self.assertEqual(response.status_code, 200)
def test_email_sent(self):
djmail.outbox = []
response = self.client.post('/control/forgot', {
'email': 'demo@demo.dummy',
})
self.assertEqual(response.status_code, 302)
assert len(djmail.outbox) == 1
assert djmail.outbox[0].to == [self.user.email]
assert "recover?id=%d&token=" % self.user.id in djmail.outbox[0].body
def test_recovery_unknown_user(self):
response = self.client.get('/control/forgot/recover?id=0&token=foo')
self.assertEqual(response.status_code, 302)
response = self.client.post(
'/control/forgot/recover?id=0&token=foo',
{
'password': 'foobar',
'password_repeat': 'foobar'
}
)
self.assertEqual(response.status_code, 302)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_invalid_token(self):
response = self.client.get('/control/forgot/recover?id=%d&token=foo' % self.user.id)
self.assertEqual(response.status_code, 302)
response = self.client.post(
'/control/forgot/recover?id=%d&token=foo' % self.user.id,
{
'password': 'foobarbar',
'password_repeat': 'foobarbar'
}
)
self.assertEqual(response.status_code, 302)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_expired_token(self):
class Mocked(PasswordResetTokenGenerator):
def _today(self):
return date.today() - timedelta(settings.PASSWORD_RESET_TIMEOUT_DAYS + 1)
generator = Mocked()
token = generator.make_token(self.user)
response = self.client.get(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token)
)
self.assertEqual(response.status_code, 302)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': 'foobarbar',
'password_repeat': 'foobarbar'
}
)
self.assertEqual(response.status_code, 302)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_valid_token_success(self):
token = default_token_generator.make_token(self.user)
response = self.client.get('/control/forgot/recover?id=%d&token=%s' % (self.user.id, token))
self.assertEqual(response.status_code, 200)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': 'foobarbar',
'password_repeat': 'foobarbar'
}
)
self.assertEqual(response.status_code, 302)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('foobarbar'))
def test_recovery_valid_token_empty_passwords(self):
token = default_token_generator.make_token(self.user)
response = self.client.get('/control/forgot/recover?id=%d&token=%s' % (self.user.id, token))
self.assertEqual(response.status_code, 200)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': 'foobarbar',
'password_repeat': ''
}
)
self.assertEqual(response.status_code, 200)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
token = default_token_generator.make_token(self.user)
response = self.client.get('/control/forgot/recover?id=%d&token=%s' % (self.user.id, token))
self.assertEqual(response.status_code, 200)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': '',
'password_repeat': 'foobarbar'
}
)
self.assertEqual(response.status_code, 200)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_valid_token_different_passwords(self):
token = default_token_generator.make_token(self.user)
response = self.client.get('/control/forgot/recover?id=%d&token=%s' % (self.user.id, token))
self.assertEqual(response.status_code, 200)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': 'foo',
'password_repeat': 'foobar'
}
)
self.assertEqual(response.status_code, 200)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_valid_token_user_attribute_similarity_passwords(self):
token = default_token_generator.make_token(self.user)
response = self.client.get('/control/forgot/recover?id=%d&token=%s' % (self.user.id, token))
self.assertEqual(response.status_code, 200)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': 'dummydemo',
'password_repeat': 'dummydemo'
}
)
self.assertEqual(response.status_code, 200)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_valid_token_short_passwords(self):
token = default_token_generator.make_token(self.user)
response = self.client.get('/control/forgot/recover?id=%d&token=%s' % (self.user.id, token))
self.assertEqual(response.status_code, 200)
response = self.client.post(
'/control/forgot/recover?id=%d&token=%s' % (self.user.id, token),
{
'password': 'foobar',
'password_repeat': 'foobar'
}
)
self.assertEqual(response.status_code, 200)
self.user = User.objects.get(id=self.user.id)
self.assertTrue(self.user.check_password('demo'))
def test_recovery_valid_token_common_passwords(self):
    """A password from the common-passwords list must be rejected."""
    token = default_token_generator.make_token(self.user)
    url = '/control/forgot/recover?id=%d&token=%s' % (self.user.id, token)
    self.assertEqual(self.client.get(url).status_code, 200)
    response = self.client.post(url, {
        'password': 'football',
        'password_repeat': 'football',
    })
    self.assertEqual(response.status_code, 200)
    # reload from the database and confirm the original password survived
    self.user = User.objects.get(id=self.user.id)
    self.assertTrue(self.user.check_password('demo'))
def test_recovery_valid_token_numeric_passwords(self):
    """An all-numeric password must be rejected and the old one kept."""
    token = default_token_generator.make_token(self.user)
    url = '/control/forgot/recover?id=%d&token=%s' % (self.user.id, token)
    self.assertEqual(self.client.get(url).status_code, 200)
    response = self.client.post(url, {
        'password': '12345678',
        'password_repeat': '12345678',
    })
    self.assertEqual(response.status_code, 200)
    # reload from the database and confirm the original password survived
    self.user = User.objects.get(id=self.user.id)
    self.assertTrue(self.user.check_password('demo'))
| |
# -*- coding: utf-8 -*-
"""Multivariate Normal and t distributions
Created on Sat May 28 15:38:23 2011
@author: Josef Perktold
TODO:
* renaming,
- after adding t distribution, cov doesn't make sense for Sigma DONE
- should mean also be renamed to mu, if there will be distributions
with mean != mu
* not sure about corner cases
- behavior with (almost) singular sigma or transforms
- df <= 2, is everything correct if variance is not finite or defined ?
* check to return possibly univariate distribution for marginals or conditional
distributions, does univariate special case work? seems ok for conditional
* are all the extra transformation methods useful outside of testing ?
- looks like I have some mixup in definitions of standardize, normalize
* new methods marginal, conditional, ... just added, typos ?
- largely tested for MVNormal, not yet for MVT DONE
* conditional: reusing, vectorizing, should we reuse a projection matrix or
allow for a vectorized, conditional_mean similar to OLS.predict
* add additional things similar to LikelihoodModelResults? quadratic forms,
F distribution, and others ???
* add Delta method for nonlinear functions here, current function is hidden
somewhere in miscmodels
* raise ValueErrors for wrong input shapes, currently only partially checked
* quantile method (ppf for equal bounds for multiple testing) is missing
http://svitsrv25.epfl.ch/R-doc/library/mvtnorm/html/qmvt.html seems to use
just a root finder for inversion of cdf
* normalize has ambiguous definition, and mixing it up in different versions
std from sigma or std from cov ?
I would like to get what I need for mvt-cdf, or not
univariate standard t distribution has scale=1 but std>1
FIXED: add std_sigma, and normalize uses std_sigma
* more work: bivariate distributions,
inherit from multivariate but overwrite some methods for better efficiency,
e.g. cdf and expect
I kept the original MVNormal0 class as reference, can be deleted
See Also
--------
sandbox/examples/ex_mvelliptical.py
Examples
--------
Note, several parts of these examples are random and the numbers will not be
(exactly) the same.
>>> import numpy as np
>>> import statsmodels.sandbox.distributions.mv_normal as mvd
>>>
>>> from numpy.testing import assert_array_almost_equal
>>>
>>> cov3 = np.array([[ 1. , 0.5 , 0.75],
... [ 0.5 , 1.5 , 0.6 ],
... [ 0.75, 0.6 , 2. ]])
>>> mu = np.array([-1, 0.0, 2.0])
multivariate normal distribution
--------------------------------
>>> mvn3 = mvd.MVNormal(mu, cov3)
>>> mvn3.rvs(size=3)
array([[-0.08559948, -1.0319881 , 1.76073533],
[ 0.30079522, 0.55859618, 4.16538667],
[-1.36540091, -1.50152847, 3.87571161]])
>>> mvn3.std
array([ 1. , 1.22474487, 1.41421356])
>>> a = [0.0, 1.0, 1.5]
>>> mvn3.pdf(a)
0.013867410439318712
>>> mvn3.cdf(a)
0.31163181123730122
Monte Carlo integration
>>> mvn3.expect_mc(lambda x: (x<a).all(-1), size=100000)
0.30958999999999998
>>> mvn3.expect_mc(lambda x: (x<a).all(-1), size=1000000)
0.31197399999999997
multivariate t distribution
---------------------------
>>> mvt3 = mvd.MVT(mu, cov3, 4)
>>> mvt3.rvs(size=4)
array([[-0.94185437, 0.3933273 , 2.40005487],
[ 0.07563648, 0.06655433, 7.90752238],
[ 1.06596474, 0.32701158, 2.03482886],
[ 3.80529746, 7.0192967 , 8.41899229]])
>>> mvt3.pdf(a)
0.010402959362646937
>>> mvt3.cdf(a)
0.30269483623249821
>>> mvt3.expect_mc(lambda x: (x<a).all(-1), size=1000000)
0.30271199999999998
>>> mvt3.cov
array([[ 2. , 1. , 1.5],
[ 1. , 3. , 1.2],
[ 1.5, 1.2, 4. ]])
>>> mvt3.corr
array([[ 1. , 0.40824829, 0.53033009],
[ 0.40824829, 1. , 0.34641016],
[ 0.53033009, 0.34641016, 1. ]])
get normalized distribution
>>> mvt3n = mvt3.normalized()
>>> mvt3n.sigma
array([[ 1. , 0.40824829, 0.53033009],
[ 0.40824829, 1. , 0.34641016],
[ 0.53033009, 0.34641016, 1. ]])
>>> mvt3n.cov
array([[ 2. , 0.81649658, 1.06066017],
[ 0.81649658, 2. , 0.69282032],
[ 1.06066017, 0.69282032, 2. ]])
What's currently there?
>>> [i for i in dir(mvn3) if not i[0]=='_']
['affine_transformed', 'cdf', 'cholsigmainv', 'conditional', 'corr', 'cov',
'expect_mc', 'extra_args', 'logdetsigma', 'logpdf', 'marginal', 'mean',
'normalize', 'normalized', 'normalized2', 'nvars', 'pdf', 'rvs', 'sigma',
'sigmainv', 'standardize', 'standardized', 'std', 'std_sigma', 'whiten']
>>> [i for i in dir(mvt3) if not i[0]=='_']
['affine_transformed', 'cdf', 'cholsigmainv', 'corr', 'cov', 'df', 'expect_mc',
'extra_args', 'logdetsigma', 'logpdf', 'marginal', 'mean', 'normalize',
'normalized', 'normalized2', 'nvars', 'pdf', 'rvs', 'sigma', 'sigmainv',
'standardize', 'standardized', 'std', 'std_sigma', 'whiten']
"""
import numpy as np
from statsmodels.sandbox.distributions.multivariate import (
mvstdtprob, mvstdnormcdf, mvnormcdf)
def expect_mc(dist, func=lambda x: 1, size=50000):
    '''calculate expected value of function by Monte Carlo integration

    Draws ``size`` samples from ``dist`` and averages ``func`` over them.
    No pdf weighting is needed because the draws already come from ``dist``.

    Parameters
    ----------
    dist : distribution instance
        needs to have rvs defined as a method for drawing random numbers
    func : callable
        function for which expectation is calculated, this function needs to
        be vectorized, integration is over axis=0
    size : int
        number of random samples to use in the Monte Carlo integration

    Notes
    -----
    this doesn't batch

    Returns
    -------
    expected value : ndarray
        return of function func integrated over axis=0 by MonteCarlo, this will
        have the same shape as the return of func without axis=0

    Examples
    --------
    integrate probability that both observations are negative

    >>> mvn = mve.MVNormal([0,0],2.)
    >>> mve.expect_mc(mvn, lambda x: (x<np.array([0,0])).all(-1), size=100000)
    0.25306000000000001

    get tail probabilities of marginal distribution (should be 0.1)

    >>> c = stats.norm.isf(0.05, scale=np.sqrt(2.))
    >>> expect_mc(mvn, lambda x: (np.abs(x)>np.array([c, c])), size=100000)
    array([ 0.09969,  0.0986 ])

    or calling the method

    >>> mvn.expect_mc(lambda x: (np.abs(x)>np.array([c, c])), size=100000)
    array([ 0.09937,  0.10075])

    '''
    draws = dist.rvs(size=size)
    values = func(draws)  # dist.pdf weighting intentionally omitted
    return values.mean(0)
def expect_mc_bounds(dist, func=lambda x: 1, size=50000, lower=None, upper=None,
                     conditional=False, overfact=1.2):
    '''calculate expected value of function by Monte Carlo integration

    Parameters
    ----------
    dist : distribution instance
        needs to have rvs defined as a method for drawing random numbers
    func : callable
        function for which expectation is calculated, this function needs to
        be vectorized, integration is over axis=0
    size : int
        minimum number of random samples to use in the Monte Carlo integration,
        the actual number used can be larger because of oversampling.
    lower : None or array_like
        lower integration bounds, if None, then it is set to -inf
    upper : None or array_like
        upper integration bounds, if None, then it is set to +inf
    conditional : bool
        If true, then the expectation is conditional on being in within
        [lower, upper] bounds, otherwise it is unconditional
    overfact : float
        oversampling factor, the actual number of random variables drawn in
        each attempt are overfact * remaining draws. Extra draws are also
        used in the integration.

    Notes
    -----
    this doesn't batch

    Returns
    -------
    expected value : ndarray
        return of function func integrated over axis=0 by MonteCarlo, this will
        have the same shape as the return of func without axis=0

    Examples
    --------
    >>> mvn = mve.MVNormal([0,0],2.)
    >>> mve.expect_mc_bounds(mvn, lambda x: np.ones(x.shape[0]),
                                lower=[-10,-10],upper=[0,0])
    0.24990416666666668

    get 3 marginal moments with one integration

    >>> mvn = mve.MVNormal([0,0],1.)
    >>> mve.expect_mc_bounds(mvn, lambda x: np.dstack([x, x**2, x**3, x**4]),
        lower=[-np.inf,-np.inf], upper=[np.inf,np.inf])
    array([[  2.88629497e-03,   9.96706297e-01,  -2.51005344e-03,
              2.95240921e+00],
           [ -5.48020088e-03,   9.96004409e-01,  -2.23803072e-02,
              2.96289203e+00]])
    >>> from scipy import stats
    >>> [stats.norm.moment(i) for i in [1,2,3,4]]
    [0.0, 1.0, 0.0, 3.0]

    '''
    #call rvs once to find length of random vector
    rvsdim = dist.rvs(size=1).shape[-1]
    if lower is None:
        lower = -np.inf * np.ones(rvsdim)
    else:
        lower = np.asarray(lower)
    if upper is None:
        upper = np.inf * np.ones(rvsdim)
    else:
        upper = np.asarray(upper)

    def fun(x):
        return func(x)  # * dist.pdf(x)

    rvsli = []
    used = 0   #number of in-bounds draws accumulated so far
    total = 0  #number of draws generated, for the acceptance ratio
    while True:
        remain = size - used  #just a temp variable
        drawn = int(remain * overfact)
        rvs = dist.rvs(size=drawn)
        # BUG FIX: the original added int(size * overfact) each iteration,
        # overcounting `total` whenever more than one pass was needed and
        # biasing the unconditional estimate downward.
        total += drawn

        rvsok = rvs[((rvs >= lower) & (rvs <= upper)).all(-1)]
        #guard against shape problems if only 1 random vector survives
        rvsok = np.atleast_2d(rvsok)
        used += rvsok.shape[0]

        rvsli.append(rvsok)  #extras beyond `size` are kept and used
        # debug `print` statements removed: library code should not write
        # progress output to stdout
        if used >= size:
            break

    rvs = np.vstack(rvsli)
    assert used == rvs.shape[0]  #safety check
    mean_conditional = fun(rvs).mean(0)
    if conditional:
        return mean_conditional
    else:
        #rescale by the acceptance probability to get the unconditional value
        return mean_conditional * (used * 1. / total)
def bivariate_normal(x, mu, cov):
    """
    Bivariate Gaussian density evaluated at the points in *x*.

    See `bivariate normal
    <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
    at mathworld.
    """
    xs, ys = np.transpose(x)
    mean_x, mean_y = mu
    # flatten the 2x2 covariance; the off-diagonal appears twice
    var_x, cov_xy, _, var_y = np.ravel(cov)
    std_x = np.sqrt(var_x)
    std_y = np.sqrt(var_y)
    dx = xs - mean_x
    dy = ys - mean_y
    rho = cov_xy / (std_x * std_y)
    quad = dx**2 / std_x**2 + dy**2 / std_y**2 - 2 * rho * dx * dy / (std_x * std_y)
    norm_const = 2 * np.pi * std_x * std_y * np.sqrt(1 - rho**2)
    return np.exp(-quad / (2 * (1 - rho**2))) / norm_const
class BivariateNormal(object):
    '''Bivariate normal distribution with numerical-integration helpers.

    Parameters
    ----------
    mean : array_like, length 2
        mean of the two variables
    cov : array_like, 2x2
        covariance matrix
    '''
    #TODO: make integration limits more flexible
    #      or normalize before integration

    def __init__(self, mean, cov):
        # BUG FIX: the original assigned ``self.mean = mu``, silently picking
        # up a module-level global (or raising NameError) instead of the
        # constructor argument.
        self.mean = mean
        self.cov = cov
        self.sigmax, self.sigmaxy, tmp, self.sigmay = np.ravel(cov)
        self.nvars = 2

    def rvs(self, size=1):
        '''draw random samples, returned array has shape (size, 2)'''
        return np.random.multivariate_normal(self.mean, self.cov, size=size)

    def pdf(self, x):
        '''probability density evaluated at x'''
        return bivariate_normal(x, self.mean, self.cov)

    def logpdf(self, x):
        #TODO: replace this with a direct, numerically safer computation
        return np.log(self.pdf(x))

    def cdf(self, x):
        '''P(X <= x), computed by numerical integration over a fixed box'''
        return self.expect(upper=x)

    def expect(self, func=lambda x: 1, lower=(-10, -10), upper=(10, 10)):
        '''E[func(X)] over [lower, upper], via scipy.integrate.dblquad

        Returns the (value, abserr) tuple produced by dblquad.
        '''
        def fun(x, y):
            x = np.column_stack((x, y))
            return func(x) * self.pdf(x)
        from scipy.integrate import dblquad
        return dblquad(fun, lower[0], upper[0], lambda y: lower[1],
                       lambda y: upper[1])

    def kl(self, other):
        '''Kullback-Leibler divergence between this and another distribution

        int f(x) (log f(x) - log g(x)) dx

        where f is the pdf of self, and g is the pdf of other

        uses double integration with scipy.integrate.dblquad

        limits currently hardcoded
        '''
        fun = lambda x: self.logpdf(x) - other.logpdf(x)
        return self.expect(fun)

    def kl_mc(self, other, size=500000):
        '''Monte Carlo estimate of the Kullback-Leibler divergence'''
        fun = lambda x: self.logpdf(x) - other.logpdf(x)
        rvs = self.rvs(size=size)
        return fun(rvs).mean()
class MVElliptical(object):
    '''Base Class for multivariate elliptical distributions, normal and t

    contains common initialization, and some common methods
    subclass needs to implement at least rvs and logpdf methods

    '''
    #getting common things between normal and t distribution

    def __init__(self, mean, sigma, *args, **kwds):
        '''initialize instance

        Parameters
        ----------
        mean : array_like
            parameter mu (might be renamed), for symmetric distributions this
            is the mean
        sigma : array_like, 2d
            dispersion matrix, covariance matrix in normal distribution, but
            only proportional to covariance matrix in t distribution;
            a scalar or a 1d array are also accepted (see branches below)
        args : list
            distribution specific arguments, e.g. df for t distribution
        kwds : dict
            currently not used

        '''
        self.extra_args = []
        self.mean = np.asarray(mean)
        self.sigma = sigma = np.asarray(sigma)
        sigma = np.squeeze(sigma)
        self.nvars = nvars = len(mean)
        #self.covchol = np.linalg.cholesky(sigma)

        #in the following sigma is original, self.sigma is full matrix
        if sigma.shape == ():
            #iid: scalar sigma, independent components with common dispersion
            self.sigma = np.eye(nvars) * sigma
            self.sigmainv = np.eye(nvars) / sigma
            self.cholsigmainv = np.eye(nvars) / np.sqrt(sigma)
        elif (sigma.ndim == 1) and (len(sigma) == nvars):
            #independent heteroscedastic: 1d sigma holds the diagonal
            self.sigma = np.diag(sigma)
            self.sigmainv = np.diag(1. / sigma)
            self.cholsigmainv = np.diag( 1. / np.sqrt(sigma))
        elif sigma.shape == (nvars, nvars): #python tuple comparison
            #general full dispersion matrix
            #pinv means an (almost) singular sigma does not raise here;
            #NOTE(review): behavior with singular sigma is not otherwise checked
            self.sigmainv = np.linalg.pinv(sigma)
            self.cholsigmainv = np.linalg.cholesky(self.sigmainv).T
        else:
            raise ValueError('sigma has invalid shape')

        #store logdetsigma for logpdf
        self.logdetsigma = np.log(np.linalg.det(self.sigma))

    def rvs(self, size=1):
        '''random variable

        Parameters
        ----------
        size : int or tuple
            the number and shape of random variables to draw.

        Returns
        -------
        rvs : ndarray
            the returned random variables with shape given by size and the
            dimension of the multivariate random vector as additional last
            dimension

        '''
        #subclasses implement the distribution specific sampling
        raise NotImplementedError

    def logpdf(self, x):
        '''logarithm of probability density function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector

        Returns
        -------
        logpdf : float or array
            probability density value of each random vector


        this should be made to work with 2d x,
        with multivariate normal vector in each row and iid across rows
        doesn't work now because of dot in whiten

        '''
        raise NotImplementedError

    def cdf(self, x, **kwds):
        '''cumulative distribution function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector
        kwds : dict
            contains options for the numerical calculation of the cdf

        Returns
        -------
        cdf : float or array
            probability of each random vector being element-wise <= x

        '''
        raise NotImplementedError

    def affine_transformed(self, shift, scale_matrix):
        '''affine transformation define in subclass because of distribution
        specific restrictions'''
        #implemented in subclass at least for now
        raise NotImplementedError

    def whiten(self, x):
        """
        whiten the data by linear transformation

        Parameters
        -----------
        x : array-like, 1d or 2d
            Data to be whitened, if 2d then each row contains an independent
            sample of the multivariate random vector

        Returns
        -------
        np.dot(x, self.cholsigmainv.T)

        Notes
        -----
        This only does rescaling, it doesn't subtract the mean, use standardize
        for this instead

        See Also
        --------
        standardize : subtract mean and rescale to standardized random variable.

        """
        x = np.asarray(x)
        return np.dot(x, self.cholsigmainv.T)

    def pdf(self, x):
        '''probability density function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector

        Returns
        -------
        pdf : float or array
            probability density value of each random vector

        '''
        #defined in terms of logpdf, which subclasses provide
        return np.exp(self.logpdf(x))

    def standardize(self, x):
        '''standardize the random variable, i.e. subtract mean and whiten

        Parameters
        -----------
        x : array-like, 1d or 2d
            Data to be whitened, if 2d then each row contains an independent
            sample of the multivariate random vector

        Returns
        -------
        np.dot(x - self.mean, self.cholsigmainv.T)

        See Also
        --------
        whiten : rescale random variable, standardize without subtracting mean.

        '''
        return self.whiten(x - self.mean)

    def standardized(self):
        '''return new standardized MVNormal instance
        '''
        return self.affine_transformed(-self.mean, self.cholsigmainv)

    def normalize(self, x):
        '''normalize the random variable, i.e. subtract mean and rescale

        The distribution will have zero mean and sigma equal to correlation

        Parameters
        -----------
        x : array-like, 1d or 2d
            Data to be whitened, if 2d then each row contains an independent
            sample of the multivariate random vector

        Returns
        -------
        (x - self.mean)/std_sigma

        See Also
        --------
        whiten : rescale random variable, standardize without subtracting mean.

        '''
        std_ = np.atleast_2d(self.std_sigma)
        return (x - self.mean)/std_ #/std_.T

    def normalized(self, demeaned=True):
        '''return a normalized distribution where sigma=corr

        if demeaned is True, then mean will be set to zero

        '''
        if demeaned:
            mean_new = np.zeros_like(self.mean)
        else:
            mean_new = self.mean / self.std_sigma
        sigma_new = self.corr
        #forward distribution specific parameters, e.g. df for MVT
        args = [getattr(self, ea) for ea in self.extra_args]
        return self.__class__(mean_new, sigma_new, *args)

    def normalized2(self, demeaned=True):
        '''return a normalized distribution where sigma=corr

        second implementation for testing affine transformation
        '''
        if demeaned:
            shift = -self.mean
        else:
            shift = self.mean * (1. / self.std_sigma - 1.)
        return self.affine_transformed(shift, np.diag(1. / self.std_sigma))
        #the following "standardizes" cov instead
        #return self.affine_transformed(shift, self.cholsigmainv)

    @property
    def std(self):
        '''standard deviation, square root of diagonal elements of cov
        '''
        return np.sqrt(np.diag(self.cov))

    @property
    def std_sigma(self):
        '''standard deviation, square root of diagonal elements of sigma
        '''
        return np.sqrt(np.diag(self.sigma))

    @property
    def corr(self):
        '''correlation matrix'''
        return self.cov / np.outer(self.std, self.std)

    #attach module level Monte Carlo integration helper as a method
    expect_mc = expect_mc

    def marginal(self, indices):
        '''return marginal distribution for variables given by indices

        this should be correct for normal and t distribution

        Parameters
        ----------
        indices : array_like, int
            list of indices of variables in the marginal distribution

        Returns
        -------
        mvdist : instance
            new instance of the same multivariate distribution class that
            contains the marginal distribution of the variables given in
            indices

        '''
        indices = np.asarray(indices)
        mean_new = self.mean[indices]
        #fancy indexing with a column/row pair selects the submatrix
        sigma_new = self.sigma[indices[:,None], indices]
        args = [getattr(self, ea) for ea in self.extra_args]
        return self.__class__(mean_new, sigma_new, *args)
#parts taken from linear_model, but heavy adjustments
class MVNormal0(object):
    '''Class for Multivariate Normal Distribution

    original full version, kept for testing, new version inherits from
    MVElliptical

    uses Cholesky decomposition of covariance matrix for the transformation
    of the data

    '''

    def __init__(self, mean, cov):
        #NOTE(review): unlike MVElliptical, mean is stored as given here
        #(no np.asarray conversion)
        self.mean = mean
        self.cov = cov = np.asarray(cov)
        cov = np.squeeze(cov)
        self.nvars = nvars = len(mean)

        #in the following cov is original, self.cov is full matrix
        if cov.shape == ():
            #iid: scalar cov, independent components with common variance
            self.cov = np.eye(nvars) * cov
            self.covinv = np.eye(nvars) / cov
            self.cholcovinv = np.eye(nvars) / np.sqrt(cov)
        elif (cov.ndim == 1) and (len(cov) == nvars):
            #independent heteroscedastic: 1d cov holds the diagonal
            self.cov = np.diag(cov)
            self.covinv = np.diag(1. / cov)
            self.cholcovinv = np.diag( 1. / np.sqrt(cov))
        elif cov.shape == (nvars, nvars): #python tuple comparison
            #general full covariance matrix
            self.covinv = np.linalg.pinv(cov)
            self.cholcovinv = np.linalg.cholesky(self.covinv).T
        else:
            raise ValueError('cov has invalid shape')

        #store logdetcov for logpdf
        self.logdetcov = np.log(np.linalg.det(self.cov))

    def whiten(self, x):
        """
        whiten the data by linear transformation

        Parameters
        -----------
        X : array-like, 1d or 2d
            Data to be whitened, if 2d then each row contains an independent
            sample of the multivariate random vector

        Returns
        -------
        np.dot(x, self.cholcovinv.T)

        Notes
        -----
        This only does rescaling, it doesn't subtract the mean, use standardize
        for this instead

        See Also
        --------
        standardize : subtract mean and rescale to standardized random variable.

        """
        x = np.asarray(x)
        if np.any(self.cov):
            #return np.dot(self.cholcovinv, x)
            return np.dot(x, self.cholcovinv.T)
        else:
            #degenerate case: an all-zero covariance matrix leaves x unchanged
            return x

    def rvs(self, size=1):
        '''random variable

        Parameters
        ----------
        size : int or tuple
            the number and shape of random variables to draw.

        Returns
        -------
        rvs : ndarray
            the returned random variables with shape given by size and the
            dimension of the multivariate random vector as additional last
            dimension

        Notes
        -----
        uses numpy.random.multivariate_normal directly

        '''
        return np.random.multivariate_normal(self.mean, self.cov, size=size)

    def pdf(self, x):
        '''probability density function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector

        Returns
        -------
        pdf : float or array
            probability density value of each random vector

        '''
        return np.exp(self.logpdf(x))

    def logpdf(self, x):
        '''logarithm of probability density function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector

        Returns
        -------
        logpdf : float or array
            probability density value of each random vector


        this should be made to work with 2d x,
        with multivariate normal vector in each row and iid across rows
        doesn't work now because of dot in whiten

        '''
        x = np.asarray(x)
        x_whitened = self.whiten(x - self.mean)
        #whitened residuals make the quadratic form a plain sum of squares
        SSR = np.sum(x_whitened**2, -1)
        llf = -SSR
        llf -= self.nvars * np.log(2. * np.pi)
        llf -= self.logdetcov
        llf *= 0.5
        return llf

    #attach module level Monte Carlo integration helper as a method
    expect_mc = expect_mc
class MVNormal(MVElliptical):
    '''Class for Multivariate Normal Distribution

    uses Cholesky decomposition of covariance matrix for the transformation
    of the data
    '''
    # BUG FIX: the original line read ``__name__ == '...'`` which is a
    # comparison with no effect; an assignment was clearly intended.
    __name__ = 'Multivariate Normal Distribution'

    def rvs(self, size=1):
        '''random variable

        Parameters
        ----------
        size : int or tuple
            the number and shape of random variables to draw.

        Returns
        -------
        rvs : ndarray
            the returned random variables with shape given by size and the
            dimension of the multivariate random vector as additional last
            dimension

        Notes
        -----
        uses numpy.random.multivariate_normal directly

        '''
        return np.random.multivariate_normal(self.mean, self.sigma, size=size)

    def logpdf(self, x):
        '''logarithm of probability density function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector

        Returns
        -------
        logpdf : float or array
            probability density value of each random vector

        '''
        x = np.asarray(x)
        x_whitened = self.whiten(x - self.mean)
        #whitened residuals turn the quadratic form into a sum of squares
        SSR = np.sum(x_whitened**2, -1)
        llf = -SSR
        llf -= self.nvars * np.log(2. * np.pi)
        llf -= self.logdetsigma
        llf *= 0.5
        return llf

    def cdf(self, x, **kwds):
        '''cumulative distribution function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector
        kwds : dict
            contains options for the numerical calculation of the cdf

        Returns
        -------
        cdf : float or array
            probability of the random vector being element-wise <= x

        '''
        #lower = -np.inf * np.ones_like(x)
        #return mvstdnormcdf(lower, self.standardize(x), self.corr, **kwds)
        return mvnormcdf(x, self.mean, self.cov, **kwds)

    @property
    def cov(self):
        '''covariance matrix; equal to sigma for the normal distribution'''
        return self.sigma

    def affine_transformed(self, shift, scale_matrix):
        '''return distribution of an affine transform

        for full rank scale_matrix only

        Parameters
        ----------
        shift : array_like
            shift of mean
        scale_matrix : array_like
            linear transformation matrix

        Returns
        -------
        mvn : instance of MVNormal
            instance of multivariate normal distribution given by affine
            transformation

        Notes
        -----
        the affine transformation is defined by
        y = a + B x

        where a is shift, B is a scale matrix for the linear transformation

        This should also work to select marginal distributions, but not
        tested for this case yet.  Currently only tested because it is called
        by ``standardized``.

        '''
        B = scale_matrix  #tmp variable
        mean_new = np.dot(B, self.mean) + shift
        sigma_new = np.dot(np.dot(B, self.sigma), B.T)
        return MVNormal(mean_new, sigma_new)

    def conditional(self, indices, values):
        # raw docstring: the LaTeX below contains sequences like \bar and
        # \mu that are (invalid or backspace) escape sequences in a normal
        # string literal
        r'''return conditional distribution

        indices are the variables to keep, the complement is the conditioning
        set
        values are the values of the conditioning variables

        \bar{\mu} = \mu_1 + \Sigma_{12} \Sigma_{22}^{-1} \left( a - \mu_2 \right)

        and covariance matrix

        \overline{\Sigma} = \Sigma_{11} - \Sigma_{12} \Sigma_{22}^{-1} \Sigma_{21}.T

        Parameters
        ----------
        indices : array_like, int
            list of indices of variables in the marginal distribution
        values : array_like
            values of the conditioning variables

        Returns
        -------
        mvn : instance of MVNormal
            new instance of the MVNormal class that contains the conditional
            distribution of the variables given in indices for given
            values of the excluded variables.

        '''
        #indices need to be nd arrays for broadcasting
        keep = np.asarray(indices)
        given = np.asarray([i for i in range(self.nvars) if i not in keep])
        sigmakk = self.sigma[keep[:, None], keep]
        sigmagg = self.sigma[given[:, None], given]
        sigmakg = self.sigma[keep[:, None], given]
        sigmagk = self.sigma[given[:, None], keep]

        sigma_new = sigmakk - np.dot(sigmakg, np.linalg.solve(sigmagg, sigmagk))
        mean_new = self.mean[keep] + \
            np.dot(sigmakg, np.linalg.solve(sigmagg, values - self.mean[given]))

        #        #or
        #        sig = np.linalg.solve(sigmagg, sigmagk).T
        #        mean_new = self.mean[keep] + np.dot(sigmakg, values-self.mean[given])
        #        sigma_new = sigmakk - np.dot(sigmakg, sig)
        return MVNormal(mean_new, sigma_new)
from scipy import special
#redefine some shortcuts
np_log = np.log
np_pi = np.pi
sps_gamln = special.gammaln
class MVT(MVElliptical):
    '''Class for the Multivariate Student T Distribution

    sigma is the dispersion matrix; the covariance matrix is
    sigma * df / (df - 2.) for df > 2 (see the ``cov`` property).
    '''
    # BUG FIX: the original line read ``__name__ == '...'`` which is a
    # comparison with no effect; an assignment was clearly intended.
    __name__ = 'Multivariate Student T Distribution'

    def __init__(self, mean, sigma, df):
        '''initialize instance

        Parameters
        ----------
        mean : array_like
            parameter mu (might be renamed), for symmetric distributions this
            is the mean
        sigma : array_like, 2d
            dispersion matrix, only proportional to the covariance matrix for
            the t distribution
        df : int or float
            degrees of freedom of the t distribution

        '''
        super(MVT, self).__init__(mean, sigma)
        self.extra_args = ['df']  #overwrites extra_args of super
        self.df = df

    def rvs(self, size=1):
        '''random variables with Student T distribution

        Parameters
        ----------
        size : int or tuple
            the number and shape of random variables to draw.

        Returns
        -------
        rvs : ndarray
            the returned random variables with shape given by size and the
            dimension of the multivariate random vector as additional last
            dimension
            - TODO: Not sure if this works for size tuples with len>1.

        Notes
        -----
        generated as a chi-square mixture of multivariate normal random
        variables.
        does this require df>2 ?

        '''
        # BUG FIX: ``from multivariate import ...`` relied on Python 2
        # implicit relative imports; use the absolute package path that the
        # module header already imports from.
        from statsmodels.sandbox.distributions.multivariate import (
            multivariate_t_rvs)
        return multivariate_t_rvs(self.mean, self.sigma, df=self.df, n=size)

    def logpdf(self, x):
        '''logarithm of probability density function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector

        Returns
        -------
        logpdf : float or array
            probability density value of each random vector

        '''
        x = np.asarray(x)

        df = self.df
        nvars = self.nvars

        x_whitened = self.whiten(x - self.mean)  #should be float

        llf = - nvars * np_log(df * np_pi)
        llf -= self.logdetsigma
        llf -= (df + nvars) * np_log(1 + np.sum(x_whitened**2, -1) / df)
        llf *= 0.5
        llf += sps_gamln((df + nvars) / 2.) - sps_gamln(df / 2.)

        return llf

    def cdf(self, x, **kwds):
        '''cumulative distribution function

        Parameters
        ----------
        x : array_like
            can be 1d or 2d, if 2d, then each row is taken as independent
            multivariate random vector
        kwds : dict
            contains options for the numerical calculation of the cdf

        Returns
        -------
        cdf : float or array
            probability of the random vector being element-wise <= x

        '''
        lower = -np.inf * np.ones_like(x)
        #normalize to sigma=corr so the standard-t routine applies
        upper = (x - self.mean)/self.std_sigma
        return mvstdtprob(lower, upper, self.corr, self.df, **kwds)
        #mvstdtcdf doesn't exist yet
        #return mvstdtcdf(lower, x, self.corr, df, **kwds)

    @property
    def cov(self):
        '''covariance matrix

        The covariance matrix for the t distribution does not exist for df<=2,
        and is equal to sigma * df/(df-2) for df>2

        '''
        if self.df <= 2:
            return np.nan * np.ones_like(self.sigma)
        else:
            return self.df / (self.df - 2.) * self.sigma

    def affine_transformed(self, shift, scale_matrix):
        '''return distribution of a full rank affine transform

        for full rank scale_matrix only

        Parameters
        ----------
        shift : array_like
            shift of mean
        scale_matrix : array_like
            linear transformation matrix

        Returns
        -------
        mvt : instance of MVT
            instance of multivariate t distribution given by affine
            transformation

        Notes
        -----
        This checks for eigvals<=0, so there are possible problems for cases
        with positive eigenvalues close to zero.

        see: http://www.statlect.com/mcdstu1.htm

        I'm not sure about general case, non-full rank transformation are not
        multivariate t distributed.

        y = a + B x

        where a is shift, B is a full rank scale matrix with the same
        dimension as sigma

        '''
        #full rank method could also be in elliptical and called with super
        #after the rank check
        B = scale_matrix  #tmp variable as shorthand
        # BUG FIX: the eigenvalue (rank) check was nested inside the shape
        # mismatch branch, so a correctly shaped but singular matrix was
        # accepted silently while a wrongly shaped one crashed inside
        # eigvals.  Validate shape and rank separately.
        if B.shape != (self.nvars, self.nvars):
            raise ValueError('affine transform has to be full rank')
        if (np.linalg.eigvals(B) <= 0).any():
            raise ValueError('affine transform has to be full rank')

        mean_new = np.dot(B, self.mean) + shift
        sigma_new = np.dot(np.dot(B, self.sigma), B.T)
        return MVT(mean_new, sigma_new, self.df)
def quad2d(func=lambda x: 1, lower=(-10, -10), upper=(10, 10)):
    '''integrate ``func`` over the rectangle [lower, upper] with dblquad

    ``func`` receives the evaluation points stacked as columns; the return
    value is the (value, abserr) tuple from scipy.integrate.dblquad.
    '''
    from scipy.integrate import dblquad

    def integrand(xval, yval):
        points = np.column_stack((xval, yval))
        return func(points)

    return dblquad(integrand, lower[0], upper[0],
                   lambda y: lower[1], lambda y: upper[1])
if __name__ == '__main__':
    #ad-hoc examples and smoke tests; several values are compared against
    #R mvtnorm results (see inline comments); random parts will differ per run

    from numpy.testing import assert_almost_equal, assert_array_almost_equal

    examples = ['mvn']

    mu = (0,0)
    covx = np.array([[1.0, 0.5], [0.5, 1.0]])
    mu3 = [-1, 0., 2.]
    cov3 = np.array([[ 1. , 0.5 , 0.75],
                     [ 0.5 , 1.5 , 0.6 ],
                     [ 0.75, 0.6 , 2. ]])

    if 'mvn' in examples:
        #bivariate normal: sampling, integration based cdf and KL divergence
        bvn = BivariateNormal(mu, covx)
        rvs = bvn.rvs(size=1000)
        print rvs.mean(0)
        print np.cov(rvs, rowvar=0)
        print bvn.expect()
        print bvn.cdf([0,0])
        bvn1 = BivariateNormal(mu, np.eye(2))
        bvn2 = BivariateNormal(mu, 4*np.eye(2))
        fun = lambda(x) : np.log(bvn1.pdf(x)) - np.log(bvn.pdf(x))
        print bvn1.expect(fun)
        print bvn1.kl(bvn2), bvn1.kl_mc(bvn2)
        print bvn2.kl(bvn1), bvn2.kl_mc(bvn1)
        print bvn1.kl(bvn), bvn1.kl_mc(bvn)
        mvn = MVNormal(mu, covx)
        mvn.pdf([0,0])
        mvn.pdf(np.zeros((2,2)))
        #np.dot(mvn.cholcovinv.T, mvn.cholcovinv) - mvn.covinv

        cov3 = np.array([[ 1. , 0.5 , 0.75],
                         [ 0.5 , 1.5 , 0.6 ],
                         [ 0.75, 0.6 , 2. ]])
        mu3 = [-1, 0., 2.]
        mvn3 = MVNormal(mu3, cov3)
        mvn3.pdf((0., 2., 3.))
        mvn3.logpdf((0., 2., 3.))
        #comparisons with R mvtnorm::dmvnorm
        #decimal=14
        #    mvn3.logpdf(cov3) - [-7.667977543898155, -6.917977543898155, -5.167977543898155]
        #    #decimal 18
        #    mvn3.pdf(cov3) - [0.000467562492721686, 0.000989829804859273, 0.005696077243833402]
        #    #cheating new mean, same cov
        #    mvn3.mean = np.array([0,0,0])
        #    #decimal= 16
        #    mvn3.pdf(cov3) - [0.02914269740502042, 0.02269635555984291, 0.01767593948287269]

        #as asserts
        r_val = [-7.667977543898155, -6.917977543898155, -5.167977543898155]
        assert_array_almost_equal( mvn3.logpdf(cov3), r_val, decimal = 14)
        #decimal 18
        r_val = [0.000467562492721686, 0.000989829804859273, 0.005696077243833402]
        assert_array_almost_equal( mvn3.pdf(cov3), r_val, decimal = 17)
        #cheating new mean, same cov, too dangerous, got wrong instance in tests
        #mvn3.mean = np.array([0,0,0])
        mvn3c = MVNormal(np.array([0,0,0]), cov3)
        r_val = [0.02914269740502042, 0.02269635555984291, 0.01767593948287269]
        assert_array_almost_equal( mvn3c.pdf(cov3), r_val, decimal = 16)
        mvn3b = MVNormal((0,0,0), 1)
        fun = lambda(x) : np.log(mvn3.pdf(x)) - np.log(mvn3b.pdf(x))
        print mvn3.expect_mc(fun)
        print mvn3.expect_mc(fun, size=200000)

    #multivariate t distribution checks against known values
    mvt = MVT((0,0), 1, 5)
    assert_almost_equal(mvt.logpdf(np.array([0.,0.])), -1.837877066409345,
                        decimal=15)
    assert_almost_equal(mvt.pdf(np.array([0.,0.])), 0.1591549430918953,
                        decimal=15)

    mvt.logpdf(np.array([1.,1.]))-(-3.01552989458359)

    mvt1 = MVT((0,0), 1, 1)
    mvt1.logpdf(np.array([1.,1.]))-(-3.48579549941151) #decimal=16

    rvs = mvt.rvs(100000)
    assert_almost_equal(np.cov(rvs, rowvar=0), mvt.cov, decimal=1)

    mvt31 = MVT(mu3, cov3, 1)
    assert_almost_equal(mvt31.pdf(cov3),
        [0.0007276818698165781, 0.0009980625182293658, 0.0027661422056214652],
        decimal=18)

    mvt = MVT(mu3, cov3, 3)
    assert_almost_equal(mvt.pdf(cov3),
        [0.000863777424247410, 0.001277510788307594, 0.004156314279452241],
        decimal=17)
| |
import logging
"""
This script provides a Duck typed file reader for c3d files. It was
modified from c3d_converter.py Taken from:
http://blog.noisygecko.com/c3d_convert.py
Convert a "c3d" format motion capture file into a
comma separated file.
This code was taken from "c3d_import.py" by
Jean-Baptiste PERIN (jb_perin(at)yahoo.fr)
"""
import __builtin__
import math
import os
import string
import struct
import sys
class Marker:
def __init__(self, x, y, z):
self.x=0.0
self.y=0.0
self.z=0.0
self.Timestamp = 0.0
self.__iterNumber = 0
def __iter__(self):
self.__iterNumber = 0
return self;
def next(self):
self.__iterNumber += 1;
if self.__iterNumber == 1:
return self.x
elif self.__iterNumber == 2:
return self.y
elif self.__iterNumber == 3:
return self.z
elif self.__iterNumber == 4:
return self.Timestamp
else:
#logging.debug('Marker stoping iteration:'+str(self.__iterNumber))
raise StopIteration
def __repr__(self):
return str("x,"+str(self.x) +",y," + str(self.y)+",x,"+\
str(self.z)+",Timestamp,"+str(self.Timestamp)+"]")
class ParameterGroup:
    """A named group of C3D parameters (e.g. POINT, SUBJECTS).

    Attributes: ``name``, ``description`` and the list of contained
    ``Parameter`` objects.
    """
    def __init__(self, nom, description, parameter):
        self.name = nom
        self.description = description
        self.parameter = parameter
    def __repr__(self):
        # BUG FIX: __repr__ used to return a tuple, which raises
        # "TypeError: __repr__ returned non-string" when repr() is called.
        return "%s %s %s" % (self.name, self.description, self.parameter)
class Parameter:
    """A single C3D parameter record.

    Attributes: ``name``, ``datatype``, ``dim`` (dimension list),
    ``data`` (decoded values) and ``description``.
    """
    def __init__(self, name, datatype, dim, data, description):
        self.name = name
        self.datatype = datatype
        self.dim = dim
        self.data = data
        self.description = description
    def __repr__(self):
        # BUG FIX: __repr__ used to return a tuple, which raises
        # "TypeError: __repr__ returned non-string" when repr() is called.
        return "%s %s %s" % (self.name, self.description, self.dim)
def getNumber(str, length):
    """Decode ``length`` bytes of ``str`` as a little-endian unsigned int.

    Returns ``(value, remainder)`` where ``remainder`` is ``str`` with the
    consumed bytes stripped.  (The parameter name ``str`` shadows the
    builtin; kept for backward compatibility with keyword callers.)
    """
    # Renamed the accumulator: it used to shadow the builtin ``sum``.
    value = 0
    for i in range(length):
        # little-endian: byte i contributes at bit position 8*i
        # (big-endian would be ``(value << 8) + ord(str[i])``)
        value = value + ord(str[i]) * (2 ** (8 * i))
    return value, str[length:]
def getFloat(theString, proctype=-1):
    """Read one 4-byte float from the head of ``theString``.

    Returns ``(value, remainder)``.  When ``proctype == 2`` the bytes are
    interpreted as a DEC-VAX F float; otherwise as a native IEEE float.
    """
    raw, remainder = theString[0:4], theString[4:]
    if proctype == 2:
        # DEC-VAX encoded file: go through the VAX-F converter.
        return uint32le_to_VAXF(raw), remainder
    return struct.unpack('f', raw)[0], remainder
def uint32le_to_VAXF(theString):
    """Decode 4 bytes as a little-endian uint32 and convert the bit
    pattern from VAX F floating point to a Python float."""
    (raw,) = struct.unpack('I', theString[0:4])
    return uint32le_to_VAXF_orig(raw)
def uint32le_to_VAXF_orig(uint32le):
    """Convert a 32-bit pattern read as IEEE-LE UINT32 into the value of
    the VAX F (single precision) float it encodes.

    Uses http://www.opengroup.org/onlinepubs/9629399/chap14.htm#tagfcjh_20
    """
    # VAX stores the two 16-bit words swapped relative to IEEE-LE:
    #   VAX      <-----WORD1-----><-----WORD2----->
    #   IEEE-LE  <-----WORD2-----><-----WORD1----->
    low_word = uint32le & 0x0000FFFF
    high_word = (uint32le & 0xFFFF0000) >> 16
    vax_bits = (low_word << 16) | high_word
    # VAX F layout (after the swap): SEEEEEEEEFFFFFFFFFFFFFFFFFFFFFFF
    sign = (vax_bits >> 31) & 0x1
    exponent = (vax_bits >> 23) & 0xFF
    fraction = vax_bits & 0x7FFFFF
    # Hidden bit convention: mantissa = 0.5 + F / 2^24, exponent bias 128.
    mantissa = 0.5 + float(fraction) / 16777216
    return math.pow(-1, sign) * mantissa * math.pow(2, float(exponent) - 128)
def open(filename,mode='r'):
    # Duck-typed replacement for the builtin ``open`` (intentionally
    # shadows it at module level) so callers can treat a C3D file like a
    # readable file object.  ``mode`` is accepted for API compatibility;
    # C3DFile always reads in binary mode.
    return C3DFile(filename,mode);
class C3DFile(object):
    def __init__(self,filename,mode):
        """Open *filename* and parse the C3D header, event and parameter
        sections.

        Leaves ``self.content`` / ``self.ptr_read`` positioned at the
        start of the 3D data block so ``readline()`` can iterate frames.
        ``mode`` is accepted for file-API compatibility only; the file is
        always opened in binary read mode.
        """
        # Input: filename - file (including path) to be read
        #
        # Variable:
        # Markers 3D-marker data [Nmarkers x self.NvideoFrames x Ndim(=3)]
        # self.VideoFrameRate Frames/sec
        # AnalogSignals Analog signals [Nsignals x NanalogSamples ]
        # AnalogFrameRate Samples/sec
        # Event Event(Nevents).time ..value ..name
        # ParameterGroup ParameterGroup(Ngroups).Parameters(Nparameters).data ..etc.
        # CameraInfo MarkerRelated CameraInfo [Nmarkers x self.NvideoFrames]
        # ResidualError MarkerRelated ErrorInfo [Nmarkers x self.NvideoFrames]
        self.VideoFrameRate=0;
        self.ParameterGroups=[];
        logging.info("*********************")
        logging.info( "**** Opening File ***")
        logging.info( "*********************")
        logging.info( "FileName = "+filename)
        self.fileId=__builtin__.open(filename,'rb'); # native format (PC-intel)
        # The whole file is read into memory; ``content_memory`` keeps the
        # full buffer so we can re-seek by slicing from the start.
        content = self.fileId.read();
        content_memory = content
        NrecordFirstParameterblock, content = getNumber(content,1)
        # Reading record number of parameter section
        key, content = getNumber(content,1)
        if key!=80:
            # 80 is the C3D magic byte; bail out (object left half-built).
            logging.info( 'File: '+filename+' does not comply to the C3D format')
            self.fileId.close()
            return
        # Jump into the first parameter block to read the processor type.
        content = content[512*(NrecordFirstParameterblock-1)+1:]
        self.proctype,content =getNumber(content,1)
        self.proctype = self.proctype-83
        # proctype: 1(INTEL-PC); 2(DEC-VAX); 3(MIPS-SUN/SGI)
        logging.info( "*************************")
        logging.info( "**** Processor coding ***")
        logging.info( "*************************")
        if self.proctype==1:
            logging.info( "Intel-PC")
        elif self.proctype==2:
            logging.info( "DEC-VAX")
        elif self.proctype==3:
            logging.info( "MIPS-SUN/SGI")
        else:
            logging.info( "unknown processor type")
        #if self.proctype==2,
        # fclose(self.fileId);
        # self.fileId=fopen(filename,'r','d'); % DEC VAX D floating point and VAX ordering
        #end
        logging.info( "***********************")
        logging.info( "**** Reading Header ***")
        logging.info( "***********************")
        # ###############################################
        # ## ##
        # ## read header ##
        # ## ##
        # ###############################################
        #%NrecordFirstParameterblock=fread(self.fileId,1,'int8'); % Reading record number of parameter section
        #%key1=fread(self.fileId,1,'int8'); % key = 80;
        # Rewind to the start of the buffer, then skip the two bytes
        # already decoded above.
        content = content_memory
        #
        #fseek(self.fileId,2,'bof');
        content = content[2:]
        #
        self.Nmarkers, content=getNumber(content, 2)
        NanalogSamplesPerVideoFrame, content = getNumber(content, 2)
        StartFrame, content = getNumber(content, 2)
        EndFrame, content = getNumber(content, 2)
        MaxInterpolationGap, content = getNumber(content, 2)
        self.Scale, content = getFloat(content,self.proctype)
        NrecordDataBlock, content = getNumber(content, 2)
        self.NanalogFramesPerVideoFrame, content = getNumber(content, 2)
        if self.NanalogFramesPerVideoFrame > 0:
            # NOTE(review): Python 2 integer division; under Python 3 this
            # would become a float -- confirm before porting.
            self.NanalogChannels=NanalogSamplesPerVideoFrame/\
                self.NanalogFramesPerVideoFrame
        else:
            self.NanalogChannels=0
        self.VideoFrameRate, content = getFloat(content,self.proctype)
        AnalogFrameRate=self.VideoFrameRate*self.NanalogFramesPerVideoFrame
        logging.info( "self.NanalogFramesPerVideoFrame= "+\
            str(self.NanalogFramesPerVideoFrame))
        logging.info( "AnalogFrameRate= "+str(AnalogFrameRate))
        logging.info( "self.VideoFrameRate= "+str(self.VideoFrameRate))
        logging.info( "self.Scale= "+str(self.Scale))
        logging.info( "self.Nmarkers= "+str(self.Nmarkers))
        logging.info( "StartFrame= "+str(StartFrame))
        logging.info( "EndFrame= "+str(EndFrame))
        logging.info( "***********************")
        logging.info( "**** Reading Events ...")
        logging.info( "***********************")
        content = content_memory
        content = content[298:] #strange .. it should be 150 according to the doc
        EventIndicator, content = getNumber(content, 2)
        EventTime=[]
        EventValue=[]
        EventName=[]
        logging.info( "EventIndicator = "+str(EventIndicator))
        if EventIndicator==12345:
            # 12345 is the C3D flag meaning "events present".
            Nevents, content = getNumber(content, 2)
            logging.info("Nevents= "+str(Nevents))
            content = content[2:]
            if Nevents>0:
                for i in range(Nevents):
                    letime, content = getFloat(content,self.proctype)
                    EventTime.append(letime)
                content = content_memory
                content = content[188*2:]
                for i in range(Nevents):
                    lavalue, content = getNumber(content, 1)
                    EventValue.append(lavalue)
                content = content_memory
                content = content[198*2:]
                for i in range(Nevents):
                    # Event names are fixed-width 4-character fields.
                    lenom = content[0:4]
                    content = content[4:]
                    EventName.append(lenom)
        logging.info( "***************************")
        logging.info( "**** Reading Parameters ...")
        logging.info( "***************************")
        content = content_memory
        content = content[512*(NrecordFirstParameterblock-1):]
        self.ParameterGroups = []
        ParameterNumberIndex = []
        #
        dat1, content = getNumber(content, 1)
        key2, content = getNumber(content, 1)
        NparameterRecords, content = getNumber(content, 1)
        logging.info( "NparameterRecords="+str(NparameterRecords))
        proctype1,content =getNumber(content,1)
        proctype1 = proctype1-83
        # proctype: 1(INTEL-PC); 2(DEC-VAX); 3(MIPS-SUN/SGI)
        for i in range(NparameterRecords):
            leparam = ParameterGroup(None, None, [])
            self.ParameterGroups.append(leparam)
            ParameterNumberIndex.append(0)
        #
        #
        # Each record starts with a character count and a group number;
        # negative group numbers introduce a *group header*, positive ones
        # a *parameter* belonging to that group.  Values >= 128 are the
        # two's-complement encoding of negative bytes.
        Ncharacters, content = getNumber(content, 1)
        logging.debug('The NUMBER OF CHARACTERS:'+str(Ncharacters))
        if Ncharacters>=128:
            Ncharacters = -(2**8)+(Ncharacters)
        GroupNumber, content = getNumber(content, 1)
        if GroupNumber>=128:
            GroupNumber = -(2**8)+(GroupNumber)
        #logging.info( "GroupNumber = ", GroupNumber
        while Ncharacters > 0:
            if GroupNumber<0 and abs(GroupNumber) < len(self.ParameterGroups):
                # Group header record: name, offset to next record,
                # description.
                GroupNumber=abs(GroupNumber)
                GroupName = content[0:Ncharacters]
                content = content[Ncharacters:]
                logging.debug("Group Name ="+GroupName+" Group Number = "+\
                    str(GroupNumber)+"Len self.ParameterGroups:"+\
                    str(len(self.ParameterGroups)))
                self.ParameterGroups[GroupNumber].name = GroupName
                #logging.info( "ParameterGroupName =", GroupName
                offset, content = getNumber(content, 2)
                deschars, content = getNumber(content, 1)
                GroupDescription = content[0:deschars]
                content = content[deschars:]
                self.ParameterGroups[GroupNumber].description = GroupDescription
                #
                ParameterNumberIndex[GroupNumber]=0
                content = content[offset-3-deschars:]
            elif abs(GroupNumber) >= len(self.ParameterGroups):
                logging.debug('NOT DECODING GROUP.')
            else:
                # Parameter record inside group ``GroupNumber``.
                ParameterNumberIndex[GroupNumber]=ParameterNumberIndex[GroupNumber]+1
                ParameterNumber=ParameterNumberIndex[GroupNumber]
                #logging.info( "ParameterNumber=", ParameterNumber
                self.ParameterGroups[GroupNumber].parameter.append(
                    Parameter(None, None, [], [], None))
                ParameterName = content[0:Ncharacters]
                content = content[Ncharacters:]
                #logging.info( "ParameterName = ",ParameterName
                if len(ParameterName)>0:
                    self.ParameterGroups[GroupNumber].\
                        parameter[ParameterNumber-1].name=ParameterName
                offset, content = getNumber(content, 2)
                # Remember where the next record starts in the full buffer
                # so malformed parameter bodies cannot desynchronise us.
                filepos = len(content_memory)-len(content)
                nextrec = filepos+offset-2
                # theType: -1 char, 1 byte/bool, 2 int16, 4 float.
                theType, content=getNumber(content, 1)
                if theType>=128:
                    theType = -(2**8)+theType
                self.ParameterGroups[GroupNumber].\
                    parameter[ParameterNumber-1].type=theType
                dimnum, content=getNumber(content, 1)
                if dimnum == 0:
                    datalength = abs(theType)
                else:
                    mult=1
                    dimension=[]
                    for j in range (dimnum):
                        ladim, content = getNumber(content, 1)
                        dimension.append(ladim)
                        mult=mult*dimension[j]
                        self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].dim.append(dimension[j])
                    datalength = abs(theType)*mult
                #logging.info( "ParameterNumber = ", ParameterNumber, " Group Number = ", GroupNumber
                if theType==-1:
                    # Character data: one string (dimnum==1) or a list of
                    # fixed-width strings (dimnum==2).
                    data = ""
                    wordlength=dimension[0]
                    if dimnum==2 and datalength>0:
                        for j in range(dimension[1]):
                            data=string.rstrip(content[0:wordlength])
                            content = content[wordlength:]
                            self.ParameterGroups[GroupNumber].\
                                parameter[ParameterNumber-1].data.append(data)
                    elif dimnum==1 and datalength>0:
                        data=content[0:wordlength]
                        self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data.append(data) # ???
                    if string.rstrip(ParameterName) == "LABELS" and \
                        string.rstrip(GroupName) == "POINT":
                        # POINT:LABELS carries the marker names used by
                        # readline().
                        self.markerNames = self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data
                        logging.info( "POINT = "+str(self.markerNames)+' '+str(type(self.markerNames)))
                    elif string.rstrip(ParameterName) == "LABEL_PREFIXES" and \
                        string.rstrip(GroupName) == "SUBJECTS":
                        logging.info( "SUBJECTS = "+\
                            str( self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data))
                    else:
                        #logging.info( self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data
                        pass
                elif theType == 1:
                    data = []
                    # NOTE(review): Python 2 integer division (float under
                    # Python 3, where range() would fail) -- confirm.
                    Nparameters=datalength/abs(theType)
                    logging.debug("Nparameters="+str(Nparameters))
                    for i in range(Nparameters):
                        ladata,content = getNumber(content, 1)
                        data.append(ladata)
                    self.ParameterGroups[GroupNumber].\
                        parameter[ParameterNumber-1].data=data
                    #logging.info( self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data
                    #logging.info( "type boolean"
                elif theType == 2 and datalength>0:
                    data = []
                    Nparameters=datalength/abs(theType)
                    for i in range(Nparameters):
                        ladata,content = getNumber(content, 2)
                        data.append(ladata)
                    #self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data
                    if dimnum>1:
                        #???? logging.info( "argh, I do not understand"
                        # NOTE(review): multi-dimensional data is stored
                        # flat; a reshape to ``dimension`` was apparently
                        # intended (see commented line below).
                        self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data=data
                        #???self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=reshape(data,dimension)
                    else:
                        self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data=data
                    #logging.info( self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data
                    #pass
                    #logging.info( "type integer"
                elif theType == 4 and datalength>0:
                    data = []
                    Nparameters=datalength/abs(theType)
                    for i in range(Nparameters):
                        ladata,content = getFloat(content,self.proctype)
                        data.append(ladata)
                    if dimnum>1:
                        self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data=data
                        #logging.info( "argh, I do not understand"
                        #???self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=reshape(data,dimension)
                    else:
                        self.ParameterGroups[GroupNumber].\
                            parameter[ParameterNumber-1].data=data
                    #logging.info( self.ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data
                else:
                    #logging.info( "error"
                    pass
                deschars, content= getNumber(content, 1)
                if deschars>0:
                    description = content[0:deschars]
                    content = content[deschars:]
                    self.ParameterGroups[GroupNumber].\
                        parameter[ParameterNumber-1].description=description
                # Re-seek to the next record using the offset recorded in
                # the full buffer (robust against partially-decoded data).
                content = content_memory
                content = content[nextrec:]
            logging.debug('LEN CONTENT:'+str(len(content)))
            if not content:
                break
            Ncharacters,content = getNumber(content, 1)
            logging.debug('The NUMBER OF CHARACTERS:'+str(Ncharacters))
            if Ncharacters>=128:
                Ncharacters = -(2**8)+(Ncharacters)
            GroupNumber,content = getNumber(content, 1)
            if GroupNumber>=128:
                GroupNumber = -(2**8)+(GroupNumber)
            #logging.info( "GroupNumber = ", GroupNumber
        ## ###############################################
        ## ## ##
        ## ## read data block ##
        ## ## ##
        ## ###############################################
        ## Get the coordinate and analog data
        #
        content = content_memory
        self.content = content[(NrecordDataBlock-1)*512:]
        # readline() consumes self.content via this cursor.
        self.ptr_read = 0
        self.NvideoFrames = EndFrame - StartFrame + 1
        self.currentMarker = 0
        logging.info( "NVideoFrames = "+str(self.NvideoFrames))
    def close(self):
        # Close the underlying file handle opened in __init__ (the file
        # content itself has already been read into memory).
        self.fileId.close()
    def getMarkerNames(self):
        # Marker labels parsed from the POINT:LABELS parameter group.
        return self.markerNames
    def getVideoSamplingRate(self):
        # Video frame rate in frames/sec, read from the C3D header.
        return self.VideoFrameRate
    def getVideoFrames(self):
        # Number of video frames (EndFrame - StartFrame + 1).
        return self.NvideoFrames
    def getScale(self):
        # Header scale factor; negative means float-encoded coordinates.
        return self.Scale
def readline(self,size=0):
# Markers 3D-marker data [Nmarkers x self.NvideoFrames x Ndim(=3)]
# CameraInfo MarkerRelated CameraInfo [Nmarkers x self.NvideoFrames]
# ResidualError MarkerRelated ErrorInfo [Nmarkers x self.NvideoFrames]
Markers= {}
ResidualError = {}
CameraInfo= {}
for markerName in self.markerNames:
Markers[markerName] = Marker(0.0,0.0,0.0)
ResidualError[markerName] = 0
CameraInfo[markerName] = 0
#logging.debug( "***************************")
#logging.debug( "**** Reading DataBlock ....")
#logging.debug( "***************************")
if self.ptr_read >= len(self.content):
logging.debug( "Nothin More to read.")
return False
self.currentMarker += 1
self.currentTimeStamp = self.currentMarker/self.VideoFrameRate
if self.Scale < 0.0:
#logging.info( "*",
for markerName in self.markerNames:
x = getFloat(self.content[self.ptr_read:self.ptr_read+4],
self.proctype)[0]
self.ptr_read+=4
y = getFloat(self.content[self.ptr_read:self.ptr_read+4],
self.proctype)[0]
self.ptr_read+=4
z = getFloat(self.content[self.ptr_read:self.ptr_read+4],
self.proctype)[0]
self.ptr_read+=4
Markers[markerName].x = x#*self.Scale
Markers[markerName].y = y#*self.Scale
Markers[markerName].z = z#*self.Scale
Markers[markerName].Timestamp = self.currentTimeStamp#*self.Scale
a= getFloat(self.content[self.ptr_read:self.ptr_read+4],
self.proctype)[0]
self.ptr_read+=4
a = int(a)
highbyte = int(a/256)
lowbyte=a-highbyte*256
CameraInfo[markerName] = highbyte
ResidualError[markerName] = lowbyte*abs(self.Scale)
#if i< 2:
#logging.info( Markerss[i][j]
self.ptr_read+=self.NanalogFramesPerVideoFrame*self.NanalogChannels*2
else:
#UNTESTED
for makerName in self.markerNames:
#x, self.content = getNumber(self.content,2)
x = ord(self.content[self.ptr_read]) + \
ord(self.content[self.ptr_read+1])*(2**8)
self.ptr_read+=2
if x > 32768:
x=-(2**16)+(x)
#y, self.content = getNumber(self.content,2)
y = ord(self.content[self.ptr_read]) + \
ord(self.content[self.ptr_read+1])*(2**8)
self.ptr_read+=2
if y > 32768:
y=-(2**16)+(y)
#z, self.content = getNumber(self.content,2)
z = ord(self.content[self.ptr_read]) + \
ord(self.content[self.ptr_read+1])*(2**8)
self.ptr_read+=2
if z > 32768:
z=-(2**16)+(z)
Markers[makerName].x = x*self.Scale
Markers[makerName].y = y*self.Scale
Markers[makerName].z = z*self.Scale
Markers[markerName].Timestamp = self.currentTimeStamp#*self.Scale
#if i< 2:
# logging.info( Markers[i][j]
ResidualError[makerName], self.content = getNumber(self.content, 1)
CameraInfo[makerName], self.content = getNumber(self.content, 1)
self.ptr_read += self.NanalogFramesPerVideoFrame*self.NanalogChannels*2
#return [Markers,ResidualError,CameraInfo]
return Markers
#for j in range (self.NanalogFramesPerVideoFrame):
# for k in range(self.NanalogChannels):
# val, self.content = getNumber(self.content, 2)
#AnalogSignals(j+self.NanalogFramesPerVideoFrame*(i-1),1:self.NanalogChannels)=val
    def __iter__(self):
        # Allow ``for frame in C3DFile(...)`` -- frames are produced by
        # next() / readline().
        return self
def next(self):
data = self.readline()
if data == False or data == None:
raise StopIteration
return data
| |
import nltk
import json
import numpy as np
import traceback
from codecs import open
from pathlib import Path
from puls_util import separate_title_from_body
from cap_transform import (make_capitalized_title,
make_uppercase_title,
make_lowercase_title)
from data import get_label
from error_display import print_label_error
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def evaluate(predicate_func,
             title_transformation_func,
             title_file_path="reuters.txt",
             doc_data_dir="/cs/puls/tmp/Capitalization/reuters-text",
             pass_doc=False):
    """Evaluate a capitalization-recovery predicate against gold titles.

    Reads (fname, raw_title) JSON pairs from ``title_file_path``, applies
    ``title_transformation_func`` to de-capitalize, runs
    ``predicate_func`` to recover capitalization, and prints token-level
    accuracy over at most 10000 instances.  When ``pass_doc`` is set the
    document body from ``doc_data_dir`` is passed to the predicate.
    """
    # BUG FIX: sys was used in the except handlers but never imported at
    # module level.
    import sys
    # Head word title should be different for monocase
    total_correct_n = 0.
    total_n = 0
    logger.info("Evaluation of %r starts..", predicate_func)
    with open(title_file_path, "r", "utf8") as f:
        total_instance_number = 10000.
        finished_instance_number = 0
        for l in f:
            if finished_instance_number == total_instance_number:
                break
            fname, raw_title = json.loads(l)
            raw_words = nltk.word_tokenize(raw_title)
            try:
                cap_words = title_transformation_func(title_words=raw_words)
            except Exception:
                logger.error("%s encountered error in making capitalized title." % (fname))
                traceback.print_exc(file=sys.stdout)
                continue
            if pass_doc:
                # BUG FIX: the document handle used to be bound to ``f``,
                # shadowing the title file being iterated.
                with open("%s/%s" % (doc_data_dir, fname), "r", "utf8") as doc_f:
                    kwargs = {"doc": doc_f.read()}
            else:
                kwargs = {}
            normalized_words = predicate_func(words=cap_words, **kwargs)
            correct_labels = [get_label(w)
                              for w in raw_words]
            try:
                # get_label may raise on malformed recovered tokens; that
                # is exactly what this guard is for.
                predicted_labels = [get_label(w)
                                    for w in normalized_words]
            except Exception:
                logger.error("%s encountered error in recovery." % (fname))
                traceback.print_exc(file=sys.stdout)
                continue
            # Python 2's ``filter(lambda (rw, nw): ...)`` tuple-parameter
            # lambda is invalid syntax under Python 3; a generator with
            # tuple unpacking works in both.
            total_correct_n += sum(1 for rw, nw in zip(raw_words, normalized_words)
                                   if rw == nw)
            total_n += len(correct_labels)
            finished_instance_number += 1
            if finished_instance_number % 1000 == 0:
                logger.info("%f finished", finished_instance_number / total_instance_number)
    print(total_correct_n / total_n)
def is_consistent_prediction(pred_tokens, true_tokens):
    """Check if a predicted token sequence is consistent with the actual
    token sequence.

    Consistent means:
    - same length
    - same content (after lower-casing)
    """
    # BUG FIX: the original compared ``map(...) == map(...)``, which is
    # always False under Python 3 (two distinct map objects); compare the
    # lower-cased tokens pairwise instead.
    if len(pred_tokens) != len(true_tokens):
        return False
    return all(p.lower() == t.lower()
               for p, t in zip(pred_tokens, true_tokens))
def eval_stat(pred_tokens, true_tokens, accepted_labels):
    """Accumulate per-label (#match, #predicted, #reference) counts.

    The first token of each sequence is skipped (sentence-initial words
    are capitalized regardless of label).  Only labels present in
    ``accepted_labels`` are counted.

    Returns a numpy array of shape (len(accepted_labels), 3) with columns
    #match, #model-prediction, #reference.
    """
    ret = np.zeros((len(accepted_labels), 3))
    label2row = {l: i for i, l in enumerate(accepted_labels)}
    accepted_labels = set(accepted_labels)
    pred_tokens, true_tokens = pred_tokens[1:], true_tokens[1:]
    # BUG FIX (py3 compatibility): bare map() objects are not indexable
    # under Python 3 (``pred_labels[i]`` below would raise); build lists.
    pred_labels = [get_label(t) for t in pred_tokens]
    true_labels = [get_label(t) for t in true_tokens]
    for i, true_l in enumerate(true_labels):
        pred_l = pred_labels[i]
        if true_l in accepted_labels and pred_l in accepted_labels:
            if pred_l == true_l:
                ret[label2row[true_l], 0] += 1
            ret[label2row[pred_l], 1] += 1
            ret[label2row[true_l], 2] += 1
    return ret
def eval_rule_based(output_path, okform_dir,
                    accepted_labels=set(['AL', 'IC']),
                    print_errors=False):
    """
    Score rule-based capitalization predictions against gold titles.

    ``output_path`` holds two-line records: an instance id line followed
    by a JSON line with a ``resultingHeadline`` token list; gold tokens
    are loaded from the matching ``.auxil``/``.paf`` files in
    ``okform_dir``.

    NOTE(review): the mutable default ``accepted_labels=set([...])`` is
    shared across calls -- harmless while it is never mutated, but worth
    confirming.

    Return:
    numpy.ndarray: (#label, 3)
        count of #match, #mode, #ref for each label
    First word of sentence is ignored
    """
    ret_stat = np.zeros((len(accepted_labels), 3),
                        dtype=np.float64)
    n_finished = 0
    n_errorless = 0
    with Path(output_path).open('r', encoding='utf8') as prediction_file:
        while True:
            if n_finished % 1000 == 0:
                logger.info('Finished {}/{}'.format(n_errorless, n_finished))
            # Records come in pairs: id line, then prediction JSON line.
            line1 = prediction_file.readline()
            line2 = prediction_file.readline()
            if not line2:
                break
            try:
                id_ = line1.strip()
                pred_json = json.loads(line2.strip())
                if pred_json['resultingHeadline'] is None:
                    continue
                pred_tokens = pred_json['resultingHeadline']
                auxil_path = str(Path(okform_dir) /
                                 Path(id_).with_suffix('.auxil'))
                paf_path = str(Path(okform_dir) /
                               Path(id_).with_suffix('.paf'))
                title_sents, _ = separate_title_from_body(auxil_path, paf_path)
                # Gold tokens come from the first title sentence only.
                true_tokens = [item['token']
                               for item in title_sents[0]['features']]
                if is_consistent_prediction(pred_tokens, true_tokens):
                    stat = eval_stat(pred_tokens, true_tokens,
                                     accepted_labels)
                    if print_errors:
                        print_label_error(true_tokens,
                                          # we don't have features here
                                          features=None,
                                          instance_id=id_,
                                          excluded_indices=set([0]),
                                          correct_labels=map(get_label,
                                                             true_tokens),
                                          predicted_labels=map(get_label,
                                                               pred_tokens),
                                          target_true_label='IC',
                                          target_pred_label='AL',
                                          print_features=False)
                    ret_stat += stat
                    n_errorless += 1
                else:
                    logger.debug(
                        'Predicted and true tokens inconsisent:\n{}\n{}\n'.format(
                            pred_tokens, true_tokens)
                    )
            # NOTE(review): deliberately best-effort -- any failure on one
            # record is logged and skipped (bare except also swallows
            # KeyboardInterrupt; consider ``except Exception``).
            except:
                logger.error(traceback.format_exc())
                continue
            finally:
                n_finished += 1
    return ret_stat
if __name__ == "__main__":
    # from baseline1 import normalize_title as b1
    # from baseline2 import normalize_title as b2
    # from baseline3 import normalize_title as b3
    # evaluate(predicate_func = b2, pass_doc = True)
    # evaluate(predicate_func = b1)
    # evaluate(predicate_func=b3,
    #          title_transformation_func=make_capitalized_title,
    #          pass_doc=True)
    # NOTE(review): cluster-specific absolute paths are hard-coded here;
    # this entry point only works on that environment.
    r = eval_rule_based(
        output_path='/cs/taatto/home/hxiao/capitalization-recovery/result/puls-100k/rule-based/predictions-2015-09-07.txt',
        okform_dir='/cs/taatto/home/hxiao/capitalization-recovery/corpus/puls-format',
        accepted_labels=set(['AL', 'IC']),
        print_errors=False)
    print(r)
| |
# -*- coding: utf-8 -*-
"""Utilities that may be packaged in external libraries."""
from random import SystemRandom
from collections import OrderedDict
from importlib import import_module
from django.views.generic import TemplateView
from django.contrib.auth.hashers import BasePasswordHasher, mask_hash
def random_unicode(min_length=None,
                   max_length=None,
                   alphabet=u'abcdefghijklmnopqrstuvwxyz'
                            u'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                            u'0123456789'):
    """Return a random unicode string drawn from ``alphabet``.

    The length is chosen uniformly in ``[min_length, max_length]``.  If
    only ``max_length`` is given, ``min_length`` defaults to 1; if only
    ``min_length`` is given, the length is exactly ``min_length``.

    Raises ValueError when neither bound is given, when ``min_length``
    is below 1, or when ``max_length < min_length``.

    .. note:: Uses :py:func:`os.urandom` (via ``random.SystemRandom``).
    """
    if min_length is None:
        if max_length is None:
            raise ValueError("Provide min_length or max_length.")
        else:
            min_length = 1
    if max_length is None:
        max_length = min_length
    if min_length < 1:
        raise ValueError("Minimum length is 1.")
    if max_length < min_length:
        # BUG FIX: the message claimed the maximum "must be greater than"
        # the minimum, but equal lengths are accepted.
        raise ValueError("Maximum length must not be less than minimum "
                         "length.")
    random = SystemRandom()
    length = random.randint(min_length, max_length)
    return u''.join(random.choice(alphabet) for i in range(length))
def random_password(min_length=16, max_length=32,
                    alphabet='abcdefghjkmnpqrstuvwxyz'
                             'ABCDEFGHJKLMNPQRSTUVWXYZ'
                             '23456789'):
    """Return a random password of random length from a limited ASCII
    alphabet.

    .. note::
       The default alphabet deliberately omits "I", "O" and other letters
       and digits that look similar -- just to avoid confusion.
    """
    return random_unicode(min_length=min_length,
                          max_length=max_length,
                          alphabet=alphabet)
class PlainPasswordHasher(BasePasswordHasher):
    """Plain password hashing algorithm for test (DO NOT USE in production)."""
    algorithm = "plain"

    def salt(self):
        # The plain algorithm needs no salt.
        return ''

    def encode(self, password, salt):
        # ``salt`` is intentionally unused; output is "plain$$<password>".
        return '%s$$%s' % (self.algorithm, password)

    def verify(self, password, encoded):
        algorithm, stored = encoded.split('$$', 1)
        assert algorithm == self.algorithm
        return stored == password

    def safe_summary(self, encoded):
        summary = OrderedDict()
        summary['algorithm'] = self.algorithm
        summary['hash'] = mask_hash(encoded, show=3)
        return summary
class UnauthorizedView(TemplateView):
    """Template view answering with HTTP status 401 (unauthorized)."""
    template_name = '401.html'

    def render_to_response(self, context, **response_kwargs):
        """Render response with status code 401."""
        response_kwargs.setdefault('status', 401)
        return super(UnauthorizedView, self).render_to_response(
            context, **response_kwargs)
class ForbiddenView(TemplateView):
    # Renders the 403 template with HTTP status 403 (forbidden).
    template_name = '403.html'
    def render_to_response(self, context, **response_kwargs):
        """Render response with status code 403."""
        response_kwargs.setdefault('status', 403)
        return TemplateView.render_to_response(self, context,
                                               **response_kwargs)
def import_member(import_string):
    """Import one member of Python module by path.

    >>> import os.path
    >>> imported = import_member('os.path.supports_unicode_filenames')
    >>> os.path.supports_unicode_filenames is imported
    True
    """
    module_path, member_name = import_string.rsplit('.', 1)
    return getattr(import_module(module_path), member_name)
#: Sentinel to detect undefined function argument.
#: Compared by identity, so a unique ``object()`` instance suffices.
UNDEFINED_FUNCTION = object()
class NotCallableError(TypeError):
    """Raised when operation requires a callable."""
class Decorator(object):
    """Base class to create class-based decorators.

    See: https://tech.people-doc.com/python-class-based-decorators.html

    Override :meth:`decorate` or :meth:`run` to create custom decorators:

    * :meth:`decorate` is dedicated to wrapping the function, i.e.
      remembering the function to decorate.  :meth:`__init__` or
      :meth:`__call__` may call :meth:`decorate`, depending on the usage.

    * :meth:`run` is dedicated to execution, i.e. running the decorated
      function.  :meth:`__call__` calls :meth:`run` if a function has
      already been decorated.

    Decorator instances are callables. The :meth:`__call__` method has a
    special implementation in Decorator. Generally, consider overriding
    :meth:`run` instead of :meth:`__call__`.
    """
    #: Sentinel to detect undefined function argument.
    UNDEFINED_FUNCTION = UNDEFINED_FUNCTION

    #: Shortcut to exception:
    NotCallableError = NotCallableError

    def __init__(self, func=UNDEFINED_FUNCTION):
        """Constructor.

        Accepts one optional positional argument: the function to
        decorate (the ``@Decorator``-without-parentheses usage).
        Subclasses adding configuration should accept it as keyword
        arguments; beware passing ``func`` as a keyword argument, it
        would be used as the function to decorate.
        """
        #: Decorated function.
        self.decorated = self.UNDEFINED_FUNCTION
        # Decorate ``func`` if it was passed, i.e. when the decorator is
        # used with ``@`` and without parentheses:
        #
        # .. code:: python
        #
        #    @Decorator
        #    def some_function():
        #        pass
        #
        # which is equivalent to
        # ``some_function = Decorator(some_function)``.
        if func is not self.UNDEFINED_FUNCTION:
            self.decorate(func)
        # BUG FIX: the original ended with ``return self`` -- __init__
        # must return None, so *every* instantiation raised
        # "TypeError: __init__() should return None".

    def decorate(self, func):
        """Set :attr:`decorated`; return decorator instance (``self``).

        Raises :class:`NotCallableError` (inherits from
        :class:`TypeError`) if ``func`` is not a callable.
        """
        if not callable(func):
            raise NotCallableError(
                'Cannot decorate non callable object "{func}"'
                .format(func=func))
        self.decorated = func
        return self

    def __call__(self, *args, **kwargs):
        """Run decorated function if available, else decorate first arg.

        First use case: the decorator instance was initialized with the
        function to decorate, and the decorated function is being called:

        .. code:: python

            @Decorator  # no parentheses => __init__(some_function)
            def some_function():
                pass

            some_function()  # Decorator.__call__()

        Second use case: the instance was initialized with configuration
        only, then used to decorate a function:

        .. code:: python

            @Decorator()  # parentheses => __call__(some_function)
            def some_function():
                pass
        """
        if self.decorated is self.UNDEFINED_FUNCTION:
            func = args[0]
            if args[1:] or kwargs:
                raise ValueError('Cannot decorate and setup simultaneously '
                                 'with __call__(). Use __init__() or '
                                 'setup() for setup. Use __call__() or '
                                 'decorate() to decorate.')
            self.decorate(func)
            return self
        else:
            return self.run(*args, **kwargs)

    def run(self, *args, **kwargs):
        """Actually run the decorator.

        This base implementation is a transparent proxy to the decorated
        function: it passes positional and keyword arguments as is, and
        returns the result.
        """
        return self.decorated(*args, **kwargs)
| |
import time, copy
import threading, logging
import traceback
import tornado
from tornado import gen
from tornado.web import RequestHandler
from tornado.wsgi import WSGIAdapter
from concurrent.futures import ThreadPoolExecutor
from itchatmp.config import SERVER_WAIT_TIME, COROUTINE
from itchatmp.content import (NORMAL, COMPATIBLE, SAFE,
TEXT, INCOME_MSG, OUTCOME_MSG)
from itchatmp.views import (
deconstruct_msg, construct_msg, reply_msg_format,
decrypt_msg, encrypt_msg, oauth)
from itchatmp.controllers.envtest import env_test
from itchatmp.exceptions import ParameterError
from itchatmp.log import set_logging
logger = logging.getLogger('itchatmp')
def load_register(core):
    # Attach this module's server entry points to the ``core`` object.
    # ``update_config``, ``run`` and ``msg_register`` are expected to be
    # module-level callables defined elsewhere in this module (not
    # visible in this chunk) -- TODO confirm.
    core.update_config = update_config
    core.run = run
    core.msg_register = msg_register
def construct_get_post_fn(core):
    # Build the (GET, POST) handler pair for the WeChat webhook endpoint.
    # GET is only used by the platform to verify the server; POST carries
    # incoming messages.  The coroutine POST variant is selected when the
    # COROUTINE config flag is set.
    def get_fn(handler):
        ''' only for verifying server
            return echostr if verify
            return greeting words if not
        '''
        if core.filterRequest and not core.filter_request(handler.request):
            logger.debug('A request from unknown ip is filtered')
            return 'Greeting from itchatmp!'
        else:
            return verify_echostr(core, handler) or 'Greeting from itchatmp!'
    def sync_post_fn(handler):
        # Returns (reply, tns) when a registered handler produced a
        # reply, else (None, None) -- presumably consumed by the tornado
        # request handler; TODO confirm against the caller.
        if core.filterRequest and not core.filter_request(handler.request):
            logger.debug('A request from unknown ip is filtered')
            return None, None
        else:
            msgDict = deconstruct_msg(handler.request.body)
            isActualEncrypt = 'Encrypt' in msgDict
            tns = get_tns(core, handler)
            msgDict = verify_message(core, handler, tns, msgDict)
            if not msgDict:
                logger.debug('Ignore a request because verify failed')
            else:
                reply_fn = get_reply_fn(core, msgDict['MsgType'])
                if reply_fn is None:
                    return None, None
                try:
                    # deepcopy so user handlers cannot mutate the parsed
                    # message used below for the reply envelope.
                    reply = reply_fn(copy.deepcopy(msgDict))
                except Exception as e:
                    logger.warning(traceback.format_exc())
                else: # if nothing goes wrong
                    if reply:
                        return verify_reply(core, tns, reply, msgDict, isActualEncrypt)
            return None, None
    @gen.coroutine
    def coroutine_post_fn(handler):
        # Coroutine twin of sync_post_fn; results travel via gen.Return
        # (pre-async/await Tornado coroutine style).
        if core.filterRequest and not core.filter_request(handler.request):
            logger.debug('A request from unknown ip is filtered')
        else:
            msgDict = deconstruct_msg(handler.request.body)
            tns = get_tns(core, handler)
            isActualEncrypt = 'Encrypt' in msgDict
            msgDict = verify_message(core, handler, tns, msgDict)
            if not msgDict:
                logger.debug('Ignore a request because verify failed')
            else:
                reply_fn = get_reply_fn(core, msgDict['MsgType'])
                if reply_fn is None:
                    raise gen.Return((None, None))
                try:
                    reply = yield reply_fn(copy.deepcopy(msgDict))
                except Exception as e:
                    logger.warning(traceback.format_exc())
                else: # if nothing goes wrong
                    if reply:
                        r = yield verify_reply(core, tns, reply, msgDict, isActualEncrypt)
                        raise gen.Return(r)
        raise gen.Return((None, None))
    return get_fn, coroutine_post_fn if COROUTINE else sync_post_fn
def get_tns(core, handler):
    """Collect the ``[timestamp, nonce, signature]`` triple from a request.

    When the request carries a non-empty ``msg_signature`` argument
    (encrypted mode) that value is used as the third element, otherwise
    the plain ``signature`` argument is used.  Missing arguments default
    to the empty string.  *core* is accepted for call-site symmetry.
    """
    sigKey = ('msg_signature' if handler.get_argument('msg_signature', '')
              else 'signature')
    return [handler.get_argument(name, '')
            for name in ('timestamp', 'nonce', sigKey)]
def verify_echostr(core, handler):
    '''
    verify signature and return echostr if valid
    if not, None will be returned
    '''
    tns = get_tns(core, handler)
    echostr = handler.get_argument('echostr', '')
    if handler.get_argument('msg_signature', ''):
        # Encrypted mode: echostr itself is part of the signed payload and
        # must be decrypted before being echoed back.
        if oauth(*(tns + [echostr, core.config.token])):
            msgDict = decrypt_msg(*(tns + [core.config, {'echostr': echostr}]))
            echostr = msgDict.get('echostr')
        else:
            # BUGFIX: previously a failed signature check fell through and
            # returned the unverified echostr, contradicting the contract
            # documented above and the plain-signature branch below.
            echostr = None
    else:
        valid = oauth(*(tns + [core.config.token]))
        if not valid:
            echostr = None
    return echostr
def verify_message(core, handler, tns, msgDict):
    '''
    verify the request signature and return the message dict
    * encrypted requests (msg_signature present) are checked against the
      Encrypt field and decrypted when encryptMode is SAFE
    * an empty dict (falsy) is returned when verification fails
    '''
    if handler.get_argument('msg_signature', ''):
        valid = oauth(*(tns +
            [core.config.token, msgDict.get('Encrypt', '')]))
    else:
        valid = oauth(*(tns + [core.config.token]))
    if not valid:
        return {}
    if core.config.encryptMode == SAFE:
        msgDict = decrypt_msg(*(tns + [core.config, msgDict]))
    return msgDict
# verify_reply normalizes a user handler's return value into the pair
# (serialized_reply, raw_reply_dict); (None, None) means "send nothing".
# Two module-level variants are defined: a coroutine one (results surfaced
# via gen.Return, uploads awaited) and a synchronous one.
if COROUTINE:
    @gen.coroutine
    def verify_reply(core, tns, reply, msgDict, isActualEncrypt):
        """Format, (optionally) upload media for, and serialize *reply*."""
        reply = reply_msg_format(reply)
        if reply:
            if reply.get('MsgType') in OUTCOME_MSG:
                # Swap the addressing so the reply goes back to the sender.
                reply['ToUserName'] = msgDict['FromUserName']
                reply['FromUserName'] = msgDict['ToUserName']
                if 'FileDir' in reply and reply['MsgType'] != TEXT:
                    # Media replies must be uploaded first to get a MediaId.
                    r = yield core.upload(reply['MsgType'], reply['FileDir'])
                    if not r:
                        logger.warning(r)
                        raise gen.Return((None, None))
                    else:
                        reply['MediaId'] = r['media_id']
                # Encrypt only when the incoming request was itself encrypted.
                if core.config.encryptMode == SAFE and isActualEncrypt:
                    raise gen.Return((encrypt_msg(*(tns +
                        [core.config, reply])), reply))
                else:
                    raise gen.Return((construct_msg(reply), reply))
            else:
                logger.warning('Reply is invalid: unknown MsgType')
        else:
            logger.warning('Reply is invalid: %s' % reply.get('errmsg'))
        raise gen.Return((None, None))
else:
    def verify_reply(core, tns, reply, msgDict, isActualEncrypt):
        """Synchronous variant: same contract, plain returns."""
        reply = reply_msg_format(reply)
        if reply:
            if reply.get('MsgType') in OUTCOME_MSG:
                reply['ToUserName'] = msgDict['FromUserName']
                reply['FromUserName'] = msgDict['ToUserName']
                if 'FileDir' in reply and reply['MsgType'] != TEXT:
                    r = core.upload(reply['MsgType'], reply['FileDir'])
                    if not r:
                        logger.warning(r); return None, None
                    else:
                        reply['MediaId'] = r['media_id']
                if core.config.encryptMode == SAFE and isActualEncrypt:
                    return encrypt_msg(*(tns +
                        [core.config, reply])), reply
                else:
                    return construct_msg(reply), reply
            else:
                logger.warning('Reply is invalid: unknown MsgType')
        else:
            logger.warning('Reply is invalid: %s' % reply.get('errmsg'))
        return None, None
def construct_handler(core, isWsgi):
    """Build the tornado ``MainHandler`` class for *core*.

    Three flavors are produced:
      * WSGI: plain synchronous POST;
      * tornado + COROUTINE: coroutine POST with a wait timeout;
      * tornado without COROUTINE: POST dispatched to a thread pool.
    When the wait timeout fires the HTTP response is finished empty and
    any late reply is pushed through ``core.send`` instead.
    """
    get_fn, post_fn = construct_get_post_fn(core)
    class BaseHandler(RequestHandler):
        def initialize(self):
            self.closed = False
        def on_connection_close(self):
            # Client (or our timeout) gave up before the reply was ready.
            self.closed = True
        def get(self):
            self.finish(get_fn(self))
    if isWsgi:
        class MainHandler(BaseHandler):
            def post(self):
                r, rawReply = post_fn(self)
                if self.closed: # server has stopped waiting
                    if rawReply:
                        # Too late to answer in-band; use the push API.
                        r = core.send(rawReply, rawReply.get('ToUserName', ''))
                        if not r:
                            logger.warning('Reply error: %s' % r.get('errmsg', ''))
                else:
                    self.finish(r)
    else:
        ioLoop = core.ioLoop
        if COROUTINE:
            class MainHandler(BaseHandler):
                @tornado.gen.coroutine
                def post(self):
                    def time_out_callback():
                        # WeChat only waits SERVER_WAIT_TIME for a reply:
                        # close the response and mark the handler stale.
                        self.finish()
                        self.closed = True
                    timeoutHandler = ioLoop.call_later(SERVER_WAIT_TIME,
                        time_out_callback)
                    r, rawReply = yield post_fn(self)
                    ioLoop.remove_timeout(timeoutHandler)
                    if self.closed:
                        if rawReply:
                            r = yield core.send(rawReply, rawReply.get('ToUserName', ''))
                            if not r:
                                logger.warning('Reply error: %s' % r.get('errmsg', ''))
                    else:
                        self.finish(r)
        else:
            threadPool = ThreadPoolExecutor(core.threadPoolNumber)
            class MainHandler(BaseHandler):
                @tornado.gen.coroutine
                def post(self):
                    def time_out_callback():
                        self.finish()
                        self.closed = True
                    timeoutHandler = ioLoop.call_later(SERVER_WAIT_TIME,
                        time_out_callback)
                    r, rawReply = yield threadPool.submit(post_fn, self)
                    ioLoop.remove_timeout(timeoutHandler)
                    if self.closed:
                        if rawReply:
                            # BUGFIX: Executor.submit(fn, *args) forwards
                            # positional args; the old code wrapped both
                            # arguments in a single tuple, so core.send was
                            # called with one tuple instead of two arguments
                            # (compare the WSGI/coroutine branches above).
                            r = yield threadPool.submit(core.send,
                                rawReply, rawReply.get('ToUserName', ''))
                            if not r:
                                logger.warning('Reply error: %s' % r.get('errmsg', ''))
                    else:
                        self.finish(r)
    return MainHandler
def update_config(self, config=None, atStorage=None, userStorage=None,
        filterRequest=None, threadPoolNumber=None):
    """Update server configuration in place; ``None`` leaves a field as-is.

    BUGFIX: the previous ``value or self.value`` pattern silently ignored
    falsy-but-meaningful arguments (e.g. ``filterRequest=False`` or
    ``threadPoolNumber=0``); explicit ``is not None`` checks honor them.
    """
    if config is not None:
        self.config = config
    if atStorage is not None:
        self.atStorage = atStorage
    if userStorage is not None:
        self.userStorage = userStorage
    if filterRequest is not None:
        self.filterRequest = filterRequest
    if threadPoolNumber is not None:
        self.threadPoolNumber = threadPoolNumber
def run(self, isWsgi=False, debug=True, port=80):
    """Start the message server.

    When *isWsgi* is True, return a WSGIAdapter for an external WSGI
    container instead of blocking; otherwise listen on *port* and run the
    tornado IOLoop until interrupted.
    """
    self.isWsgi = isWsgi
    self.debug = debug
    if debug:
        set_logging(loggingLevel=logging.DEBUG)
    MainHandler = construct_handler(self, isWsgi)
    app = tornado.web.Application(
        [('/', MainHandler)], debug=debug)
    logger.info('itchatmp started!%s' % (
        ' press Ctrl+C to exit.' if debug else ''))
    if isWsgi:
        return WSGIAdapter(app)
    else:
        port = int(port)
        env_test(port)  # sanity-check the local environment/port first
        app.listen(port)
        try:
            self.ioLoop.start()  # blocks until the loop is stopped
        except:
            # NOTE(review): bare except is intended to catch Ctrl+C
            # (KeyboardInterrupt), but it also swallows any other error —
            # consider narrowing to KeyboardInterrupt.
            logger.info('Bye~')
            self.ioLoop.stop()
def msg_register(self, msgType):
    ''' decorator to register message handlers
     * msgType can be type like TEXT or a list of them
     * register twice will override the older one
     * raises ParameterError for a type not in INCOME_MSG
    '''
    def _msg_register(fn):
        if COROUTINE:
            # In coroutine mode every user handler must itself be a
            # coroutine so the dispatcher can ``yield`` it.
            fn = gen.coroutine(fn)
        msgTypeList = msgType if isinstance(msgType, list) else [msgType]
        for t in msgTypeList:
            if t in INCOME_MSG:
                self._replyFnDict[t] = fn
            else:
                # BUGFIX: message previously read 'Known type register',
                # but this branch fires precisely when the type is unknown.
                raise ParameterError(
                    'Unknown type register "%s"' % t)
        return fn
    return _msg_register
def get_reply_fn(core, msgType):
    """Return the handler registered for *msgType*, or ``None`` if absent."""
    registry = core._replyFnDict
    return registry.get(msgType, None)
| |
import functools
from distutils.version import LooseVersion
import numpy as np
from ..core import indexing
from ..core.utils import FrozenDict, is_remote_uri
from ..core.variable import Variable
from .common import WritableCFDataStore, find_root_and_group
from .file_manager import CachingFileManager, DummyFileManager
from .locks import HDF5_LOCK, combine_locks, ensure_lock, get_write_lock
from .netCDF4_ import (
BaseNetCDF4Array,
_encode_nc4_variable,
_extract_nc4_variable_encoding,
_get_datatype,
_nc4_require_group,
)
class H5NetCDFArrayWrapper(BaseNetCDF4Array):
    """Lazy array wrapper exposing an h5netcdf variable to xarray indexing."""

    def get_array(self, needs_lock=True):
        # Re-acquire the dataset on every access so the wrapper stays valid
        # across file close/reopen cycles managed by the datastore.
        ds = self.datastore._acquire(needs_lock)
        return ds.variables[self.variable_name]

    def __getitem__(self, key):
        return indexing.explicit_indexing_adapter(
            key, self.shape, indexing.IndexingSupport.OUTER_1VECTOR, self._getitem
        )

    def _getitem(self, key):
        # h5py requires using lists for fancy indexing:
        # https://github.com/h5py/h5py/issues/992
        converted = tuple(
            list(axisKey) if isinstance(axisKey, np.ndarray) else axisKey
            for axisKey in key
        )
        with self.datastore.lock:
            return self.get_array(needs_lock=False)[converted]
def maybe_decode_bytes(txt):
    """Decode *txt* from UTF-8 when it is ``bytes``; pass through otherwise."""
    return txt.decode("utf-8") if isinstance(txt, bytes) else txt
def _read_attributes(h5netcdf_var):
# GH451
# to ensure conventions decoding works properly on Python 3, decode all
# bytes attributes to strings
attrs = {}
for k, v in h5netcdf_var.attrs.items():
if k not in ["_FillValue", "missing_value"]:
v = maybe_decode_bytes(v)
attrs[k] = v
return attrs
# Pre-bound encoding extractor for the h5netcdf backend: least-significant
# digit (lsd) compression is not supported, h5py-specific options are.
_extract_h5nc_encoding = functools.partial(
    _extract_nc4_variable_encoding, lsd_okay=False, h5py_okay=True, backend="h5netcdf"
)
def _h5netcdf_create_group(dataset, name):
return dataset.create_group(name)
class H5NetCDFStore(WritableCFDataStore):
    """Store for reading and writing data via h5netcdf.

    Wraps an ``h5netcdf.File`` (or ``Group``) behind a file manager so the
    underlying HDF5 file can be lazily (re)opened, locked, and shared.
    """
    # __slots__ keeps per-store memory small and catches typo'd attributes.
    __slots__ = (
        "autoclose",
        "format",
        "is_remote",
        "lock",
        "_filename",
        "_group",
        "_mode",
        "_manager",
    )
    def __init__(self, manager, group=None, mode=None, lock=HDF5_LOCK, autoclose=False):
        import h5netcdf
        # Accept an already-open File/Group directly: wrap its root in a
        # DummyFileManager so the rest of the store can stay manager-based.
        if isinstance(manager, (h5netcdf.File, h5netcdf.Group)):
            if group is None:
                root, group = find_root_and_group(manager)
            else:
                if not type(manager) is h5netcdf.File:
                    raise ValueError(
                        "must supply a h5netcdf.File if the group "
                        "argument is provided"
                    )
                root = manager
            manager = DummyFileManager(root)
        self._manager = manager
        self._group = group
        self._mode = mode
        self.format = None
        # todo: utilizing find_root_and_group seems a bit clunky
        # making filename available on h5netcdf.Group seems better
        self._filename = find_root_and_group(self.ds)[0].filename
        self.is_remote = is_remote_uri(self._filename)
        self.lock = ensure_lock(lock)
        self.autoclose = autoclose
    @classmethod
    def open(
        cls,
        filename,
        mode="r",
        format=None,
        group=None,
        lock=None,
        autoclose=False,
        invalid_netcdf=None,
        phony_dims=None,
    ):
        """Alternate constructor: open *filename* through a caching manager."""
        import h5netcdf
        if format not in [None, "NETCDF4"]:
            raise ValueError("invalid format for h5netcdf backend")
        kwargs = {"invalid_netcdf": invalid_netcdf}
        if phony_dims is not None:
            # 'phony_dims' only exists in h5netcdf >= 0.8.0; fail loudly
            # rather than silently ignoring the request.
            if LooseVersion(h5netcdf.__version__) >= LooseVersion("0.8.0"):
                kwargs["phony_dims"] = phony_dims
            else:
                raise ValueError(
                    "h5netcdf backend keyword argument 'phony_dims' needs "
                    "h5netcdf >= 0.8.0."
                )
        if lock is None:
            # Reads only need the global HDF5 lock; writes also take a
            # per-file lock to serialize writers.
            if mode == "r":
                lock = HDF5_LOCK
            else:
                lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])
        manager = CachingFileManager(h5netcdf.File, filename, mode=mode, kwargs=kwargs)
        return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)
    def _acquire(self, needs_lock=True):
        # Acquire the (possibly reopened) root file and descend to our group,
        # creating intermediate groups when in a writable mode.
        with self._manager.acquire_context(needs_lock) as root:
            ds = _nc4_require_group(
                root, self._group, self._mode, create_group=_h5netcdf_create_group
            )
        return ds
    @property
    def ds(self):
        # Fresh handle per access; the manager caches/reopens as needed.
        return self._acquire()
    def open_store_variable(self, name, var):
        """Convert one h5netcdf variable into a lazily-indexed xarray Variable."""
        import h5py
        dimensions = var.dimensions
        data = indexing.LazilyOuterIndexedArray(H5NetCDFArrayWrapper(name, self))
        attrs = _read_attributes(var)
        # netCDF4 specific encoding
        encoding = {
            "chunksizes": var.chunks,
            "fletcher32": var.fletcher32,
            "shuffle": var.shuffle,
        }
        # Convert h5py-style compression options to NetCDF4-Python
        # style, if possible
        if var.compression == "gzip":
            encoding["zlib"] = True
            encoding["complevel"] = var.compression_opts
        elif var.compression is not None:
            encoding["compression"] = var.compression
            encoding["compression_opts"] = var.compression_opts
        # save source so __repr__ can detect if it's local or not
        encoding["source"] = self._filename
        encoding["original_shape"] = var.shape
        # Variable-length str dtypes are reported specially by h5py.
        vlen_dtype = h5py.check_dtype(vlen=var.dtype)
        if vlen_dtype is str:
            encoding["dtype"] = str
        elif vlen_dtype is not None:  # pragma: no cover
            # xarray doesn't support writing arbitrary vlen dtypes yet.
            pass
        else:
            encoding["dtype"] = var.dtype
        return Variable(dimensions, data, attrs, encoding)
    def get_variables(self):
        return FrozenDict(
            (k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
        )
    def get_attrs(self):
        return FrozenDict(_read_attributes(self.ds))
    def get_dimensions(self):
        return self.ds.dimensions
    def get_encoding(self):
        # Unlimited dimensions are the ones reported with length None.
        encoding = {}
        encoding["unlimited_dims"] = {
            k for k, v in self.ds.dimensions.items() if v is None
        }
        return encoding
    def set_dimension(self, name, length, is_unlimited=False):
        if is_unlimited:
            # None marks the dim unlimited; resize sets its current extent.
            self.ds.dimensions[name] = None
            self.ds.resize_dimension(name, length)
        else:
            self.ds.dimensions[name] = length
    def set_attribute(self, key, value):
        self.ds.attrs[key] = value
    def encode_variable(self, variable):
        return _encode_nc4_variable(variable)
    def prepare_variable(
        self, name, variable, check_encoding=False, unlimited_dims=None
    ):
        """Create (or fetch) the on-disk variable and return (target, data)."""
        import h5py
        attrs = variable.attrs.copy()
        dtype = _get_datatype(variable, raise_on_invalid_encoding=check_encoding)
        fillvalue = attrs.pop("_FillValue", None)
        if dtype is str and fillvalue is not None:
            raise NotImplementedError(
                "h5netcdf does not yet support setting a fill value for "
                "variable-length strings "
                "(https://github.com/shoyer/h5netcdf/issues/37). "
                "Either remove '_FillValue' from encoding on variable %r "
                "or set {'dtype': 'S1'} in encoding to use the fixed width "
                "NC_CHAR type." % name
            )
        if dtype is str:
            dtype = h5py.special_dtype(vlen=str)
        encoding = _extract_h5nc_encoding(variable, raise_on_invalid=check_encoding)
        kwargs = {}
        # Convert from NetCDF4-Python style compression settings to h5py style
        # If both styles are used together, h5py takes precedence
        # If set_encoding=True, raise ValueError in case of mismatch
        if encoding.pop("zlib", False):
            if check_encoding and encoding.get("compression") not in (None, "gzip"):
                raise ValueError("'zlib' and 'compression' encodings mismatch")
            encoding.setdefault("compression", "gzip")
        if (
            check_encoding
            and "complevel" in encoding
            and "compression_opts" in encoding
            and encoding["complevel"] != encoding["compression_opts"]
        ):
            raise ValueError("'complevel' and 'compression_opts' encodings " "mismatch")
        complevel = encoding.pop("complevel", 0)
        if complevel != 0:
            encoding.setdefault("compression_opts", complevel)
        encoding["chunks"] = encoding.pop("chunksizes", None)
        # Do not apply compression, filters or chunking to scalars.
        if variable.shape:
            for key in [
                "compression",
                "compression_opts",
                "shuffle",
                "chunks",
                "fletcher32",
            ]:
                if key in encoding:
                    kwargs[key] = encoding[key]
        if name not in self.ds:
            nc4_var = self.ds.create_variable(
                name,
                dtype=dtype,
                dimensions=variable.dims,
                fillvalue=fillvalue,
                **kwargs,
            )
        else:
            nc4_var = self.ds[name]
        for k, v in attrs.items():
            nc4_var.attrs[k] = v
        target = H5NetCDFArrayWrapper(name, self)
        return target, variable.data
    def sync(self):
        self.ds.sync()
    def close(self, **kwargs):
        self._manager.close(**kwargs)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class ResponseBase(Model):
    """Response base.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Identifiable

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    """
    # NOTE: AutoRest-generated serializer metadata — keep these maps in sync
    # with the service contract; msrest relies on them for (de)serialization.
    _validation = {
        '_type': {'required': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
    }
    # Polymorphic discriminator: the wire '_type' value selects the subclass
    # msrest instantiates on deserialization.
    _subtype_map = {
        '_type': {'Identifiable': 'Identifiable'}
    }
    def __init__(self, **kwargs) -> None:
        super(ResponseBase, self).__init__(**kwargs)
        self._type = None  # filled by subclasses / the server
class Identifiable(ResponseBase):
    """Defines the identity of a resource.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Response

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    """
    # AutoRest-generated serializer metadata; 'id' is server-populated only.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
    }
    _subtype_map = {
        '_type': {'Response': 'Response'}
    }
    def __init__(self, **kwargs) -> None:
        super(Identifiable, self).__init__(**kwargs)
        self.id = None  # readonly: populated by the service
        self._type = 'Identifiable'
class Response(Identifiable):
    """Defines a response. All schemas that could be returned at the root of a
    response should inherit from this.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Answer, Thing, ErrorResponse

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    """
    # AutoRest-generated serializer metadata; all ivars are server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
    }
    _subtype_map = {
        '_type': {'Answer': 'Answer', 'Thing': 'Thing', 'ErrorResponse': 'ErrorResponse'}
    }
    def __init__(self, **kwargs) -> None:
        super(Response, self).__init__(**kwargs)
        self.read_link = None
        self.web_search_url = None
        self.potential_action = None
        self.immediate_action = None
        self.preferred_clickthrough_url = None
        self.adaptive_card = None
        self._type = 'Response'
class Thing(Response):
    """Defines a thing.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: CreativeWork

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    """
    # AutoRest-generated serializer metadata (inherited fields + 'url').
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'url': {'readonly': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
    }
    _subtype_map = {
        '_type': {'CreativeWork': 'CreativeWork'}
    }
    def __init__(self, **kwargs) -> None:
        super(Thing, self).__init__(**kwargs)
        self.url = None
        self._type = 'Thing'
class CreativeWork(Thing):
    """The most generic kind of creative work, including books, movies,
    photographs, software programs, etc.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Action

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar about: For internal use only.
    :vartype about:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar mentions: For internal use only.
    :vartype mentions:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar creator:
    :vartype creator: ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar text: Text content of this creative work
    :vartype text: str
    :ivar discussion_url:
    :vartype discussion_url: str
    :ivar comment_count:
    :vartype comment_count: int
    :ivar main_entity:
    :vartype main_entity:
     ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar head_line:
    :vartype head_line: str
    :ivar copyright_holder:
    :vartype copyright_holder:
     ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar copyright_year:
    :vartype copyright_year: int
    :ivar disclaimer:
    :vartype disclaimer: str
    :ivar is_accessible_for_free:
    :vartype is_accessible_for_free: bool
    :ivar genre:
    :vartype genre: list[str]
    :ivar is_family_friendly:
    :vartype is_family_friendly: bool
    """
    # AutoRest-generated serializer metadata; every field is server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'url': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'about': {'readonly': True},
        'mentions': {'readonly': True},
        'provider': {'readonly': True},
        'creator': {'readonly': True},
        'text': {'readonly': True},
        'discussion_url': {'readonly': True},
        'comment_count': {'readonly': True},
        'main_entity': {'readonly': True},
        'head_line': {'readonly': True},
        'copyright_holder': {'readonly': True},
        'copyright_year': {'readonly': True},
        'disclaimer': {'readonly': True},
        'is_accessible_for_free': {'readonly': True},
        'genre': {'readonly': True},
        'is_family_friendly': {'readonly': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'about': {'key': 'about', 'type': '[Thing]'},
        'mentions': {'key': 'mentions', 'type': '[Thing]'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'creator': {'key': 'creator', 'type': 'Thing'},
        'text': {'key': 'text', 'type': 'str'},
        'discussion_url': {'key': 'discussionUrl', 'type': 'str'},
        'comment_count': {'key': 'commentCount', 'type': 'int'},
        'main_entity': {'key': 'mainEntity', 'type': 'Thing'},
        'head_line': {'key': 'headLine', 'type': 'str'},
        'copyright_holder': {'key': 'copyrightHolder', 'type': 'Thing'},
        'copyright_year': {'key': 'copyrightYear', 'type': 'int'},
        'disclaimer': {'key': 'disclaimer', 'type': 'str'},
        'is_accessible_for_free': {'key': 'isAccessibleForFree', 'type': 'bool'},
        'genre': {'key': 'genre', 'type': '[str]'},
        'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
    }
    _subtype_map = {
        '_type': {'Action': 'Action'}
    }
    def __init__(self, **kwargs) -> None:
        super(CreativeWork, self).__init__(**kwargs)
        self.thumbnail_url = None
        self.about = None
        self.mentions = None
        self.provider = None
        self.creator = None
        self.text = None
        self.discussion_url = None
        self.comment_count = None
        self.main_entity = None
        self.head_line = None
        self.copyright_holder = None
        self.copyright_year = None
        self.disclaimer = None
        self.is_accessible_for_free = None
        self.genre = None
        self.is_family_friendly = None
        self._type = 'CreativeWork'
class Action(CreativeWork):
    """Action.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: SearchAction

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar about: For internal use only.
    :vartype about:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar mentions: For internal use only.
    :vartype mentions:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar creator:
    :vartype creator: ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar text: Text content of this creative work
    :vartype text: str
    :ivar discussion_url:
    :vartype discussion_url: str
    :ivar comment_count:
    :vartype comment_count: int
    :ivar main_entity:
    :vartype main_entity:
     ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar head_line:
    :vartype head_line: str
    :ivar copyright_holder:
    :vartype copyright_holder:
     ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar copyright_year:
    :vartype copyright_year: int
    :ivar disclaimer:
    :vartype disclaimer: str
    :ivar is_accessible_for_free:
    :vartype is_accessible_for_free: bool
    :ivar genre:
    :vartype genre: list[str]
    :ivar is_family_friendly:
    :vartype is_family_friendly: bool
    :ivar result:
    :vartype result:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar display_name:
    :vartype display_name: str
    :ivar is_top_action:
    :vartype is_top_action: bool
    :ivar service_url:
    :vartype service_url: str
    """
    # AutoRest-generated serializer metadata; every field is server-populated.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'url': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'about': {'readonly': True},
        'mentions': {'readonly': True},
        'provider': {'readonly': True},
        'creator': {'readonly': True},
        'text': {'readonly': True},
        'discussion_url': {'readonly': True},
        'comment_count': {'readonly': True},
        'main_entity': {'readonly': True},
        'head_line': {'readonly': True},
        'copyright_holder': {'readonly': True},
        'copyright_year': {'readonly': True},
        'disclaimer': {'readonly': True},
        'is_accessible_for_free': {'readonly': True},
        'genre': {'readonly': True},
        'is_family_friendly': {'readonly': True},
        'result': {'readonly': True},
        'display_name': {'readonly': True},
        'is_top_action': {'readonly': True},
        'service_url': {'readonly': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'about': {'key': 'about', 'type': '[Thing]'},
        'mentions': {'key': 'mentions', 'type': '[Thing]'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'creator': {'key': 'creator', 'type': 'Thing'},
        'text': {'key': 'text', 'type': 'str'},
        'discussion_url': {'key': 'discussionUrl', 'type': 'str'},
        'comment_count': {'key': 'commentCount', 'type': 'int'},
        'main_entity': {'key': 'mainEntity', 'type': 'Thing'},
        'head_line': {'key': 'headLine', 'type': 'str'},
        'copyright_holder': {'key': 'copyrightHolder', 'type': 'Thing'},
        'copyright_year': {'key': 'copyrightYear', 'type': 'int'},
        'disclaimer': {'key': 'disclaimer', 'type': 'str'},
        'is_accessible_for_free': {'key': 'isAccessibleForFree', 'type': 'bool'},
        'genre': {'key': 'genre', 'type': '[str]'},
        'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
        'result': {'key': 'result', 'type': '[Thing]'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'is_top_action': {'key': 'isTopAction', 'type': 'bool'},
        'service_url': {'key': 'serviceUrl', 'type': 'str'},
    }
    _subtype_map = {
        '_type': {'SearchAction': 'SearchAction'}
    }
    def __init__(self, **kwargs) -> None:
        super(Action, self).__init__(**kwargs)
        self.result = None
        self.display_name = None
        self.is_top_action = None
        self.service_url = None
        self._type = 'Action'
class Answer(Response):
    """Defines an answer.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: SearchResultsAnswer

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    """
    # AutoRest-generated serializer metadata; adds no fields over Response,
    # only the 'Answer' discriminator and its subtype mapping.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
    }
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
    }
    _subtype_map = {
        '_type': {'SearchResultsAnswer': 'SearchResultsAnswer'}
    }
    def __init__(self, **kwargs) -> None:
        super(Answer, self).__init__(**kwargs)
        self._type = 'Answer'
class Error(Model):
    """Defines the error that occurred.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param code: Required. The error code that identifies the category of
     error. Possible values include: 'None', 'ServerError', 'InvalidRequest',
     'RateLimitExceeded', 'InvalidAuthorization', 'InsufficientAuthorization'.
     Default value: "None" .
    :type code: str or
     ~azure.cognitiveservices.search.autosuggest.models.ErrorCode
    :param message: Required. A description of the error.
    :type message: str
    :ivar more_details: A description that provides additional information
     about the error.
    :vartype more_details: str
    :ivar parameter: The parameter in the request that caused the error.
    :vartype parameter: str
    :ivar value: The parameter's value in the request that was not valid.
    :vartype value: str
    :param _type: Required. Constant filled by server.
    :type _type: str
    """

    # 'required' fields must be set before sending; 'readonly' fields are
    # server-populated only.
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
        'more_details': {'readonly': True},
        'parameter': {'readonly': True},
        'value': {'readonly': True},
        '_type': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'more_details': {'key': 'moreDetails', 'type': 'str'},
        'parameter': {'key': 'parameter', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
        '_type': {'key': '_type', 'type': 'str'},
    }

    def __init__(self, *, message: str, code="None", **kwargs) -> None:
        super(Error, self).__init__(**kwargs)
        self.code = code
        self.message = message
        self.more_details = None
        self.parameter = None
        self.value = None
        # NOTE(review): left None although _validation marks '_type' required —
        # presumably filled by the server/serializer ("Constant filled by
        # server"); confirm against the Model base class.
        self._type = None
class ErrorResponse(Response):
    """The top-level response that represents a failed request.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :param errors: Required. A list of errors that describe the reasons why
     the request failed.
    :type errors:
     list[~azure.cognitiveservices.search.autosuggest.models.Error]
    """

    # 'required' fields must be set before sending; 'readonly' fields are
    # server-populated only.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'errors': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'errors': {'key': 'errors', 'type': '[Error]'},
    }

    def __init__(self, *, errors, **kwargs) -> None:
        super(ErrorResponse, self).__init__(**kwargs)
        self.errors = errors
        self._type = 'ErrorResponse'
class ErrorResponseException(HttpOperationError):
    """Raised when the server responds with an 'ErrorResponse' payload.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        super(ErrorResponseException, self).__init__(
            deserialize, response, 'ErrorResponse', *args)
class QueryContext(Model):
    """Defines the query context that Bing used for the request.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param original_query: Required. The query string as specified in the
     request.
    :type original_query: str
    :ivar altered_query: The query string used by Bing to perform the query.
     Bing uses the altered query string if the original query string contained
     spelling mistakes. For example, if the query string is "saling downwind",
     the altered query string will be "sailing downwind". This field is
     included only if the original query string contains a spelling mistake.
    :vartype altered_query: str
    :ivar alteration_override_query: The query string to use to force Bing to
     use the original string. For example, if the query string is "saling
     downwind", the override query string will be "+saling downwind". Remember
     to encode the query string which results in "%2Bsaling+downwind". This
     field is included only if the original query string contains a spelling
     mistake.
    :vartype alteration_override_query: str
    :ivar adult_intent: A Boolean value that indicates whether the specified
     query has adult intent. The value is true if the query has adult intent;
     otherwise, false.
    :vartype adult_intent: bool
    :ivar ask_user_for_location: A Boolean value that indicates whether Bing
     requires the user's location to provide accurate results. If you specified
     the user's location by using the X-MSEdge-ClientIP and X-Search-Location
     headers, you can ignore this field. For location aware queries, such as
     "today's weather" or "restaurants near me" that need the user's location
     to provide accurate results, this field is set to true. For location aware
     queries that include the location (for example, "Seattle weather"), this
     field is set to false. This field is also set to false for queries that
     are not location aware, such as "best sellers".
    :vartype ask_user_for_location: bool
    :ivar is_transactional:
    :vartype is_transactional: bool
    :param _type: Required. Constant filled by server.
    :type _type: str
    """

    # 'required' fields must be set before sending; 'readonly' fields are
    # server-populated only.
    _validation = {
        'original_query': {'required': True},
        'altered_query': {'readonly': True},
        'alteration_override_query': {'readonly': True},
        'adult_intent': {'readonly': True},
        'ask_user_for_location': {'readonly': True},
        'is_transactional': {'readonly': True},
        '_type': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'original_query': {'key': 'originalQuery', 'type': 'str'},
        'altered_query': {'key': 'alteredQuery', 'type': 'str'},
        'alteration_override_query': {'key': 'alterationOverrideQuery', 'type': 'str'},
        'adult_intent': {'key': 'adultIntent', 'type': 'bool'},
        'ask_user_for_location': {'key': 'askUserForLocation', 'type': 'bool'},
        'is_transactional': {'key': 'isTransactional', 'type': 'bool'},
        '_type': {'key': '_type', 'type': 'str'},
    }

    def __init__(self, *, original_query: str, **kwargs) -> None:
        super(QueryContext, self).__init__(**kwargs)
        self.original_query = original_query
        self.altered_query = None
        self.alteration_override_query = None
        self.adult_intent = None
        self.ask_user_for_location = None
        self.is_transactional = None
        # NOTE(review): left None although _validation marks '_type' required —
        # presumably filled by the server/serializer; confirm.
        self._type = None
class SearchAction(Action):
    """SearchAction.

    Defines a suggested search action (all fields server-populated).

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :ivar url: The URL to get more information about the thing represented by
     this object.
    :vartype url: str
    :ivar thumbnail_url: The URL to a thumbnail of the item.
    :vartype thumbnail_url: str
    :ivar about: For internal use only.
    :vartype about:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar mentions: For internal use only.
    :vartype mentions:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar provider: The source of the creative work.
    :vartype provider:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar creator:
    :vartype creator: ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar text: Text content of this creative work
    :vartype text: str
    :ivar discussion_url:
    :vartype discussion_url: str
    :ivar comment_count:
    :vartype comment_count: int
    :ivar main_entity:
    :vartype main_entity:
     ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar head_line:
    :vartype head_line: str
    :ivar copyright_holder:
    :vartype copyright_holder:
     ~azure.cognitiveservices.search.autosuggest.models.Thing
    :ivar copyright_year:
    :vartype copyright_year: int
    :ivar disclaimer:
    :vartype disclaimer: str
    :ivar is_accessible_for_free:
    :vartype is_accessible_for_free: bool
    :ivar genre:
    :vartype genre: list[str]
    :ivar is_family_friendly:
    :vartype is_family_friendly: bool
    :ivar result:
    :vartype result:
     list[~azure.cognitiveservices.search.autosuggest.models.Thing]
    :ivar display_name:
    :vartype display_name: str
    :ivar is_top_action:
    :vartype is_top_action: bool
    :ivar service_url:
    :vartype service_url: str
    :ivar display_text:
    :vartype display_text: str
    :ivar query:
    :vartype query: str
    :ivar search_kind: Possible values include: 'WebSearch', 'HistorySearch',
     'DocumentSearch', 'TagSearch', 'LocationSearch', 'CustomSearch'. Default
     value: "WebSearch" .
    :vartype search_kind: str or
     ~azure.cognitiveservices.search.autosuggest.models.SearchKind
    """

    # Everything except the discriminator is server-populated ('readonly').
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'url': {'readonly': True},
        'thumbnail_url': {'readonly': True},
        'about': {'readonly': True},
        'mentions': {'readonly': True},
        'provider': {'readonly': True},
        'creator': {'readonly': True},
        'text': {'readonly': True},
        'discussion_url': {'readonly': True},
        'comment_count': {'readonly': True},
        'main_entity': {'readonly': True},
        'head_line': {'readonly': True},
        'copyright_holder': {'readonly': True},
        'copyright_year': {'readonly': True},
        'disclaimer': {'readonly': True},
        'is_accessible_for_free': {'readonly': True},
        'genre': {'readonly': True},
        'is_family_friendly': {'readonly': True},
        'result': {'readonly': True},
        'display_name': {'readonly': True},
        'is_top_action': {'readonly': True},
        'service_url': {'readonly': True},
        'display_text': {'readonly': True},
        'query': {'readonly': True},
        'search_kind': {'readonly': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
        'thumbnail_url': {'key': 'thumbnailUrl', 'type': 'str'},
        'about': {'key': 'about', 'type': '[Thing]'},
        'mentions': {'key': 'mentions', 'type': '[Thing]'},
        'provider': {'key': 'provider', 'type': '[Thing]'},
        'creator': {'key': 'creator', 'type': 'Thing'},
        'text': {'key': 'text', 'type': 'str'},
        'discussion_url': {'key': 'discussionUrl', 'type': 'str'},
        'comment_count': {'key': 'commentCount', 'type': 'int'},
        'main_entity': {'key': 'mainEntity', 'type': 'Thing'},
        'head_line': {'key': 'headLine', 'type': 'str'},
        'copyright_holder': {'key': 'copyrightHolder', 'type': 'Thing'},
        'copyright_year': {'key': 'copyrightYear', 'type': 'int'},
        'disclaimer': {'key': 'disclaimer', 'type': 'str'},
        'is_accessible_for_free': {'key': 'isAccessibleForFree', 'type': 'bool'},
        'genre': {'key': 'genre', 'type': '[str]'},
        'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
        'result': {'key': 'result', 'type': '[Thing]'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'is_top_action': {'key': 'isTopAction', 'type': 'bool'},
        'service_url': {'key': 'serviceUrl', 'type': 'str'},
        'display_text': {'key': 'displayText', 'type': 'str'},
        'query': {'key': 'query', 'type': 'str'},
        'search_kind': {'key': 'searchKind', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(SearchAction, self).__init__(**kwargs)
        self.display_text = None
        self.query = None
        self.search_kind = None
        # Polymorphic discriminator value identifying this model type.
        self._type = 'SearchAction'
class SearchResultsAnswer(Answer):
    """Defines a search result answer.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: Suggestions

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :ivar query_context:
    :vartype query_context:
     ~azure.cognitiveservices.search.autosuggest.models.QueryContext
    """

    # 'required' fields must be set before sending; 'readonly' fields are
    # server-populated only.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'query_context': {'readonly': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
    }

    # Polymorphic dispatch: the server's '_type' value selects the subclass.
    _subtype_map = {
        '_type': {'Suggestions': 'Suggestions'}
    }

    def __init__(self, **kwargs) -> None:
        super(SearchResultsAnswer, self).__init__(**kwargs)
        self.query_context = None
        self._type = 'SearchResultsAnswer'
class Suggestions(SearchResultsAnswer):
    """Suggestions.

    The top-level answer carrying suggestion groups.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param _type: Required. Constant filled by server.
    :type _type: str
    :ivar id: A String identifier.
    :vartype id: str
    :ivar read_link: The URL that returns this resource.
    :vartype read_link: str
    :ivar web_search_url: The URL To Bing's search result for this item.
    :vartype web_search_url: str
    :ivar potential_action:
    :vartype potential_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar immediate_action:
    :vartype immediate_action:
     list[~azure.cognitiveservices.search.autosuggest.models.Action]
    :ivar preferred_clickthrough_url:
    :vartype preferred_clickthrough_url: str
    :ivar adaptive_card:
    :vartype adaptive_card: str
    :ivar query_context:
    :vartype query_context:
     ~azure.cognitiveservices.search.autosuggest.models.QueryContext
    :param suggestion_groups: Required.
    :type suggestion_groups:
     list[~azure.cognitiveservices.search.autosuggest.models.SuggestionsSuggestionGroup]
    """

    # 'required' fields must be set before sending; 'readonly' fields are
    # server-populated only.
    _validation = {
        '_type': {'required': True},
        'id': {'readonly': True},
        'read_link': {'readonly': True},
        'web_search_url': {'readonly': True},
        'potential_action': {'readonly': True},
        'immediate_action': {'readonly': True},
        'preferred_clickthrough_url': {'readonly': True},
        'adaptive_card': {'readonly': True},
        'query_context': {'readonly': True},
        'suggestion_groups': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        '_type': {'key': '_type', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'read_link': {'key': 'readLink', 'type': 'str'},
        'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
        'potential_action': {'key': 'potentialAction', 'type': '[Action]'},
        'immediate_action': {'key': 'immediateAction', 'type': '[Action]'},
        'preferred_clickthrough_url': {'key': 'preferredClickthroughUrl', 'type': 'str'},
        'adaptive_card': {'key': 'adaptiveCard', 'type': 'str'},
        'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
        'suggestion_groups': {'key': 'suggestionGroups', 'type': '[SuggestionsSuggestionGroup]'},
    }

    def __init__(self, *, suggestion_groups, **kwargs) -> None:
        super(Suggestions, self).__init__(**kwargs)
        self.suggestion_groups = suggestion_groups
        self._type = 'Suggestions'
class SuggestionsSuggestionGroup(Model):
    """SuggestionsSuggestionGroup.

    A named group of suggested search actions.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Possible values include: 'Unknown', 'Web',
     'StoreApps', 'SearchHistory', 'PersonalSearchDocuments',
     'PersonalSearchTags', 'Custom'. Default value: "Unknown" .
    :type name: str or
     ~azure.cognitiveservices.search.autosuggest.models.ScenarioType
    :param search_suggestions: Required.
    :type search_suggestions:
     list[~azure.cognitiveservices.search.autosuggest.models.SearchAction]
    :param _type: Required. Constant filled by server.
    :type _type: str
    """

    # 'required' fields must be set before sending.
    _validation = {
        'name': {'required': True},
        'search_suggestions': {'required': True},
        '_type': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'search_suggestions': {'key': 'searchSuggestions', 'type': '[SearchAction]'},
        '_type': {'key': '_type', 'type': 'str'},
    }

    def __init__(self, *, search_suggestions, name="Unknown", **kwargs) -> None:
        super(SuggestionsSuggestionGroup, self).__init__(**kwargs)
        self.name = name
        self.search_suggestions = search_suggestions
        # NOTE(review): left None although _validation marks '_type' required —
        # presumably filled by the server/serializer; confirm.
        self._type = None
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Misc tools to find activations and cut on maps
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD
import warnings
# Standard scientific libraries imports (more specific imports are
# delayed, so that the part module can be used without them).
import numpy as np
from scipy import stats, ndimage
# Local imports
from ..mask import largest_cc
from ..datasets.transforms.affine_utils import get_bounds
import scipy.stats
################################################################################
# Functions for automatic choice of cuts coordinates
################################################################################
def coord_transform(x, y, z, affine):
    """ Convert the x, y, z coordinates from one image space to another
        space via an affine transform.

        Parameters
        ----------
        x : number or ndarray
            The x coordinates in the input space
        y : number or ndarray
            The y coordinates in the input space
        z : number or ndarray
            The z coordinates in the input space
        affine : 2D 4x4 ndarray
            affine that maps from input to output space.

        Returns
        -------
        x : number or ndarray
            The x coordinates in the output space
        y : number or ndarray
            The y coordinates in the output space
        z : number or ndarray
            The z coordinates in the output space

        Warning: The x, y and z have their Talairach ordering, not 3D
        numpy image ordering.
    """
    # Build homogeneous coordinates, one column per input point.
    xs = np.atleast_1d(x).ravel()
    ys = np.atleast_1d(y).ravel()
    zs = np.atleast_1d(z).ravel()
    homogeneous = np.vstack((xs, ys, zs, np.ones_like(zs)))
    new_x, new_y, new_z, _ = np.dot(affine, homogeneous)
    # squeeze() collapses single-point inputs back to scalars (0-d arrays)
    return new_x.squeeze(), new_y.squeeze(), new_z.squeeze()
def find_cut_coords(map, mask=None, activation_threshold=None):
    """ Find the center of the largest activation connect component.

        Parameters
        -----------
        map : 3D ndarray
            The activation map, as a 3D image.
        mask : 3D ndarray, boolean, optional
            An optional brain mask.
        activation_threshold : float, optional
            The lower threshold to the positive activation. If None, the
            activation threshold is computed using find_activation.

        Returns
        -------
        x: float
            the x coordinate in voxels.
        y: float
            the y coordinate in voxels.
        z: float
            the z coordinate in voxels.
    """
    # To speed up computations, we work with partial views of the array,
    # and keep track of the offset
    offset = np.zeros(3)
    # Deal with masked arrays: combine the array's own mask with the
    # caller-supplied one (True in `mask` means "keep this voxel").
    if hasattr(map, 'mask'):
        not_mask = np.logical_not(map.mask)
        if mask is None:
            mask = not_mask
        else:
            mask *= not_mask
    map = np.asarray(map)
    my_map = map.copy()
    if mask is not None:
        # Crop to the mask's bounding box and zero out everything outside it.
        slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
        my_map = my_map[slice_x, slice_y, slice_z]
        mask = mask[slice_x, slice_y, slice_z]
        my_map *= mask
        offset += [slice_x.start, slice_y.start, slice_z.start]
    # Testing min and max is faster than np.all(my_map == 0)
    if (my_map.max() == 0) and (my_map.min() == 0):
        # All-zero map: fall back to the geometric center of the volume.
        return .5*np.array(map.shape)
    if activation_threshold is None:
        # Default threshold: 80th percentile of the non-zero magnitudes.
        activation_threshold = stats.scoreatpercentile(
                                    np.abs(my_map[my_map !=0]).ravel(), 80)
    # Small epsilon so voxels exactly at the threshold survive.
    mask = np.abs(my_map) > activation_threshold-1.e-15
    # Keep only the largest connected component above threshold.
    mask = largest_cc(mask)
    slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
    my_map = my_map[slice_x, slice_y, slice_z]
    mask = mask[slice_x, slice_y, slice_z]
    my_map *= mask
    offset += [slice_x.start, slice_y.start, slice_z.start]
    # For the second threshold, we use a mean, as it is much faster,
    # althought it is less robust
    second_threshold = np.abs(np.mean(my_map[mask]))
    second_mask = (np.abs(my_map)>second_threshold)
    # Only refine when the stricter mask is still reasonably large.
    if second_mask.sum() > 50:
        my_map *= largest_cc(second_mask)
    cut_coords = ndimage.center_of_mass(np.abs(my_map))
    # Translate back from the cropped view to full-volume voxel coordinates.
    return cut_coords + offset
################################################################################
def get_mask_bounds(mask, affine):
    """ Return the world-space bounds occupied by a mask given an affine.

        Notes
        -----

        The mask should have only one connect component.

        The affine should be diagonal or diagonal-permuted.
    """
    (xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(mask.shape, affine)
    slices = ndimage.find_objects(mask)
    if not slices:
        # No non-zero voxels: keep the full-volume bounds.
        warnings.warn("empty mask", stacklevel=2)
        return xmin, xmax, ymin, ymax, zmin, zmax
    x_slice, y_slice, z_slice = slices[0]
    nx, ny, nz = mask.shape
    # Rescale each axis' bounds to the extent of the mask's bounding box.
    x_scale = (xmax - xmin) / nx
    y_scale = (ymax - ymin) / ny
    z_scale = (zmax - zmin) / nz
    xmin, xmax = (xmin + x_slice.start * x_scale,
                  xmin + x_slice.stop * x_scale)
    ymin, ymax = (ymin + y_slice.start * y_scale,
                  ymin + y_slice.stop * y_scale)
    zmin, zmax = (zmin + z_slice.start * z_scale,
                  zmin + z_slice.stop * z_scale)
    return xmin, xmax, ymin, ymax, zmin, zmax
def _maximally_separated_subset(x, k):
"""
Given a set of n points x = {x_1, x_2, ..., x_n} and a positive integer
k < n, this function returns a subset of k points which are maximally
spaced.
Returns
-------
msssk: 1D array of k floats
computed maximally-separated subset of k elements from x
"""
# base cases
if k < 1: raise ValueError("k = %i < 1 is senseless." % k)
if k == 1: return [x[len(x) // 2]]
# would-be maximally separated subset of k (not showing the terminal nodes)
msss = range(1, len(x) - 1)
# sorting is necessary for the heuristic to work
x = np.sort(x)
# iteratively delete points x_j of msss, for which x_(j + 1) - x_(j - 1) is
# smallest, untill only k - 2 points survive
while len(msss) + 2 > k:
# survivors
y = np.array([x[0]] + list(x[msss]) + [x[-1]])
# remove most troublesome point
msss = np.delete(msss, np.argmin(y[2:] - y[:-2]))
# return maximally separated subset of k elements
return x[[0] + list(msss) + [len(x) - 1]]
def find_maxsep_cut_coords(map3d, affine, slicer='z', n_cuts=None,
                           threshold=None):
    """
    Heuristic function to find n_cuts along a given axis, which
    are maximally separated in space.

    Parameters
    ----------
    map3d: 3D array
        the data under consideration
    affine: 2D 4x4 ndarray
        affine mapping voxel indices into native coordinate space
    slicer: string, optional (default "z")
        sectional slicer; possible values are "x", "y", or "z"
    n_cuts: int > 1, optional (default None)
        number of cuts in the plot; if no value is specified, then a default
        value of 5 is forced
    threshold: float, optional (default None)
        thresholding to be applied to the map; if None, no thresholding
        is applied

    Returns
    -------
    n_cuts: 1D array of length n_cuts
        the computed cut coordinates, in native space

    Raises
    ------
    ValueError
    """
    if n_cuts is None:
        n_cuts = 5
    if n_cuts < 1:
        raise ValueError("n_cuts = %i < 1 is senseless." % n_cuts)

    # sanitize slicer
    if not slicer in ['x', 'y', 'z']:
        raise ValueError(
            "slicer must be one of 'x', 'y', and 'z', got '%s'." % slicer)
    slicer = "xyz".index(slicer)

    # load data, moving the sliced axis last
    if map3d.ndim != 3:
        raise TypeError(
            "map3d must be 3D array, got shape %iD" % map3d.ndim)
    _map3d = np.rollaxis(map3d.copy(), slicer, start=3)
    _map3d = np.abs(_map3d)
    # BUG FIX: comparing against threshold=None raised a TypeError on
    # Python 3; only apply the threshold when one was actually given
    if threshold is not None:
        _map3d[_map3d < threshold] = 0

    # count activated voxels per plane (BUG FIX: xrange -> range for Python 3)
    n_activated_voxels_per_plane = np.array([(_map3d[..., z] > 0).sum()
                                             for z in range(_map3d.shape[-1])])
    # keep the planes with the most activated voxels, up to 4 * n_cuts
    # candidates so the separation heuristic has something to choose from
    # (two dead statements that recomputed and then discarded intermediate
    # results were removed here)
    perm = np.argsort(n_activated_voxels_per_plane)
    good_planes = perm[::-1][:n_cuts * 4 if n_cuts > 1 else 1]

    # cast into coord space
    good_planes = np.array([
            # map cut coord into native space
            np.dot(affine,
                   np.array([0, 0, 0, 1]  # origin
                            ) + coord * np.eye(4)[slicer])[slicer]
            for coord in good_planes])

    # compute cut_coords maximally-separated planes
    return _maximally_separated_subset(good_planes, n_cuts)
| |
"""
example human gene
https://www.wikidata.org/wiki/Q14911732
https://mygene.info/v3/gene/1017
https://www.ncbi.nlm.nih.gov/gene/1017
http://uswest.ensembl.org/Homo_sapiens/Gene/Summary?g=ENSG00000123374;r=12:55966769-55972784
example mouse gene
https://www.wikidata.org/wiki/Q21129787
example yeast gene:
https://www.wikidata.org/wiki/Q27539933
https://mygene.info/v3/gene/856615
example microbial gene:
https://www.wikidata.org/wiki/Q23097138
https://mygene.info/v3/gene/7150837
sparql query for listing current subclasses: http://tinyurl.com/y8ecgka7
"""
# TODO: Gene on two chromosomes
# https://www.wikidata.org/wiki/Q20787772
import argparse
import json
import os
import sys
import time
import traceback
from datetime import datetime
from functools import partial
from itertools import chain
from tqdm import tqdm
from scheduled_bots import get_default_core_props, PROPS
from scheduled_bots.geneprotein.Downloader import MyGeneDownloader
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers
from wikidataintegrator.ref_handlers import update_retrieved_if_new
from wikidataintegrator.wdi_fastrun import FastRunContainer
core_props = get_default_core_props()
# Property labels that the fastrun container is allowed to compare/write.
FASTRUN_PROPS = {'Entrez Gene ID', 'strand orientation', 'Saccharomyces Genome Database ID', 'RefSeq RNA ID',
                 'ZFIN Gene ID', 'Ensembl Transcript ID', 'HGNC ID', 'encodes', 'genomic assembly', 'found in taxon',
                 'HomoloGene ID', 'MGI Gene Symbol', 'cytogenetic location', 'Mouse Genome Informatics ID',
                 'FlyBase Gene ID', 'genomic end', 'NCBI Locus tag', 'Rat Genome Database ID', 'Ensembl Gene ID',
                 'instance of', 'chromosome', 'HGNC Gene Symbol', 'Wormbase Gene ID', 'genomic start'}
# References whose "retrieved" date is older than this many days get updated.
DAYS = 120
update_retrieved_if_new = partial(update_retrieved_if_new, days=DAYS)
from scheduled_bots.geneprotein import HelperBot, organisms_info, type_of_gene_map, descriptions_by_type, not_worth_adding
from scheduled_bots.geneprotein.ChromosomeBot import ChromosomeBot
from scheduled_bots.geneprotein.MicrobialChromosomeBot import MicrobialChromosomeBot
from scheduled_bots.geneprotein.HelperBot import make_ref_source, parse_mygene_src_version, source_items
# Credentials come from scheduled_bots.local when present, else from env vars.
try:
    from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
    if "WDUSER" in os.environ and "WDPASS" in os.environ:
        WDUSER = os.environ['WDUSER']
        WDPASS = os.environ['WDPASS']
    else:
        raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
__metadata__ = {
    'name': 'GeneBot',
    'maintainer': 'GSS',
    'tags': ['gene'],
}
# Maps a record's data source to the property name used for its reference
# identifier, e.g. source "entrez" -> "Entrez Gene ID".  (The previous
# comment here claimed entrez maps to "Ensembl Gene ID", contradicting the
# dict below.)
source_ref_id = {
    'ensembl': "Ensembl Gene ID",
    'entrez': 'Entrez Gene ID'
}
class Gene:
    """
    Generic gene class. Subclasses: Human, Mammal, Microbe

    Builds a Wikidata item for one mygene gene record: parse external IDs,
    derive label/description/aliases, and assemble statements.
    """
def __init__(self, record, organism_info, login):
"""
generate pbb_core item object
:param record: dict from mygene,tagged with @value and @source
:param organism_info: looks like {
"type": "fungal",
"name": "Saccharomyces cerevisiae S288c",
"wdid": "Q27510868",
'taxid': 559292
}
:param login:
"""
self.record = record
self.organism_info = organism_info
self.login = login
self.wd_item_gene = None
self.label = None
self.description = None
self.aliases = None
self.external_ids = dict()
self.type_of_gene = None
self.statements = None
self.entrez = None
self.entrez_ref = None
self.ensembl_ref = None
def create_description(self):
if self.type_of_gene is None:
raise ValueError("must set type_of_gene first")
self.description = descriptions_by_type[self.type_of_gene].format(self.organism_info['name'])
def create_label(self):
self.label = self.record['name']['@value']
def create_aliases(self):
    """Collect aliases from symbol, name, locus tag and the record's alias
    list, dropping the label itself and known-useless values."""
    if self.label is None:
        self.create_label()
    candidates = []
    if 'symbol' in self.record:
        candidates.append(self.record['symbol']['@value'])
    if 'name' in self.record:
        candidates.append(self.record['name']['@value'])
    if 'NCBI Locus tag' in self.external_ids:
        candidates.append(self.external_ids['NCBI Locus tag'])
    if 'alias' in self.record:
        candidates.extend(self.record['alias']['@value'])
    # Exclude the label itself, generic type names, and the blacklist.
    excluded = {self.label} | set(descriptions_by_type.keys()) | set(not_worth_adding)
    self.aliases = list(set(candidates) - excluded)
def set_label_desc_aliases(self, wditem):
wditem.set_label(self.label)
curr_descr = wditem.get_description()
if curr_descr == "" or "of the species" in curr_descr or "gene found in" in curr_descr.lower():
wditem.set_description(self.description)
wditem.set_aliases(self.aliases)
return wditem
def validate_record(self):
# handled by HelperBot
# allow for subclasses to add additional checks
raise NotImplementedError()
def parse_external_ids(self):
############
# required external IDs
############
entrez_gene = str(self.record['entrezgene']['@value'])
self.external_ids['Entrez Gene ID'] = entrez_gene
self.entrez = entrez_gene
taxid = self.record['taxid']['@value']
############
# optional external IDs
############
# taxid, example gene
# mouse: 10090, 102466888
# rat: 10116, 100362233
# sgd: 559292, 853415
# fly: 7227, 31303
# worm: 6239, 174065
# zfin: 7955, 368434
# ncbi locus tag
if 'locus_tag' in self.record:
self.external_ids['NCBI Locus tag'] = self.record['locus_tag']['@value']
if 'MGI' in self.record:
self.external_ids['Mouse Genome Informatics ID'] = self.record['MGI']['@value']
if 'RGD' in self.record:
self.external_ids['Rat Genome Database ID'] = self.record['RGD']['@value']
if 'SGD' in self.record:
self.external_ids['Saccharomyces Genome Database ID'] = self.record['SGD']['@value']
if 'FLYBASE' in self.record:
self.external_ids['FlyBase Gene ID'] = self.record['FLYBASE']['@value']
if 'WormBase' in self.record:
self.external_ids['Wormbase Gene ID'] = self.record['WormBase']['@value']
if 'ZFIN' in self.record:
self.external_ids['ZFIN Gene ID'] = self.record['ZFIN']['@value']
if 'HGNC' in self.record:
self.external_ids['HGNC ID'] = self.record['HGNC']['@value']
if taxid == 9606 and 'symbol' in self.record and 'HGNC' in self.record:
# see: https://github.com/stuppie/scheduled-bots/issues/2
# "and 'HGNC' in record" is required because there is something wrong with mygene
self.external_ids['HGNC Gene Symbol'] = self.record['symbol']['@value']
if taxid == 10090 and 'symbol' in self.record:
self.external_ids['MGI Gene Symbol'] = self.record['symbol']['@value']
if 'homologene' in self.record:
self.external_ids['HomoloGene ID'] = str(self.record['homologene']['@value']['id'])
if 'map_location' in self.record:
self.external_ids['cytogenetic location'] = self.record['map_location']['@value']
############
# optional external IDs (can have more than one)
############
if 'ensembl' in self.record:
ensembl_transcript = set(chain(*[x['transcript'] for x in self.record['ensembl']['@value']]))
self.external_ids['Ensembl Transcript ID'] = ensembl_transcript
ensembl_gene = [x['gene'] for x in self.record['ensembl']['@value']]
self.external_ids['Ensembl Gene ID'] = ensembl_gene
# RefSeq RNA ID
if 'refseq' in self.record and 'rna' in self.record['refseq']['@value']:
self.external_ids['RefSeq RNA ID'] = self.record['refseq']['@value']['rna']
def create_ref_sources(self):
# create an entrez ref and ensembl ref (optional)
self.entrez_ref = make_ref_source(self.record['entrezgene']['@source'], PROPS['Entrez Gene ID'],
self.external_ids['Entrez Gene ID'], login=self.login)
if 'Ensembl Gene ID' in self.external_ids:
if len(self.external_ids['Ensembl Gene ID']) != 1:
raise ValueError("more than one ensembl gene ID: {}".format(self.record['entrezgene']))
ensembl_gene_id = list(self.external_ids['Ensembl Gene ID'])[0]
self.ensembl_ref = make_ref_source(self.record['ensembl']['@source'], PROPS['Ensembl Gene ID'],
ensembl_gene_id, login=self.login)
    def create_statements(self):
        """
        create statements common to all genes

        Builds the external-ID statements (entrez, optional ensembl gene /
        transcript, RefSeq RNA, and the single-valued database IDs), then the
        "instance of" and "found in taxon" statements.  Returns a list of
        wdi_core statement objects; nothing is written here.
        """
        s = []
        # make sure the entrez/ensembl references exist before using them
        if not self.entrez_ref:
            self.create_ref_sources()
        ############
        # ID statements (required)
        ############
        s.append(wdi_core.WDString(self.external_ids['Entrez Gene ID'], PROPS['Entrez Gene ID'],
                                   references=[self.entrez_ref]))
        # optional ID statements
        if self.ensembl_ref:
            for ensembl_gene_id in self.external_ids['Ensembl Gene ID']:
                s.append(wdi_core.WDString(ensembl_gene_id, PROPS['Ensembl Gene ID'], references=[self.ensembl_ref]))
            if 'Ensembl Transcript ID' in self.external_ids:
                for id in self.external_ids['Ensembl Transcript ID']:
                    s.append(wdi_core.WDString(id, PROPS['Ensembl Transcript ID'], references=[self.ensembl_ref]))
        key = 'RefSeq RNA ID'
        if key in self.external_ids:
            for id in self.external_ids[key]:
                s.append(wdi_core.WDString(id, PROPS[key], references=[self.entrez_ref]))
        # single-valued IDs, all referenced to entrez
        for key in ['NCBI Locus tag', 'Saccharomyces Genome Database ID', 'Mouse Genome Informatics ID',
                    'MGI Gene Symbol', 'HomoloGene ID', 'Rat Genome Database ID', 'FlyBase Gene ID',
                    'Wormbase Gene ID', 'ZFIN Gene ID', 'cytogenetic location']:
            if key in self.external_ids:
                s.append(wdi_core.WDString(self.external_ids[key], PROPS[key], references=[self.entrez_ref]))
        ############
        # Gene statements
        ############
        # if there is an ensembl ID, this comes from ensembl, otherwise, entrez
        gene_ref = self.ensembl_ref if self.ensembl_ref is not None else self.entrez_ref
        # instance of gene, ncRNA.. etc
        type_of_gene = self.record['type_of_gene']['@value']
        assert type_of_gene in type_of_gene_map, "unknown type of gene: {}".format(type_of_gene)
        self.type_of_gene = type_of_gene
        # "protein-coding gene" will be instance of "gene"
        s.append(wdi_core.WDItemID(type_of_gene_map[type_of_gene], PROPS['instance of'], references=[gene_ref]))
        if type_of_gene not in {'protein-coding', 'pseudo', 'other', 'unknown'}:
            # make sure we add instance of "gene" (Q7187) as well
            s.append(wdi_core.WDItemID("Q7187", PROPS['instance of'], references=[gene_ref]))
        # found in taxon
        s.append(wdi_core.WDItemID(self.organism_info['wdid'], PROPS['found in taxon'], references=[gene_ref]))
        return s
    def create_item(self, fast_run=True, write=True):
        """Parse the record, build statements/label/description/aliases and
        create or update the wikidata item for this gene.

        :param fast_run: use wdi fast-run mode to skip unchanged items
        :param write: if False, do everything except the actual write
        """
        self.parse_external_ids()
        self.statements = self.create_statements()
        # remove subclass of gene statements
        # s = wdi_core.WDItemID("Q7187", "P279")
        # setattr(s, 'remove', '')
        self.create_label()
        self.create_description()
        self.create_aliases()
        # fast run filter: items with any entrez ID in this taxon
        self.fast_run_base_filter = {PROPS['Entrez Gene ID']: '',
                                     PROPS['found in taxon']: self.organism_info['wdid']}
        self.wd_item_gene = wdi_core.WDItemEngine(data=self.statements,
                                                  append_value=[PROPS['instance of']],
                                                  fast_run=fast_run, fast_run_base_filter=self.fast_run_base_filter,
                                                  fast_run_use_refs=True, ref_handler=update_retrieved_if_new,
                                                  global_ref_mode="CUSTOM",
                                                  core_props=core_props)
        self.wd_item_gene = self.set_label_desc_aliases(self.wd_item_gene)
        # try_write returns True on success or a formatted error message
        self.status = wdi_helpers.try_write(self.wd_item_gene, self.external_ids['Entrez Gene ID'],
                                            PROPS['Entrez Gene ID'],
                                            self.login, write=write)
class ChromosomalGene(Gene):
    """
    A gene with chromosome position statements, for organisms whose
    chromosome items are known: yeast, mouse, rat, worm, fly, zebrafish.
    """

    def __init__(self, record, organism_info, chr_num_wdid, login):
        """
        :param chr_num_wdid: mapping of chr number (str) to wdid
        """
        super().__init__(record, organism_info, login)
        self.chr_num_wdid = chr_num_wdid

    def create_label(self):
        # label is the gene symbol
        self.label = self.record['symbol']['@value']

    def create_statements(self):
        # create generic gene statements
        s = super().create_statements()
        # add on gene position statements
        if 'genomic_pos' in self.record:
            ss = self.create_gp_statements_chr()
            if ss:
                s.extend(ss)
        return s

    def parse_external_ids(self):
        super().parse_external_ids()
        # Fix: 'genomic_pos' is optional (create_statements guards on it), so
        # also guard here — previously a record with an ensembl ID but no
        # genomic position raised a KeyError.
        if 'Ensembl Gene ID' in self.external_ids and 'genomic_pos' in self.record:
            # figure out which ensembl gene ID to use as the reference
            genomic_pos_values = self.record['genomic_pos']['@value']
            genomic_pos_values = [x for x in genomic_pos_values if x['chr'] in self.chr_num_wdid]
            if len(genomic_pos_values) == 1:
                genomic_pos_value = genomic_pos_values[0]
                if 'ensemblgene' in genomic_pos_value:
                    self.external_ids['Reference Ensembl Gene ID'] = genomic_pos_value['ensemblgene']

    def create_ref_sources(self):
        # create an entrez ref and ensembl ref (optional)
        self.entrez_ref = make_ref_source(self.record['entrezgene']['@source'], PROPS['Entrez Gene ID'],
                                          self.external_ids['Entrez Gene ID'], login=self.login)
        if 'Reference Ensembl Gene ID' in self.external_ids:
            self.ensembl_ref = make_ref_source(self.record['ensembl']['@source'], PROPS['Ensembl Gene ID'],
                                               self.external_ids['Reference Ensembl Gene ID'], login=self.login)
        elif 'Ensembl Gene ID' in self.external_ids:
            # unlike Gene.create_ref_sources, ambiguous IDs are skipped, not fatal
            if len(self.external_ids['Ensembl Gene ID']) == 1:
                self.ensembl_ref = make_ref_source(self.record['ensembl']['@source'], PROPS['Ensembl Gene ID'],
                                                   list(self.external_ids['Ensembl Gene ID'])[0], login=self.login)

    def create_item(self, fast_run=True, write=True):
        self.parse_external_ids()
        self.create_ref_sources()
        return super().create_item(fast_run, write)

    def create_gp_statements_chr(self):
        """
        Create genomic_pos start stop orientation on a chromosome
        :return: list of statements, or None if no usable reference exists
        """
        if not self.entrez_ref:
            self.create_ref_sources()
        genomic_pos_values = self.record['genomic_pos']['@value']
        genomic_pos_source = self.record['genomic_pos']['@source']
        # position statements are referenced to whichever db they came from
        if genomic_pos_source['id'] == "entrez":
            genomic_pos_ref = self.entrez_ref
        elif genomic_pos_source['id'] == "ensembl":
            genomic_pos_ref = self.ensembl_ref
        else:
            raise ValueError()
        if not genomic_pos_ref:
            return None
        all_chr = set([self.chr_num_wdid[x['chr']] for x in genomic_pos_values])
        all_strand = set(['Q22809680' if x['strand'] == 1 else 'Q22809711' for x in genomic_pos_values])
        s = []
        for genomic_pos_value in genomic_pos_values:
            # create qualifier for start/stop/orientation
            chrom_wdid = self.chr_num_wdid[genomic_pos_value['chr']]
            qualifiers = [wdi_core.WDItemID(chrom_wdid, PROPS['chromosome'], is_qualifier=True)]
            # genomic start and end
            s.append(wdi_core.WDString(str(int(genomic_pos_value['start'])), PROPS['genomic start'],
                                       references=[genomic_pos_ref], qualifiers=qualifiers))
            s.append(wdi_core.WDString(str(int(genomic_pos_value['end'])), PROPS['genomic end'],
                                       references=[genomic_pos_ref], qualifiers=qualifiers))
        for chr in all_chr:
            s.append(wdi_core.WDItemID(chr, PROPS['chromosome'], references=[genomic_pos_ref]))
        if len(all_strand) == 1:
            # todo: not sure what to do if you have both orientations on the same chr
            strand_orientation = list(all_strand)[0]
            s.append(wdi_core.WDItemID(strand_orientation, PROPS['strand orientation'], references=[genomic_pos_ref]))
        return s
class MicrobeGene(Gene):
    """
    Microbes
    """

    def __init__(self, record, organism_info, refseq_qid_chrom, login):
        super().__init__(record, organism_info, login)
        # mapping of chromosome refseq accession -> wikidata QID
        self.refseq_qid_chrom = refseq_qid_chrom

    def create_label(self):
        # label is "<name> <locus_tag>"
        self.label = self.record['name']['@value'] + " " + self.record['locus_tag']['@value']

    def create_description(self):
        organism_type = self.organism_info['type']
        if organism_type:
            self.description = '{} gene found in {}'.format(organism_type, self.organism_info['name'])
        else:
            self.description = 'Gene found in {}'.format(self.organism_info['name'])

    def validate_record(self):
        # no microbe-specific validation
        pass

    def create_statements(self):
        # generic gene statements plus genomic position statements
        statements = super().create_statements()
        statements.extend(self.create_gp_statements())
        return statements

    def create_gp_statements(self):
        """
        Create genomic_pos start stop orientation plus chromosome qualifiers
        :return:
        """
        position = self.record['genomic_pos']['@value'][0]
        source = self.record['genomic_pos']['@source']
        id_prop = source_ref_id[source['id']]
        assert isinstance(self.external_ids[id_prop], str)
        ref = make_ref_source(source, PROPS[id_prop], self.external_ids[id_prop], login=self.login)
        # qualifier pointing at the chromosome item (keyed by refseq accession)
        chrom_qid = self.refseq_qid_chrom[position['chr']]
        qualifiers = [wdi_core.WDItemID(value=chrom_qid, prop_nr=PROPS['chromosome'], is_qualifier=True)]
        # strand orientation item is chosen by the sign of 'strand'
        orientation = 'Q22809680' if position['strand'] == 1 else 'Q22809711'
        statements = [
            wdi_core.WDItemID(orientation, PROPS['strand orientation'],
                              references=[ref], qualifiers=qualifiers),
            wdi_core.WDString(str(int(position['start'])), PROPS['genomic start'],
                              references=[ref], qualifiers=qualifiers),
            wdi_core.WDString(str(int(position['end'])), PROPS['genomic end'],
                              references=[ref], qualifiers=qualifiers),
        ]
        return statements
class HumanGene(ChromosomalGene):
    """Human gene: adds HGNC statements and hg38/hg19 genomic positions."""

    def create_statements(self):
        # create generic gene statements (skip ChromosomalGene's position logic;
        # humans use do_gp_human instead)
        s = Gene.create_statements(self)
        entrez_ref = make_ref_source(self.record['entrezgene']['@source'], PROPS['Entrez Gene ID'],
                                     self.external_ids['Entrez Gene ID'], login=self.login)
        # add on human specific gene statements
        for key in ['HGNC ID', 'HGNC Gene Symbol']:
            if key in self.external_ids:
                s.append(wdi_core.WDString(self.external_ids[key], PROPS[key], references=[entrez_ref]))
        # add on gene position statements
        if 'genomic_pos' in self.record:
            ss = self.do_gp_human()
            if ss:
                s.extend(ss)
        return s

    def validate_record(self):
        assert 'locus_tag' in self.record
        assert 'HGNC' in self.record
        assert 'symbol' in self.record
        assert 'ensembl' in self.record and 'transcript' in self.record['ensembl']
        # Fix: the 'rna' key lives under 'refseq', not 'ensembl' (matches the
        # RefSeq RNA ID parsing in parse_external_ids)
        assert 'refseq' in self.record and 'rna' in self.record['refseq']
        assert 'alias' in self.record

    def do_gp_human(self):
        """
        create genomic pos, chr, strand statements for human
        includes genomic assembly (hg38 always, hg19 when present)
        genes that are on an unlocalized scaffold will have no genomic position statements
        example: https://mygene.info/v3/gene/102724770
        https://www.wikidata.org/wiki/Q20970159
        :return: list of statements, or None if no usable reference exists
        """
        if not self.entrez_ref:
            self.create_ref_sources()
        genomic_pos_values = self.record['genomic_pos']['@value']
        genomic_pos_source = self.record['genomic_pos']['@source']
        # position statements are referenced to whichever db they came from
        if genomic_pos_source['id'] == "entrez":
            genomic_pos_ref = self.entrez_ref
        elif genomic_pos_source['id'] == "ensembl":
            genomic_pos_ref = self.ensembl_ref
        else:
            raise ValueError()
        if not genomic_pos_ref:
            return None
        # tag every position dict with the assembly it belongs to
        assembly_hg38 = wdi_core.WDItemID("Q20966585", PROPS['genomic assembly'], is_qualifier=True)
        for x in genomic_pos_values:
            x['assembly'] = 'hg38'
        do_hg19 = False
        if 'genomic_pos_hg19' in self.record:
            do_hg19 = True
            genomic_pos_value_hg19 = self.record['genomic_pos_hg19']['@value']
            assembly_hg19 = wdi_core.WDItemID("Q21067546", PROPS['genomic assembly'], is_qualifier=True)
            # combine all together
            for x in genomic_pos_value_hg19:
                x['assembly'] = 'hg19'
            genomic_pos_values.extend(genomic_pos_value_hg19)
        # remove those where we don't know the chromosome
        genomic_pos_values = [x for x in genomic_pos_values if x['chr'] in self.chr_num_wdid]
        all_chr = set([self.chr_num_wdid[x['chr']] for x in genomic_pos_values])
        all_strand = set(['Q22809680' if x['strand'] == 1 else 'Q22809711' for x in genomic_pos_values])
        s = []
        for genomic_pos_value in genomic_pos_values:
            # create qualifiers (chromosome and assembly)
            chrom_wdid = self.chr_num_wdid[genomic_pos_value['chr']]
            qualifiers = [wdi_core.WDItemID(chrom_wdid, PROPS['chromosome'], is_qualifier=True)]
            if genomic_pos_value['assembly'] == 'hg38':
                qualifiers.append(assembly_hg38)
            elif genomic_pos_value['assembly'] == 'hg19':
                qualifiers.append(assembly_hg19)
            # genomic start and end
            s.append(wdi_core.WDString(str(int(genomic_pos_value['start'])), PROPS['genomic start'],
                                       references=[genomic_pos_ref], qualifiers=qualifiers))
            s.append(wdi_core.WDString(str(int(genomic_pos_value['end'])), PROPS['genomic end'],
                                       references=[genomic_pos_ref], qualifiers=qualifiers))
        # which assembly qualifiers go on the summary statements below
        assembly_qualifiers = [assembly_hg38, assembly_hg19] if do_hg19 else [assembly_hg38]
        # strand orientation: if the same for all positions, only put one statement
        if len(all_strand) == 1:
            strand_orientation = list(all_strand)[0]
            s.append(wdi_core.WDItemID(strand_orientation, PROPS['strand orientation'],
                                       references=[genomic_pos_ref], qualifiers=assembly_qualifiers))
        # chromosome: if the same for all positions, only put one statement
        if len(all_chr) == 1:
            chrom_wdid = list(all_chr)[0]
            s.append(wdi_core.WDItemID(chrom_wdid, PROPS['chromosome'],
                                       references=[genomic_pos_ref], qualifiers=assembly_qualifiers))
        return s
class GeneBot:
    """
    Generic genebot class.

    Subclasses set GENE_CLASS and override run(); this base class's run()
    deliberately raises.
    """
    GENE_CLASS = Gene
    item = None

    def __init__(self, organism_info, login):
        self.login = login
        self.organism_info = organism_info
        # Fix: 'failed' was a shared mutable class attribute, so failed entrez
        # ids leaked between bot instances (and between subclasses). Track
        # them per instance instead.
        self.failed = []  # list of entrez ids for those that failed

    def run(self, records, total=None, fast_run=True, write=True):
        # this shouldn't ever actually get used now; subclasses implement run
        raise ValueError()

    def filter(self, records):
        """
        This is used to selectively skip certain records based on conditions within the record or to specifically
        alter certain fields before sending to the Bot
        """
        # If we are processing zebrafish records, skip the record if it doesn't have a zfin ID
        for record in records:
            if record['taxid']['@value'] == 7955 and 'ZFIN' not in record:
                continue
            else:
                yield record

    def cleanup(self, releases, last_updated):
        """
        Remove deprecated statements from all gene items of this organism.

        :param releases: release QIDs; statements referenced to these are removed
        :param last_updated: {source QID: datetime}; statements retrieved more
            than DAYS before that date are removed
        :return:
        """
        print(self.failed)
        entrez_qid = wdi_helpers.id_mapper('P351', ((PROPS['found in taxon'], self.organism_info['wdid']),))
        print(len(entrez_qid))
        # skip anything that failed during this run
        entrez_qid = {entrez: qid for entrez, qid in entrez_qid.items() if entrez not in self.failed}
        print(len(entrez_qid))
        filter = {PROPS['Entrez Gene ID']: '', PROPS['found in taxon']: self.organism_info['wdid']}
        frc = FastRunContainer(wdi_core.WDBaseDataType, wdi_core.WDItemEngine, base_filter=filter, use_refs=True)
        frc.clear()
        props = [PROPS[x] for x in FASTRUN_PROPS]
        for qid in tqdm(entrez_qid.values()):
            remove_deprecated_statements(qid, frc, releases, last_updated, props, self.login)
class ChromosomalGeneBot(GeneBot):
    """GeneBot for organisms whose genes carry chromosome positions."""
    GENE_CLASS = ChromosomalGene

    def __init__(self, organism_info, chr_num_wdid, login):
        super().__init__(organism_info, login)
        self.chr_num_wdid = chr_num_wdid

    def run(self, records, total=None, fast_run=True, write=True):
        for record in tqdm(self.filter(records), mininterval=2, total=total):
            # print(record['entrezgene'])
            gene = self.GENE_CLASS(record, self.organism_info, self.chr_num_wdid, self.login)
            try:
                gene.create_item(fast_run=fast_run, write=write)
            except Exception as e:
                # log the failure and keep going with the next record
                traceback.print_exception(*sys.exc_info())
                msg = wdi_helpers.format_msg(gene.external_ids['Entrez Gene ID'], PROPS['Entrez Gene ID'], None,
                                             str(e), msg_type=type(e))
                wdi_core.WDItemEngine.log("ERROR", msg)
                gene.status = msg
            if gene.status is not True:
                self.failed.append(gene.entrez)
class HumanGeneBot(ChromosomalGeneBot):
    """ChromosomalGeneBot that creates HumanGene items."""
    GENE_CLASS = HumanGene
class MicrobeGeneBot(ChromosomalGeneBot):
    """ChromosomalGeneBot for microbes.

    The chr mapping passed to __init__ is a refseq accession -> QID dict,
    which run() forwards positionally into MicrobeGene's refseq_qid_chrom.
    """
    GENE_CLASS = MicrobeGene
def remove_deprecated_statements(qid, frc, releases, last_updated, props, login):
    """
    :param qid: qid of item
    :param frc: a fastrun container
    :param releases: list of releases to remove (a statement that has a reference that is stated in one of these
    releases will be removed)
    :param last_updated: looks like {'Q20641742': datetime.date(2017,5,6)}. a statement that has a reference that is
    stated in Q20641742 (entrez) and was retrieved more than DAYS before 2017-5-6 will be removed
    :param props: look at these props
    :param login:
    :return:
    """
    for prop in props:
        # prime the fastrun container so it loads statements for these props
        frc.write_required([wdi_core.WDString("fake value", prop)])
    orig_statements = frc.reconstruct_statements(qid)
    # strip the leading 'Q' so release QIDs compare against numeric get_value()
    releases = set(int(r[1:]) for r in releases)
    s_dep = []
    for s in orig_statements:
        # P248 "stated in": remove statements referenced to an old release
        if any(any(x.get_prop_nr() == 'P248' and x.get_value() in releases for x in r) for r in s.get_references()):
            setattr(s, 'remove', '')
            s_dep.append(s)
        else:
            for r in s.get_references():
                # does this reference point at a db we have an update date for?
                dbs = [x.get_value() for x in r if x.get_value() in last_updated]
                if dbs:
                    db = dbs[0]
                    # P813 "retrieved": remove if retrieved too long before the
                    # db's last update (DAYS is a module-level threshold)
                    if any(x.get_prop_nr() == 'P813' and last_updated[db] - x.get_value() > DAYS for x in r):
                        setattr(s, 'remove', '')
                        s_dep.append(s)
    if s_dep:
        print("-----")
        print(qid)
        print(len(s_dep))
        print([(x.get_prop_nr(), x.value) for x in s_dep])
        print([(x.get_references()[0]) for x in s_dep])
        wd_item = wdi_core.WDItemEngine(wd_item_id=qid, data=s_dep, fast_run=False)
        wdi_helpers.try_write(wd_item, '', '', login, edit_summary="remove deprecated statements")
def main(taxid, metadata, log_dir="./logs", run_id=None, fast_run=True, write=True, entrez=None):
    """
    Main function for creating/updating genes
    :param taxid: taxon to use (ncbi tax id)
    :type taxid: str
    :param metadata: looks like: {"ensembl" : 84, "cpdb" : 31, "netaffy" : "na35", "ucsc" : "20160620", .. }
    :type metadata: dict
    :param log_dir: dir to store logs
    :type log_dir: str
    :param run_id: id used in the log file name; defaults to a timestamp
    :type run_id: str
    :param fast_run: use fast run mode
    :type fast_run: bool
    :param write: actually perform write
    :type write: bool
    :param entrez: Only run this one gene
    :type entrez: int
    :return: None
    """
    # make sure the organism is found in wikidata (look up by ncbi tax id, P685)
    taxid = int(taxid)
    organism_wdid = wdi_helpers.prop2qid("P685", str(taxid))
    if not organism_wdid:
        print("organism {} not found in wikidata".format(taxid))
        return None
    # login
    login = wdi_login.WDLogin(user=WDUSER, pwd=WDPASS)
    # reset any log handlers from a previous run
    # NOTE(review): 'handles' looks like a typo of 'handlers' — confirm before removing
    if wdi_core.WDItemEngine.logger is not None:
        wdi_core.WDItemEngine.logger.handles = []
        wdi_core.WDItemEngine.logger.handlers = []
    run_id = run_id if run_id is not None else datetime.now().strftime('%Y%m%d_%H:%M')
    log_name = '{}-{}.log'.format(__metadata__['name'], run_id)
    __metadata__['taxid'] = taxid
    wdi_core.WDItemEngine.setup_logging(log_dir=log_dir, logger_name='WD_logger', log_name=log_name,
                                        header=json.dumps(__metadata__))
    # get organism metadata (name, organism type, wdid)
    # TODO: this can be pulled from wd
    if taxid in organisms_info and organisms_info[taxid]['type'] != "microbial":
        validate_type = 'eukaryotic'
        organism_info = organisms_info[taxid]
        # make sure all chromosome items are found in wikidata
        cb = ChromosomeBot()
        chr_num_wdid = cb.get_or_create(organism_info, login=login)
        chr_num_wdid = {k.upper(): v for k, v in chr_num_wdid.items()}
        if int(organism_info['taxid']) == 9606:
            bot = HumanGeneBot(organism_info, chr_num_wdid, login)
        else:
            bot = ChromosomalGeneBot(organism_info, chr_num_wdid, login)
    else:
        # check if its one of the reference microbial genomes
        # raises valueerror if not...
        # NOTE(review): 'mcb' is created at module level in the __main__ block
        organism_info = mcb.get_organism_info(taxid)
        refseq_qid_chrom = mcb.get_or_create_chromosomes(taxid, login)
        print(organism_info)
        bot = MicrobeGeneBot(organism_info, refseq_qid_chrom, login)
        validate_type = "microbial"
    # Get handle to mygene records
    mgd = MyGeneDownloader()
    if entrez:
        doc, total = mgd.get_mg_gene(entrez)
        docs = iter([doc])
    else:
        # skip biological regions and docs without an entrez id
        doc_filter = lambda x: (x.get("type_of_gene") != "biological-region") and ("entrezgene" in x)
        docs, total = mgd.get_mg_cursor(taxid, doc_filter)
    print("total number of records: {}".format(total))
    # the scroll_id/cursor times out from mygene if we iterate. So.... get the whole thing now
    docs = list(docs)
    docs = HelperBot.validate_docs(docs, validate_type, PROPS['Entrez Gene ID'])
    records = HelperBot.tag_mygene_docs(docs, metadata)
    bot.run(records, total=total, fast_run=fast_run, write=write)
    # clear fastrun stores so the cleanup below sees fresh data
    for frc in wdi_core.WDItemEngine.fast_run_store:
        frc.clear()
    print("done updating, waiting 10 min")
    time.sleep(10 * 60)
    # work out which source releases are stale and when each source was last updated
    releases = dict()
    releases_to_remove = set()
    last_updated = dict()
    metadata = {k: v for k, v in metadata.items() if k in {'uniprot', 'ensembl', 'entrez'}}
    for k, v in parse_mygene_src_version(metadata).items():
        if "release" in v:
            if k not in releases:
                releases[k] = wdi_helpers.id_mapper('P393', (('P629', source_items[k]),))
            to_remove = set(releases[k].values())
            to_remove.discard(releases[k][v['release']])
            releases_to_remove.update(to_remove)
            print(
                "{}: Removing releases: {}, keeping release: {}".format(k, ", ".join(set(releases[k]) - {v['release']}),
                                                                        v['release']))
        else:
            last_updated[source_items[k]] = datetime.strptime(v["timestamp"], "%Y%m%d")
    print(last_updated)
    bot.cleanup(releases_to_remove, last_updated)
if __name__ == "__main__":
    """
    Data to be used is retrieved from mygene.info
    """
    parser = argparse.ArgumentParser(description='run wikidata gene bot')
    parser.add_argument('--log-dir', help='directory to store logs', type=str)
    parser.add_argument('--dummy', help='do not actually do write', action='store_true')
    parser.add_argument('--taxon',
                        help="only run using this taxon (ncbi tax id). or 'microbe' for all microbes. comma separated",
                        type=str, required=True)
    parser.add_argument('--fastrun', dest='fastrun', action='store_true')
    parser.add_argument('--no-fastrun', dest='fastrun', action='store_false')
    parser.add_argument('--entrez', help="Run only this one gene")
    parser.set_defaults(fastrun=True)
    args = parser.parse_args()
    log_dir = args.log_dir if args.log_dir else "./logs"
    run_id = datetime.now().strftime('%Y%m%d_%H:%M')
    __metadata__['run_id'] = run_id
    taxon = args.taxon
    fast_run = args.fastrun
    # module-level: main() also reads this name
    mcb = MicrobialChromosomeBot()
    # get metadata about sources
    mgd = MyGeneDownloader()
    metadata = dict()
    src = mgd.get_metadata()['src']
    for source in src.keys():
        metadata[source] = src[source]["version"]
    if args.entrez:
        # single-gene mode: run one gene then exit
        main(taxon, metadata, run_id=run_id, log_dir=log_dir, fast_run=fast_run,
             write=not args.dummy, entrez=args.entrez)
        sys.exit(0)
    if "microbe" in taxon:
        # expand the 'microbe' keyword into all reference microbial taxids
        microbe_taxa = mcb.get_all_taxids()
        taxon = taxon.replace("microbe", ','.join(map(str, microbe_taxa)))
    for taxon1 in taxon.split(","):
        try:
            main(taxon1, metadata, run_id=run_id, log_dir=log_dir, fast_run=fast_run, write=not args.dummy)
        except Exception as e:
            # if one taxon fails, still try to run the others
            traceback.print_exc()
        # done with this run, clear fast run container to save on RAM
        wdi_core.WDItemEngine.fast_run_store = []
        wdi_core.WDItemEngine.fast_run_container = None
| |
# Copyright 2015 Brocade Communications System, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import codecs
import json
import mock
import os
import unittest
import yaml
from tacker import context
from tacker.extensions import vnfm
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.vnfm.infra_drivers.openstack import openstack
class FakeHeatClient(mock.Mock):
    """Minimal stand-in for the heat client used by these tests."""

    class Stack(mock.Mock):
        # a stack that has finished creating, exposing one mgmt-ip output
        stack_status = 'CREATE_COMPLETE'
        outputs = [{u'output_value': u'192.168.120.31',
                    u'description': u'management ip address',
                    u'output_key': u'mgmt_ip-vdu1'}]

    def create(self, *args, **kwargs):
        # heat's stack-create response: just the new stack's id
        return {'stack': {'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}}

    def get(self, id):
        # every lookup yields a fresh completed stack
        return self.Stack()
def _get_template(name):
    """Read a test fixture from the sibling ``data`` directory.

    :param name: file name under the data/ directory
    :return: file contents decoded as utf-8 (strict)
    """
    filename = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "data/", name)
    # Fix: close the file handle instead of leaking it
    with codecs.open(filename, encoding='utf-8', errors='strict') as f:
        return f.read()
class TestOpenStack(base.TestCase):
    """Unit tests for the OpenStack (heat) VNF infra driver."""

    # template fixtures, loaded once at class-definition time
    hot_template = _get_template('hot_openwrt.yaml')
    hot_param_template = _get_template('hot_openwrt_params.yaml')
    hot_ipparam_template = _get_template('hot_openwrt_ipparams.yaml')
    tosca_vnfd_openwrt = _get_template('test_tosca_openwrt.yaml')
    config_data = _get_template('config_data.yaml')
    def setUp(self):
        """Create the driver under test with a mocked heat client."""
        super(TestOpenStack, self).setUp()
        self.context = context.get_admin_context()
        self.infra_driver = openstack.OpenStack()
        self._mock_heat_client()
        # stop all mock.patch patchers started via _mock()
        self.addCleanup(mock.patch.stopall)
def _mock_heat_client(self):
self.heat_client = mock.Mock(wraps=FakeHeatClient())
fake_heat_client = mock.Mock()
fake_heat_client.return_value = self.heat_client
self._mock(
'tacker.vnfm.infra_drivers.openstack.heat_client.HeatClient',
fake_heat_client)
def _mock(self, target, new=mock.DEFAULT):
patcher = mock.patch(target, new)
return patcher.start()
def _get_vnfd(self, template):
return {'vnfd': {'attributes': {'vnfd': template}}}
    def _get_expected_vnfd(self, template):
        """Expected vnfd dict for the openwrt fixture with *template* attached."""
        return {'attributes': {'vnfd': template},
                'description': 'OpenWRT with services',
                'mgmt_driver': 'openwrt', 'name': 'OpenWRT',
                'service_types': [{'service_type': 'vnfd',
                                   'id': '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
                'tenant_id': 'ad7ebc56538745a08ef7c5e97f8bd437',
                'id': 'fb048660-dc1b-4f0f-bd89-b023666650ec'}
def _get_expected_fields(self):
return {'stack_name':
'tacker.vnfm.infra_drivers.openstack.openstack_OpenStack'
'-eb84260e-5ff7-4332-b032-50a14d6c1123', 'template':
self.hot_template}
def _get_expected_fields_user_data(self):
return {'stack_name':
'tacker.vnfm.infra_drivers.openstack.openstack_OpenStack'
'-18685f68-2b2a-4185-8566-74f54e548811',
'template': self.hot_param_template}
def _get_expected_fields_ipaddr_data(self):
return {'stack_name':
'tacker.vnfm.infra_drivers.openstack.openstack_OpenStack'
'-d1337add-d5a1-4fd4-9447-bb9243c8460b',
'template': self.hot_ipparam_template}
    def _get_expected_vnf_wait_obj(self, param_values=''):
        """Expected vnf dict after create_wait: mgmt_url filled in, still PENDING_CREATE."""
        return {'status': 'PENDING_CREATE',
                'instance_id': None,
                'name': u'test_openwrt',
                'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
                'vnfd': {
                    'service_types': [{
                        'service_type': u'vnfd',
                        'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
                    'description': u'OpenWRT with services',
                    'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                    'mgmt_driver': u'openwrt',
                    'attributes': {u'vnfd': self.tosca_vnfd_openwrt},
                    'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec',
                    'name': u'OpenWRT'},
                'mgmt_url': '{"vdu1": "192.168.120.31"}',
                'service_context': [],
                'attributes': {u'param_values': param_values},
                'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
                'description': u'OpenWRT with services'}
    def _get_expected_vnf_update_obj(self):
        """Expected vnf dict after update(): config attribute replaced."""
        return {'status': 'PENDING_CREATE',
                'instance_id': None,
                'name': u'test_openwrt',
                'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
                'vnfd': {
                    'service_types': [{'service_type': u'vnfd',
                                       'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
                    'description': u'OpenWRT with services',
                    'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                    'mgmt_driver': u'openwrt',
                    'attributes': {u'vnfd': self.tosca_vnfd_openwrt},
                    'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec',
                    'name': u'openwrt_services'},
                'mgmt_url': None,
                'service_context': [],
                'attributes': {'config': utils.update_config_data},
                'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
                'description': u'OpenWRT with services'}
    def _get_expected_active_vnf(self):
        """Expected vnf dict once the stack reaches ACTIVE."""
        return {'status': 'ACTIVE',
                'instance_id': None,
                'name': u'test_openwrt',
                'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
                'vnfd': {
                    'service_types': [{
                        'service_type': u'vnfd',
                        'id': u'4a4c2d44-8a52-4895-9a75-9d1c76c3e738'}],
                    'description': u'OpenWRT with services',
                    'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437',
                    'mgmt_driver': u'openwrt',
                    'infra_driver': u'heat',
                    'attributes': {u'vnfd': self.tosca_vnfd_openwrt},
                    'id': u'fb048660-dc1b-4f0f-bd89-b023666650ec',
                    'name': u'openwrt_services'},
                'mgmt_url': '{"vdu1": "192.168.120.31"}',
                'service_context': [],
                'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
                'description': u'OpenWRT with services'}
def test_delete(self):
vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
self.infra_driver.delete(plugin=None, context=self.context,
vnf_id=vnf_id,
auth_attr=utils.get_vim_auth_obj())
self.heat_client.delete.assert_called_once_with(vnf_id)
def test_update(self):
vnf_obj = utils.get_dummy_vnf_config_attr()
vnf_config_obj = utils.get_dummy_vnf_update_config()
expected_vnf_update = self._get_expected_vnf_update_obj()
vnf_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
self.infra_driver.update(plugin=None, context=self.context,
vnf_id=vnf_id, vnf_dict=vnf_obj,
vnf=vnf_config_obj,
auth_attr=utils.get_vim_auth_obj())
expected_vnf_update['attributes']['config'] = yaml.safe_load(
expected_vnf_update['attributes']['config'])
vnf_obj['attributes']['config'] = yaml.safe_load(vnf_obj['attributes'][
'config'])
self.assertEqual(expected_vnf_update, vnf_obj)
def _get_expected_fields_tosca(self, template):
return {'stack_name':
'tacker.vnfm.infra_drivers.openstack.openstack_OpenStack'
'-eb84260e'
'-5ff7-4332-b032-50a14d6c1123',
'template': _get_template(template)}
    def _get_expected_tosca_vnf(self,
                                tosca_tpl_name,
                                hot_tpl_name,
                                param_values='',
                                is_monitor=True,
                                multi_vdus=False):
        """Expected vnf dict after tosca->hot translation.

        :param tosca_tpl_name: tosca template fixture file name
        :param hot_tpl_name: expected translated hot template fixture file name
        :param param_values: expected param_values attribute
        :param is_monitor: fixture defines a monitoring policy
        :param multi_vdus: fixture defines two VDUs (VDU1 and VDU2)
        """
        tosca_tpl = _get_template(tosca_tpl_name)
        exp_tmpl = self._get_expected_vnfd(tosca_tpl)
        tosca_hw_dict = yaml.safe_load(_get_template(hot_tpl_name))
        dvc = {
            'vnfd': exp_tmpl,
            'description': u'OpenWRT with services',
            'attributes': {
                'heat_template': tosca_hw_dict,
                'param_values': param_values
            },
            'id': 'eb84260e-5ff7-4332-b032-50a14d6c1123',
            'instance_id': None,
            'mgmt_url': None,
            'name': u'test_openwrt',
            'service_context': [],
            'status': 'PENDING_CREATE',
            'vnfd_id': u'eb094833-995e-49f0-a047-dfb56aaf7c4e',
            'tenant_id': u'ad7ebc56538745a08ef7c5e97f8bd437'
        }
        # Add monitoring attributes for those yaml, which are having it
        if is_monitor:
            if multi_vdus:
                dvc['attributes'].update(
                    {'monitoring_policy': '{"vdus": {"VDU1": {"ping": '
                                          '{"name": "ping", "actions": '
                                          '{"failure": "respawn"}, '
                                          '"parameters": {"count": 3, '
                                          '"interval": 10}, '
                                          '"monitoring_params": '
                                          '{"count": 3, "interval": 10}}}, '
                                          '"VDU2": {"ping": {"name": "ping", '
                                          '"actions": {"failure": "respawn"}, '
                                          '"parameters": {"count": 3, '
                                          '"interval": 10}, '
                                          '"monitoring_params": {"count": 3, '
                                          '"interval": 10}}}}}'})
            else:
                dvc['attributes'].update(
                    {'monitoring_policy': '{"vdus": {"VDU1": {"ping": '
                                          '{"name": "ping", "actions": '
                                          '{"failure": "respawn"}, '
                                          '"parameters": {"count": 3, '
                                          '"interval": 10}, '
                                          '"monitoring_params": '
                                          '{"count": 3, '
                                          '"interval": 10}}}}}'})
        return dvc
def _get_dummy_tosca_vnf(self, template, input_params=''):
tosca_template = _get_template(template)
vnf = utils.get_dummy_device_obj()
dtemplate = self._get_expected_vnfd(tosca_template)
vnf['vnfd'] = dtemplate
vnf['attributes'] = {}
vnf['attributes']['param_values'] = input_params
return vnf
    def _test_assert_equal_for_tosca_templates(self,
                                               tosca_tpl_name,
                                               hot_tpl_name,
                                               input_params='',
                                               files=None,
                                               is_monitor=True,
                                               multi_vdus=False):
        """Drive infra_driver.create() and verify fields, heat call and VNF.

        :param tosca_tpl_name: TOSCA template fixture fed to create()
        :param hot_tpl_name: HOT fixture the driver is expected to emit
        :param input_params: raw 'param_values' passed through the VNF
        :param files: optional mapping of nested heat file name -> fixture,
            used for scaling templates
        :param is_monitor: expect a monitoring_policy attribute
        :param multi_vdus: expect monitoring for multiple VDUs
        """
        vnf = self._get_dummy_tosca_vnf(tosca_tpl_name, input_params)
        # Stack id returned by the mocked heat client (see test setup).
        expected_result = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738'
        expected_fields = self._get_expected_fields_tosca(hot_tpl_name)
        expected_vnf = self._get_expected_tosca_vnf(tosca_tpl_name,
                                                    hot_tpl_name,
                                                    input_params,
                                                    is_monitor,
                                                    multi_vdus)
        result = self.infra_driver.create(plugin=None, context=self.context,
                                          vnf=vnf,
                                          auth_attr=utils.get_vim_auth_obj())
        actual_fields = self.heat_client.create.call_args[0][0]
        # Compare templates as parsed YAML so formatting differences
        # (ordering, indentation) do not cause false failures.
        actual_fields["template"] = yaml.safe_load(actual_fields["template"])
        expected_fields["template"] = \
            yaml.safe_load(expected_fields["template"])
        if files:
            for k, v in actual_fields["files"].items():
                actual_fields["files"][k] = yaml.safe_load(v)
            expected_fields["files"] = {}
            for k, v in files.items():
                expected_fields["files"][k] = yaml.safe_load(_get_template(v))
        self.assertEqual(expected_fields, actual_fields)
        vnf["attributes"]["heat_template"] = yaml.safe_load(
            vnf["attributes"]["heat_template"])
        # NOTE: actual_fields/expected_fields were mutated above, so this
        # asserts on the parsed (not raw) call argument.
        self.heat_client.create.assert_called_once_with(expected_fields)
        self.assertEqual(expected_result, result)
        if files:
            expected_fields["files"] = {}
            for k, v in files.items():
                expected_vnf["attributes"][k] = yaml.safe_load(
                    _get_template(v))
                vnf["attributes"][k] = yaml.safe_load(
                    vnf["attributes"][k])
            # Scaling templates additionally record the group name mapping.
            expected_vnf["attributes"]['scaling_group_names'] = {
                'SP1': 'G1'}
            vnf["attributes"]['scaling_group_names'] = json.loads(
                vnf["attributes"]['scaling_group_names']
            )
        self.assertEqual(expected_vnf, vnf)
    # Each test below pairs a TOSCA fixture with the HOT template the driver
    # is expected to produce, delegating the heavy lifting to
    # _test_assert_equal_for_tosca_templates().
    def test_create_tosca(self):
        # self.skipTest("Not ready yet")
        self._test_assert_equal_for_tosca_templates('test_tosca_openwrt.yaml',
                                                    'hot_tosca_openwrt.yaml')
    def test_create_tosca_with_userdata(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_openwrt_userdata.yaml',
            'hot_tosca_openwrt_userdata.yaml')
    def test_create_tosca_with_new_flavor(self):
        self._test_assert_equal_for_tosca_templates('test_tosca_flavor.yaml',
                                                    'hot_flavor.yaml')
    def test_create_tosca_with_new_flavor_with_defaults(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_flavor_defaults.yaml',
            'hot_flavor_defaults.yaml')
    def test_create_tosca_with_flavor_and_capabilities(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_flavor_and_capabilities.yaml',
            'hot_flavor_and_capabilities.yaml')
    def test_create_tosca_with_flavor_no_units(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_flavor_no_units.yaml',
            'hot_flavor_no_units.yaml')
    # Flavor extra-spec translation scenarios (NUMA, huge pages, CPU pinning).
    def test_create_tosca_with_flavor_extra_specs_all_numa_count(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_flavor_all_numa_count.yaml',
            'hot_tosca_flavor_all_numa_count.yaml')
    def test_create_tosca_with_flavor_extra_specs_all_numa_nodes(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_flavor_all_numa_nodes.yaml',
            'hot_tosca_flavor_all_numa_nodes.yaml')
    def test_create_tosca_with_flavor_extra_specs_numa_node_count_trumps(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_flavor_numa_nodes_count.yaml',
            'hot_tosca_flavor_numa_nodes_count.yaml')
    def test_create_tosca_with_flavor_extra_specs_huge_pages(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_flavor_huge_pages.yaml',
            'hot_tosca_flavor_huge_pages.yaml')
    def test_create_tosca_with_flavor_extra_specs_cpu_allocations(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_flavor_cpu_allocations.yaml',
            'hot_tosca_flavor_cpu_allocations.yaml')
    def test_create_tosca_with_flavor_extra_specs_numa_nodes(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_flavor_numa_nodes.yaml',
            'hot_tosca_flavor_numa_nodes.yaml')
    def test_create_tosca_with_new_image(self):
        self._test_assert_equal_for_tosca_templates('test_tosca_image.yaml',
                                                    'hot_tosca_image.yaml')
    # Port/vNIC type scenarios.
    def test_create_tosca_sriov(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_sriov.yaml',
            'hot_tosca_sriov.yaml'
        )
    def test_create_tosca_vnic_normal(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_vnic_port.yaml',
            'hot_tosca_vnic_normal.yaml'
        )
    def test_create_tosca_mgmt_sriov_port(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_mgmt_sriov.yaml',
            'hot_tosca_mgmt_sriov.yaml'
        )
    def test_tosca_params(self):
        # Parameterized template: image/flavor supplied as input params.
        input_params = 'image: cirros\nflavor: m1.large'
        self._test_assert_equal_for_tosca_templates(
            'tosca_generic_vnfd_params.yaml',
            'hot_tosca_generic_vnfd_params.yaml',
            input_params
        )
    @unittest.skip("Related Bug 1682098")
    def test_create_tosca_scale(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_scale.yaml',
            'hot_scale_main.yaml',
            files={'scaling.yaml': 'hot_scale_custom.yaml'},
            is_monitor=False
        )
    def test_get_resource_info(self):
        # The mocked heat endpoint is unreachable, so resource lookup must
        # surface InfraDriverUnreachable rather than a raw client error.
        vnf_obj = self._get_expected_active_vnf()
        self.assertRaises(vnfm.InfraDriverUnreachable,
                          self.infra_driver.get_resource_info,
                          plugin=None, context=self.context, vnf_info=vnf_obj,
                          auth_attr=utils.get_vim_auth_obj(),
                          region_name=None)
    def test_create_port_with_security_groups(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_security_groups.yaml',
            'hot_tosca_security_groups.yaml'
        )
    def test_create_port_with_allowed_address_pairs(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_allowed_address_pairs.yaml',
            'hot_tosca_allowed_address_pairs.yaml'
        )
    def test_create_port_with_mac_and_ip(self):
        self._test_assert_equal_for_tosca_templates(
            'test_tosca_mac_ip.yaml',
            'hot_tosca_mac_ip.yaml'
        )
    # Alarm-monitor scenarios (skipped pending bug 1682098).
    @unittest.skip("Related Bug 1682098")
    def test_create_tosca_alarm_respawn(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_alarm_respawn.yaml',
            'hot_tosca_alarm_respawn.yaml',
            is_monitor=False
        )
    @unittest.skip("Related Bug 1682098")
    def test_create_tosca_alarm_scale(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_alarm_scale.yaml',
            'hot_tosca_alarm_scale.yaml',
            files={'scaling.yaml': 'hot_alarm_scale_custom.yaml'},
            is_monitor=False
        )
    @unittest.skip("Related Bug 1682098")
    def test_create_tosca_with_alarm_monitoring_not_matched(self):
        self.assertRaises(vnfm.MetadataNotMatched,
                          self._test_assert_equal_for_tosca_templates,
                          'tosca_alarm_metadata.yaml',
                          'hot_tosca_alarm_metadata.yaml',
                          is_monitor=False
                          )
    def test_create_tosca_monitoring_multi_vdus(self):
        self._test_assert_equal_for_tosca_templates(
            'tosca_monitoring_multi_vdu.yaml',
            'hot_tosca_monitoring_multi_vdu.yaml',
            multi_vdus=True
        )
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Model defination for the BERT Language Model."""
import tensorflow as tf
from vatt.modeling.common import transformers
def get_shape(x):
  """Deal with dynamic shape in tensorflow cleanly.

  Returns a list whose entries are the static dimension where known and the
  corresponding dynamic ``tf.shape`` tensor element otherwise.
  """
  static_dims = x.shape.as_list()
  dynamic_dims = tf.shape(x)
  shape = []
  for axis, static_dim in enumerate(static_dims):
    shape.append(dynamic_dims[axis] if static_dim is None else static_dim)
  return shape
class BertEncoder(tf.keras.layers.Layer):
  """The standard Transformer Encoder for text modality.

  Applies (optional) BERT-style random token masking, temporal positional
  embeddings, prepends a learnable aggregation token, and runs the result
  through a TransformerEncoder.
  """
  def __init__(self,
               # transformer parameters
               d_model=512,
               d_kv=64,
               d_ff=2048,
               num_layers=6,
               num_heads=8,
               pre_norm=False,
               use_bias=True,
               activation="gelu",
               dropout_rate=0.1,
               layer_norm_epsilon=1e-6,
               # masking parameters
               use_masking=False,
               mask_rate=0.2,
               # positional embedding parameters
               max_temporal_buckets=16,
               name="bert",
               **kwargs):
    # NOTE(review): **kwargs is accepted but not forwarded to the Layer base
    # class -- confirm this is intentional.
    super(BertEncoder, self).__init__(name=name)
    self.d_model = d_model
    # masking parameters
    self.use_masking = use_masking
    self.mask_rate = mask_rate
    self.pos_embedding_lookup = transformers.TemporalEmbeddings(
        hidden_size=self.d_model,
        max_temporal_buckets=max_temporal_buckets,
    )
    # define transformer head
    self.tx = transformers.TransformerEncoder(
        d_model=d_model,
        d_kv=d_kv,
        d_ff=d_ff,
        num_layers=num_layers,
        num_heads=num_heads,
        pre_norm=pre_norm,
        use_bias=use_bias,
        activation=activation,
        dropout_rate=dropout_rate,
        layer_norm_epsilon=layer_norm_epsilon,
        name="transformer",
    )
  def build(self, input_shapes):
    # Learnable (d_model,) token vectors created lazily at build time.
    token_embds_kwargs = {
        "shape": (self.d_model,),
        "initializer": tf.keras.initializers.get("glorot_normal"),
        "trainable": True,
        "dtype": tf.float32,
    }
    if self.use_masking:
      # define mask_token_embd as a learnable vector
      self.mask_token_embd = self.add_weight(
          name="mask_embedding",
          **token_embds_kwargs,
          )
    # add special token Aggregator
    self.agg_token_embd = self.add_weight(
        name="agg_embedding",
        **token_embds_kwargs,
        )
  def random_embd_mask(self, input_embds, input_attn_mask=None):
    """Replacing input tokens with mask_embds, random_embds or nothing.
    Args:
      input_embds: input sequence of token embeddings
      input_attn_mask: padding/attention mask for input sequence
    Returns:
      input_embds: given input (unchanged - for loss purposes)
      input_attn_mask: given padding/attention mask (unchanged)
      masked_input_embds: masked inputs according to both padding/attention mask
        and randomly generated token masks
      mask_pos: a sequence with same shape as input, containing 0/1 in
        locations where input tokens have been manipulated (1) or unchanged (0)
    """
    # NOTE(review): the per-position Python loop below indexes tensors with
    # Python ints and branches on tensor values, so this appears to require
    # eager execution and concrete (static) batch/seq sizes -- confirm; it is
    # also O(batch*seq) SparseTensor constructions per call.
    batch_size, seq_len, embd_dim = get_shape(input_embds)
    if input_attn_mask is None:
      input_attn_mask = tf.ones((batch_size, 1), dtype=tf.int32)
    # initialize placers for random ids
    mask_ids = tf.zeros((batch_size * seq_len,), dtype=tf.int32)
    random_ids = tf.zeros((batch_size * seq_len,), dtype=tf.int32)
    no_touch_ids = tf.zeros((batch_size * seq_len,), dtype=tf.int32)
    # control where to mask
    randomness = tf.random.uniform((batch_size * seq_len, 3))
    # a random set of token embeddings to be used as 10% of masked token embds
    embds_flattened = tf.stop_gradient(
        tf.reshape(input_embds, [-1, embd_dim])
        )
    shuffled_token_embds = tf.gather(
        embds_flattened,
        tf.random.shuffle(tf.range(tf.shape(embds_flattened)[0]))
        )
    shuffled_token_embds = tf.reshape(
        shuffled_token_embds,
        [batch_size, seq_len, embd_dim]
        )
    # fill in the placers where to mask
    for n in range(batch_size*seq_len):
      if randomness[n, 0] <= self.mask_rate:
        # do masking
        where_to_mask = tf.sparse.SparseTensor(
            indices=[[n]], values=[1], dense_shape=(batch_size * seq_len,))
        if randomness[n, 1] <= 0.8:
          # 80% mask
          mask_ids += tf.sparse.to_dense(where_to_mask)
        elif randomness[n, 2] <= 0.5:
          # 10% replace with random token from random set of tokens
          random_ids += tf.sparse.to_dense(where_to_mask)
        else:
          # 10% do nothing, but keep track of it
          no_touch_ids += tf.sparse.to_dense(where_to_mask)
    # get the masks tensor containing 0/1s indicating where to replace with
    # self.mask_token_embd, a learnable vector
    masks = tf.reshape(tf.stack(mask_ids), [batch_size, seq_len])
    # get the masks tensor containing 0/1s indicating where to replace with
    # a randomly chosen token from the current sequence (across all batches)
    randoms = tf.reshape(tf.stack(random_ids), [batch_size, seq_len])
    # find where the token was unchanged but it was flagged as mask
    no_touches = tf.reshape(tf.stack(no_touch_ids), [batch_size, seq_len])
    # apply the attention/padding mask to all the masks
    masks = tf.cast(masks * input_attn_mask, tf.float32)
    randoms = tf.cast(randoms * input_attn_mask, tf.float32)
    no_touches = tf.cast(no_touches * input_attn_mask, tf.float32)
    # replace the location of resulting masks with the mask values
    # (mask_token_embd / shuffled_token_embds)
    masked_input_embds = (
        input_embds * (1-masks-randoms)[:, :, None] +
        self.mask_token_embd[None, None, :] * masks[:, :, None] +
        shuffled_token_embds * randoms[:, :, None]
        )
    # add random shuffle and untouched locations to the mask locations
    mask_pos = masks + randoms + no_touches
    return masked_input_embds, mask_pos
  def _random_patch_selection(self,
                              inputs,
                              training,
                              input_shape):
    # NOTE(review): self.max_num_patches is never assigned in this class, so
    # calling this with training=True would raise AttributeError -- confirm
    # whether this method is dead code or the attribute is set by a subclass.
    if training:
      # get inputs dimensions
      batch_size, seq_len, dim = get_shape(inputs)
      # shuffle on temporal axis and gather the first max_num_patches
      temporal_idx = tf.range(seq_len)
      temporal_idx = tf.random.shuffle(temporal_idx)[None, :]
      temporal_idx = tf.tile(temporal_idx, [batch_size, 1])
      batch_idx = tf.range(batch_size)[:, None]
      batch_idx = tf.tile(batch_idx, [1, seq_len])
      gather_idx = tf.stack([batch_idx, temporal_idx], axis=2)
      inputs = tf.gather_nd(inputs, gather_idx)[:, :self.max_num_patches, :]
      input_shape = [batch_size, self.max_num_patches, dim]
    return inputs, input_shape
  def _flatten_inputs(self,
                      inputs):
    # Collapse all non-batch, non-channel axes into one sequence axis:
    # (bs, ..., d) -> (bs, seq, d). Returns the original shape as well.
    input_shape = get_shape(inputs)
    bs = input_shape[0]
    d_embd = input_shape[-1]
    inputs = tf.reshape(inputs, [bs, -1, d_embd])
    return inputs, input_shape
  def _append_special_token(self, embeddings, attention_mask):
    # Prepend the learnable [agg] token and extend the attention mask with a
    # matching always-attend entry.
    batch_size = get_shape(embeddings)[0]
    agg_embeddings = tf.tile(self.agg_token_embd[None, None, :],
                             [batch_size, 1, 1])
    word_embeddings = tf.concat([agg_embeddings, embeddings],
                                axis=1)
    attention_mask = tf.concat([tf.ones((batch_size, 1),
                                        dtype=attention_mask.dtype),
                                attention_mask],
                               axis=1)
    return word_embeddings, attention_mask
  def call(self,
           inputs,
           inputs_embeddings=None,
           attention_mask=None,
           training=False):
    """Encode pre-embedded tokens; raw token ids are not supported here."""
    if inputs is None and inputs_embeddings is None:
      raise ValueError(
          "One of inputs or inputs_embeddings should be specified."
          )
    # NOTE(review): truthiness of a non-scalar tensor raises in TF; this
    # branch presumably assumes `inputs` is None or a plain falsy value.
    if inputs:
      raise NotImplementedError(
          "Raw inputs to this module not supported. "
          "Please feed it to modeling/backbones/text/factory."
          )
    del inputs
    # flatten inputs
    embeddings, input_shape = self._flatten_inputs(inputs_embeddings)
    if self.use_masking and training:
      # generate random masks and replace mask ids with special token mask_embd
      masked_embeddings, random_mask = self.random_embd_mask(embeddings)
    else:
      masked_embeddings = embeddings
      random_mask = tf.ones((get_shape(embeddings)[0:2]), dtype=tf.float32)
    # add modality-specific positional encoding embeddings
    masked_embeddings = self.pos_embedding_lookup(
        masked_embeddings,
        input_shape,
        training
        )
    # append special tokens: [agg]
    tx_inputs, attention_mask = self._append_special_token(
        masked_embeddings,
        attention_mask,
        )
    # call Transformer
    outputs = self.tx(inputs=tx_inputs,
                      attention_mask=attention_mask,
                      training=training)
    # add inputs and possible random masks to outputs
    outputs["embeddings"] = embeddings
    outputs["random_mask"] = random_mask
    return outputs
| |
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This is helper module for :mod:`trappy.plotter.ILinePlot`
for adding HTML and javascript necessary for interactive
plotting. The Linear to 2-D co-ordination transformations
are done by using the functionality in
:mod:`trappy.plotter.PlotLayout`
"""
from trappy.plotter import AttrConf
import uuid
from collections import OrderedDict
import json
import os
from trappy.plotter import IPythonConf
from trappy.plotter.ColorMap import to_dygraph_colors
if not IPythonConf.check_ipython():
raise ImportError("No Ipython Environment found")
from IPython.display import display, HTML
def df_to_dygraph(data_frame):
    """Helper function to convert a :mod:`pandas.DataFrame` to
    dygraph data

    :param data_frame: The DataFrame to be converted
    :type data_frame: :mod:`pandas.DataFrame`
    """
    # Each dygraph row is [index_value, col1_value, col2_value, ...].
    rows = [[index_value] + row_values
            for index_value, row_values in zip(data_frame.index.tolist(),
                                               data_frame.values.tolist())]
    return {
        "data": rows,
        "labels": ["index"] + data_frame.columns.tolist(),
    }
class ILinePlotGen(object):
    """
    :param num_plots: The total number of plots
    :type num_plots: int

    The linear co-ordinate system :math:`[0, N_{plots}]` is
    mapped to a 2-D coordinate system with :math:`N_{rows}`
    and :math:`N_{cols}` such that:

    .. math::

        N_{rows} = \\frac{N_{cols}}{N_{plots}}
    """
    def _add_graph_cell(self, fig_name, color_map):
        """Add a HTML table cell to hold the plot"""
        colors_opt_arg = ", " + to_dygraph_colors(color_map) if color_map else ""
        graph_js = ''
        lib_urls = [IPythonConf.DYGRAPH_COMBINED_URL, IPythonConf.DYGRAPH_SYNC_URL,
                    IPythonConf.UNDERSCORE_URL]
        for url in lib_urls:
            graph_js += '<!-- TRAPPY_PUBLISH_SOURCE_LIB = "{}" -->\n'.format(url)
        graph_js += """
        <script>
            /* TRAPPY_PUBLISH_IMPORT = "plotter/js/ILinePlot.js" */
            /* TRAPPY_PUBLISH_REMOVE_START */
            var ilp_req = require.config( {
                paths: {
                    "dygraph-sync": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/synchronizer") + """',
                    "dygraph": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/dygraph-combined") + """',
                    "ILinePlot": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/ILinePlot") + """',
                    "underscore": '""" + IPythonConf.add_web_base("plotter_scripts/ILinePlot/underscore-min") + """',
                },
                shim: {
                    "dygraph-sync": ["dygraph"],
                    "ILinePlot": {
                        "deps": ["dygraph-sync", "dygraph", "underscore"],
                        "exports": "ILinePlot"
                    }
                }
            });
            /* TRAPPY_PUBLISH_REMOVE_STOP */
            ilp_req(["require", "ILinePlot"], function() { /* TRAPPY_PUBLISH_REMOVE_LINE */
                ILinePlot.generate(""" + fig_name + "_data" + colors_opt_arg + """);
            }); /* TRAPPY_PUBLISH_REMOVE_LINE */
        </script>
        """
        cell = '<td style="border-style: hidden;"><div class="ilineplot" id="{}"></div></td>'.format(fig_name)
        self._html.append(cell)
        self._js.append(graph_js)
    def _add_legend_cell(self, fig_name):
        """Add HTML table cell for the legend"""
        legend_div_name = fig_name + "_legend"
        cell = '<td style="border-style: hidden;"><div style="text-align:center" id="{}"></div></td>'.format(legend_div_name)
        self._html.append(cell)
    def _begin_row(self):
        """Add the opening tag for HTML row"""
        self._html.append("<tr>")
    def _end_row(self):
        """Add the closing tag for the HTML row"""
        self._html.append("</tr>")
    def _end_table(self):
        """Add the closing tag for the HTML table"""
        self._html.append("</table>")
    def _generate_fig_name(self):
        """Generate a unique figure name"""
        fig_name = "fig_" + uuid.uuid4().hex
        self._fig_map[self._fig_index] = fig_name
        self._fig_index += 1
        return fig_name
    def _init_html(self, color_map):
        """Initialize HTML code for the plots"""
        table = '<table style="border-style: hidden;">'
        self._html.append(table)
        if self._attr["title"]:
            cell = '<caption style="text-align:center; font: 24px sans-serif bold; color: black">{}</caption>'.format(self._attr["title"])
            self._html.append(cell)
        # Each logical row of plots is followed by a row of legend cells.
        for _ in range(self._rows):
            self._begin_row()
            legend_figs = []
            for _ in range(self._attr["per_line"]):
                fig_name = self._generate_fig_name()
                legend_figs.append(fig_name)
                self._add_graph_cell(fig_name, color_map)
            self._end_row()
            self._begin_row()
            for l_fig in legend_figs:
                self._add_legend_cell(l_fig)
            self._end_row()
        self._end_table()
    def __init__(self, num_plots, **kwargs):
        self._attr = kwargs
        self._html = []
        self._js = []
        self._js_plot_data = []
        self.num_plots = num_plots
        self._fig_map = {}
        self._fig_index = 0
        self._single_plot = False
        if self.num_plots == 0:
            raise RuntimeError("No plots for the given constraints")
        if self.num_plots < self._attr["per_line"]:
            self._attr["per_line"] = self.num_plots
        # Floor division: _rows feeds range() in _init_html, and plain "/"
        # would produce a float (TypeError) on Python 3.
        self._rows = self.num_plots // self._attr["per_line"]
        if self.num_plots % self._attr["per_line"] != 0:
            self._rows += 1
        self._attr["height"] = AttrConf.HTML_HEIGHT
        self._init_html(kwargs.pop("colors", None))
    def _check_add_scatter(self, fig_params):
        """Check if a scatter plot is needed
        and augment the fig_params accordingly"""
        if self._attr["scatter"]:
            fig_params["drawPoints"] = True
            fig_params["strokeWidth"] = 0.0
        else:
            fig_params["drawPoints"] = False
            fig_params["strokeWidth"] = AttrConf.LINE_WIDTH
        fig_params["pointSize"] = self._attr["point_size"]
    def add_plot(self, plot_num, data_frame, title="", test=False):
        """Add a plot for the corresponding index

        :param plot_num: The linear index of the plot
        :type plot_num: int

        :param data_frame: The data for the plot
        :type data_frame: :mod:`pandas.DataFrame`

        :param title: The title for the plot
        :type title: str
        """
        # DataFrame.items() replaces iteritems(), which was removed in
        # pandas 2.0; items() is available on all supported versions.
        datapoints = sum(len(v) for _, v in data_frame.items())
        if datapoints > self._attr["max_datapoints"]:
            msg = "This plot is too big and will probably make your browser unresponsive. If you are happy to wait, pass max_datapoints={} to view()".\
                  format(datapoints + 1)
            raise ValueError(msg)
        fig_name = self._fig_map[plot_num]
        fig_params = {}
        fig_params["data"] = df_to_dygraph(data_frame)
        fig_params["name"] = fig_name
        fig_params["rangesel"] = False
        fig_params["logscale"] = False
        fig_params["title"] = title
        fig_params["step_plot"] = self._attr["step_plot"]
        fig_params["fill_graph"] = self._attr["fill"]
        if "fill_alpha" in self._attr:
            fig_params["fill_alpha"] = self._attr["fill_alpha"]
            fig_params["fill_graph"] = True
        fig_params["per_line"] = self._attr["per_line"]
        fig_params["height"] = self._attr["height"]
        self._check_add_scatter(fig_params)
        # Use a hash of this object as a default for the sync group, so that if
        # 'sync_zoom=True' then by default (i.e. if 'group' is not specified),
        # all the plots in a figure are synced.
        fig_params["syncGroup"] = self._attr.get("group", str(hash(self)))
        fig_params["syncZoom"] = self._attr.get("sync_zoom",
                                                AttrConf.DEFAULT_SYNC_ZOOM)
        if "ylim" in self._attr:
            fig_params["valueRange"] = self._attr["ylim"]
        if "xlim" in self._attr:
            fig_params["dateWindow"] = self._attr["xlim"]
        fig_data = "var {}_data = {};".format(fig_name, json.dumps(fig_params))
        self._js_plot_data.append("<script>")
        self._js_plot_data.append(fig_data)
        self._js_plot_data.append("</script>")
    def finish(self):
        """Called when the Plotting is finished"""
        display(HTML(self.html()))
    def html(self):
        """Return the raw HTML text"""
        return "\n".join(self._html + self._js_plot_data + self._js)
| |
"""Representation of a deCONZ gateway."""
import asyncio
import async_timeout
from pydeconz import DeconzSession, errors, group, light, sensor
from homeassistant.config_entries import SOURCE_HASSIO
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
device_registry as dr,
entity_registry as er,
)
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DEFAULT_ALLOW_CLIP_SENSOR,
DEFAULT_ALLOW_DECONZ_GROUPS,
DEFAULT_ALLOW_NEW_DEVICES,
DOMAIN as DECONZ_DOMAIN,
LOGGER,
PLATFORMS,
)
from .deconz_event import async_setup_events, async_unload_events
from .errors import AuthenticationRequired, CannotConnect
@callback
def get_gateway_from_config_entry(hass, config_entry):
    """Return gateway with a matching config entry ID."""
    gateways = hass.data[DECONZ_DOMAIN]
    return gateways[config_entry.entry_id]
class DeconzGateway:
    """Manages a single deCONZ gateway.

    Owns the pydeconz session, tracks entity/event bookkeeping, and relays
    library callbacks to Home Assistant via dispatcher signals.
    """
    def __init__(self, hass, config_entry) -> None:
        """Initialize the system."""
        self.hass = hass
        self.config_entry = config_entry
        self.api = None
        self.available = True
        self.ignore_state_updates = False
        # Dispatcher signals: one for connectivity, one per new-device type.
        self.signal_reachable = f"deconz-reachable-{config_entry.entry_id}"
        self.signal_new_group = f"deconz_new_group_{config_entry.entry_id}"
        self.signal_new_light = f"deconz_new_light_{config_entry.entry_id}"
        self.signal_new_scene = f"deconz_new_scene_{config_entry.entry_id}"
        self.signal_new_sensor = f"deconz_new_sensor_{config_entry.entry_id}"
        self.deconz_resource_type_to_signal_new_device = {
            group.RESOURCE_TYPE: self.signal_new_group,
            light.RESOURCE_TYPE: self.signal_new_light,
            group.RESOURCE_TYPE_SCENE: self.signal_new_scene,
            sensor.RESOURCE_TYPE: self.signal_new_sensor,
        }
        # entity_id -> deconz_id mapping, populated by the platforms.
        self.deconz_ids = {}
        self.entities = {}
        self.events = []
    @property
    def bridgeid(self) -> str:
        """Return the unique identifier of the gateway."""
        return self.config_entry.unique_id
    @property
    def host(self) -> str:
        """Return the host of the gateway."""
        return self.config_entry.data[CONF_HOST]
    @property
    def master(self) -> bool:
        """Gateway which is used with deCONZ services without defining id."""
        return self.config_entry.options[CONF_MASTER_GATEWAY]
    # Options
    @property
    def option_allow_clip_sensor(self) -> bool:
        """Allow loading clip sensor from gateway."""
        return self.config_entry.options.get(
            CONF_ALLOW_CLIP_SENSOR, DEFAULT_ALLOW_CLIP_SENSOR
        )
    @property
    def option_allow_deconz_groups(self) -> bool:
        """Allow loading deCONZ groups from gateway."""
        return self.config_entry.options.get(
            CONF_ALLOW_DECONZ_GROUPS, DEFAULT_ALLOW_DECONZ_GROUPS
        )
    @property
    def option_allow_new_devices(self) -> bool:
        """Allow automatic adding of new devices."""
        return self.config_entry.options.get(
            CONF_ALLOW_NEW_DEVICES, DEFAULT_ALLOW_NEW_DEVICES
        )
    # Callbacks
    @callback
    def async_connection_status_callback(self, available) -> None:
        """Handle signals of gateway connection status."""
        self.available = available
        self.ignore_state_updates = False
        async_dispatcher_send(self.hass, self.signal_reachable)
    @callback
    def async_add_device_callback(
        self, resource_type, device=None, force: bool = False
    ) -> None:
        """Handle event of new device creation in deCONZ."""
        # Grouping: (not force and not allow_new) or (unknown resource type)
        # short-circuits the notification.
        if (
            not force
            and not self.option_allow_new_devices
            or resource_type not in self.deconz_resource_type_to_signal_new_device
        ):
            return
        args = []
        if device is not None and not isinstance(device, list):
            args.append([device])
        async_dispatcher_send(
            self.hass,
            self.deconz_resource_type_to_signal_new_device[resource_type],
            *args,  # Don't send device if None, it would override default value in listeners
        )
    async def async_update_device_registry(self) -> None:
        """Update device registry."""
        device_registry = dr.async_get(self.hass)
        # Host device
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id,
            connections={(CONNECTION_NETWORK_MAC, self.api.config.mac)},
        )
        # Gateway service
        configuration_url = f"http://{self.host}:{self.config_entry.data[CONF_PORT]}"
        if self.config_entry.source == SOURCE_HASSIO:
            configuration_url = None
        device_registry.async_get_or_create(
            config_entry_id=self.config_entry.entry_id,
            configuration_url=configuration_url,
            entry_type="service",
            identifiers={(DECONZ_DOMAIN, self.api.config.bridge_id)},
            manufacturer="Dresden Elektronik",
            model=self.api.config.model_id,
            name=self.api.config.name,
            sw_version=self.api.config.software_version,
            via_device=(CONNECTION_NETWORK_MAC, self.api.config.mac),
        )
    async def async_setup(self) -> bool:
        """Set up a deCONZ gateway."""
        try:
            self.api = await get_gateway(
                self.hass,
                self.config_entry.data,
                self.async_add_device_callback,
                self.async_connection_status_callback,
            )
        except CannotConnect as err:
            raise ConfigEntryNotReady from err
        except AuthenticationRequired as err:
            raise ConfigEntryAuthFailed from err
        self.hass.config_entries.async_setup_platforms(self.config_entry, PLATFORMS)
        await async_setup_events(self)
        self.api.start()
        self.config_entry.add_update_listener(self.async_config_entry_updated)
        return True
    @staticmethod
    async def async_config_entry_updated(hass, entry) -> None:
        """Handle signals of config entry being updated.

        This is a static method because a class method (bound method), can not be used with weak references.
        Causes for this is either discovery updating host address or config entry options changing.
        """
        gateway = get_gateway_from_config_entry(hass, entry)
        if gateway.api.host != gateway.host:
            # Host changed (discovery update): reconnect, skip option handling.
            gateway.api.close()
            gateway.api.host = gateway.host
            gateway.api.start()
            return
        await gateway.options_updated()
    async def options_updated(self):
        """Manage entities affected by config entry options."""
        # deconz_ids collects entities that must be removed because their
        # category (CLIP sensors / groups) has been disabled in the options.
        deconz_ids = []
        if self.option_allow_clip_sensor:
            self.async_add_device_callback(sensor.RESOURCE_TYPE)
        else:
            deconz_ids += [
                sensor.deconz_id
                for sensor in self.api.sensors.values()
                if sensor.type.startswith("CLIP")
            ]
        if self.option_allow_deconz_groups:
            self.async_add_device_callback(group.RESOURCE_TYPE)
        else:
            deconz_ids += [group.deconz_id for group in self.api.groups.values()]
        entity_registry = er.async_get(self.hass)
        for entity_id, deconz_id in self.deconz_ids.items():
            if deconz_id in deconz_ids and entity_registry.async_is_registered(
                entity_id
            ):
                # Removing an entity from the entity registry will also remove them
                # from Home Assistant
                entity_registry.async_remove(entity_id)
    @callback
    def shutdown(self, event) -> None:
        """Wrap the call to deconz.close.

        Used as an argument to EventBus.async_listen_once.
        """
        self.api.close()
    async def async_reset(self):
        """Reset this gateway to default state."""
        self.api.async_connection_status_callback = None
        self.api.close()
        await self.hass.config_entries.async_unload_platforms(
            self.config_entry, PLATFORMS
        )
        async_unload_events(self)
        self.deconz_ids = {}
        return True
async def get_gateway(
    hass, config, async_add_device_callback, async_connection_status_callback
) -> DeconzSession:
    """Create a gateway object and verify configuration.

    Raises AuthenticationRequired on a rejected API key and CannotConnect on
    timeout or transport errors, so callers can map them to config-entry
    states.
    """
    session = aiohttp_client.async_get_clientsession(hass)
    deconz = DeconzSession(
        session,
        config[CONF_HOST],
        config[CONF_PORT],
        config[CONF_API_KEY],
        add_device=async_add_device_callback,
        connection_status=async_connection_status_callback,
    )
    try:
        # NOTE(review): plain ``with`` on async_timeout.timeout only works on
        # older async_timeout releases; newer ones require ``async with`` --
        # confirm against the pinned dependency version.
        with async_timeout.timeout(10):
            await deconz.refresh_state()
        return deconz
    except errors.Unauthorized as err:
        LOGGER.warning("Invalid key for deCONZ at %s", config[CONF_HOST])
        raise AuthenticationRequired from err
    except (asyncio.TimeoutError, errors.RequestError) as err:
        LOGGER.error("Error connecting to deCONZ gateway at %s", config[CONF_HOST])
        raise CannotConnect from err
| |
import pickle, sys, os
import numpy
from scipy.misc import toimage
import topo
from topo import pattern
from topo import sheet
############################################################
# FUNCTIONS TO CREATE AND LOAD ORGANISED GCAL CONNECTIVITY #
############################################################
def pickleGCALWeight():
    """ Run this function after 10,000 iterations of GCAL (default settings) to generate pickle file needed by TCAL.
    Place the pickle file in the default output folder specified by topo.param.normalize_path() """
    V1Dim = 48
    pickleObj = {'SIZE': V1Dim}
    connections = ['LGNOnAfferent', 'LGNOffAfferent', 'LateralExcitatory', 'LateralInhibitory']
    pickleObj['Connections'] = connections
    for connectionName in connections:
        # Flatten the V1Dim x V1Dim grid of connection fields row-major,
        # copying each weight array so later simulation steps cannot mutate
        # the pickled data.
        cfsList = []
        for i in range(V1Dim):
            for j in range(V1Dim):
                cfWeights = topo.sim['V1'].projections(connectionName).cfs[i][j].weights
                cfsList.append(cfWeights[:])
        assert len(cfsList) == V1Dim * V1Dim
        pickleObj[connectionName] = cfsList[:]
    path = os.path.join(topo.param.normalize_path(), 'GCALweights.pickle')
    # Binary mode is required by pickle on Python 3 and harmless on Python 2;
    # the original opened the file in text mode and never closed the handle.
    with open(path, 'wb') as pickleFile:
        pickle.dump(pickleObj, pickleFile)
def weightsFromGCALtoTCAL():
    """Load pickled GCAL connectivity ('GCALweights.pickle') into the current V1 sheet.

    Afferent weights are copied one-to-one onto the matching projection's CFs;
    lateral weights are distributed across the delayed ring projections
    ('<name>-0', '<name>-1', ...), masked down to each ring's non-zero footprint.
    """
    # os.path.join(os.getcwd(), 'GCALweights.pickle')
    path = os.path.join(topo.param.normalize_path(), 'GCALweights.pickle')
    # BUG FIX: read the pickle in binary mode ('rb', not 'r') and guarantee
    # the handle is closed even if unpickling raises.
    with open(path, 'rb') as pickleFile:
        pickleObj = pickle.load(pickleFile)
    V1Dim = pickleObj.pop('SIZE')
    connections = pickleObj.pop('Connections')
    afferents = ['LGNOffAfferent', 'LGNOnAfferent']
    excitatory = ['LateralExcitatory-0', 'LateralExcitatory-1', 'LateralExcitatory-2', 'LateralExcitatory-3']
    inhibitory = ['LateralInhibitory-0', 'LateralInhibitory-1', 'LateralInhibitory-2', 'LateralInhibitory-3',
                  'LateralInhibitory-4', 'LateralInhibitory-5', 'LateralInhibitory-6', 'LateralInhibitory-7',
                  'LateralInhibitory-8', 'LateralInhibitory-9', 'LateralInhibitory-10']
    for connectionName in connections:
        allWeights = pickleObj[connectionName]
        group = None
        # GENERALIZED: validate against the pickled sheet size rather than a
        # hard-coded 48*48 (one weight matrix per V1 unit).
        assert len(allWeights) == V1Dim * V1Dim
        if connectionName == 'LateralExcitatory': group = excitatory
        if connectionName == 'LateralInhibitory': group = inhibitory
        if group is not None:
            # Lateral case: every ring projection receives the full set of
            # weight matrices, each masked by that ring's existing footprint.
            for connectionRing in group:
                groupWeights = allWeights[:]
                for i in range(V1Dim):
                    for j in range(V1Dim):
                        weights = groupWeights[0]
                        originalW = topo.sim['V1'].projections(connectionRing).cfs[i][j].weights
                        # Binary mask of the ring's current non-zero support.
                        mask = originalW.copy()
                        mask[mask > 0.0] = 1.0
                        assert (originalW.shape == weights.shape)
                        assert (weights.shape == mask.shape)
                        topo.sim['V1'].projections(connectionRing).cfs[i][j].weights = (mask * weights)
                        groupWeights = groupWeights[1:]
                assert groupWeights == []
        else:
            # Afferent case: straight one-to-one copy.
            assert (connectionName in afferents)
            for i in range(V1Dim):
                for j in range(V1Dim):
                    weights = allWeights[0]
                    topo.sim['V1'].projections(connectionName).cfs[i][j].weights = weights
                    allWeights = allWeights[1:]
            assert allWeights == []
############################################################
############################################################
class normaliseFn:
    """Callable weight output function that rescales an array in place.

    Used as a pattern output_fn (see makeDelayedLaterals), where output
    functions mutate their argument rather than return a new value.
    """
    def __init__(self,normaliseFactor):
        # Multiplicative factor applied on every call.
        self.normaliseFactor = normaliseFactor
    def __call__(self,x):
        # In-place scaling; intentionally returns None.
        x *= self.normaliseFactor
    def __repr__(self):
        return 'normaliseFn(%f)' % self.normaliseFactor
def originalBoundsWeights(sheetName, connName, connectionParams, wPatternClass, wPatternParams, center_row,center_col):
    """Create a throwaway zero-strength copy of the connection and return the
    (bounds, weights) of its centre connection field, for use as a reference.

    The dummy connection is left in the simulation; the caller is expected to
    remove all 'dummy*' connections afterwards (see readBoundsWeights).
    """
    sheetObj = topo.sim[sheetName]
    dummyParams = connectionParams.copy()
    # To prevent name clashes: pick an index above any existing 'dummy<N>'.
    dummyInds = [int(el.name[5:]) for el in topo.sim.connections() if el.name[:5] == 'dummy']
    if dummyInds == []: dummyInd = 0
    else: dummyInd = max(dummyInds)+1
    dummyName= 'dummy%d' % dummyInd
    # Zero strength so the dummy cannot influence activity.
    dummyParams['strength'] = 0.0; dummyParams['name'] = dummyName
    # Making the connection and getting the bounds.
    conn = topo.sim.connect(sheetName,sheetName, **dummyParams)
    cfObj = sheetObj.projections(dummyName).cfs[center_row,center_col]
    bounds = cfObj.input_sheet_slice.compute_bounds(sheetObj)
    # NOTE(review): if .weights is a numpy array, [:] yields a view, not a
    # copy — confirm a copy is not needed once the dummy is removed.
    weights = sheetObj.projections(dummyName).cfs[center_row,center_col].weights[:]
    return (bounds, weights)
def boundsChanged(bounds, sheetName, connName, center_row, center_col):
    """Return True when the first ring's CF bounds differ from *bounds*."""
    sheet = topo.sim[sheetName]
    # Ring 0 is always created, so it is safe to probe it.
    firstRingName = '%s-0' % connName
    cf = sheet.projections(firstRingName).cfs[center_row, center_col]
    currentBounds = cf.input_sheet_slice.compute_bounds(sheet)
    return currentBounds.lbrt() != bounds.lbrt()
def readBoundsWeights(sheetName, connName, connectionParams, wPatternClass,
                      wPatternParams, center_row,center_col):
    """Return (bounds, weights, False) for the connection's reference CF.

    Delegates to originalBoundsWeights(), then removes every dummy connection
    that was created.  The trailing False is the 'newBoundsFlag' consumed by
    makeDelayedLaterals().
    """
    (bounds, weights) = originalBoundsWeights(sheetName, connName, connectionParams,
                                              wPatternClass, wPatternParams,
                                              center_row,center_col)
    # Remove the dummy connections.  IDIOM FIX: plain loop instead of a list
    # comprehension used only for side effects; iterate over a snapshot so
    # removal cannot disturb the underlying connection list.
    for el in list(topo.sim.connections()):
        if el.name.startswith('dummy'):
            el.remove()
    return (bounds, weights, False)
def rawWeightPattern(wPatternClass, wPatternParams, bbwidth, extraParams):
    """Build an unnormalised weight pattern masked by a hard-edged disk.

    The disk spans the full bounding-box width *bbwidth*; output functions
    are stripped so the raw pattern sum can be measured.
    """
    # Hard-edged circular mask covering the whole bounding box.
    maskParams = dict(extraParams)
    maskParams.update(smoothing=0.0, aspect_ratio=1.0, size=bbwidth)
    diskMask = pattern.Disk(**maskParams)()
    # Pattern parameters: caller params + placement, with no output functions.
    params = dict(wPatternParams)
    params.update(extraParams)
    params['output_fns'] = []
    params['mask'] = diskMask
    return wPatternClass(**params)
def squareErrorPlots(weights, sheetName, connName, ringNumber,center_row,center_col, PLOTS=True):
    """Return the squared error between the reference CF *weights* and the
    element-wise sum of the ring projections' CFs; optionally save PNGs.

    When PLOTS is True, writes '<conn>.png' (original), one '<conn>-<i>.png'
    per ring, 'COMPOSITE-<conn>.png' (ring sum) and 'DIFF-<conn>.png'.
    """
    sheetObj = topo.sim[sheetName]
    ringNames = ["%s-%d" % (connName, i) for i in range(ringNumber)]
    ringWeightList = [sheetObj.projections(name).cfs[center_row,center_col].weights for name in ringNames]
    ringWeights = numpy.add.reduce(ringWeightList)
    if PLOTS:
        # NOTE(review): scipy.misc.toimage was deprecated and removed in
        # SciPy >= 1.2 — this only runs on old SciPy; confirm pinned version.
        toimage(weights).save('%s.png' % connName) # The original
        # IDIOM FIX: plain loop instead of a list comprehension used only
        # for its side effects.
        for (i, ringWeight) in enumerate(ringWeightList):
            toimage(ringWeight).save('%s-%d.png' % (connName, i))
        toimage(ringWeights).save('COMPOSITE-%s.png' % connName)
        toimage(weights - ringWeights).save('DIFF-%s.png' % connName)
    error = ((weights - ringWeights)**2).sum()
    return error
def makeDelayedLaterals(sheetName, connName, connectionParams, ringNumber, wPatternClass, wPatternParams):
    """Split a lateral connection into *ringNumber* concentric delayed projections.

    Creates projections '<connName>-0' .. '<connName>-<N-1>' on *sheetName*:
    ring 0 is a filled disk, later rings are annuli of equal thickness, all
    jointly normalized under *connName*.  ringNumber may be the string 'MAX'
    to derive the maximum ring count from the CF dimensions.  Returns
    newBoundsFlag | boundsChangedFlag.

    NOTE(review): this module runs under Python 2 (see the print statement at
    the end), so the `/` divisions below are integer divisions by design.
    """
    # Getting the center and density of the sheet
    sheetObj = topo.sim[sheetName]
    sheet_rows, sheet_cols = sheetObj.shape
    center_row, center_col = sheet_rows/2,sheet_cols/2
    xdensity = ydensity = sheetObj.xdensity
    cunitx, cunity = sheetObj.matrixidx2sheet(center_row, center_col)
    # Reference bounds/weights measured from a throwaway dummy connection.
    (bounds, weights, newBoundsFlag) = readBoundsWeights(sheetName, connName,
                                                         connectionParams,
                                                         wPatternClass, wPatternParams,
                                                         center_row,center_col)
    # Getting the bounds and bounds width
    l,b,r,t = bounds.lbrt(); bbwidth = r-l
    # Making the raw weight pattern to normalise from.
    wPatternParamsRaw = wPatternParams.copy()
    extraParams = {'x':cunitx, 'y':cunity, 'xdensity':xdensity, 'ydensity':ydensity, 'bounds':bounds}
    raw_weights_pattern = rawWeightPattern(wPatternClass, wPatternParamsRaw, bbwidth,extraParams)
    # Creating the actual weight pattern
    weight_pattern = wPatternClass(**wPatternParams)
    weight_pattern.output_fns = []
    # Getting the normalisation factor
    normalisation_sum = raw_weights_pattern().sum()
    normalisation_factor = 1.0 / normalisation_sum
    # Adding the output function
    weight_pattern.output_fns = [normaliseFn(normalisation_factor)]
    # Setting the appropriate ring number
    if ringNumber == 'MAX':
        (dim,dim) = raw_weights_pattern().shape
        if (dim % 2) != 1: print('*WARNING*: Cf dimensions should be odd!')
        ringNumber = (dim - 1) / 2
    if ringNumber < 1:
        print('Ring number has to be greater than one!')
        sys.exit()
    # Each ring (disk or annulus) has the same radial thickness.
    thickness = bbwidth / (ringNumber * 2)
    for i in range(ringNumber):
        if (i == 0):
            mask = pattern.Disk(size=2*thickness, smoothing=0.0)
        else:
            ring_size = 2*thickness*i+thickness
            mask = pattern.Ring(size=ring_size, thickness=thickness, smoothing=0.0)
        delayName = '%s-%d' % (connName,i)
        originalWeightOutputFns = None
        # NOTE(review): on the second and later iterations
        # 'weights_output_fns' has already been overwritten with [] by the
        # update() below, so only ring 0 gets the caller's original weight
        # output functions restored — confirm whether that is intended.
        if 'weights_output_fns' in connectionParams:
            originalWeightOutputFns = connectionParams['weights_output_fns']
        connectionParams.update({'cf_shape':mask, 'name':delayName,
                                 'weights_generator':weight_pattern, 'autosize_mask':False, 'weights_output_fns':[]})
        connectionParams.update({'dest_port':('Activity','JointNormalize',connName)})
        conn = topo.sim.connect(sheetName, sheetName, **connectionParams)
        if originalWeightOutputFns is not None:
            conn.weights_output_fns = originalWeightOutputFns
    # Checking to see if the bounds have changed
    boundsChangedFlag = boundsChanged(bounds, sheetName, connName, center_row, center_col)
    error = squareErrorPlots(weights, sheetName, connName, ringNumber,center_row,center_col)
    print ' Squared error in CF weights for %d rings in %s is: %f' % (ringNumber, sheetName, error)
    return newBoundsFlag | boundsChangedFlag
| |
#L
# Copyright SAIC
#
# Distributed under the OSI-approved BSD 3-Clause License.
# See http://ncip.github.com/python-api/LICENSE.txt for details.
#L
# This API was generated by pyCaCORE
import cabig.cabio.CaBioWSQueryService_client as services
from cabig.cacore.ws.proxy import *
# XML schema namespace holding the generated *_Def holder classes used by
# every bean's __init__ below.
schema = services.ns3
class Agent(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Agent."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Agent"
    EVSId = ProxyAttr('EVSId')
    NSCNumber = ProxyAttr('NSCNumber')
    bigid = ProxyAttr('bigid')
    comment = ProxyAttr('comment')
    id = ProxyAttr('id')
    isCMAPAgent = ProxyAttr('isCMAPAgent')
    name = ProxyAttr('name')
    source = ProxyAttr('source')
    clinicalTrialProtocolCollection = ProxyAssoc('clinicalTrialProtocolCollection',True)
    geneFunctionAssociationCollection = ProxyAssoc('geneFunctionAssociationCollection',True)
    targetCollection = ProxyAssoc('targetCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Agent_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Anomaly(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Anomaly."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Anomaly"
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    histopathology = ProxyAssoc('histopathology',False)
    organOntologyCollection = ProxyAssoc('organOntologyCollection',True)
    vocabularyCollection = ProxyAssoc('vocabularyCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Anomaly_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ArrayReporterCytogeneticLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ArrayReporterCytogeneticLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ArrayReporterCytogeneticLocation"
    id = ProxyAttr('id')
    bigid = ProxyAttr('bigid')
    chromosome = ProxyAssoc('chromosome',False)
    endCytoband = ProxyAssoc('endCytoband',False)
    startCytoband = ProxyAssoc('startCytoband',False)
    arrayReporter = ProxyAssoc('arrayReporter',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ArrayReporterCytogeneticLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ArrayReporterPhysicalLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ArrayReporterPhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ArrayReporterPhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    arrayReporter = ProxyAssoc('arrayReporter',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ArrayReporterPhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ArrayReporter(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ArrayReporter."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ArrayReporter"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    microarray = ProxyAssoc('microarray',False)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ArrayReporter_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Chromosome(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Chromosome."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Chromosome"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    number = ProxyAttr('number')
    cytobandCollection = ProxyAssoc('cytobandCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    locationCollection = ProxyAssoc('locationCollection',True)
    taxon = ProxyAssoc('taxon',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Chromosome_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ClinicalTrialProtocol(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ClinicalTrialProtocol."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ClinicalTrialProtocol"
    NIHAdminCode = ProxyAttr('NIHAdminCode')
    PDQIdentifier = ProxyAttr('PDQIdentifier')
    PIName = ProxyAttr('PIName')
    bigid = ProxyAttr('bigid')
    currentStatus = ProxyAttr('currentStatus')
    currentStatusDate = ProxyAttr('currentStatusDate')
    documentNumber = ProxyAttr('documentNumber')
    id = ProxyAttr('id')
    leadOrganizationId = ProxyAttr('leadOrganizationId')
    leadOrganizationName = ProxyAttr('leadOrganizationName')
    participationType = ProxyAttr('participationType')
    phase = ProxyAttr('phase')
    title = ProxyAttr('title')
    treatmentFlag = ProxyAttr('treatmentFlag')
    agentCollection = ProxyAssoc('agentCollection',True)
    diseaseOntologyCollection = ProxyAssoc('diseaseOntologyCollection',True)
    histopathologyCollection = ProxyAssoc('histopathologyCollection',True)
    protocolAssociationCollection = ProxyAssoc('protocolAssociationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ClinicalTrialProtocol_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class CloneRelativeLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.CloneRelativeLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.CloneRelativeLocation"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    type = ProxyAttr('type')
    clone = ProxyAssoc('clone',False)
    nucleicAcidSequence = ProxyAssoc('nucleicAcidSequence',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.CloneRelativeLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Clone(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Clone."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Clone"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    insertSize = ProxyAttr('insertSize')
    name = ProxyAttr('name')
    type = ProxyAttr('type')
    cloneRelativeLocationCollection = ProxyAssoc('cloneRelativeLocationCollection',True)
    library = ProxyAssoc('library',False)
    nucleicAcidSequenceCollection = ProxyAssoc('nucleicAcidSequenceCollection',True)
    taxonCollection = ProxyAssoc('taxonCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Clone_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class CytobandPhysicalLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.CytobandPhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.CytobandPhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    cytoband = ProxyAssoc('cytoband',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.CytobandPhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Cytoband(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Cytoband."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Cytoband"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    chromosome = ProxyAssoc('chromosome',False)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Cytoband_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class CytogeneticLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.CytogeneticLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.CytogeneticLocation"
    id = ProxyAttr('id')
    bigid = ProxyAttr('bigid')
    chromosome = ProxyAssoc('chromosome',False)
    endCytoband = ProxyAssoc('endCytoband',False)
    startCytoband = ProxyAssoc('startCytoband',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.CytogeneticLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class DiseaseOntologyRelationship(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.DiseaseOntologyRelationship."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.DiseaseOntologyRelationship"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    type = ProxyAttr('type')
    childDiseaseOntology = ProxyAssoc('childDiseaseOntology',False)
    parentDiseaseOntology = ProxyAssoc('parentDiseaseOntology',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.DiseaseOntologyRelationship_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class DiseaseOntology(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.DiseaseOntology."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.DiseaseOntology"
    EVSId = ProxyAttr('EVSId')
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    childDiseaseOntologyRelationshipCollection = ProxyAssoc('childDiseaseOntologyRelationshipCollection',True)
    clinicalTrialProtocolCollection = ProxyAssoc('clinicalTrialProtocolCollection',True)
    geneFunctionAssociationCollection = ProxyAssoc('geneFunctionAssociationCollection',True)
    histopathologyCollection = ProxyAssoc('histopathologyCollection',True)
    parentDiseaseOntologyRelationshipCollection = ProxyAssoc('parentDiseaseOntologyRelationshipCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.DiseaseOntology_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class EvidenceCode(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.EvidenceCode."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.EvidenceCode"
    bigid = ProxyAttr('bigid')
    evidenceCode = ProxyAttr('evidenceCode')
    id = ProxyAttr('id')
    evidenceCollection = ProxyAssoc('evidenceCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.EvidenceCode_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Evidence(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Evidence."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Evidence"
    bigid = ProxyAttr('bigid')
    celllineStatus = ProxyAttr('celllineStatus')
    comments = ProxyAttr('comments')
    id = ProxyAttr('id')
    negationStatus = ProxyAttr('negationStatus')
    pubmedId = ProxyAttr('pubmedId')
    sentence = ProxyAttr('sentence')
    sentenceStatus = ProxyAttr('sentenceStatus')
    evidenceCodeCollection = ProxyAssoc('evidenceCodeCollection',True)
    geneFunctionAssociationCollection = ProxyAssoc('geneFunctionAssociationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Evidence_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ExonArrayReporter(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ExonArrayReporter."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ExonArrayReporter"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    probeCount = ProxyAttr('probeCount')
    probeSelectionRegionId = ProxyAttr('probeSelectionRegionId')
    strand = ProxyAttr('strand')
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    microarray = ProxyAssoc('microarray',False)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    exon = ProxyAssoc('exon',False)
    geneCollection = ProxyAssoc('geneCollection',True)
    transcript = ProxyAssoc('transcript',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ExonArrayReporter_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Exon(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Exon."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Exon"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    source = ProxyAttr('source')
    sourceId = ProxyAttr('sourceId')
    exonArrayReporterCollection = ProxyAssoc('exonArrayReporterCollection',True)
    transcript = ProxyAssoc('transcript',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Exon_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ExpressedSequenceTag(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ExpressedSequenceTag."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ExpressedSequenceTag"
    accessionNumber = ProxyAttr('accessionNumber')
    accessionNumberVersion = ProxyAttr('accessionNumberVersion')
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    length = ProxyAttr('length')
    type = ProxyAttr('type')
    value = ProxyAttr('value')
    cloneRelativeLocation = ProxyAssoc('cloneRelativeLocation',False)
    databaseCrossReferenceCollection = ProxyAssoc('databaseCrossReferenceCollection',True)
    expressionArrayReporterCollection = ProxyAssoc('expressionArrayReporterCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ExpressedSequenceTag_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ExpressionArrayReporter(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.ExpressionArrayReporter."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ExpressionArrayReporter"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    sequenceSource = ProxyAttr('sequenceSource')
    sequenceType = ProxyAttr('sequenceType')
    targetDescription = ProxyAttr('targetDescription')
    targetId = ProxyAttr('targetId')
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    microarray = ProxyAssoc('microarray',False)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    gene = ProxyAssoc('gene',False)
    nucleicAcidSequence = ProxyAssoc('nucleicAcidSequence',False)
    proteinDomainCollection = ProxyAssoc('proteinDomainCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.ExpressionArrayReporter_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneAgentAssociation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneAgentAssociation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneAgentAssociation"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    role = ProxyAttr('role')
    evidence = ProxyAssoc('evidence',False)
    gene = ProxyAssoc('gene',False)
    agent = ProxyAssoc('agent',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneAgentAssociation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneAlias(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneAlias."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneAlias"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    type = ProxyAttr('type')
    geneCollection = ProxyAssoc('geneCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneAlias_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneCytogeneticLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneCytogeneticLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneCytogeneticLocation"
    id = ProxyAttr('id')
    bigid = ProxyAttr('bigid')
    chromosome = ProxyAssoc('chromosome',False)
    endCytoband = ProxyAssoc('endCytoband',False)
    startCytoband = ProxyAssoc('startCytoband',False)
    gene = ProxyAssoc('gene',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneCytogeneticLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneDiseaseAssociation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneDiseaseAssociation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneDiseaseAssociation"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    role = ProxyAttr('role')
    evidence = ProxyAssoc('evidence',False)
    gene = ProxyAssoc('gene',False)
    diseaseOntology = ProxyAssoc('diseaseOntology',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneDiseaseAssociation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneFunctionAssociation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneFunctionAssociation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneFunctionAssociation"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    role = ProxyAttr('role')
    evidence = ProxyAssoc('evidence',False)
    gene = ProxyAssoc('gene',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneFunctionAssociation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneOntologyRelationship(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneOntologyRelationship."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneOntologyRelationship"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    relationshipType = ProxyAttr('relationshipType')
    childGeneOntology = ProxyAssoc('childGeneOntology',False)
    parentGeneOntology = ProxyAssoc('parentGeneOntology',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneOntologyRelationship_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneOntology(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneOntology."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneOntology"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    childGeneOntologyRelationshipCollection = ProxyAssoc('childGeneOntologyRelationshipCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    parentGeneOntologyRelationshipCollection = ProxyAssoc('parentGeneOntologyRelationshipCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneOntology_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GenePhysicalLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GenePhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GenePhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    featureType = ProxyAttr('featureType')
    chromosome = ProxyAssoc('chromosome',False)
    gene = ProxyAssoc('gene',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GenePhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class GeneRelativeLocation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.GeneRelativeLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.GeneRelativeLocation"
    bigid = ProxyAttr('bigid')
    distance = ProxyAttr('distance')
    id = ProxyAttr('id')
    orientation = ProxyAttr('orientation')
    SNP = ProxyAssoc('SNP',False)
    gene = ProxyAssoc('gene',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.GeneRelativeLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Gene(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Gene."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Gene"
    bigid = ProxyAttr('bigid')
    clusterId = ProxyAttr('clusterId')
    fullName = ProxyAttr('fullName')
    hugoSymbol = ProxyAttr('hugoSymbol')
    id = ProxyAttr('id')
    symbol = ProxyAttr('symbol')
    chromosome = ProxyAssoc('chromosome',False)
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    databaseCrossReferenceCollection = ProxyAssoc('databaseCrossReferenceCollection',True)
    exonArrayReporterCollection = ProxyAssoc('exonArrayReporterCollection',True)
    expressionArrayReporterCollection = ProxyAssoc('expressionArrayReporterCollection',True)
    geneAliasCollection = ProxyAssoc('geneAliasCollection',True)
    geneFunctionAssociationCollection = ProxyAssoc('geneFunctionAssociationCollection',True)
    geneOntologyCollection = ProxyAssoc('geneOntologyCollection',True)
    geneRelativeLocationCollection = ProxyAssoc('geneRelativeLocationCollection',True)
    histopathologyCollection = ProxyAssoc('histopathologyCollection',True)
    homologousAssociationCollection = ProxyAssoc('homologousAssociationCollection',True)
    libraryCollection = ProxyAssoc('libraryCollection',True)
    markerCollection = ProxyAssoc('markerCollection',True)
    nucleicAcidSequenceCollection = ProxyAssoc('nucleicAcidSequenceCollection',True)
    organOntologyCollection = ProxyAssoc('organOntologyCollection',True)
    pathwayCollection = ProxyAssoc('pathwayCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    proteinCollection = ProxyAssoc('proteinCollection',True)
    targetCollection = ProxyAssoc('targetCollection',True)
    taxon = ProxyAssoc('taxon',False)
    transcriptCollection = ProxyAssoc('transcriptCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Gene_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Histopathology(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Histopathology."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Histopathology"
    ageOfOnset = ProxyAttr('ageOfOnset')
    comments = ProxyAttr('comments')
    grossDescription = ProxyAttr('grossDescription')
    id = ProxyAttr('id')
    microscopicDescription = ProxyAttr('microscopicDescription')
    relationalOperation = ProxyAttr('relationalOperation')
    survivalInfo = ProxyAttr('survivalInfo')
    tumorIncidenceRate = ProxyAttr('tumorIncidenceRate')
    anomalyCollection = ProxyAssoc('anomalyCollection',True)
    clinicalTrialProtocolCollection = ProxyAssoc('clinicalTrialProtocolCollection',True)
    diseaseOntology = ProxyAssoc('diseaseOntology',False)
    geneCollection = ProxyAssoc('geneCollection',True)
    libraryCollection = ProxyAssoc('libraryCollection',True)
    metastasisCollection = ProxyAssoc('metastasisCollection',True)
    organOntology = ProxyAssoc('organOntology',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Histopathology_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class HomologousAssociation(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.HomologousAssociation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.HomologousAssociation"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    similarityPercentage = ProxyAttr('similarityPercentage')
    gene = ProxyAssoc('gene',False)
    homologousGene = ProxyAssoc('homologousGene',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.HomologousAssociation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Library(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Library."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Library"
    bigid = ProxyAttr('bigid')
    cloneProducer = ProxyAttr('cloneProducer')
    cloneVector = ProxyAttr('cloneVector')
    cloneVectorType = ProxyAttr('cloneVectorType')
    clonesToDate = ProxyAttr('clonesToDate')
    creationDate = ProxyAttr('creationDate')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    keyword = ProxyAttr('keyword')
    labHost = ProxyAttr('labHost')
    name = ProxyAttr('name')
    rsite1 = ProxyAttr('rsite1')
    rsite2 = ProxyAttr('rsite2')
    sequencesToDate = ProxyAttr('sequencesToDate')
    type = ProxyAttr('type')
    uniGeneId = ProxyAttr('uniGeneId')
    cloneCollection = ProxyAssoc('cloneCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    histopathologyCollection = ProxyAssoc('histopathologyCollection',True)
    protocol = ProxyAssoc('protocol',False)
    tissue = ProxyAssoc('tissue',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Library_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Location(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.Location."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Location"
    id = ProxyAttr('id')
    chromosome = ProxyAssoc('chromosome',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.Location_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class MarkerAlias(WSBean):
    """Generated caBIO proxy bean for gov.nih.nci.cabio.domain.MarkerAlias."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.MarkerAlias"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    markerCollection = ProxyAssoc('markerCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Build a default SOAP holder when none is supplied.
        if not(holder): holder = schema.MarkerAlias_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class MarkerPhysicalLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.MarkerPhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.MarkerPhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    marker = ProxyAssoc('marker',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.MarkerPhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class MarkerRelativeLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.MarkerRelativeLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.MarkerRelativeLocation"
    bigid = ProxyAttr('bigid')
    distance = ProxyAttr('distance')
    id = ProxyAttr('id')
    orientation = ProxyAttr('orientation')
    type = ProxyAttr('type')
    SNP = ProxyAssoc('SNP',False)
    markerCollection = ProxyAssoc('markerCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.MarkerRelativeLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Marker(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Marker."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Marker"
    bigid = ProxyAttr('bigid')
    geneticMarkerId = ProxyAttr('geneticMarkerId')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    type = ProxyAttr('type')
    geneCollection = ProxyAssoc('geneCollection',True)
    markerAliasCollection = ProxyAssoc('markerAliasCollection',True)
    markerRelativeLocationCollection = ProxyAssoc('markerRelativeLocationCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    taxon = ProxyAssoc('taxon',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Marker_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class MessengerRNA(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.MessengerRNA."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.MessengerRNA"
    accessionNumber = ProxyAttr('accessionNumber')
    accessionNumberVersion = ProxyAttr('accessionNumberVersion')
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    length = ProxyAttr('length')
    type = ProxyAttr('type')
    value = ProxyAttr('value')
    cloneRelativeLocation = ProxyAssoc('cloneRelativeLocation',False)
    databaseCrossReferenceCollection = ProxyAssoc('databaseCrossReferenceCollection',True)
    expressionArrayReporterCollection = ProxyAssoc('expressionArrayReporterCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.MessengerRNA_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Microarray(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Microarray."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Microarray"
    LSID = ProxyAttr('LSID')
    annotationDate = ProxyAttr('annotationDate')
    bigid = ProxyAttr('bigid')
    dbSNPVersion = ProxyAttr('dbSNPVersion')
    description = ProxyAttr('description')
    genomeVersion = ProxyAttr('genomeVersion')
    id = ProxyAttr('id')
    manufacturer = ProxyAttr('manufacturer')
    name = ProxyAttr('name')
    type = ProxyAttr('type')
    arrayReporterCollection = ProxyAssoc('arrayReporterCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Microarray_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class NucleicAcidPhysicalLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.NucleicAcidPhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.NucleicAcidPhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    nucleicAcidSequence = ProxyAssoc('nucleicAcidSequence',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.NucleicAcidPhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class NucleicAcidSequence(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.NucleicAcidSequence."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.NucleicAcidSequence"
    accessionNumber = ProxyAttr('accessionNumber')
    accessionNumberVersion = ProxyAttr('accessionNumberVersion')
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    length = ProxyAttr('length')
    type = ProxyAttr('type')
    value = ProxyAttr('value')
    cloneRelativeLocation = ProxyAssoc('cloneRelativeLocation',False)
    databaseCrossReferenceCollection = ProxyAssoc('databaseCrossReferenceCollection',True)
    expressionArrayReporterCollection = ProxyAssoc('expressionArrayReporterCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.NucleicAcidSequence_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class OrganOntologyRelationship(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.OrganOntologyRelationship."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.OrganOntologyRelationship"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    type = ProxyAttr('type')
    childOrganOntology = ProxyAssoc('childOrganOntology',False)
    parentOrganOntology = ProxyAssoc('parentOrganOntology',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.OrganOntologyRelationship_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class OrganOntology(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.OrganOntology."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.OrganOntology"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    anomalyCollection = ProxyAssoc('anomalyCollection',True)
    childOrganOntologyRelationshipCollection = ProxyAssoc('childOrganOntologyRelationshipCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    histopathologyCollection = ProxyAssoc('histopathologyCollection',True)
    parentOrganOntologyRelationshipCollection = ProxyAssoc('parentOrganOntologyRelationshipCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.OrganOntology_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Pathway(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Pathway."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Pathway"
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    diagram = ProxyAttr('diagram')
    displayValue = ProxyAttr('displayValue')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    geneCollection = ProxyAssoc('geneCollection',True)
    histopathologyCollection = ProxyAssoc('histopathologyCollection',True)
    taxon = ProxyAssoc('taxon',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Pathway_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class PhysicalLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.PhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.PhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.PhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class PopulationFrequency(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.PopulationFrequency."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.PopulationFrequency"
    bigid = ProxyAttr('bigid')
    ethnicity = ProxyAttr('ethnicity')
    heterozygousFrequency = ProxyAttr('heterozygousFrequency')
    id = ProxyAttr('id')
    majorAllele = ProxyAttr('majorAllele')
    majorFrequency = ProxyAttr('majorFrequency')
    minorAllele = ProxyAttr('minorAllele')
    minorFrequency = ProxyAttr('minorFrequency')
    type = ProxyAttr('type')
    SNP = ProxyAssoc('SNP',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.PopulationFrequency_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ProteinAlias(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.ProteinAlias."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ProteinAlias"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    proteinCollection = ProxyAssoc('proteinCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.ProteinAlias_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ProteinDomain(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.ProteinDomain."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ProteinDomain"
    accessionNumber = ProxyAttr('accessionNumber')
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    source = ProxyAttr('source')
    expressionArrayReporterCollection = ProxyAssoc('expressionArrayReporterCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.ProteinDomain_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ProteinSequence(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.ProteinSequence."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ProteinSequence"
    bigid = ProxyAttr('bigid')
    checkSum = ProxyAttr('checkSum')
    id = ProxyAttr('id')
    length = ProxyAttr('length')
    molecularWeightInDaltons = ProxyAttr('molecularWeightInDaltons')
    value = ProxyAttr('value')
    protein = ProxyAssoc('protein',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.ProteinSequence_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Protein(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Protein."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Protein"
    bigid = ProxyAttr('bigid')
    copyrightStatement = ProxyAttr('copyrightStatement')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    primaryAccession = ProxyAttr('primaryAccession')
    uniProtCode = ProxyAttr('uniProtCode')
    geneCollection = ProxyAssoc('geneCollection',True)
    keywords = ProxyAssoc('keywords',True)
    proteinAliasCollection = ProxyAssoc('proteinAliasCollection',True)
    proteinSequence = ProxyAssoc('proteinSequence',True)
    secondaryAccession = ProxyAssoc('secondaryAccession',True)
    taxonCollection = ProxyAssoc('taxonCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Protein_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class ProtocolAssociation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.ProtocolAssociation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.ProtocolAssociation"
    CTEPNAME = ProxyAttr('CTEPNAME')
    IMTCODE = ProxyAttr('IMTCODE')
    bigid = ProxyAttr('bigid')
    diseaseCategory = ProxyAttr('diseaseCategory')
    diseaseSubCategory = ProxyAttr('diseaseSubCategory')
    id = ProxyAttr('id')
    clinicalTrialProtocol = ProxyAssoc('clinicalTrialProtocol',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.ProtocolAssociation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Protocol(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Protocol."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Protocol"
    bigid = ProxyAttr('bigid')
    description = ProxyAttr('description')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    type = ProxyAttr('type')
    libraryCollection = ProxyAssoc('libraryCollection',True)
    tissueCollection = ProxyAssoc('tissueCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Protocol_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class RelativeLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.RelativeLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.RelativeLocation"
    bigid = ProxyAttr('bigid')
    distance = ProxyAttr('distance')
    id = ProxyAttr('id')
    orientation = ProxyAttr('orientation')
    SNP = ProxyAssoc('SNP',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.RelativeLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class SNPArrayReporter(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.SNPArrayReporter."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.SNPArrayReporter"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    phastConservation = ProxyAttr('phastConservation')
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    microarray = ProxyAssoc('microarray',False)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    SNP = ProxyAssoc('SNP',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.SNPArrayReporter_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class SNPCytogeneticLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.SNPCytogeneticLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.SNPCytogeneticLocation"
    id = ProxyAttr('id')
    bigid = ProxyAttr('bigid')
    chromosome = ProxyAssoc('chromosome',False)
    endCytoband = ProxyAssoc('endCytoband',False)
    startCytoband = ProxyAssoc('startCytoband',False)
    SNP = ProxyAssoc('SNP',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.SNPCytogeneticLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class SNPPhysicalLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.SNPPhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.SNPPhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    SNP = ProxyAssoc('SNP',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.SNPPhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class SNP(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.SNP."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.SNP"
    DBSNPID = ProxyAttr('DBSNPID')
    alleleA = ProxyAttr('alleleA')
    alleleB = ProxyAttr('alleleB')
    aminoAcidChange = ProxyAttr('aminoAcidChange')
    bigid = ProxyAttr('bigid')
    chrXPseudoAutosomalRegion = ProxyAttr('chrXPseudoAutosomalRegion')
    codingStatus = ProxyAttr('codingStatus')
    flank = ProxyAttr('flank')
    id = ProxyAttr('id')
    validationStatus = ProxyAttr('validationStatus')
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    databaseCrossReferenceCollection = ProxyAssoc('databaseCrossReferenceCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    populationFrequencyCollection = ProxyAssoc('populationFrequencyCollection',True)
    relativeLocationCollection = ProxyAssoc('relativeLocationCollection',True)
    snpArrayReporterCollection = ProxyAssoc('snpArrayReporterCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.SNP_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Target(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Target."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Target"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    type = ProxyAttr('type')
    agentCollection = ProxyAssoc('agentCollection',True)
    anomalyCollection = ProxyAssoc('anomalyCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Target_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Taxon(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Taxon."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Taxon"
    abbreviation = ProxyAttr('abbreviation')
    bigid = ProxyAttr('bigid')
    commonName = ProxyAttr('commonName')
    ethnicityStrain = ProxyAttr('ethnicityStrain')
    id = ProxyAttr('id')
    scientificName = ProxyAttr('scientificName')
    chromosomeCollection = ProxyAssoc('chromosomeCollection',True)
    cloneCollection = ProxyAssoc('cloneCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    markerCollection = ProxyAssoc('markerCollection',True)
    pathwayCollection = ProxyAssoc('pathwayCollection',True)
    proteinCollection = ProxyAssoc('proteinCollection',True)
    tissueCollection = ProxyAssoc('tissueCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Taxon_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Tissue(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Tissue."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Tissue"
    cellLine = ProxyAttr('cellLine')
    cellType = ProxyAttr('cellType')
    description = ProxyAttr('description')
    developmentalStage = ProxyAttr('developmentalStage')
    histology = ProxyAttr('histology')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    organ = ProxyAttr('organ')
    sex = ProxyAttr('sex')
    supplier = ProxyAttr('supplier')
    type = ProxyAttr('type')
    libraryCollection = ProxyAssoc('libraryCollection',True)
    protocol = ProxyAssoc('protocol',False)
    taxon = ProxyAssoc('taxon',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Tissue_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class TranscriptArrayReporter(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.TranscriptArrayReporter."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.TranscriptArrayReporter"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    name = ProxyAttr('name')
    cytogeneticLocationCollection = ProxyAssoc('cytogeneticLocationCollection',True)
    microarray = ProxyAssoc('microarray',False)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.TranscriptArrayReporter_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class TranscriptPhysicalLocation(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.TranscriptPhysicalLocation."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.TranscriptPhysicalLocation"
    id = ProxyAttr('id')
    assembly = ProxyAttr('assembly')
    chromosomalEndPosition = ProxyAttr('chromosomalEndPosition')
    chromosomalStartPosition = ProxyAttr('chromosomalStartPosition')
    chromosome = ProxyAssoc('chromosome',False)
    transcript = ProxyAssoc('transcript',False)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.TranscriptPhysicalLocation_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Transcript(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Transcript."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Transcript"
    bigid = ProxyAttr('bigid')
    id = ProxyAttr('id')
    source = ProxyAttr('source')
    sourceId = ProxyAttr('sourceId')
    strand = ProxyAttr('strand')
    totalProbeCount = ProxyAttr('totalProbeCount')
    exonArrayReporterCollection = ProxyAssoc('exonArrayReporterCollection',True)
    exonCollection = ProxyAssoc('exonCollection',True)
    geneCollection = ProxyAssoc('geneCollection',True)
    physicalLocationCollection = ProxyAssoc('physicalLocationCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Transcript_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
class Vocabulary(WSBean):
    """Generated web-service proxy bean for gov.nih.nci.cabio.domain.Vocabulary."""
    arrayType = services.ns1.ArrayOf_xsd_anyType_Def(None).pyclass
    className = "gov.nih.nci.cabio.domain.Vocabulary"
    bigid = ProxyAttr('bigid')
    coreTerm = ProxyAttr('coreTerm')
    generalTerm = ProxyAttr('generalTerm')
    id = ProxyAttr('id')
    anomalyCollection = ProxyAssoc('anomalyCollection',True)
    def __init__(self, holder=None, service=None, **kwargs):
        # Default to an empty schema-generated holder when none is supplied.
        if not(holder): holder = schema.Vocabulary_Def(None).pyclass()
        WSBean.__init__(self, holder, service=service, **kwargs)
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import base64
import hashlib
import os
from . import fetcher
from .envfuncs import path_search
from .errors import ManifestNotFound
from .manifest import ManifestParser
class Loader(object):
    """Filesystem-backed manifest loader; tests patch this to swap the
    load operation (see patch_loader)."""

    def _list_manifests(self, build_opts):
        """Yield the path of every manifest file under build_opts.manifests_dir."""
        for dirpath, _dirnames, filenames in os.walk(build_opts.manifests_dir):
            # Dotfiles (editor droppings etc.) are not manifests.
            visible = (f for f in filenames if not f.startswith("."))
            for fname in visible:
                yield os.path.join(dirpath, fname)

    def _load_manifest(self, path):
        """Parse a single manifest file."""
        return ManifestParser(path)

    def load_project(self, build_opts, project_name):
        """Resolve *project_name* -- either a bare project name or an explicit
        path to a manifest file -- and return its parsed manifest."""
        looks_like_path = "/" in project_name or "\\" in project_name
        if looks_like_path:
            # Anything containing a path separator is treated as a literal path.
            return ManifestParser(project_name)
        for candidate in self._list_manifests(build_opts):
            if os.path.basename(candidate) == project_name:
                return ManifestParser(candidate)
        raise ManifestNotFound(project_name)

    def load_all(self, build_opts):
        """Load every available manifest, keyed by project name."""
        loaded = {}
        for path in self._list_manifests(build_opts):
            parsed = self._load_manifest(path)
            if parsed.name in loaded:
                raise Exception("found duplicate manifest '%s'" % parsed.name)
            loaded[parsed.name] = parsed
        return loaded
class ResourceLoader(Loader):
    """Loader that reads manifests out of package resource data via
    pkg_resources instead of the filesystem (used when running from a
    packaged/zipped distribution; installed by patch_loader)."""

    def __init__(self, namespace, manifests_dir):
        self.namespace = namespace
        self.manifests_dir = manifests_dir

    def _list_manifests(self, _build_opts):
        """Yield the resource path of every manifest under self.manifests_dir.

        The build_opts parameter is unused here (resources are located by
        namespace, not by build options) but kept for interface parity with
        Loader._list_manifests.
        """
        import pkg_resources

        dirs = [self.manifests_dir]
        while dirs:
            current = dirs.pop(0)
            for name in pkg_resources.resource_listdir(self.namespace, current):
                path = "%s/%s" % (current, name)
                if pkg_resources.resource_isdir(self.namespace, path):
                    dirs.append(path)
                else:
                    yield "%s/%s" % (current, name)

    def _find_manifest(self, project_name):
        """Return the resource path whose final component is project_name."""
        # BUG FIX: _list_manifests requires a (ignored) build_opts argument;
        # calling it with no argument raised TypeError before any manifest
        # could be found.
        for name in self._list_manifests(None):
            if name.endswith("/%s" % project_name):
                return name
        raise ManifestNotFound(project_name)

    def _load_manifest(self, path):
        """Parse a manifest from resource data rather than a file on disk."""
        import pkg_resources

        contents = pkg_resources.resource_string(self.namespace, path).decode("utf8")
        return ManifestParser(file_name=path, fp=contents)

    def load_project(self, build_opts, project_name):
        """Resolve project_name to a resource path and parse it."""
        resource_path = self._find_manifest(project_name)
        # BUG FIX: previously called self._load_resource_manifest(), a method
        # that does not exist; _load_manifest above is the resource-aware
        # implementation.
        return self._load_manifest(resource_path)
# Process-wide loader instance. Defaults to the filesystem Loader;
# patch_loader() swaps in a ResourceLoader.
LOADER = Loader()


def patch_loader(namespace, manifests_dir="manifests"):
    """Replace the global LOADER with a ResourceLoader that reads manifests
    from package resources under *namespace*/*manifests_dir*."""
    global LOADER
    LOADER = ResourceLoader(namespace, manifests_dir)


def load_project(build_opts, project_name):
    """given the name of a project or a path to a manifest file,
    load up the ManifestParser instance for it and return it"""
    return LOADER.load_project(build_opts, project_name)


def load_all_manifests(build_opts):
    """Load and return every available manifest, keyed by project name."""
    return LOADER.load_all(build_opts)
class ManifestLoader(object):
    """ManifestLoader stores information about project manifest relationships for a
    given set of (build options + platform) configuration.

    The ManifestLoader class primarily serves as a location to cache project dependency
    relationships and project hash values for this build configuration.
    """

    def __init__(self, build_opts, ctx_gen=None):
        # Delegate actual manifest parsing to the (possibly patched) module
        # level LOADER.
        self._loader = LOADER
        self.build_opts = build_opts
        if ctx_gen is None:
            self.ctx_gen = self.build_opts.get_context_generator()
        else:
            self.ctx_gen = ctx_gen

        # Cache: project name -> manifest object (single object per project).
        self.manifests_by_name = {}
        self._loaded_all = False
        # Cache: project name -> computed hash (see get_project_hash).
        self._project_hashes = {}
        # Per-project overrides installed via the set_project_* methods.
        self._fetcher_overrides = {}
        self._build_dir_overrides = {}
        self._install_dir_overrides = {}
        self._install_prefix_overrides = {}

    def load_manifest(self, name):
        """Return the manifest for *name*, loading and caching it on first use."""
        manifest = self.manifests_by_name.get(name)
        if manifest is None:
            manifest = self._loader.load_project(self.build_opts, name)
            self.manifests_by_name[name] = manifest
        return manifest

    def load_all_manifests(self):
        """Load every available manifest into the cache (idempotent) and
        return the name -> manifest mapping."""
        if not self._loaded_all:
            all_manifests_by_name = self._loader.load_all(self.build_opts)
            if self.manifests_by_name:
                # To help ensure that we only ever have a single manifest object for a
                # given project, and that it can't change once we have loaded it,
                # only update our mapping for projects that weren't already loaded.
                for name, manifest in all_manifests_by_name.items():
                    self.manifests_by_name.setdefault(name, manifest)
            else:
                self.manifests_by_name = all_manifests_by_name

            self._loaded_all = True

        return self.manifests_by_name

    def manifests_in_dependency_order(self, manifest=None):
        """Compute all dependencies of the specified project.  Returns a list of the
        dependencies plus the project itself, in topologically sorted order.

        Each entry in the returned list only depends on projects that appear before it
        in the list.

        If the input manifest is None, the dependencies for all currently loaded
        projects will be computed.  i.e., if you call load_all_manifests() followed by
        manifests_in_dependency_order() this will return a global dependency ordering of
        all projects."""
        # The list of deps that have been fully processed
        seen = set()
        # The list of deps which have yet to be evaluated.  This
        # can potentially contain duplicates.
        if manifest is None:
            deps = list(self.manifests_by_name.values())
        else:
            assert manifest.name in self.manifests_by_name
            deps = [manifest]
        # The list of manifests in dependency order
        dep_order = []

        while len(deps) > 0:
            m = deps.pop(0)
            if m.name in seen:
                continue

            # Consider its deps, if any.
            # We sort them for increased determinism; we'll produce
            # a correct order even if they aren't sorted, but we prefer
            # to produce the same order regardless of how they are listed
            # in the project manifest files.
            ctx = self.ctx_gen.get_context(m.name)
            dep_list = sorted(m.get_section_as_dict("dependencies", ctx).keys())
            builder = m.get("build", "builder", ctx=ctx)
            if builder in ("cmake", "python-wheel"):
                dep_list.append("cmake")
            elif builder == "autoconf" and m.name not in (
                "autoconf",
                "libtool",
                "automake",
            ):
                # they need libtool and its deps (automake, autoconf) so add
                # those as deps (but obviously not if we're building those
                # projects themselves)
                dep_list.append("libtool")

            dep_count = 0
            for dep_name in dep_list:
                # If we're not sure whether it is done, queue it up
                if dep_name not in seen:
                    dep = self.manifests_by_name.get(dep_name)
                    if dep is None:
                        dep = self._loader.load_project(self.build_opts, dep_name)
                        self.manifests_by_name[dep.name] = dep

                    deps.append(dep)
                    dep_count += 1

            if dep_count > 0:
                # If we queued anything, re-queue this item, as it depends
                # those new item(s) and their transitive deps.
                deps.append(m)
                continue

            # Its deps are done, so we can emit it
            seen.add(m.name)
            dep_order.append(m)

        return dep_order

    def set_project_src_dir(self, project_name, path):
        """Override the fetcher for *project_name* to use a local source dir."""
        self._fetcher_overrides[project_name] = fetcher.LocalDirFetcher(path)

    def set_project_build_dir(self, project_name, path):
        """Override the build directory for *project_name*."""
        self._build_dir_overrides[project_name] = path

    def set_project_install_dir(self, project_name, path):
        """Override the install directory for *project_name*."""
        self._install_dir_overrides[project_name] = path

    def set_project_install_prefix(self, project_name, path):
        """Override the install prefix for *project_name*."""
        self._install_prefix_overrides[project_name] = path

    def create_fetcher(self, manifest):
        """Return the fetcher for *manifest*, honoring any src-dir override."""
        override = self._fetcher_overrides.get(manifest.name)
        if override is not None:
            return override

        ctx = self.ctx_gen.get_context(manifest.name)
        return manifest.create_fetcher(self.build_opts, ctx)

    def get_project_hash(self, manifest):
        """Return the (cached) configuration hash for *manifest*."""
        h = self._project_hashes.get(manifest.name)
        if h is None:
            h = self._compute_project_hash(manifest)
            self._project_hashes[manifest.name] = h
        return h

    def _compute_project_hash(self, manifest):
        """This recursive function computes a hash for a given manifest.
        The hash takes into account some environmental factors on the
        host machine and includes the hashes of its dependencies.
        No caching of the computation is performed, which is theoretically
        wasteful but the computation is fast enough that it is not required
        to cache across multiple invocations."""
        ctx = self.ctx_gen.get_context(manifest.name)

        hasher = hashlib.sha256()
        # Some environmental and configuration things matter
        env = {}
        env["install_dir"] = self.build_opts.install_dir
        env["scratch_dir"] = self.build_opts.scratch_dir
        env["vcvars_path"] = self.build_opts.vcvars_path
        env["os"] = self.build_opts.host_type.ostype
        env["distro"] = self.build_opts.host_type.distro
        env["distro_vers"] = self.build_opts.host_type.distrovers
        for name in [
            "CXXFLAGS",
            "CPPFLAGS",
            "LDFLAGS",
            "CXX",
            "CC",
            "GETDEPS_CMAKE_DEFINES",
        ]:
            env[name] = os.environ.get(name)
        for tool in ["cc", "c++", "gcc", "g++", "clang", "clang++"]:
            env["tool-%s" % tool] = path_search(os.environ, tool)
        for name in manifest.get_section_as_args("depends.environment", ctx):
            env[name] = os.environ.get(name)

        # NOTE(review): this local deliberately shadows the module-level
        # `fetcher` import within this method only; `set_project_src_dir`
        # above still sees the module.
        fetcher = self.create_fetcher(manifest)
        env["fetcher.hash"] = fetcher.hash()

        # Fold the environment into the hash in a deterministic (sorted) order.
        for name in sorted(env.keys()):
            hasher.update(name.encode("utf-8"))
            value = env.get(name)
            if value is not None:
                try:
                    hasher.update(value.encode("utf-8"))
                except AttributeError as exc:
                    # Surface which entry was not a string for easier debugging.
                    raise AttributeError("name=%r, value=%r: %s" % (name, value, exc))

        manifest.update_hash(hasher, ctx)

        # Recursively include the hashes of this project's dependencies.
        dep_list = sorted(manifest.get_section_as_dict("dependencies", ctx).keys())
        for dep in dep_list:
            dep_manifest = self.load_manifest(dep)
            dep_hash = self.get_project_hash(dep_manifest)
            hasher.update(dep_hash.encode("utf-8"))

        # Use base64 to represent the hash, rather than the simple hex digest,
        # so that the string is shorter. Use the URL-safe encoding so that
        # the hash can also be safely used as a filename component.
        h = base64.urlsafe_b64encode(hasher.digest()).decode("ascii")
        # ... and because cmd.exe is troublesome with `=` signs, nerf those.
        # They tend to be padding characters at the end anyway, so we can
        # safely discard them.
        h = h.replace("=", "")

        return h

    def _get_project_dir_name(self, manifest):
        """Directory name for a project: bare name for first-party projects,
        name-hash otherwise so distinct configurations don't collide."""
        if manifest.is_first_party_project():
            return manifest.name
        else:
            project_hash = self.get_project_hash(manifest)
            return "%s-%s" % (manifest.name, project_hash)

    def get_project_install_dir(self, manifest):
        """Return the install dir for *manifest*, honoring overrides."""
        override = self._install_dir_overrides.get(manifest.name)
        if override:
            return override

        project_dir_name = self._get_project_dir_name(manifest)
        return os.path.join(self.build_opts.install_dir, project_dir_name)

    def get_project_build_dir(self, manifest):
        """Return the build dir for *manifest*, honoring overrides."""
        override = self._build_dir_overrides.get(manifest.name)
        if override:
            return override

        project_dir_name = self._get_project_dir_name(manifest)
        return os.path.join(self.build_opts.scratch_dir, "build", project_dir_name)

    def get_project_install_prefix(self, manifest):
        """Return the install prefix override for *manifest*, or None."""
        return self._install_prefix_overrides.get(manifest.name)

    def get_project_install_dir_respecting_install_prefix(self, manifest):
        """Install dir with any install-prefix override appended."""
        inst_dir = self.get_project_install_dir(manifest)
        prefix = self.get_project_install_prefix(manifest)
        if prefix:
            return inst_dir + prefix
        return inst_dir
| |
#Alex Holcombe alex.holcombe@sydney.edu.au
#Modified by Kim Ransley from the twoWords.py program at the github repository: https://github.com/alexholcombe/twoWords
#1. Inverted
#2. One word
#3. duration
#4. trail, trial
from __future__ import print_function
from psychopy import monitors, visual, event, data, logging, core, sound, gui
import psychopy.info
import scipy
import numpy as np
from math import atan, log, ceil
import copy
import time, sys, os, pylab
# Best-effort imports of sibling helper modules: a failure is only reported
# here (not raised), so any later use of the missing names will fail then.
try:
    from noiseStaircaseHelpers import printStaircase, toStaircase, outOfStaircase, createNoise, plotDataAndPsychometricCurve
except ImportError:
    print('Could not import from noiseStaircaseHelpers.py (you need that file to be in the same directory)')
try:
    import stringResponse
except ImportError:
    print('Could not import stringResponse.py (you need that file to be in the same directory)')
# --- Experiment configuration flags ---
wordEccentricity=4
tasks=['T1']; task = tasks[0]
#THINGS THAT COULD PREVENT SUCCESS ON A STRANGE MACHINE
#same screen or external screen? Set scrn=0 if one screen. scrn=1 means display stimulus on second screen.
#widthPix, heightPix
quitFinder = False #if checkRefreshEtc, quitFinder becomes True
autopilot=False
demo=False #False
exportImages= False #quits after one trial
subject='Hubert' #user is prompted to enter true subject name
if autopilot: subject='auto'
# Save into ./data when it exists, otherwise fall back to the working dir.
if os.path.isdir('.'+os.sep+'data'):
    dataDir='data'
else:
    print('"data" directory does not exist, so saving data in present working directory')
    dataDir='.'
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
showRefreshMisses=True #flicker fixation at refresh rate, to visualize if frames missed
feedback=True
autoLogging=False
if demo:
refreshRate = 60.; #100
staircaseTrials = 25
prefaceStaircaseTrialsN = 20 #22
prefaceStaircaseNoise = np.array([5,20,20,20, 50,50,50,5,80,80,80,5,95,95,95]) #will be recycled / not all used, as needed
descendingPsycho = True #psychometric function- more noise means worse performance
threshCriterion = 0.58
numWordsInStream = 1
# ----- Load stimulus word lists from external text files (one word per line) -----
# BUG FIX: the original called open() on both files without ever closing the
# handles (a resource leak); 'with' guarantees each file is closed after reading.
with open("wordList1.txt") as _wordFile1:
    wordList1 = [x.rstrip() for x in _wordFile1.readlines()]
with open("wordList2.txt") as _wordFile2:
    wordList2 = [x.rstrip() for x in _wordFile2.readlines()]
lettersUnparsed = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".upper()
# Strip any internal spaces from each word.
for i in range(len(wordList1)):
    wordList1[i] = wordList1[i].replace(" ", "") #delete spaces
for i in range(len(wordList2)):
    wordList2[i] = wordList2[i].replace(" ", "") #delete spaces
# ----- Display / monitor configuration (colors in psychopy rgb space, -1..1) -----
bgColor = [-.7,-.7,-.7] # [-1,-1,-1]
cueColor = [-.7,-.7,-.7] #originally [1.,1.,1.]
letterColor = [1.,1.,1.]
cueRadius = 7 #6 deg in Goodbourn & Holcombe
widthPix= 1280 #monitor width in pixels of Agosta
heightPix= 800 #800 #monitor height in pixels
monitorwidth = 38.7 #monitor width in cm
scrn=0 #0 to use main screen, 1 to use external screen connected to computer
fullscr=False #True to use fullscreen, False to not. Timing probably won't be quite right if fullscreen = False
allowGUI = False
if demo: monitorwidth = 23#18.0
if exportImages:
    widthPix = 600; heightPix = 600
    monitorwidth = 13.0
    fullscr=False; scrn=0
    framesSaved=0
if demo:
    scrn=0; fullscr=False
    widthPix = 800; heightPix = 600
    monitorname='testMonitor'
    allowGUI = True
viewdist = 57. #cm
# Convert monitor geometry to pixels-per-degree of visual angle.
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) /np.pi*180)
print('pixelperdegree=',pixelperdegree)
# create a dialog from dictionary
# First dialog: choose staircase vs. main experiment and whether to verify refresh rate.
infoFirst = { 'Do staircase (only)': False, 'Check refresh etc':False, 'Fullscreen (timing errors if not)': False, 'Screen refresh rate': 60 }
OK = gui.DlgFromDict(dictionary=infoFirst,
    title='AB experiment OR staircase to find thresh noise level for T1 performance criterion',
    order=['Do staircase (only)', 'Check refresh etc', 'Fullscreen (timing errors if not)'],
    tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
    #fixed=['Check refresh etc'])#this attribute can't be changed by the user
    )
if not OK.OK:
    print('User cancelled from dialog box'); core.quit()
doStaircase = infoFirst['Do staircase (only)']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
if checkRefreshEtc:
    quitFinder = True
if quitFinder:
    # Quit the macOS Finder to reduce background load during timing checks (macOS-only).
    import os
    applescript="\'tell application \"Finder\" to quit\'"
    shellCmd = 'osascript -e '+applescript
    os.system(shellCmd)
#letter size 2.5 deg
# ----- Stimulus timing: convert intended milliseconds into integer frame counts -----
SOAms = 500 #Battelli, Agosta, Goodbourn, Holcombe mostly using 133 #KR: was 233
#Minimum SOAms should be 84  because any shorter, I can't always notice the second ring when lag1.   71 in Martini E2 and E1b (actually he used 66.6 but that's because he had a crazy refresh rate of 90 Hz)
letterDurMs = 250 #Was 17. 23.6  in Martini E2 and E1b (actually he used 22.2 but that's because he had a crazy refresh rate of 90 Hz)
ISIms = SOAms - letterDurMs
letterDurFrames = int( np.floor(letterDurMs / (1000./refreshRate)) )
cueDurFrames = letterDurFrames
ISIframes = int( np.floor(ISIms / (1000./refreshRate)) )
#have set ISIframes and letterDurFrames to integer that corresponds as close as possible to originally intended ms
rateInfo = 'total SOA=' + str(round( (ISIframes + letterDurFrames)*1000./refreshRate, 2)) + ' or ' + str(ISIframes + letterDurFrames) + ' frames, comprising\n'
rateInfo+= 'ISIframes ='+str(ISIframes)+' or '+str(ISIframes*(1000./refreshRate))+' ms and letterDurFrames ='+str(letterDurFrames)+' or '+str(round( letterDurFrames*(1000./refreshRate), 2))+'ms'
logging.info(rateInfo); print(rateInfo)
trialDurFrames = int( numWordsInStream*(ISIframes+letterDurFrames) ) #trial duration in frames
# Monitor calibration object used for deg<->pix conversions.
monitorname = 'testmonitor'
waitBlank = False
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#relying on monitorwidth cm (39 for Mitsubishi to do deg calculations) and gamma info in calibratn
mon.setSizePix( (widthPix,heightPix) )
units='deg' #'cm'
def openMyStimWindow():
    """Create and return the experiment window.

    Wrapped in a function because the window is opened and closed several
    times during the session and must be configured identically each time.
    """
    win = visual.Window(
        monitor=mon,
        size=(widthPix, heightPix),
        allowGUI=allowGUI,
        units=units,
        color=bgColor,
        colorSpace='rgb',
        fullscr=fullscr,
        screen=scrn,
        waitBlanking=waitBlank,
    )  # Holcombe lab monitor
    return win
# Open the window once; optionally measure the actual refresh rate against the assumed one.
myWin = openMyStimWindow()
refreshMsg2 = ''
if not checkRefreshEtc:
    refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
    refreshRateWrong = False
else: #checkRefreshEtc
    runInfo = psychopy.info.RunTimeInfo(
            # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
            #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
            #version="<your experiment version info>",
            win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=True, ## True means report on everything
            userProcsDetailed=True  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            )
    #print(runInfo)
    logging.info(runInfo)
    print('Finished runInfo- which assesses the refresh and processes of this computer')
    #check screen refresh is what assuming it is ##############################################
    Hzs=list()
    myWin.flip(); myWin.flip();myWin.flip();myWin.flip();
    myWin.setRecordFrameIntervals(True) #otherwise myWin.fps won't work
    print('About to measure frame flips')
    for i in range(50):
        myWin.flip()
        Hzs.append( myWin.fps() )  #varies wildly on successive runs!
    myWin.setRecordFrameIntervals(False)
    # end testing of screen refresh########################################################
    # Use the median over 50 flips as the robust estimate of the true refresh rate.
    Hzs = np.array( Hzs );     Hz= np.median(Hzs)
    msPerFrame= 1000./Hz
    refreshMsg1= 'Frames per second ~='+ str( np.round(Hz,1) )
    refreshRateTolerancePct = 3
    pctOff = abs( (np.median(Hzs)-refreshRate) / refreshRate)
    refreshRateWrong =  pctOff > (refreshRateTolerancePct/100.)
    if refreshRateWrong:
        refreshMsg1 += ' BUT'
        refreshMsg1 += ' program assumes ' + str(refreshRate)
        refreshMsg2 =  'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
    else:
        refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
# ----- Second dialog: per-session parameters (subject, trials, noise level) -----
defaultNoiseLevel = 0.0 #to use if no staircase, can be set by user
trialsPerCondition = 20 #default value
dlgLabelsOrdered = list()  # records the order fields were added, so answers can be unpacked by name
if doStaircase:
    myDlg = gui.Dlg(title="Staircase to find appropriate noisePercent", pos=(200,400))
else:
    myDlg = gui.Dlg(title="RSVP experiment", pos=(200,400))
if not autopilot:
    myDlg.addField('Subject name (default="Hubert"):', 'Hubert', tip='or subject code')
    dlgLabelsOrdered.append('subject')
if doStaircase:
    easyTrialsCondText = 'Num preassigned noise trials to preface staircase with (default=' + str(prefaceStaircaseTrialsN) + '):'
    myDlg.addField(easyTrialsCondText, tip=str(prefaceStaircaseTrialsN))
    dlgLabelsOrdered.append('easyTrials')
    myDlg.addField('Staircase trials (default=' + str(staircaseTrials) + '):', tip="Staircase will run until this number is reached or it thinks it has precise estimate of threshold")
    dlgLabelsOrdered.append('staircaseTrials')
    pctCompletedBreak = 101
else:
    myDlg.addField('\tPercent noise dots=', defaultNoiseLevel, tip=str(defaultNoiseLevel))
    dlgLabelsOrdered.append('defaultNoiseLevel')
    myDlg.addField('Trials per condition (default=' + str(trialsPerCondition) + '):', trialsPerCondition, tip=str(trialsPerCondition))
    dlgLabelsOrdered.append('trialsPerCondition')
    pctCompletedBreak = 50
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
    myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
    logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
    msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
    myDlg.addText(msgWrongResolution, color='Red')
    logging.error(msgWrongResolution)
    print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
    thisInfo = myDlg.data #this will be a list of data returned from each field added in order
    if not autopilot:
        name=thisInfo[dlgLabelsOrdered.index('subject')]
        if len(name) > 0: #if entered something
            subject = name #change subject default name to what user entered
    if doStaircase:
        if len(thisInfo[dlgLabelsOrdered.index('staircaseTrials')]) >0:
            staircaseTrials = int( thisInfo[ dlgLabelsOrdered.index('staircaseTrials') ] ) #convert string to integer
            print('staircaseTrials entered by user=',staircaseTrials)
            # BUG FIX: psychopy's logging.info takes a single message string (extra
            # positional args are the timestamp/object), so the value passed as a
            # second argument never appeared in the log. Concatenate instead.
            logging.info('staircaseTrials entered by user=' + str(staircaseTrials))
        if len(thisInfo[dlgLabelsOrdered.index('easyTrials')]) >0:
            prefaceStaircaseTrialsN = int( thisInfo[ dlgLabelsOrdered.index('easyTrials') ] ) #convert string to integer
            print('prefaceStaircaseTrialsN entered by user=',thisInfo[dlgLabelsOrdered.index('easyTrials')])
            logging.info('prefaceStaircaseTrialsN entered by user=' + str(prefaceStaircaseTrialsN))  # BUG FIX: single-string logging, as above
    else: #not doing staircase
        trialsPerCondition = int( thisInfo[ dlgLabelsOrdered.index('trialsPerCondition') ] ) #convert string to integer
        print('trialsPerCondition=',trialsPerCondition)
        logging.info('trialsPerCondition =' + str(trialsPerCondition))  # BUG FIX: single-string logging, as above
        defaultNoiseLevel = int (thisInfo[ dlgLabelsOrdered.index('defaultNoiseLevel') ])
else:
    print('User cancelled from dialog box.')
    logging.flush()
    core.quit()
if not demo:
    allowGUI = False
myWin = openMyStimWindow()
#set up output data file, log file, copy of program code, and logging
infix = ''
if doStaircase:
    infix = 'staircase_'
fileName = os.path.join(dataDir, subject + '_' + infix+ timeAndDateStr)
if not demo and not exportImages:
    dataFile = open(fileName+'.txt', 'w')
    saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
    os.system(saveCodeCmd)  #save a copy of the code as it was when that subject was run
    logFname = fileName+'.log'
    ppLogF = logging.LogFile(logFname,
        filemode='w',#if you set this to 'a' it will append instead of overwriting
        level=logging.INFO)#errors, data and warnings will be sent to this logfile
if demo or exportImages:
    dataFile = sys.stdout; logF = sys.stdout
    logging.console.setLevel(logging.ERROR)  #only show this level messages and higher
logging.console.setLevel(logging.ERROR) #DEBUG means set console to receive nearly all messges, INFO next level, EXP, DATA, WARNING and ERROR
if fullscr and not demo and not exportImages:
    runInfo = psychopy.info.RunTimeInfo(
            # if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
            #author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
            #version="<your experiment version info>",
            win=myWin,    ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
            refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
            verbose=False, ## True means report on everything
            userProcsDetailed=True,  ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
            #randomSeed='set:42', ## a way to record, and optionally set, a random seed of type str for making reproducible random sequences
            ## None -> default
            ## 'time' will use experimentRuntime.epoch as the value for the seed, different value each time the script is run
            ##'set:time' --> seed value is set to experimentRuntime.epoch, and initialized: random.seed(info['randomSeed'])
            ##'set:42' --> set & initialize to str('42'), and will give the same sequence of random.random() for all runs of the script
            )
    logging.info(runInfo)
logging.flush()
# Pre-drawn TextStims for the two simultaneous RSVP streams (filled by calcAndPredrawStimuli).
textStimuliStream1 = list()
textStimuliStream2 = list() #used for second, simultaneous RSVP stream
def calcAndPredrawStimuli(wordList2):
    """Pre-build a TextStim for every word and return index orders for the two streams.

    Appends one TextStim per word to the global lists textStimuliStream1 (left
    stream, words from global wordList1) and textStimuliStream2 (right stream,
    words from the wordList2 argument), then returns (idxsStream1, idxsStream2):
    a shuffled index order for stream 1 and an identical copy for stream 2
    (the two streams are yoked; stream 2 is deliberately not reshuffled).

    NOTE(review): the returned index arrays span len(wordList)-1 entries, i.e.
    the final list item is excluded -- presumably the '###' mask entry; confirm.
    NOTE(review): relies on globals myWin, ltrHeight, letterColor, autoLogging,
    wordEccentricity, numWordsInStream, wordList1.
    """
    if len(wordList1) < numWordsInStream:
        print('Error! Your word list must have at least ',numWordsInStream,'strings')
    idxsIntoWordList1 = np.arange( len(wordList1) ) #create a list of indexes of the entire word list
    #idxsIntoWordList2 = np.arange( len(wordList2) )
    #print('wordList1=',wordList1)
    for i in range(0,len(wordList1)): #draw the words that will be used on this trial, the first numWordsInStream of the shuffled list
        word = wordList1[ i ]# #[ idxsIntoWordList1[i] ]
        word2 = wordList2[ i ]
        textStimulusStream1 = visual.TextStim(myWin,text=word,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream2 = visual.TextStim(myWin,text=word2,height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        textStimulusStream1.setPos([-wordEccentricity,0]) #left
        textStimuliStream1.append(textStimulusStream1)
        textStimulusStream2.setPos([wordEccentricity,0]) #right
        textStimuliStream2.append(textStimulusStream2)
    #idxsIntoWordList1
    idxsStream1 = idxsIntoWordList1[range(0,len(wordList1)-1)] #first RSVP stream
    np.random.shuffle(idxsStream1)
    idxsStream2 = copy.deepcopy(idxsStream1[range(0,len(wordList2)-1)])
    #np.random.shuffle(idxsStream2)
    return idxsStream1, idxsStream2
#create click sound for keyboard feedback
try:
    click=sound.Sound('406__tictacshutup__click-1-d.wav')
except: #in case file missing, create inferior click manually
    logging.warn('Could not load the desired click sound file, instead using manually created inferior click')
    click=sound.Sound('D',octave=4, sampleRate=22050, secs=0.015, bits=8)
if showRefreshMisses:
    fixSizePix = 32 #2.6  #make fixation bigger so flicker more conspicuous
else: fixSizePix = 32
fixColor = [1,1,1]
if exportImages: fixColor= [0,0,0]
# BUG FIX: use floor division so the dimensions passed to np.random.rand are ints;
# under Python 3 fixSizePix/4 is a float and np.random.rand raises TypeError.
fixatnNoiseTexture = np.round( np.random.rand(fixSizePix//4,fixSizePix//4) ,0 ) *2.0-1 #Can counterphase flicker noise texture to create salient flicker if you break fixation
fixation= visual.PatchStim(myWin, tex=fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False)
fixationBlank= visual.PatchStim(myWin, tex= -1*fixatnNoiseTexture, size=(fixSizePix,fixSizePix), units='pix', mask='circle', interpolate=False, autoLog=False) #reverse contrast
fixationPoint= visual.PatchStim(myWin,tex='none',colorSpace='rgb',color=(1,1,1),size=10,units='pix',autoLog=autoLogging)
# Response-screen text stimuli.
respPromptStim = visual.TextStim(myWin,pos=(0, -.9),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim = visual.TextStim(myWin,pos=(0, -.8),colorSpace='rgb',color=(1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
acceptTextStim.setText('Hit ENTER to accept. Backspace to edit')
respStim = visual.TextStim(myWin,pos=(0,0),colorSpace='rgb',color=(1,1,0),alignHoriz='center', alignVert='center',height=3,units='deg',autoLog=autoLogging)
clickSound, badKeySound = stringResponse.setupSoundsForResponse()
requireAcceptance = False
nextText = visual.TextStim(myWin,pos=(0, .1),colorSpace='rgb',color = (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
NextRemindCountText = visual.TextStim(myWin,pos=(0,.2),colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='center',height=.1,units='norm',autoLog=autoLogging)
screenshot= False; screenshotDone = False
stimList = []
#SETTING THE CONDITIONS: full crossing of cue position x response order x flip condition
cuePositions = np.array([0]) # [4,10,16,22] used in Martini E2, group 2
for cuePos in cuePositions:
   for rightResponseFirst in [False,True]:
        # NOTE(review): [True,True] means every trial is flipped (and each combination
        # is entered twice); [False,True] may have been intended -- confirm. Left
        # unchanged because the "Inverted" header comment suggests it could be deliberate.
        for bothWordsFlipped in [True,True]:
            stimList.append( {'cuePos':cuePos, 'rightResponseFirst':rightResponseFirst,
                'leftStreamFlip':bothWordsFlipped, 'rightStreamFlip':bothWordsFlipped} )
trials = data.TrialHandler(stimList,trialsPerCondition) #constant stimuli method
trialsForPossibleStaircase = data.TrialHandler(stimList,trialsPerCondition) #independent randomization, just to create random trials for staircase phase
numRightWrongEachCuepos = np.zeros([ len(cuePositions), 1 ]); #summary results to print out at end
logging.info( 'numtrials=' + str(trials.nTotal) + ' and each trialDurFrames='+str(trialDurFrames)+' or '+str(trialDurFrames*(1000./refreshRate))+ \
               ' ms' + ' task=' + task)
def numberToLetter(number): #0 = A, 25 = Z
    """Map 0..25 to 'A'..'Z'; any out-of-range value yields '@'."""
    if 0 <= number <= 25:
        try:
            return chr(ord('A') + number)
        except:
            return '@'
    # out of range: not really a letter
    return '@'
def letterToNumber(letter): #A = 0, Z = 25
    """Map 'A'..'Z' to 0..25; anything unmappable (wrong case, digits,
    non-strings) yields -999."""
    try:
        if 'A' <= letter <= 'Z':
            return ord(letter) - ord('A')
        return -999
    except:
        # e.g. comparing a non-string raises TypeError
        return -999
def wordToIdx(word2,wordList2):
    """Return the index of *word2* in *wordList2*, comparing case-insensitively
    (list entries are uppercased; *word2* is expected to already be uppercase).

    Returns None when the word is not present or on unexpected error.
    """
    try:
        #http://stackoverflow.com/questions/7102050/how-can-i-get-a-python-generator-to-return-none-rather-than-stopiteration
        firstMatchIdx = next((i for i, val in enumerate(wordList2) if val.upper()==word2), None) #return i (index) unless no matches, in which case return None
        return firstMatchIdx
    except:
        # BUG FIX: the original referenced the undefined name 'word' here, so this
        # error path itself raised a NameError instead of reporting the problem.
        print('Unexpected error in wordToIdx with word=',word2)
        return (None)
#print header for data file
# One row per trial, tab-separated; column order here must stay in sync with the
# per-trial writes in handleAndScoreResponse and the main trial loop.
print('experimentPhase\ttrialnum\tsubject\ttask\t',file=dataFile,end='')
print('noisePercent\tleftStreamFlip\trightStreamFlip\t',end='',file=dataFile)
if task=='T1':
    numRespsWanted = 2
    dataFile.write('rightResponseFirst\t')
for i in range(numRespsWanted):
    dataFile.write('cuePos'+str(i)+'\t')   #have to use write to avoid ' ' between successive text, at least until Python 3
    dataFile.write('answer'+str(i)+'\t')
    dataFile.write('response'+str(i)+'\t')
    dataFile.write('correct'+str(i)+'\t')
    # dataFile.write('responsePosRelative'+str(i)+'\t')
print('timingBlips',file=dataFile)
#end of header
def oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
                                                   noise,proportnNoise,allFieldCoords,numNoiseDots ):
    """Draw everything for frame *n* of the trial: both word streams, the cue ring,
    and (optionally) the noise dots. Returns True.

    seq1/seq2 are index arrays into the pre-drawn textStimuliStream1/2; the word
    shown is seq[floor(n/SOAframes)], visible for the first letterDurFrames of
    each SOA and drawn in bgColor (invisible) during the ISI so per-frame timing
    load stays constant.

    NOTE(review): if seq2 is None, thisStim2Idx is never assigned but is used
    below, which would raise NameError -- presumably seq2 is never None in
    practice; confirm against the callers.
    """
    #defining a function to draw each frame of stim.
    #seq1 is an array of indices corresponding to the appropriate pre-drawn stimulus, contained in textStimuli
    SOAframes = letterDurFrames+ISIframes
    cueFrames = thisTrial['cuePos']*SOAframes #cuesPos is global variable
    stimN = int( np.floor(n/SOAframes) )
    frameOfThisLetter = n % SOAframes #every SOAframes, new letter
    showLetter = frameOfThisLetter < letterDurFrames #if true, it's not time for the blank ISI. it's still time to draw the letter
    #print 'n=',n,' SOAframes=',SOAframes, ' letterDurFrames=', letterDurFrames, ' (n % SOAframes) =', (n % SOAframes) #DEBUGOFF
    thisStimIdx = seq1[stimN] #which letter, from A to Z (1 to 26), should be shown?
    if seq2 is not None:
        thisStim2Idx = seq2[stimN]
    #so that any timing problems occur just as often for every frame, always draw the letter and the cue, but simply draw it in the bgColor when it's not meant to be on
    cue.setLineColor( bgColor )
    if type(cueFrames) not in [tuple,list,np.ndarray]: #scalar. But need collection to do loop based on it
        cueFrames = list([cueFrames])
    for cueFrame in cueFrames: #check whether it's time for any cue
        if n>=cueFrame and n<cueFrame+cueDurFrames:
            cue.setLineColor( cueColor )
    if showLetter:
        textStimuliStream1[thisStimIdx].setColor( letterColor )
        textStimuliStream2[thisStim2Idx].setColor( letterColor )
    else:
        textStimuliStream1[thisStimIdx].setColor( bgColor )
        textStimuliStream2[thisStim2Idx].setColor( bgColor )
    # Apply the inversion (mirror) manipulation for this trial to both streams.
    textStimuliStream1[thisStimIdx].flipVert = thisTrial['leftStreamFlip']
    textStimuliStream1[thisStimIdx].flipHoriz = thisTrial['leftStreamFlip']
    textStimuliStream2[thisStim2Idx].flipVert = thisTrial['rightStreamFlip']
    textStimuliStream2[thisStim2Idx].flipHoriz = thisTrial['rightStreamFlip']
    textStimuliStream1[thisStimIdx].draw()
    textStimuliStream2[thisStim2Idx].draw()
    cue.draw()
    refreshNoise = False #Not recommended because takes longer than a frame, even to shuffle apparently. Or may be setXYs step
    if proportnNoise>0 and refreshNoise:
        if frameOfThisLetter ==0:
            np.random.shuffle(allFieldCoords)
            dotCoords = allFieldCoords[0:numNoiseDots]
            noise.setXYs(dotCoords)
    if proportnNoise>0:
        noise.draw()
    return True
# #######End of function definition that displays the stimuli!!!! #####################################
#############################################################################################################################
# Cue ring drawn around fixation; normally invisible (bgColor) and flashed in cueColor.
cue = visual.Circle(myWin,
                 radius=cueRadius,#Martini used circles with diameter of 12 deg
                 lineColorSpace = 'rgb',
                 lineColor=bgColor,
                 lineWidth=4.0, #in pixels. Was thinner (2 pixels) in letter AB experiments
                 units = 'deg',
                 fillColorSpace = 'rgb',
                 fillColor=None, #beware, with convex shapes fill colors don't work
                 pos= [0,0], #the anchor (rotation and vertices are position with respect to this)
                 interpolate=True,
                 autoLog=False)#this stim changes too much for autologging to be useful
ltrHeight = 2.5 #Martini letters were 2.5deg high
#All noise dot coordinates ultimately in pixels, so can specify each dot is one pixel
noiseFieldWidthDeg=ltrHeight *1.0
noiseFieldWidthPix = int( round( noiseFieldWidthDeg*pixelperdegree ) )
def timingCheckAndLog(ts,trialN):
    """Check the frame-time list *ts* for skipped/long frames and log them.

    ts: list of clock times recorded after each frame flip.
    Returns the number of inter-frame intervals that exceeded the tolerance.
    """
    interframeIntervs = np.diff(ts)*1000
    #print ' interframe intervs were ',around(interframeIntervs,1) #DEBUGOFF
    frameTimeTolerance=.3 #proportion longer than refreshRate that will not count as a miss
    longFrameLimit = np.round(1000/refreshRate*(1.0+frameTimeTolerance),2)
    idxsInterframeLong = np.where( interframeIntervs > longFrameLimit ) [0] #frames that exceeded 150% of expected duration
    numCasesInterframeLong = len( idxsInterframeLong )
    if numCasesInterframeLong >0 and (not demo):
        longFramesStr = 'ERROR,'+str(numCasesInterframeLong)+' frames were longer than '+str(longFrameLimit)+' ms'
        if demo:  # NOTE(review): unreachable -- the enclosing condition already requires (not demo); confirm intended demo-mode behavior
            longFramesStr += 'not printing them all because in demo mode'
        else:
            longFramesStr += ' apparently screen refreshes skipped, interframe durs were:'+\
                    str( np.around( interframeIntervs[idxsInterframeLong] ,1 ) )+ ' and was these frames: '+ str(idxsInterframeLong)
        if longFramesStr != None:
            logging.error( 'trialnum='+str(trialN)+' '+longFramesStr )
            if not demo:
                flankingAlso=list()
                for idx in idxsInterframeLong: #also print timing of one before and one after long frame
                    if idx-1>=0:
                        flankingAlso.append(idx-1)
                    else: flankingAlso.append(np.nan)  # BUG FIX: np.NaN alias was removed in NumPy 2.0; np.nan works everywhere
                    flankingAlso.append(idx)
                    if idx+1<len(interframeIntervs): flankingAlso.append(idx+1)
                    else: flankingAlso.append(np.nan)
                flankingAlso = np.array(flankingAlso)
                # BUG FIX: np.negative on a boolean array is a TypeError in modern NumPy;
                # logical_not is the correct way to invert the NaN mask.
                flankingAlso = flankingAlso[np.logical_not(np.isnan(flankingAlso))] #remove nan values
                flankingAlso = flankingAlso.astype(int) #cast as integers, so can use as subscripts
                logging.info( 'flankers also='+str( np.around( interframeIntervs[flankingAlso], 1) ) ) #because this is not an essential error message, as previous one already indicates error
                #As INFO, at least it won't fill up the console when console set to WARNING or higher
    return numCasesInterframeLong
#end timing check
# Per-session bookkeeping: trial clock and running accuracy tallies.
trialClock = core.Clock()
numTrialsCorrect = 0;
numTrialsApproxCorrect = 0;
numTrialsEachCorrect= np.zeros( numRespsWanted )  # numRespsWanted set above (T1 => 2 responses)
numTrialsEachApproxCorrect= np.zeros( numRespsWanted )
def do_RSVP_stim(thisTrial, seq1, seq2, proportnNoise,trialN):
    """Run the stimulus portion of one trial: fixation interval, pre-mask,
    the RSVP word stream(s), and a post-mask; then set the response prompt.

    Returns (cuesPos, correctAnswerIdxsStream1, correctAnswerIdxsStream2, ts)
    where ts is the list of frame times for later timing checks.
    """
    #relies on global variables:
    #   textStimuli, logging, bgColor
    #  thisTrial should have 'cuePos'
    global framesSaved #because change this variable. Can only change a global variable if you declare it
    cuesPos = [] #will contain the positions in the stream of all the cues (targets)
    cuesPos.append(thisTrial['cuePos'])
    cuesPos = np.array(cuesPos)
    noise = None; allFieldCoords=None; numNoiseDots=0
    if proportnNoise > 0: #generating noise is time-consuming, so only do it once per trial. Then shuffle noise coordinates for each letter
        (noise,allFieldCoords,numNoiseDots) = createNoise(proportnNoise,myWin,noiseFieldWidthPix, bgColor)
    preDrawStimToGreasePipeline = list() #I don't know why this works, but without drawing it I have consistent timing blip first time that draw ringInnerR for phantom contours
    cue.setLineColor(bgColor)
    preDrawStimToGreasePipeline.extend([cue])
    for stim in preDrawStimToGreasePipeline:
        stim.draw()
    myWin.flip(); myWin.flip()
    #end preparation of stimuli
    core.wait(.1);
    trialClock.reset()
    fixatnPeriodMin = 0.3
    fixatnPeriodFrames = int( (np.random.rand(1)/2.+fixatnPeriodMin) *refreshRate) #random interval between 800ms and 1.3s
    ts = list(); #to store time of each drawing, to check whether skipped frames
    for i in range(fixatnPeriodFrames+20): #prestim fixation interval
        #if i%4>=2 or demo or exportImages: #flicker fixation on and off at framerate to see when skip frame
        # fixation.draw()
        #else: fixationBlank.draw()
        fixationPoint.draw()
        myWin.flip() #end fixation interval
    #myWin.setRecordFrameIntervals(True); #can't get it to stop detecting superlong frames
    t0 = trialClock.getTime()
    # adding a noise mask: '###' shown at each word location before the stream starts
    noiseMaskMin = 0.20
    noiseMaskFrames = int(noiseMaskMin *refreshRate)
    for i in range(noiseMaskFrames):
        #unicodeStuff = visual.TextStim(myWin,
        # text = u"unicode (eg \u03A8 \u040A \u03A3)",#you can find the unicode character value from MS Word 'insert symbol'
        # color='black', font=serif,pos=(0,3),
        # height = 1)
        myPatch1 = visual.TextStim(myWin, text = u"###",pos=(wordEccentricity, 0),height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging )
        myPatch2 = visual.TextStim(myWin, text = u"###",pos=(-wordEccentricity, 0),height=ltrHeight,colorSpace='rgb',color=letterColor,alignHoriz='center',alignVert='center',units='deg',autoLog=autoLogging)
        myPatch1.draw()
        myPatch2.draw()
        myWin.flip()
    for n in range(trialDurFrames): #this is the loop for this trial's stimulus!
        worked = oneFrameOfStim( n,cue,seq1,seq2,cueDurFrames,letterDurFrames,ISIframes,thisTrial,textStimuliStream1,textStimuliStream2,
                                                     noise,proportnNoise,allFieldCoords,numNoiseDots ) #draw letter and possibly cue and noise on top
        fixationPoint.draw()
        if exportImages:
            myWin.getMovieFrame(buffer='back') #for later saving
            framesSaved +=1
        myWin.flip()
        t=trialClock.getTime()-t0; ts.append(t);
    #end of big stimulus loop
    myWin.setRecordFrameIntervals(False);
    # adding a noise mask: drifting random-texture patches after the stream ends
    noiseMaskMin = 0.9
    noiseMaskFrames = int(noiseMaskMin *refreshRate)
    for i in range(noiseMaskFrames):
        # BUG FIX: scipy.random was an alias of numpy.random and has been removed
        # from modern SciPy; call numpy's RNG directly (identical behavior).
        noiseTexture = np.random.rand(128,128)*2.0-1
        myPatch1 = visual.GratingStim(myWin, tex=noiseTexture, pos=(3, 0),
            size=(5,2), units='deg',
            interpolate=False,
            autoLog=False)#this stim changes too much for autologging to be useful
        myPatch2 = visual.GratingStim(myWin, tex=noiseTexture, pos=(-3, 0),
            size=(5,2), units='deg',
            interpolate=False,
            autoLog=False)
        myPatch1.phase += (1 / 128.0, 0.5 / 128.0) # increment by (1, 0.5) pixels per frame
        myPatch2.phase += (1 / 128.0, 0.5 / 128.0) # increment by (1, 0.5) pixels per frame
        myPatch1.draw()
        myPatch2.draw()
        myWin.flip()
    if task=='T1':
        respPromptStim.setText('What were the two words?',log=False)
    else: respPromptStim.setText('Error: unexpected task',log=False)
    postCueNumBlobsAway=-999 #doesn't apply to non-tracking and click tracking task
    correctAnswerIdxsStream1 = np.array( seq1[cuesPos] )
    correctAnswerIdxsStream2 = np.array( seq2[cuesPos] )
    #print('correctAnswerIdxsStream1=',correctAnswerIdxsStream1, 'wordList1[correctAnswerIdxsStream1[0]]=',wordList1[correctAnswerIdxsStream1[0]])
    return cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2,ts
def handleAndScoreResponse(passThisTrial,response,responseAutopilot,task,stimSequence,cuePos,correctAnswerIdx):
    """Score one response against the correct answer and append the result
    columns (cuePos, answer, response, correct) to dataFile.

    response: sequence of characters typed by the subject.
    correctAnswerIdx: index of the correct word in the word list.
    Returns 1 if the response matched the correct word exactly, else 0.
    Relies on globals: autopilot, dataFile, wordList2.
    """
    if autopilot or passThisTrial:
        response = responseAutopilot
    correct = 0
    idx = correctAnswerIdx
    # BUG FIX: the original read wordList[idx], but no 'wordList' is defined in
    # this file (only wordList1 and wordList2 exist), so this line raised
    # NameError whenever a response was scored. Using wordList2 to match the
    # wordToIdx() lookup below.
    # NOTE(review): if the left stream's answers come from wordList1, the caller
    # should pass the relevant list in instead -- confirm against the trial loop.
    correctAnswer = wordList2[idx].upper()
    responseString= ''.join(['%s' % char for char in response])
    responseString= responseString.upper()
    #print('correctAnswer=',correctAnswer ,' responseString=',responseString)
    if correctAnswer == responseString:
        correct = 1
    # Currently unused; retained because the (removed) position-relative scoring
    # code used it, and wordToIdx has no side effects on the success path.
    responseWordIdx = wordToIdx(responseString,wordList2)
    #print response stuff to dataFile
    #header was answerPos0, answer0, response0, correct0
    print(cuePos,'\t', end='', file=dataFile)
    print(correctAnswer, '\t', end='', file=dataFile) #answer0
    print(responseString, '\t', end='', file=dataFile) #response0
    print(correct, '\t', end='',file=dataFile) #correct0
    return correct
#end handleAndScoreResponses
def play_high_tone_correct_low_incorrect(correct, passThisTrial=False):
    """Auditory feedback: a high tone for a correct answer, a low tone for an
    incorrect one, and an alternating high/low pair (twice) for a passed trial.
    Both tones are created up-front so the feedback latency is the same on
    every branch."""
    success_tone = sound.Sound('G', octave=5, sampleRate=6000, secs=.3, bits=8)
    failure_tone = sound.Sound('F', octave=3, sampleRate=6000, secs=.3, bits=8)
    success_tone.setVolume(0.9)
    failure_tone.setVolume(1.0)
    if correct:
        success_tone.play()
    elif passThisTrial:
        pass_tone = sound.Sound('G', octave=4, sampleRate=2000, secs=.08, bits=8)
        for _ in range(2):
            pass_tone.play()
            failure_tone.play()
    else:
        # incorrect
        failure_tone.play()
# Shared experiment state: expStop becomes True on user abort; nDoneMain counts
# completed main-phase trials (stays -1 if only the staircase runs).
expStop=False
nDoneMain = -1 #change to zero once start main part of experiment
# ---- STAIRCASE PHASE -----------------------------------------------------
# Run easy "preface" trials at preset noise levels first, then a QUEST (or
# conventional up/down) staircase to estimate the noise threshold, and finally
# fit and plot a Weibull psychometric function.
if doStaircase:
    #create the staircase handler
    useQuest = True
    if useQuest:
        staircase = data.QuestHandler(startVal = 95,
                          startValSd = 80,
                          stopInterval= 1, #sd of posterior has to be this small or smaller for staircase to stop, unless nTrials reached
                          nTrials = staircaseTrials,
                          #extraInfo = thisInfo,
                          pThreshold = threshCriterion, #0.25,
                          gamma = 1./26,
                          delta=0.02, #lapse rate, I suppose for Weibull function fit
                          method = 'quantile', #uses the median of the posterior as the final answer
                          stepType = 'log',  #will home in on the 80% threshold. But stepType = 'log' doesn't usually work
                          minVal=1, maxVal = 100
                          )
        print('created QUEST staircase')
    else:
        stepSizesLinear = [.2,.2,.1,.1,.05,.05]
        stepSizesLog = [log(1.4,10),log(1.4,10),log(1.3,10),log(1.3,10),log(1.2,10)]
        staircase = data.StairHandler(startVal = 0.1,
                                      stepType = 'log', #if log, what do I want to multiply it by
                                      stepSizes = stepSizesLog,    #step size to use after each reversal
                                      minVal=0, maxVal=1,
                                      nUp=1, nDown=3,  #will home in on the 80% threshold
                                      nReversals = 2, #The staircase terminates when nTrials have been exceeded, or when both nReversals and nTrials have been exceeded
                                      nTrials=1)
        print('created conventional staircase')
    if prefaceStaircaseTrialsN > len(prefaceStaircaseNoise): #repeat array to accommodate desired number of easyStarterTrials
        prefaceStaircaseNoise = np.tile( prefaceStaircaseNoise, ceil( prefaceStaircaseTrialsN/len(prefaceStaircaseNoise) ) )
        prefaceStaircaseNoise = prefaceStaircaseNoise[0:prefaceStaircaseTrialsN]
    # NOTE(review): missing space in the message below ("+'trials") — cosmetic
    # only, left as-is because it is a runtime string.
    phasesMsg = ('Doing '+str(prefaceStaircaseTrialsN)+'trials with noisePercent= '+str(prefaceStaircaseNoise)+' then doing a max '+str(staircaseTrials)+'-trial staircase')
    print(phasesMsg); logging.info(phasesMsg)

    #staircaseStarterNoise PHASE OF EXPERIMENT
    corrEachTrial = list() #only needed for easyStaircaseStarterNoise
    staircaseTrialN = -1; mainStaircaseGoing = False
    while (not staircase.finished) and expStop==False: #staircase.thisTrialN < staircase.nTrials
        if staircaseTrialN+1 < len(prefaceStaircaseNoise): #still doing easyStaircaseStarterNoise
            staircaseTrialN += 1
            noisePercent = prefaceStaircaseNoise[staircaseTrialN]
        else:
            if staircaseTrialN+1 == len(prefaceStaircaseNoise): #add these non-staircase trials so QUEST knows about them
                mainStaircaseGoing = True
                print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise)
                staircase.importData(100-prefaceStaircaseNoise, np.array(corrEachTrial))
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=False, printInternalVal=True, alsoLog=False)
            try: #advance the staircase
                printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
                noisePercent = 100. - staircase.next() #will step through the staircase, based on whether told it (addResponse) got it right or wrong
                staircaseTrialN += 1
            except StopIteration: #Need this here, even though test for finished above. I can't understand why finished test doesn't accomplish this.
                print('stopping because staircase.next() returned a StopIteration, which it does when it is finished')
                break #break out of the trials loop
        #print('staircaseTrialN=',staircaseTrialN)
        idxsStream1, idxsStream2 = calcAndPredrawStimuli(wordList2)
        correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts  = \
            do_RSVP_stim(cuePos, idxsStream1, idxsStream2, noisePercent/100.,staircaseTrialN)
        numCasesInterframeLong = timingCheckAndLog(ts,staircaseTrialN)
        expStop,passThisTrial,responses,responsesAutopilot = \
            stringResponse.collectStringResponse(numRespsWanted,respPromptStim,respStim,acceptTextStim,myWin,clickSound,badKeySound,
                                                 requireAcceptance,autopilot,responseDebug=True)
        if not expStop:
            if mainStaircaseGoing:
                print('staircase\t', end='', file=dataFile)
            else:
                print('staircase_preface\t', end='', file=dataFile)
            #header start 'trialnum\tsubject\ttask\t'
            print(staircaseTrialN,'\t', end='', file=dataFile) #first thing printed on each line of dataFile
            print(subject,'\t',task,'\t', round(noisePercent,2),'\t', end='', file=dataFile)
            # NOTE(review): handleAndScoreResponse (defined above) returns a
            # single value, yet three are unpacked here; also
            # 'responseAutopilot', 'sequenceLeft', 'cuePos' and
            # 'correctAnswerIdx' are not assigned in this branch
            # (collectStringResponse returned 'responsesAutopilot'). This
            # staircase branch looks stale relative to the main loop —
            # confirm before running with doStaircase=True.
            correct,approxCorrect,responsePosRelative= handleAndScoreResponse(
                                                passThisTrial,responses,responseAutopilot,task,sequenceLeft,cuesPos[0],correctAnswerIdx )
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            core.wait(.06)
            if feedback:
                play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            # NOTE(review): T1approxCorrect is never assigned in this loop —
            # presumably a leftover name from an earlier single-target version;
            # verify before enabling the staircase.
            print('staircaseTrialN=', staircaseTrialN,' noisePercent=',round(noisePercent,3),' T1approxCorrect=',T1approxCorrect) #debugON
            corrEachTrial.append(T1approxCorrect)
            if mainStaircaseGoing:
                staircase.addResponse(T1approxCorrect, intensity = 100-noisePercent) #Add a 1 or 0 to signify a correct/detected or incorrect/missed trial
                #print('Have added an intensity of','{:.3f}'.format(100-noisePercent), 'T1approxCorrect =', T1approxCorrect, ' to staircase') #debugON
    #ENDING STAIRCASE PHASE
    if staircaseTrialN+1 < len(prefaceStaircaseNoise) and (staircaseTrialN>=0): #exp stopped before got through staircase preface trials, so haven't imported yet
        print('Importing ',corrEachTrial,' and intensities ',prefaceStaircaseNoise[0:staircaseTrialN+1])
        # NOTE(review): the slice here is [0:staircaseTrialN] but the print
        # above uses [0:staircaseTrialN+1] — possible off-by-one that would
        # import one fewer intensity than responses; confirm.
        staircase.importData(100-prefaceStaircaseNoise[0:staircaseTrialN], np.array(corrEachTrial))
    print('framesSaved after staircase=',framesSaved) #debugON
    timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
    msg = ('prefaceStaircase phase' if expStop else '')
    msg += ('ABORTED' if expStop else 'Finished') + ' staircase part of experiment at ' + timeAndDateStr
    logging.info(msg); print(msg)
    printStaircase(staircase, descendingPsycho, briefTrialUpdate=True, printInternalVal=True, alsoLog=False)
    #print('staircase.quantile=',round(staircase.quantile(),2),' sd=',round(staircase.sd(),2))
    threshNoise = round(staircase.quantile(),3)
    if descendingPsycho:
        threshNoise = 100- threshNoise
    threshNoise = max( 0, threshNoise ) #e.g. ff get all trials wrong, posterior peaks at a very negative number
    msg= 'Staircase estimate of threshold = ' + str(threshNoise) + ' with sd=' + str(round(staircase.sd(),2))
    logging.info(msg); print(msg)
    myWin.close()
    #Fit and plot data
    fit = None
    try:
        intensityForCurveFitting = staircase.intensities
        if descendingPsycho:
            intensityForCurveFitting = 100-staircase.intensities #because fitWeibull assumes curve is ascending
        fit = data.FitWeibull(intensityForCurveFitting, staircase.data, expectedMin=1/26., sems = 1.0/len(staircase.intensities))
    except:
        # NOTE(review): bare except deliberately swallows any fitting error so
        # the session still ends cleanly; fit stays None and is handled below.
        print("Fit failed.")
    plotDataAndPsychometricCurve(staircase,fit,descendingPsycho,threshCriterion)
    #save figure to file
    pylab.savefig(fileName+'.pdf')
    print('The plot has been saved, as '+fileName+'.pdf')
    pylab.show() #must call this to actually show plot
# ---- MAIN (non-staircase) PHASE -----------------------------------------
# Fixed noise level; one RSVP trial per iteration with two response streams
# collected in left/right (or reversed) order, scored, written to dataFile.
else: #not staircase
    noisePercent = defaultNoiseLevel
    phasesMsg = 'Experiment will have '+str(trials.nTotal)+' trials. Letters will be drawn with superposed noise of ' + "{:.2%}".format(defaultNoiseLevel)
    print(phasesMsg); logging.info(phasesMsg)
    nDoneMain =0
    while nDoneMain < trials.nTotal and expStop==False: #MAIN EXPERIMENT LOOP
        if nDoneMain==0:
            msg='Starting main (non-staircase) part of experiment'
            logging.info(msg); print(msg)
        thisTrial = trials.next() #get a proper (non-staircase) trial
        sequenceStream1, sequenceStream2 = calcAndPredrawStimuli(wordList2)
        cuesPos,correctAnswerIdxsStream1,correctAnswerIdxsStream2, ts  = \
            do_RSVP_stim(thisTrial, sequenceStream1, sequenceStream2, noisePercent/100.,nDoneMain)
        numCasesInterframeLong = timingCheckAndLog(ts,nDoneMain)
        #call for each response
        expStop = list(); passThisTrial = list(); responses=list(); responsesAutopilot=list()
        numCharsInResponse = len(wordList2[0])
        dL = [None]*numRespsWanted #dummy list for null values
        expStop = copy.deepcopy(dL); responses = copy.deepcopy(dL); responsesAutopilot = copy.deepcopy(dL); passThisTrial=copy.deepcopy(dL)
        responseOrder = [0,1]
        if thisTrial['rightResponseFirst']: #change order of indices depending on rightResponseFirst. response0, answer0 etc refer to which one had to be reported first
            responseOrder.reverse()
        for i in responseOrder:
            x = 3* wordEccentricity*(i*2-1) #put it 3 times farther out than stimulus, so participant is sure which is left and which right
            expStop[i],passThisTrial[i],responses[i],responsesAutopilot[i] = stringResponse.collectStringResponse(
                                      numCharsInResponse,x,respPromptStim,respStim,acceptTextStim,fixationPoint,myWin,clickSound,badKeySound,
                                      requireAcceptance,autopilot,responseDebug=True)
        # collapse the per-stream flags: abort/pass if it happened for either response
        expStop = np.array(expStop).any(); passThisTrial = np.array(passThisTrial).any()
        if not expStop:
            print('main\t', end='', file=dataFile) #first thing printed on each line of dataFile to indicate main part of experiment, not staircase
            print(nDoneMain,'\t', end='', file=dataFile)
            print(subject,'\t',task,'\t', round(noisePercent,3),'\t', end='', file=dataFile)
            print(thisTrial['leftStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightStreamFlip'],'\t', end='', file=dataFile)
            print(thisTrial['rightResponseFirst'],'\t', end='', file=dataFile)
            i = 0
            eachCorrect = np.ones(numRespsWanted)*-999
            for i in range(numRespsWanted): #scored and printed to dataFile in left first, right second order even if collected in different order
                if i==0:
                    sequenceStream = sequenceStream1; correctAnswerIdxs = correctAnswerIdxsStream1; wordList = wordList1;
                else: sequenceStream = sequenceStream2; correctAnswerIdxs = correctAnswerIdxsStream2;  wordList = wordList2;
                correct = (
                        handleAndScoreResponse(passThisTrial,responses[i],responsesAutopilot[i],task,sequenceStream,thisTrial['cuePos'],correctAnswerIdxs ) )
                eachCorrect[i] = correct
            print(numCasesInterframeLong, file=dataFile) #timingBlips, last thing recorded on each line of dataFile
            print('correct=',correct,'eachCorrect=',eachCorrect)
            # numTrialsCorrect / numTrialsEachCorrect are assumed to be
            # initialized earlier in the script (not visible here) — TODO confirm.
            numTrialsCorrect += eachCorrect.all() #so count -1 as 0
            numTrialsEachCorrect += eachCorrect #list numRespsWanted long
            if exportImages:  #catches one frame of response
                myWin.getMovieFrame() #I cant explain why another getMovieFrame, and core.wait is needed
                framesSaved +=1; core.wait(.1)
                myWin.saveMovieFrames('images_sounds_movies/frames.png') #mov not currently supported
                expStop=True
            core.wait(.1)
            if feedback: play_high_tone_correct_low_incorrect(correct, passThisTrial=False)
            nDoneMain+=1
            dataFile.flush(); logging.flush()
            print('nDoneMain=', nDoneMain,' trials.nTotal=',trials.nTotal) #' trials.thisN=',trials.thisN
            # Periodic rest break: show progress, then busy-wait for SPACE
            # (continue) or ESCAPE (abort); autopilot skips the wait entirely.
            if (trials.nTotal > 6 and nDoneMain > 2 and nDoneMain %
                 ( trials.nTotal*pctCompletedBreak/100. ) ==1):  #dont modulus 0 because then will do it for last trial
                    nextText.setText('Press "SPACE" to continue!')
                    nextText.draw()
                    progressMsg = 'Completed ' + str(nDoneMain) + ' of ' + str(trials.nTotal) + ' trials'
                    NextRemindCountText.setText(progressMsg)
                    NextRemindCountText.draw()
                    myWin.flip() # myWin.flip(clearBuffer=True)
                    waiting=True
                    while waiting:
                       if autopilot: break
                       elif expStop == True:break
                       for key in event.getKeys():      #check if pressed abort-type key
                             if key in ['space','ESCAPE']:
                                waiting=False
                             if key in ['ESCAPE']:
                                expStop = True
                    myWin.clearBuffer()
            core.wait(.2); time.sleep(.2)
    #end main trials loop
# ---- WRAP-UP: summary stats, close data file and window -----------------
timeAndDateStr = time.strftime("%H:%M on %d %b %Y", time.localtime())
msg = 'Finishing at '+timeAndDateStr
print(msg); logging.info(msg)
if expStop:
    msg = 'user aborted experiment on keypress with trials done=' + str(nDoneMain) + ' of ' + str(trials.nTotal+1)
    print(msg); logging.error(msg)
if not doStaircase and (nDoneMain >0):
    print('Of ',nDoneMain,' trials, on ',numTrialsCorrect*1.0/nDoneMain*100., '% of all trials all targets reported exactly correct',sep='')
    for i in range(numRespsWanted):
        print('stream',i,': ',round(numTrialsEachCorrect[i]*1.0/nDoneMain*100.,2), '% correct',sep='')
logging.flush(); dataFile.close()
myWin.close() #have to close window if want to show a plot
| |
#!python
"""Bootstrap distribute installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from distribute_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import os
import shutil
import sys
import time
import fnmatch
import tempfile
import tarfile
import optparse
from distutils import log
# site.USER_SITE (the per-user site-packages dir) only exists on Python >= 2.6;
# fall back to None so later --user handling can test for availability.
try:
    from site import USER_SITE
except ImportError:
    USER_SITE = None
try:
    import subprocess
    def _python_cmd(*args):
        # Run the current interpreter with *args*; True iff exit code is 0.
        args = (sys.executable,) + args
        return subprocess.call(args) == 0
except ImportError:
    # will be used for python 2.3
    def _python_cmd(*args):
        # Same contract as above, via os.spawnl for Pythons without subprocess.
        args = (sys.executable,) + args
        # quoting arguments if windows
        if sys.platform == 'win32':
            def quote(arg):
                # spawnl passes args verbatim, so wrap space-containing
                # arguments in double quotes on Windows
                if ' ' in arg:
                    return '"%s"' % arg
                return arg
            args = [quote(arg) for arg in args]
        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
# Version of distribute fetched by default, and the PyPI location serving it.
DEFAULT_VERSION = "0.7.3"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
# Version advertised by the fake setuptools metadata written over a real
# setuptools installation so distribute can replace it.
SETUPTOOLS_FAKED_VERSION = "0.7"
SETUPTOOLS_PKG_INFO = """\
Metadata-Version: 1.0
Name: setuptools
Version: %s
Summary: xxxx
Home-page: xxx
Author: xxx
Author-email: xxx
License: xxx
Description: xxx
""" % SETUPTOOLS_FAKED_VERSION
def _install(tarball, install_args=()):
    """Unpack *tarball* into a temp dir and run 'setup.py install' there.

    Returns 2 (used as a process exit code) when the install step fails;
    returns None on success.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # installing
        log.warn('Installing Distribute')
        if not _python_cmd('setup.py', 'install', *install_args):
            log.warn('Something went wrong during the installation.')
            log.warn('See the error message above.')
            # exitcode will be 2
            return 2
    finally:
        # always restore the caller's cwd and remove the scratch dir
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
    """Unpack *tarball* in a temp dir and build a distribute egg into *to_dir*.

    Raises IOError if the expected *egg* file does not exist afterwards.
    """
    # extracting the tarball
    tmpdir = tempfile.mkdtemp()
    log.warn('Extracting in %s', tmpdir)
    old_wd = os.getcwd()
    try:
        os.chdir(tmpdir)
        tar = tarfile.open(tarball)
        _extractall(tar)
        tar.close()
        # going in the directory
        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
        os.chdir(subdir)
        log.warn('Now working in %s', subdir)
        # building an egg
        log.warn('Building a Distribute egg in %s', to_dir)
        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
    finally:
        # restore cwd and clean up before verifying the build result
        os.chdir(old_wd)
        shutil.rmtree(tmpdir)
    # returning the result
    log.warn(egg)
    if not os.path.exists(egg):
        raise IOError('Could not build the egg.')
def _do_download(version, download_base, to_dir, download_delay):
    # Ensure a distribute egg matching this interpreter exists in *to_dir*
    # (downloading the tarball and building it if necessary), then put it on
    # sys.path and mark it for bootstrap installation.
    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
                       % (version, sys.version_info[0], sys.version_info[1]))
    if not os.path.exists(egg):
        tarball = download_setuptools(version, download_base,
                                      to_dir, download_delay)
        _build_egg(egg, tarball, to_dir)
    sys.path.insert(0, egg)
    import setuptools
    setuptools.bootstrap_install_from = egg
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                   to_dir=os.curdir, download_delay=15, no_fake=True):
    """Ensure distribute >= *version* is importable, downloading and building
    an egg into *to_dir* if necessary.

    With no_fake=False, an installed plain setuptools is also shadowed with
    fake metadata (see _fake_setuptools) so distribute can take its place.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    was_imported = 'pkg_resources' in sys.modules or \
        'setuptools' in sys.modules
    try:
        try:
            import pkg_resources
            # pkg_resources without the _distribute marker means plain
            # setuptools is installed, not distribute; treat as missing
            if not hasattr(pkg_resources, '_distribute'):
                if not no_fake:
                    _fake_setuptools()
                raise ImportError
        except ImportError:
            return _do_download(version, download_base, to_dir, download_delay)
        try:
            pkg_resources.require("distribute>=" + version)
            return
        except pkg_resources.VersionConflict:
            e = sys.exc_info()[1]
            if was_imported:
                # too late to swap modules in this process; tell the user
                sys.stderr.write(
                "The required version of distribute (>=%s) is not available,\n"
                "and can't be installed while this script is running. Please\n"
                "install a more recent version first, using\n"
                "'easy_install -U distribute'."
                "\n\n(Currently using %r)\n" % (version, e.args[0]))
                sys.exit(2)
            else:
                del pkg_resources, sys.modules['pkg_resources']  # reload ok
                return _do_download(version, download_base, to_dir,
                                    download_delay)
        except pkg_resources.DistributionNotFound:
            return _do_download(version, download_base, to_dir,
                                download_delay)
    finally:
        if not no_fake:
            _create_fake_setuptools_pkg_info(to_dir)
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                        to_dir=os.curdir, delay=15):
    """Download distribute from a specified location and return its filename

    `version` should be a valid distribute version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download
    attempt.
    """
    # making sure we use the absolute path
    to_dir = os.path.abspath(to_dir)
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        # BUGFIX: the Python 2 fallback lives in the stdlib 'urllib2' module.
        # The previous code imported from 'urllib3' — an unrelated third-party
        # HTTP library without a compatible module-level urlopen — so the
        # fallback always failed with ImportError on Python 2.
        from urllib2 import urlopen
    tgz_name = "distribute-%s.tar.gz" % version
    url = download_base + tgz_name
    saveto = os.path.join(to_dir, tgz_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            log.warn("Downloading %s", url)
            src = urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = src.read()
            dst = open(saveto, "wb")
            dst.write(data)
        finally:
            if src:
                src.close()
            if dst:
                dst.close()
    return os.path.realpath(saveto)
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
from setuptools.sandbox import DirectorySandbox
if not hasattr(DirectorySandbox, '_old'):
def violation(*args):
pass
DirectorySandbox._old = DirectorySandbox._violation
DirectorySandbox._violation = violation
patched = True
else:
patched = False
except ImportError:
patched = False
try:
return function(*args, **kw)
finally:
if patched:
DirectorySandbox._violation = DirectorySandbox._old
del DirectorySandbox._old
return __no_sandbox
def _patch_file(path, content):
    """Will backup the file then patch it.

    Returns False when the file already contains *content*, True after a
    successful backup-and-rewrite.
    """
    f = open(path)
    existing_content = f.read()
    f.close()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # move the original aside (timestamped) before overwriting
    _rename_path(path)
    f = open(path, 'w')
    try:
        f.write(content)
    finally:
        f.close()
    return True
# run without setuptools' directory sandbox interfering with the writes
_patch_file = _no_sandbox(_patch_file)
def _same_content(path, content):
f = open(path)
existing_content = f.read()
f.close()
return existing_content == content
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
log.warn('Renaming %s to %s', path, new_name)
os.rename(path, new_name)
return new_name
def _remove_flat_installation(placeholder):
    """Neutralize a flat (non-egg) setuptools installation under *placeholder*.

    Returns True on success, False when nothing needed patching; returns None
    when no setuptools egg-info was found at all.
    """
    if not os.path.isdir(placeholder):
        log.warn('Unkown installation at %s', placeholder)
        return False
    found = False
    for file in os.listdir(placeholder):
        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
            found = True
            break
    if not found:
        log.warn('Could not locate setuptools*.egg-info')
        return
    log.warn('Moving elements out of the way...')
    # 'file' still holds the matching egg-info entry from the loop above
    pkg_info = os.path.join(placeholder, file)
    if os.path.isdir(pkg_info):
        patched = _patch_egg_dir(pkg_info)
    else:
        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
    if not patched:
        log.warn('%s already patched.', pkg_info)
        return False
    # now let's move the files out of the way
    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
        element = os.path.join(placeholder, element)
        if os.path.exists(element):
            _rename_path(element)
        else:
            log.warn('Could not find the %s element of the '
                     'Setuptools distribution', element)
    return True
# run without setuptools' directory sandbox interfering with the renames
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
def _after_install(dist):
    # Post-install hook: drop the fake setuptools metadata next to the freshly
    # installed distribute so pkg_resources considers setuptools present.
    log.warn('After install bootstrap.')
    placeholder = dist.get_command_obj('install').install_purelib
    _create_fake_setuptools_pkg_info(placeholder)
def _create_fake_setuptools_pkg_info(placeholder):
    # Write a fake setuptools egg-info file plus a setuptools.pth into
    # *placeholder* so pkg_resources believes setuptools is installed.
    # Silently returns when the location is missing or unwritable.
    if not placeholder or not os.path.exists(placeholder):
        log.warn('Could not find the install location')
        return
    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
        (SETUPTOOLS_FAKED_VERSION, pyver)
    pkg_info = os.path.join(placeholder, setuptools_file)
    if os.path.exists(pkg_info):
        log.warn('%s already exists', pkg_info)
        return
    log.warn('Creating %s', pkg_info)
    try:
        f = open(pkg_info, 'w')
    except EnvironmentError:
        log.warn("Don't have permissions to write %s, skipping", pkg_info)
        return
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    pth_file = os.path.join(placeholder, 'setuptools.pth')
    log.warn('Creating %s', pth_file)
    f = open(pth_file, 'w')
    try:
        f.write(os.path.join(os.curdir, setuptools_file))
    finally:
        f.close()
# run without setuptools' directory sandbox interfering with the writes
_create_fake_setuptools_pkg_info = _no_sandbox(
    _create_fake_setuptools_pkg_info
)
def _patch_egg_dir(path):
    """Replace the egg directory at *path* with a stub carrying the faked
    setuptools PKG-INFO. Returns False if it was already patched, else True."""
    # let's check if it's already patched
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    if os.path.exists(pkg_info):
        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
            log.warn('%s already patched.', pkg_info)
            return False
    # move the real egg aside, then recreate the minimal metadata tree
    _rename_path(path)
    os.mkdir(path)
    os.mkdir(os.path.join(path, 'EGG-INFO'))
    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
    f = open(pkg_info, 'w')
    try:
        f.write(SETUPTOOLS_PKG_INFO)
    finally:
        f.close()
    return True
# run without setuptools' directory sandbox interfering with the writes
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
def _before_install():
    # Pre-install hook: fake out any existing setuptools before distribute
    # is installed over it.
    log.warn('Before install bootstrap.')
    _fake_setuptools()
def _under_prefix(location):
if 'install' not in sys.argv:
return True
args = sys.argv[sys.argv.index('install') + 1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
top_dir = arg.split('root=')[-1]
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
top_dir = args[index + 1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
return True
def _fake_setuptools():
    """Locate an installed setuptools and overwrite its metadata with the fake
    PKG-INFO so distribute can be installed over it, then relaunch setup."""
    log.warn('Scanning installed packages')
    try:
        import pkg_resources
    except ImportError:
        # we're cool
        log.warn('Setuptools or Distribute does not seem to be installed.')
        return
    ws = pkg_resources.working_set
    try:
        setuptools_dist = ws.find(
            pkg_resources.Requirement.parse('setuptools', replacement=False)
        )
    except TypeError:
        # old distribute API
        setuptools_dist = ws.find(
            pkg_resources.Requirement.parse('setuptools')
        )
    if setuptools_dist is None:
        log.warn('No setuptools distribution found')
        return
    # detecting if it was already faked
    setuptools_location = setuptools_dist.location
    log.warn('Setuptools installation detected at %s', setuptools_location)
    # if --root or --prefix was provided, and if
    # setuptools is not located in them, we don't patch it
    if not _under_prefix(setuptools_location):
        log.warn('Not patching, --root or --prefix is installing Distribute'
                 ' in another location')
        return
    # let's see if its an egg
    if not setuptools_location.endswith('.egg'):
        log.warn('Non-egg installation')
        res = _remove_flat_installation(setuptools_location)
        if not res:
            return
    else:
        log.warn('Egg installation')
        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
        if (os.path.exists(pkg_info) and
            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
            log.warn('Already patched.')
            return
        log.warn('Patching...')
        # let's create a fake egg replacing setuptools one
        res = _patch_egg_dir(setuptools_location)
        if not res:
            return
    log.warn('Patching complete.')
    _relaunch()
def _relaunch():
    """Re-exec the current setup.py invocation in a fresh interpreter so it
    picks up the just-patched environment; exits with the child's status."""
    log.warn('Relaunching...')
    # we have to relaunch the process
    # pip marker to avoid a relaunch bug
    # (pip invokes setup.py via '-c ...'; restore a usable argv[0] first)
    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
    _cmd2 = ['-c', 'install', '--record']
    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
        sys.argv[0] = 'setup.py'
    args = [sys.executable] + sys.argv
    sys.exit(subprocess.call(args))
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().

    Backport of TarFile.extractall for old Pythons that lack it; *self*
    is the TarFile instance.
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448 # decimal for oct 0700
        self.extract(tarinfo, path)
    # Reverse sort directories so children are fixed up before their parents.
    if sys.version_info < (2, 4):
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def _build_install_args(options):
"""
Build the arguments to 'python setup.py install' on the distribute package
"""
install_args = []
if options.user_install:
if sys.version_info < (2, 6):
log.warn("--user requires Python 2.6 or later")
raise SystemExit(1)
install_args.append('--user')
return install_args
def _parse_args():
    """Parse the command line for options and return the parsed options
    object; positional arguments are deliberately discarded."""
    parser = optparse.OptionParser()
    parser.add_option(
        '--user', dest='user_install', action='store_true', default=False,
        help='install in user site package (requires Python 2.6 or later)')
    parser.add_option(
        '--download-base', dest='download_base', metavar="URL",
        default=DEFAULT_URL,
        help='alternative URL from where to download the distribute package')
    options, _positional = parser.parse_args()
    return options
def main(version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    cli_options = _parse_args()
    # fetch the source tarball, then run its setup.py install
    tarball = download_setuptools(download_base=cli_options.download_base)
    return _install(tarball, _build_install_args(cli_options))
# Script entry point: install or upgrade distribute/setuptools and exit with
# the installer's status code.
if __name__ == '__main__':
    sys.exit(main())
| |
# Copyright 2008-2013 Software freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WebElement implementation."""
import os
import zipfile
from StringIO import StringIO
import base64
from command import Command
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidSelectorException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class WebElement(object):
"""Represents an HTML element.
Generally, all interesting operations to do with interacting with a page
will be performed through this interface."""
def __init__(self, parent, id_):
self._parent = parent
self._id = id_
@property
def tag_name(self):
"""Gets this element's tagName property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""Gets the text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
def click(self):
"""Clicks the element."""
self._execute(Command.CLICK_ELEMENT)
def submit(self):
"""Submits a form."""
self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_attribute(self, name):
"""Gets the attribute value."""
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attributeValue = ''
if resp['value'] is None:
attributeValue = None
else:
attributeValue = unicode(resp['value'])
if type(resp['value']) is bool:
attributeValue = attributeValue.lower()
return attributeValue
def is_selected(self):
"""Whether the element is selected."""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element by id."""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Find element by name."""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element by link text."""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath."""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the elements by xpath."""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds an element by their class name."""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds elements by their class name."""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Find and return an element by CSS selector."""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Find and return list of multiple elements by CSS selector."""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element."""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_file = LocalFileDetector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
typing = []
for val in value:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = str(val)
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element would be visible to a user"""
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""CONSIDERED LIABLE TO CHANGE WITHOUT WARNING. Use this to discover where on the screen an
element is so that we can click it. This method should cause the element to be scrolled
into view.
Returns the top lefthand corner location on the screen, or None if the element is not visible"""
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
""" Returns the size of the element """
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {}
new_size["height"] = size["height"]
new_size["width"] = size["width"]
return new_size
def value_of_css_property(self, property_name):
""" Returns the value of a CSS property """
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
{'propertyName': property_name})['value']
@property
def location(self):
    """The element's position in the renderable canvas as ``{'x': ..., 'y': ...}``."""
    raw = self._execute(Command.GET_ELEMENT_LOCATION)['value']
    # Expose only the x/y keys, not the whole driver response.
    return {"x": raw['x'], "y": raw['y']}
@property
def parent(self):
    # The driver instance this element was found through.
    return self._parent
@property
def id(self):
    # The opaque element id assigned by the remote end.
    return self._id
def __eq__(self, element):
    """Elements are equal when they share the same remote element id.

    Comparing against an object without an ``id`` attribute (e.g. None
    or an unrelated type) now returns False instead of raising
    AttributeError, so equality tests are safe against arbitrary values.
    """
    return hasattr(element, 'id') and self._id == element.id
# Private Methods
def _execute(self, command, params=None):
    """Executes a command against the underlying HTML element.
    Args:
      command: The name of the command to _execute as a string.
      params: A dictionary of named parameters to send with the command.
    Returns:
      The command's JSON response loaded into a dictionary object.
    """
    if not params:
        params = {}
    # Every element command is scoped to this element's remote id.
    params['id'] = self._id
    return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
    """Find the first child of this element matching the locator.

    Args:
      by: a locator strategy (one of the ``By`` constants).
      value: the locator value; must be a selector string.
    Raises:
      InvalidSelectorException: if the locator values are malformed.
    """
    # Reject obviously wrong locators; use an identity test for None
    # (`value == None` invoked __eq__ and is non-idiomatic).
    if isinstance(by, tuple) or isinstance(value, int) or value is None:
        raise InvalidSelectorException("Invalid locator values passed in")
    return self._execute(Command.FIND_CHILD_ELEMENT,
                         {"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
    """Find all children of this element matching the locator.

    Args:
      by: a locator strategy (one of the ``By`` constants).
      value: the locator value; must be a selector string.
    Raises:
      InvalidSelectorException: if the locator values are malformed.
    """
    # Same validation as find_element; identity test for None.
    if isinstance(by, tuple) or isinstance(value, int) or value is None:
        raise InvalidSelectorException("Invalid locator values passed in")
    return self._execute(Command.FIND_CHILD_ELEMENTS,
                         {"using": by, "value": value})['value']
def _upload(self, filename):
    """Zip *filename* and push it to the remote end via UPLOAD_FILE.

    Returns the remote-side path reported by the server, or the
    original *filename* unchanged when the server does not support the
    upload command (detected by sniffing known error messages).
    """
    fp = StringIO()
    zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
    # Store the file under its basename only, without directories.
    zipped.write(filename, os.path.split(filename)[1])
    zipped.close()
    try:
        return self._execute(Command.UPLOAD_FILE,
            {'file': base64.encodestring(fp.getvalue())})['value']
    except WebDriverException as e:
        # Different server generations report "unknown command" in
        # different ways; in all such cases fall back to the raw path.
        if "Unrecognized command: POST" in e.__str__():
            return filename
        elif "Command not found: POST " in e.__str__():
            return filename
        elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
            return filename
        else:
            raise e
class LocalFileDetector(object):
    """Detects whether a send_keys value sequence names a local file."""

    @classmethod
    def is_local_file(cls, *keys):
        """Return the local file path spelled out by *keys*, or None.

        The key sequence is flattened exactly the way ``send_keys``
        flattens it (Keys constants kept whole, ints stringified,
        strings split into characters), joined into a single path and
        checked against the local filesystem.
        """
        typing = []
        for val in keys:
            if isinstance(val, Keys):
                typing.append(val)
            elif isinstance(val, int):
                typing.extend(str(val))
            else:
                typing.extend(val)
        file_path = ''.join(typing)
        # BUG FIX: was `file_path is ''` — identity comparison with a
        # string literal is an implementation detail, not a contract.
        if not file_path:
            return None
        try:
            if os.path.isfile(file_path):
                return file_path
        except (TypeError, ValueError, OSError):
            # isfile() can raise on malformed paths (e.g. embedded NUL);
            # narrowed from a bare `except:` that swallowed everything.
            pass
        return None
| |
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Sebastien Blot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import subprocess
import time
import libqtile
import libqtile.layout
import libqtile.bar
import libqtile.command
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.hook
import libqtile.confreader
from .conftest import whereis, BareConfig, no_xinerama, retry
class ManagerConfig(object):
    """Full-featured qtile config used by most tests in this module:
    four groups, four layouts, a float rule for xclock, two keybindings
    and a single screen with a bottom bar."""
    auto_fullscreen = True
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2),
        libqtile.layout.tile.Tile(ratio=0.5),
        libqtile.layout.max.Max()
    ]
    floating_layout = libqtile.layout.floating.Floating(
        float_rules=[dict(wmclass="xclock")])
    keys = [
        libqtile.config.Key(
            ["control"],
            "k",
            libqtile.command._Call([("layout", None)], "up")
        ),
        libqtile.config.Key(
            ["control"],
            "j",
            libqtile.command._Call([("layout", None)], "down")
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen(
        bottom=libqtile.bar.Bar(
            [
                libqtile.widget.GroupBox(),
            ],
            20
        ),
    )]
    main = None
    follow_mouse_focus = True
# Decorator applying ManagerConfig to the `qtile` fixture of a test.
manager_config = pytest.mark.parametrize("qtile", [ManagerConfig], indirect=True)
@manager_config
def test_screen_dim(qtile):
    """Screen geometry and focused group follow to_screen() switches."""
    # self.c.restart()
    qtile.testXclock()
    assert qtile.c.screen.info()["index"] == 0
    assert qtile.c.screen.info()["x"] == 0
    assert qtile.c.screen.info()["width"] == 800
    assert qtile.c.group.info()["name"] == 'a'
    assert qtile.c.group.info()["focus"] == 'xclock'
    qtile.c.to_screen(1)
    qtile.testXeyes()
    assert qtile.c.screen.info()["index"] == 1
    assert qtile.c.screen.info()["x"] == 800
    assert qtile.c.screen.info()["width"] == 640
    assert qtile.c.group.info()["name"] == 'b'
    assert qtile.c.group.info()["focus"] == 'xeyes'
    qtile.c.to_screen(0)
    assert qtile.c.screen.info()["index"] == 0
    assert qtile.c.screen.info()["x"] == 0
    assert qtile.c.screen.info()["width"] == 800
    assert qtile.c.group.info()["name"] == 'a'
    assert qtile.c.group.info()["focus"] == 'xclock'
@pytest.mark.parametrize("xephyr", [{"xoffset": 0}], indirect=True)
@manager_config
def test_clone_dim(qtile):
    """With cloned (zero-offset) outputs only one screen is configured."""
    self = qtile
    self.testXclock()
    assert self.c.screen.info()["index"] == 0
    assert self.c.screen.info()["x"] == 0
    assert self.c.screen.info()["width"] == 800
    assert self.c.group.info()["name"] == 'a'
    assert self.c.group.info()["focus"] == 'xclock'
    assert len(self.c.screens()) == 1
@manager_config
def test_to_screen(qtile):
    """Windows land on the group of the active screen; next/prev_screen cycle focus."""
    self = qtile
    assert self.c.screen.info()["index"] == 0
    self.c.to_screen(1)
    assert self.c.screen.info()["index"] == 1
    self.testWindow("one")
    self.c.to_screen(0)
    self.testWindow("two")
    ga = self.c.groups()["a"]
    assert ga["windows"] == ["two"]
    gb = self.c.groups()["b"]
    assert gb["windows"] == ["one"]
    assert self.c.window.info()["name"] == "two"
    self.c.next_screen()
    assert self.c.window.info()["name"] == "one"
    self.c.next_screen()
    assert self.c.window.info()["name"] == "two"
    self.c.prev_screen()
    assert self.c.window.info()["name"] == "one"
@manager_config
def test_togroup(qtile):
    """window.togroup moves focus between groups and rejects unknown groups."""
    self = qtile
    self.testWindow("one")
    with pytest.raises(libqtile.command.CommandError):
        self.c.window.togroup("nonexistent")
    assert self.c.groups()["a"]["focus"] == "one"
    self.c.window.togroup("a")
    assert self.c.groups()["a"]["focus"] == "one"
    self.c.window.togroup("b")
    assert self.c.groups()["b"]["focus"] == "one"
    assert self.c.groups()["a"]["focus"] is None
    self.c.to_screen(1)
    self.c.window.togroup("c")
    assert self.c.groups()["c"]["focus"] == "one"
@manager_config
def test_resize(qtile):
    """Resizing a screen via screen.resize() is reflected in screen info."""
    self = qtile
    self.c.screen[0].resize(x=10, y=10, w=100, h=100)

    # BUG FIX: ignore_exceptions must be a tuple; `(AssertionError)` is
    # just the class, not a 1-tuple (compare assert_window_died below).
    @retry(ignore_exceptions=(AssertionError,), fail_msg="Screen didn't resize")
    def run():
        d = self.c.screen[0].info()
        assert d['width'] == 100
        assert d['height'] == 100
        return d

    d = run()
    assert d['x'] == d['y'] == 10
@no_xinerama
def test_minimal(qtile):
    """Smoke test: a bare qtile instance answers status()."""
    assert qtile.c.status() == "OK"
@manager_config
@no_xinerama
def test_events(qtile):
    """Smoke test: qtile with the full ManagerConfig answers status()."""
    assert qtile.c.status() == "OK"
# FIXME: failing test disabled. For some reason we don't seem
# to have a keymap in Xnest or Xephyr 99% of the time.
@manager_config
@no_xinerama
def test_keypress(qtile):
    """simulate_keypress rejects unknown modifiers and moves focus via bindings."""
    self = qtile
    self.testWindow("one")
    self.testWindow("two")
    v = self.c.simulate_keypress(["unknown"], "j")
    assert v.startswith("Unknown modifier")
    assert self.c.groups()["a"]["focus"] == "two"
    self.c.simulate_keypress(["control"], "j")
    assert self.c.groups()["a"]["focus"] == "one"
@manager_config
@no_xinerama
def test_spawn(qtile):
    """spawn() with a command string returns a real child pid."""
    # Spawn something with a pid greater than init's
    assert int(qtile.c.spawn("true")) > 1
@manager_config
@no_xinerama
def test_spawn_list(qtile):
    """spawn() with an argv list returns a real child pid."""
    # Spawn something with a pid greater than init's
    assert int(qtile.c.spawn(["echo", "true"])) > 1
@retry(ignore_exceptions=(AssertionError,), fail_msg='Window did not die!')
def assert_window_died(client, window_info):
    """Poll until the window described by *window_info* disappears from the server."""
    client.sync()
    wid = window_info['id']
    assert wid not in set([x['id'] for x in client.windows()])
@manager_config
@no_xinerama
def test_kill_window(qtile):
    """window.kill() actually removes the window from the server."""
    qtile.testWindow("one")
    qtile.testwindows = []
    window_info = qtile.c.window.info()
    qtile.c.window[window_info["id"]].kill()
    assert_window_died(qtile.c, window_info)
@manager_config
@no_xinerama
def test_kill_other(qtile):
    """Killing one of two tiled windows re-expands the survivor."""
    self = qtile
    self.c.group.setlayout("tile")
    one = self.testWindow("one")
    assert self.c.window.info()["width"] == 798
    window_one_info = self.c.window.info()
    assert self.c.window.info()["height"] == 578
    two = self.testWindow("two")
    assert self.c.window.info()["name"] == "two"
    assert self.c.window.info()["width"] == 398
    assert self.c.window.info()["height"] == 578
    assert len(self.c.windows()) == 2
    self.kill_window(one)
    assert_window_died(self.c, window_one_info)
    assert self.c.window.info()["name"] == "two"
    assert self.c.window.info()["width"] == 798
    assert self.c.window.info()["height"] == 578
@manager_config
@no_xinerama
def test_regression_groupswitch(qtile):
    """Regression: switching away from a group clears its screen assignment."""
    self = qtile
    self.c.group["c"].toscreen()
    self.c.group["d"].toscreen()
    assert self.c.groups()["c"]["screen"] is None
@manager_config
@no_xinerama
def test_next_layout(qtile):
    """next_layout cycles through all four layouts and wraps around."""
    self = qtile
    self.testWindow("one")
    self.testWindow("two")
    assert len(self.c.layout.info()["stacks"]) == 1
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.c.next_layout()
    self.c.next_layout()
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 1
@manager_config
@no_xinerama
def test_setlayout(qtile):
    """group.setlayout switches directly to the named layout."""
    self = qtile
    assert not self.c.layout.info()["name"] == "max"
    self.c.group.setlayout("max")
    assert self.c.layout.info()["name"] == "max"
@manager_config
@no_xinerama
def test_adddelgroup(qtile):
    """Groups can be added/deleted; windows survive and the last group is protected."""
    self = qtile
    self.testWindow("one")
    self.c.addgroup("dummygroup")
    self.c.addgroup("testgroup")
    assert "testgroup" in self.c.groups().keys()
    self.c.window.togroup("testgroup")
    self.c.delgroup("testgroup")
    assert "testgroup" not in self.c.groups().keys()
    # Assert that the test window is still a member of some group.
    assert sum(len(i["windows"]) for i in self.c.groups().values())
    for i in list(self.c.groups().keys())[:-1]:
        self.c.delgroup(i)
    with pytest.raises(libqtile.command.CommandException):
        self.c.delgroup(list(self.c.groups().keys())[0])
@manager_config
@no_xinerama
def test_delgroup(qtile):
    """Deleting the final remaining group raises a CommandException."""
    self = qtile
    self.testWindow("one")
    for i in ['a', 'd', 'c']:
        self.c.delgroup(i)
    with pytest.raises(libqtile.command.CommandException):
        self.c.delgroup('b')
@manager_config
@no_xinerama
def test_nextprevgroup(qtile):
    """screen.next_group/prev_group cycle the visible group and report its name."""
    self = qtile
    start = self.c.group.info()["name"]
    ret = self.c.screen.next_group()
    assert self.c.group.info()["name"] != start
    assert self.c.group.info()["name"] == ret
    ret = self.c.screen.prev_group()
    assert self.c.group.info()["name"] == start
@manager_config
@no_xinerama
def test_toggle_group(qtile):
    """screen.toggle_group flips between the given group and the previous one."""
    self = qtile
    self.c.group["a"].toscreen()
    self.c.group["b"].toscreen()
    self.c.screen.toggle_group("c")
    assert self.c.group.info()["name"] == "c"
    self.c.screen.toggle_group("c")
    assert self.c.group.info()["name"] == "b"
    self.c.screen.toggle_group()
    assert self.c.group.info()["name"] == "c"
@manager_config
@no_xinerama
def test_inspect_xeyes(qtile):
    """window.inspect() returns data for a plain client window."""
    self = qtile
    self.testXeyes()
    assert self.c.window.inspect()
@manager_config
@no_xinerama
def test_inspect_xclock(qtile):
    """window.inspect() exposes the WM_CLASS of a client window."""
    self = qtile
    self.testXclock()
    assert self.c.window.inspect()["wm_class"]
@manager_config
@no_xinerama
def test_static(qtile):
    """A window can be turned static (screen 0, fixed geometry) without error."""
    self = qtile
    self.testXeyes()
    self.testWindow("one")
    self.c.window[self.c.window.info()["id"]].static(0, 0, 0, 100, 100)
@manager_config
@no_xinerama
def test_match(qtile):
    """window.match matches by window name and rejects non-matching names."""
    self = qtile
    self.testXeyes()
    assert self.c.window.match(wname="xeyes")
    assert not self.c.window.match(wname="nonexistent")
@manager_config
@no_xinerama
def test_default_float(qtile):
    """xclock floats by rule; move/set_position keep its default float size."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXclock()
    assert self.c.group.info()['focus'] == 'xclock'
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 318
    assert self.c.window.info()['y'] == 208
    assert self.c.window.info()['floating'] is True
    # move_floating is relative to the current position
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 328
    assert self.c.window.info()['y'] == 228
    assert self.c.window.info()['floating'] is True
    # set_position_floating is absolute
    self.c.window.set_position_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    assert self.c.window.info()['floating'] is True
@manager_config
@no_xinerama
def test_last_float_size(qtile):
    """
    When you re-float something it would be preferable to have it use the previous float size
    """
    self = qtile
    self.testXeyes()
    assert self.c.window.info()['name'] == 'xeyes'
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    # float and it moves
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 150
    assert self.c.window.info()['height'] == 100
    # resize
    self.c.window.set_size_floating(50, 90, 42, 42)
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    # back to not floating
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    # float again, should use last float size
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    # make sure it works through min and max
    self.c.window.toggle_maximize()
    self.c.window.toggle_minimize()
    self.c.window.toggle_minimize()
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
@manager_config
@no_xinerama
def test_float_max_min_combo(qtile):
    """Maximize then minimize then un-float restores the original tiled geometry."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    assert self.c.window.info()['floating'] is False
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['maximized'] is True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['minimized'] is True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_floating()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['minimized'] is False
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_fullscreen(qtile):
    """toggle_fullscreen covers the whole screen and restores tiled geometry."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'width': 150, 'height': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_fullscreen()
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['fullscreen'] is True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 600
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_fullscreen()
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['fullscreen'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_max(qtile):
    """toggle_maximize fills the usable area (above the bar) and restores geometry."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'width': 150, 'height': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['maximized'] is True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_min(qtile):
    """toggle_minimize keeps geometry but marks the window minimized/floating."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'width': 150, 'height': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['minimized'] is True
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['minimized'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_floating(qtile):
    """toggle_floating flips float state and survives a layout switch."""
    self = qtile
    self.testXeyes()
    assert self.c.window.info()['floating'] is False
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is True
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is False
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is True
    # change layout (should still be floating)
    self.c.next_layout()
    assert self.c.window.info()['floating'] is True
@manager_config
@no_xinerama
def test_floating_focus(qtile):
    """Focus cycling includes floating windows; layout navigation skips them."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    # self.testWindow("one")
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    self.c.window.toggle_floating()
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['name'] == 'xeyes'
    assert self.c.group.info()['focus'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # change focus to xcalc
    self.c.group.next_window()
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['name'] != 'xeyes'
    assert self.c.group.info()['focus'] != 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # focus back to xeyes
    self.c.group.next_window()
    assert self.c.window.info()['name'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # now focusing via layout is borked (won't go to float)
    self.c.layout.up()
    assert self.c.window.info()['name'] != 'xeyes'
    self.c.layout.up()
    assert self.c.window.info()['name'] != 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # focus back to xeyes
    self.c.group.next_window()
    assert self.c.window.info()['name'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
@manager_config
@no_xinerama
def test_move_floating(qtile):
    """move/set_size/resize_floating adjust geometry; position survives layout switch."""
    self = qtile
    self.testXeyes()
    # self.testWindow("one")
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is True
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 150
    assert self.c.window.info()['height'] == 100
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    self.c.window.set_size_floating(50, 90, 42, 42)
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    # resize_floating is relative to the current size
    self.c.window.resize_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 60
    assert self.c.window.info()['height'] == 110
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    self.c.window.set_size_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 10
    assert self.c.window.info()['height'] == 20
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    # change layout (x, y should be same)
    self.c.next_layout()
    assert self.c.window.info()['width'] == 10
    assert self.c.window.info()['height'] == 20
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
@manager_config
@no_xinerama
def test_screens(qtile):
    """At least one screen is always configured."""
    self = qtile
    assert len(self.c.screens())
@manager_config
@no_xinerama
def test_rotate(qtile):
    """Rotating the output via xrandr swaps the reported screen width/height."""
    self = qtile
    self.testWindow("one")
    s = self.c.screens()[0]
    height, width = s["height"], s["width"]
    subprocess.call(
        [
            "xrandr",
            "--output", "default",
            "-display", self.display,
            "--rotate", "left"
        ],
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE
    )

    @retry(ignore_exceptions=(AssertionError,), fail_msg="Screen did not rotate")
    def run():
        s = self.c.screens()[0]
        assert s['width'] == height
        assert s['height'] == width
        return True
    run()
# TODO: see note on test_resize
@manager_config
@no_xinerama
def test_resize_(qtile):
    """Changing the X display size via xrandr is reflected in screen info."""
    self = qtile
    self.testWindow("one")
    subprocess.call(
        [
            "xrandr",
            "-s", "480x640",
            "-display", self.display
        ]
    )

    @retry(ignore_exceptions=(AssertionError,), fail_msg="Screen did not resize")
    def run():
        d = self.c.screen.info()
        assert d['width'] == 480
        assert d['height'] == 640
        return True
    run()
@manager_config
@no_xinerama
def test_focus_stays_on_layout_switch(qtile):
    """The focused window is preserved when cycling layouts back and forth."""
    qtile.testWindow("one")
    qtile.testWindow("two")
    # switch to a double stack layout
    qtile.c.next_layout()
    # focus on a different window than the default
    qtile.c.layout.next()
    # toggle the layout
    qtile.c.next_layout()
    qtile.c.prev_layout()
    assert qtile.c.window.info()['name'] == 'one'
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xeyes(qtile):
    """Smoke test: xeyes starts under both configs and xinerama settings."""
    qtile.testXeyes()
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xcalc(qtile):
    """Smoke test: xcalc starts under both configs and xinerama settings."""
    qtile.testXcalc()
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xcalc_kill_window(qtile):
    """Killing xcalc removes it from the window list."""
    self = qtile
    self.testXcalc()
    window_info = self.c.window.info()
    self.c.window.kill()
    assert_window_died(self.c, window_info)
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_map_request(qtile):
    """Newly mapped windows join the current group and take focus."""
    self = qtile
    self.testWindow("one")
    info = self.c.groups()["a"]
    assert "one" in info["windows"]
    assert info["focus"] == "one"
    self.testWindow("two")
    info = self.c.groups()["a"]
    assert "two" in info["windows"]
    assert info["focus"] == "two"
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_unmap(qtile):
    """Unmapping windows pops them in LIFO order and refocuses the previous one."""
    self = qtile
    one = self.testWindow("one")
    two = self.testWindow("two")
    three = self.testWindow("three")
    info = self.c.groups()["a"]
    assert info["focus"] == "three"
    assert len(self.c.windows()) == 3
    self.kill_window(three)
    assert len(self.c.windows()) == 2
    info = self.c.groups()["a"]
    assert info["focus"] == "two"
    self.kill_window(two)
    assert len(self.c.windows()) == 1
    info = self.c.groups()["a"]
    assert info["focus"] == "one"
    self.kill_window(one)
    assert len(self.c.windows()) == 0
    info = self.c.groups()["a"]
    assert info["focus"] is None
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_setgroup(qtile):
    """group.toscreen reassigns screens consistently for one or two screens."""
    self = qtile
    self.testWindow("one")
    self.c.group["b"].toscreen()
    self.groupconsistency()
    if len(self.c.screens()) == 1:
        assert self.c.groups()["a"]["screen"] is None
    else:
        assert self.c.groups()["a"]["screen"] == 1
    assert self.c.groups()["b"]["screen"] == 0
    self.c.group["c"].toscreen()
    self.groupconsistency()
    assert self.c.groups()["c"]["screen"] == 0
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_unmap_noscreen(qtile):
    """Killing a window in an off-screen group still updates focus correctly."""
    self = qtile
    self.testWindow("one")
    pid = self.testWindow("two")
    assert len(self.c.windows()) == 2
    self.c.group["c"].toscreen()
    self.groupconsistency()
    self.c.status()
    assert len(self.c.windows()) == 2
    self.kill_window(pid)
    assert len(self.c.windows()) == 1
    assert self.c.groups()["a"]["focus"] == "one"
def test_init():
    """Key() rejects unknown key names and unknown modifiers with QtileError."""
    with pytest.raises(libqtile.manager.QtileError):
        libqtile.config.Key([], "unknown", libqtile.command._Call("base", None, "foo"))
    with pytest.raises(libqtile.manager.QtileError):
        libqtile.config.Key(["unknown"], "x", libqtile.command._Call("base", None, "foo"))
class TScreen(libqtile.config.Screen):
    """Screen stub whose group assignment is a no-op, for geometry-only tests."""
    def setGroup(self, x, save_prev=True):
        pass
def test_dx():
    """A left gap shifts the drawable x origin by the gap size."""
    s = TScreen(left=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dx == 10
def test_dwidth():
    """Left and right gaps each shrink the drawable width."""
    s = TScreen(left=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dwidth == 90
    s.right = libqtile.bar.Gap(10)
    assert s.dwidth == 80
def test_dy():
    """A top gap shifts the drawable y origin by the gap size."""
    s = TScreen(top=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dy == 10
def test_dheight():
    """Top and bottom gaps each shrink the drawable height."""
    s = TScreen(top=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dheight == 90
    s.bottom = libqtile.bar.Gap(10)
    assert s.dheight == 80
class _Config(object):
    """Minimal shared base config (two stack layouts, no float rules) for
    the hook-based configs below."""
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2)
    ]
    floating_layout = libqtile.layout.floating.Floating()
    keys = [
        libqtile.config.Key(
            ["control"],
            "k",
            libqtile.command._Call([("layout", None)], "up")
        ),
        libqtile.config.Key(
            ["control"],
            "j",
            libqtile.command._Call([("layout", None)], "down")
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen(
        bottom=libqtile.bar.Bar(
            [
                libqtile.widget.GroupBox(),
            ],
            20
        ),
    )]
    auto_fullscreen = True
class ClientNewStaticConfig(_Config):
    """Config whose client_new hook turns every new client static on screen 0."""
    @staticmethod
    def main(c):
        def client_new(c):
            c.static(0)
        libqtile.hook.subscribe.client_new(client_new)
# Decorator applying ClientNewStaticConfig to the `qtile` fixture.
clientnew_config = pytest.mark.parametrize("qtile", [ClientNewStaticConfig], indirect=True)
@clientnew_config
def test_minimal_(qtile):
    """A window can be created and killed while the static hook is active."""
    self = qtile
    a = self.testWindow("one")
    self.kill_window(a)
@pytest.mark.skipif(whereis("gkrellm") is None, reason="gkrellm not found")
@clientnew_config
def test_gkrellm(qtile):
    """gkrellm (which sets unusual hints) starts cleanly with the static hook."""
    qtile.testGkrellm()
    time.sleep(0.1)
class ToGroupConfig(_Config):
    """Config whose client_new hook sends every new client to group 'd'."""
    @staticmethod
    def main(c):
        def client_new(c):
            c.togroup("d")
        libqtile.hook.subscribe.client_new(client_new)
# Decorator applying ToGroupConfig to the `qtile` fixture.
togroup_config = pytest.mark.parametrize("qtile", [ToGroupConfig], indirect=True)
@togroup_config
def test_minimal__(qtile):
    """The client_new hook routes a new window into group 'd'."""
    qtile.c.group["d"].toscreen()
    qtile.c.group["a"].toscreen()
    a = qtile.testWindow("one")
    assert len(qtile.c.group["d"].info()["windows"]) == 1
    qtile.kill_window(a)
@manager_config
def test_colorPixel(qtile):
    # test for #394: colorPixel must accept a plain hex string
    qtile.c.eval("self.colorPixel(\"ffffff\")")
| |
from django.conf import settings
from django.contrib import messages as django_messages
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views.decorators.http import require_POST
from django.views.generic.base import RedirectView, TemplateView
from django.views.generic.edit import CreateView, FormView
from raven.contrib.django.raven_compat.models import client as raven_client
from common.mixins import PrivateMixin
from common.utils import get_source_labels
from private_sharing.models import (
ActivityFeed,
DataRequestProject,
DataRequestProjectMember,
id_label_to_project,
)
from .forms import ConsentForm
from .models import PublicDataAccess, WithdrawalFeedback
class QuizView(PrivateMixin, TemplateView):
    """
    Modification of TemplateView that accepts and requires POST.
    This prevents users from jumping to the quiz link without going through
    the informed consent pages.
    """

    template_name = "public_data/quiz.html"

    @method_decorator(require_POST)
    def dispatch(self, *args, **kwargs):
        # require_POST rejects anything but POST before normal dispatch.
        return super(QuizView, self).dispatch(*args, **kwargs)

    def post(self, *args, **kwargs):
        # Render the template exactly as a GET would.
        return self.get(*args, **kwargs)
class ConsentView(PrivateMixin, FormView):
    """
    Modification of FormView that walks through the informed consent content.
    Stepping through the form is triggered by POST requests containing new
    values in the 'section' field. If this field is present, the view overrides
    form data processing.
    """

    template_name = "public_data/consent.html"
    form_class = ConsentForm
    success_url = reverse_lazy("home")

    def get(self, request, *args, **kwargs):
        """Customized to allow additional context."""
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        return self.render_to_response(self.get_context_data(form=form, **kwargs))

    def form_invalid(self, form):
        """
        Customized to add final section marker when reloading.
        """
        # section=6 re-renders the last (form-bearing) consent section.
        return self.render_to_response(self.get_context_data(form=form, section=6))

    def post(self, request, *args, **kwargs):
        """
        Customized to convert a POST with 'section' into GET request.
        """
        if "section" in request.POST:
            kwargs["section"] = int(request.POST["section"])
            self.request.method = "GET"
            return self.get(request, *args, **kwargs)
        else:
            form_class = self.get_form_class()
            form = self.get_form(form_class)
            if form.is_valid():
                return self.form_valid(form)
            else:
                return self.form_invalid(form)

    def form_valid(self, form):
        """
        If the form is valid, redirect to the supplied URL.
        """
        # Enroll the member in public data sharing before redirecting.
        participant = self.request.user.member.public_data_participant
        participant.enrolled = True
        participant.save()
        django_messages.success(
            self.request,
            ("Thank you! The public data sharing " "feature is now activated."),
        )
        return super(ConsentView, self).form_valid(form)
class ToggleSharingView(PrivateMixin, RedirectView):
    """
    Toggle the specified data_file to the specified value of public.
    """

    permanent = False
    url = reverse_lazy("my-member-data")

    def get_redirect_url(self):
        # Prefer an explicit 'next' destination supplied by the form.
        if "next" in self.request.POST:
            return self.request.POST["next"]
        else:
            return super(ToggleSharingView, self).get_redirect_url()

    def toggle_data(self, user, source, public):
        """
        Set the public-sharing flag for one source on behalf of `user`.

        `source` must be a known source label or a "direct-sharing-" project
        id label; `public` is the string "True" or "False" (validated by
        `post`).
        """
        # Reject unknown sources; report to Sentry except when under test.
        if source not in get_source_labels() and not source.startswith(
            "direct-sharing-"
        ):
            error_msg = (
                "Public sharing toggle attempted for "
                'unexpected source "{}"'.format(source)
            )
            django_messages.error(self.request, error_msg)
            if not settings.TESTING:
                raven_client.captureMessage(error_msg)
            return
        project = id_label_to_project(source)
        project_membership = DataRequestProjectMember.objects.get(
            member=user.member, project=project
        )
        participant = user.member.public_data_participant
        access, _ = PublicDataAccess.objects.get_or_create(
            participant=participant, project_membership=project_membership
        )
        # Default to private; only allow public if the project permits it.
        access.is_public = False
        if public == "True":
            if not project.no_public_data:
                access.is_public = True
        access.save()
        # Record a one-time 'publicly-shared' activity event per project.
        # NOTE(review): this block does not check the new `public` value, so a
        # toggle to private could also create the event — confirm intent.
        if (
            project.approved
            and not ActivityFeed.objects.filter(
                member=user.member, project=project, action="publicly-shared"
            ).exists()
        ):
            event = ActivityFeed(
                member=user.member, project=project, action="publicly-shared"
            )
            event.save()

    def post(self, request, *args, **kwargs):
        """
        Toggle public sharing status of a dataset.

        Raises ValueError if 'public'/'source' are missing or invalid.
        """
        if "source" in request.POST and "public" in request.POST:
            public = request.POST["public"]
            source = request.POST["source"]
            if public not in ["True", "False"]:
                raise ValueError("'public' must be 'True' or 'False'")
            self.toggle_data(request.user, source, public)
        else:
            raise ValueError("'public' and 'source' must be specified")
        return super(ToggleSharingView, self).post(request, *args, **kwargs)
class WithdrawView(PrivateMixin, CreateView):
    """
    Collect withdrawal feedback and deactivate public sharing on POST.
    """

    template_name = "public_data/withdraw.html"
    model = WithdrawalFeedback
    fields = ["feedback"]
    success_url = reverse_lazy("public-data:home")

    def form_valid(self, form):
        """
        Mark the member as unenrolled, attach them to the feedback record,
        then redirect to the configured success URL.
        """
        member = self.request.user.member
        public_participant = member.public_data_participant
        public_participant.enrolled = False
        public_participant.save()
        form.instance.member = member
        django_messages.success(
            self.request,
            (
                "You have successfully deactivated public data sharing and marked "
                "your files as private."
            ),
        )
        return super(WithdrawView, self).form_valid(form)
class HomeView(TemplateView):
    """
    Public data sharing home page.

    Supplies this page's URL as the 'next' URL for login or signup.
    """

    template_name = "public_data/home.html"

    def get_context_data(self, **kwargs):
        context = super(HomeView, self).get_context_data(**kwargs)
        active_projects = DataRequestProject.objects.filter(
            approved=True, active=True
        ).order_by("name")
        context["projects"] = active_projects
        context["next"] = reverse_lazy("public-data:home")
        return context
class ActivateOverviewView(PrivateMixin, TemplateView):
    """
    Overview page for activating public data sharing.

    Applies PrivateMixin (presumably restricting access to logged-in
    members — defined in common.mixins); otherwise only renders a template.
    """

    template_name = "public_data/overview.html"
| |
"""
Pandora API Transport
This module contains the very low level transport agent for the Pandora API.
The transport is concerned with the details of a raw HTTP call to the Pandora
API along with the request and response encryption by way of an Encryptor
object. The result from a transport is a JSON object for the API or an
exception.
API consumers should use one of the API clients in the pandora.client package.
"""
import random
import time
import json
import base64
import blowfish
import requests
from requests.adapters import HTTPAdapter
from .errors import PandoraException
DEFAULT_API_HOST = "tuner.pandora.com/services/json/"
# This decorator is a temporary workaround for handling SysCallErrors, see:
# https://github.com/shazow/urllib3/issues/367. Should be removed once a fix is
# applied in urllib3.
def retries(max_tries, exceptions=(Exception,)):
    """Function decorator implementing retrying logic.

    exceptions: A tuple of exception classes; default (Exception,)

    The wrapped function is invoked up to ``max_tries`` times. If it raises
    one of ``exceptions``, the call is repeated after an exponentially
    growing sleep; once the attempts are exhausted the exception propagates.
    """
    def decorator(func):
        def function(*args, **kwargs):
            for attempt in range(1, max_tries + 1):
                try:
                    return func(*args, **kwargs)
                except exceptions as exc:
                    # Don't retry for PandoraExceptions - unlikely that result
                    # will change for same set of input parameters.
                    if isinstance(exc, PandoraException):
                        raise
                    if attempt == max_tries:
                        raise
                    time.sleep(delay_exponential(0.5, 2, attempt))
        return function
    return decorator
def delay_exponential(base, growth_factor, attempts):
    """Calculate time to sleep based on exponential function.

    The format is::

        base * growth_factor ^ (attempts - 1)

    If ``base`` is set to 'rand' then a random number between
    0 and 1 will be used as the base.

    Base must be greater than 0, otherwise a ValueError will be
    raised.
    """
    if base == "rand":
        base = random.random()
    else:
        if base <= 0:
            raise ValueError(
                "The 'base' param must be greater than 0, got: {}".format(base)
            )
    return base * growth_factor ** (attempts - 1)
class RetryingSession(requests.Session):
    """Requests Session With Retry Support

    A Session whose HTTP and HTTPS adapters retry a failed connection up to
    three times. The Pandora API is fairly aggressive about closing
    connections on clients and the default session doesn't retry.
    """

    def __init__(self):
        super().__init__()
        # A separate adapter (and connection pool) per scheme.
        for scheme_prefix in ("http://", "https://"):
            self.mount(scheme_prefix, HTTPAdapter(max_retries=3))
class APITransport:
    """Pandora API Transport

    The transport is responsible for speaking the low-level protocol required
    by the Pandora API. It knows about encryption, TLS and the other API
    details. Once set up the transport acts like a callable.
    """

    API_VERSION = "5"

    # Methods that must start from a clean (unauthenticated) state.
    REQUIRE_RESET = ("auth.partnerLogin",)
    # Methods whose request bodies are sent as plain JSON, not encrypted.
    NO_ENCRYPT = ("auth.partnerLogin",)
    # Methods that must be sent over HTTPS; everything else uses plain HTTP.
    REQUIRE_TLS = (
        "auth.partnerLogin",
        "auth.userLogin",
        "station.getPlaylist",
        "user.createUser",
    )

    def __init__(self, cryptor, api_host=DEFAULT_API_HOST, proxy=None):
        # cryptor: object providing encrypt/decrypt_sync_time (see Encryptor).
        self.cryptor = cryptor
        self.api_host = api_host
        self._http = RetryingSession()
        if proxy:
            self._http.proxies = {"http": proxy, "https": proxy}
        self.reset()

    def reset(self):
        # Drop all authentication and clock-sync state.
        self.partner_auth_token = None
        self.user_auth_token = None
        self.partner_id = None
        self.user_id = None
        self.start_time = None
        self.server_sync_time = None

    def set_partner(self, data):
        # `syncTime` is encrypted; the sync_time setter decrypts it.
        self.sync_time = data["syncTime"]
        self.partner_auth_token = data["partnerAuthToken"]
        self.partner_id = data["partnerId"]

    def set_user(self, data):
        self.user_id = data["userId"]
        self.user_auth_token = data["userAuthToken"]

    @property
    def auth_token(self):
        # Prefer the user token once logged in; fall back to the partner token.
        if self.user_auth_token:
            return self.user_auth_token
        if self.partner_auth_token:
            return self.partner_auth_token
        return None

    @property
    def sync_time(self):
        # Server clock estimate: server time at login plus locally elapsed time.
        if not self.server_sync_time:
            return None
        return int(self.server_sync_time + (time.time() - self.start_time))

    def remove_empty_values(self, data):
        # Drop None-valued entries before sending.
        return {k: v for k, v in data.items() if v is not None}

    @sync_time.setter
    def sync_time(self, sync_time):
        self.server_sync_time = self.cryptor.decrypt_sync_time(sync_time)

    def _start_request(self, method):
        if method in self.REQUIRE_RESET:
            self.reset()
        if not self.start_time:
            self.start_time = int(time.time())

    def _make_http_request(self, url, data, params):
        try:
            data = data.encode("utf-8")
        except AttributeError:
            pass  # `data` was already bytes (encrypted payloads)
        params = self.remove_empty_values(params)
        result = self._http.post(url, data=data, params=params)
        result.raise_for_status()
        return result.content

    def test_url(self, url):
        # True if a HEAD request to `url` returns HTTP 200.
        return self._http.head(url).status_code == requests.codes.OK

    def _build_params(self, method):
        # Query-string parameters; None values are stripped before sending.
        return {
            "method": method,
            "auth_token": self.auth_token,
            "partner_id": self.partner_id,
            "user_id": self.user_id,
        }

    def _build_url(self, method):
        return "{}://{}".format(
            "https" if method in self.REQUIRE_TLS else "http", self.api_host
        )

    def _build_data(self, method, data):
        # Attach auth tokens and the synced clock, then JSON-encode and
        # (for most methods) encrypt the body.
        data["userAuthToken"] = self.user_auth_token
        if not self.user_auth_token and self.partner_auth_token:
            data["partnerAuthToken"] = self.partner_auth_token
        data["syncTime"] = self.sync_time
        data = json.dumps(self.remove_empty_values(data))
        if method not in self.NO_ENCRYPT:
            data = self.cryptor.encrypt(data)
        return data

    def _parse_response(self, result):
        result = json.loads(result.decode("utf-8"))
        if result["stat"] == "ok":
            return result["result"] if "result" in result else None
        else:
            raise PandoraException.from_code(result["code"], result["message"])

    @retries(3)
    def __call__(self, method, **data):
        # Perform one API call; retried up to 3 times by the decorator.
        self._start_request(method)
        url = self._build_url(method)
        data = self._build_data(method, data)
        params = self._build_params(method)
        result = self._make_http_request(url, data, params)
        return self._parse_response(result)
class BlowfishCryptor:
    """Low-Level Blowfish Cryptography

    Base class for symmetric Blowfish cryptography of raw byte messages with
    or without padding. Other encodings (hex, base64) are out of scope.
    Subclasses supply the actual cipher on top of different back-end
    libraries.
    """

    # Blowfish operates on 8-byte blocks.
    block_size = 8

    def _add_padding(self, data):
        # Pad the UTF-8 encoding of `data` up to a whole block; each padding
        # byte's value equals the pad length (1..block_size).
        remainder = len(data) % self.block_size
        pad_size = self.block_size - remainder
        return data.encode("utf-8") + (chr(pad_size) * pad_size).encode("ascii")

    @staticmethod
    def _strip_padding(data):
        # The final byte encodes how many padding bytes were appended.
        pad_size = int(data[-1])
        expected = (chr(pad_size) * pad_size).encode("ascii")
        if data[-pad_size:] != expected:
            raise ValueError("Invalid padding")
        return data[:-pad_size]
class PurePythonBlowfish(BlowfishCryptor):
    """Pure Python 3 Blowfish Cryptor

    Wraps the pure-python ``blowfish`` package behind the BlowfishCryptor
    padding helpers. Only compatible with Python 3.
    """

    def __init__(self, key):
        self.cipher = blowfish.Cipher(key.encode("ascii"))

    def decrypt(self, data, strip_padding=True):
        plaintext = b"".join(self.cipher.decrypt_ecb(data))
        if strip_padding:
            return self._strip_padding(plaintext)
        return plaintext

    def encrypt(self, data):
        padded = self._add_padding(data)
        return b"".join(self.cipher.encrypt_ecb(padded))
class Encryptor:
    """Pandora Blowfish Encryptor

    Encrypts and decrypts the relevant parts of API requests and responses,
    using the hex-encoded Blowfish format the Pandora API expects.
    """

    def __init__(self, in_key, out_key, crypto_class=PurePythonBlowfish):
        # Separate ciphers for inbound (response) and outbound (request) data.
        self.bf_in = crypto_class(in_key)
        self.bf_out = crypto_class(out_key)

    @staticmethod
    def _decode_hex(data):
        return base64.b16decode(data.encode("ascii").upper())

    @staticmethod
    def _encode_hex(data):
        return base64.b16encode(data).lower()

    def decrypt(self, data):
        raw = self._decode_hex(data)
        return json.loads(self.bf_out.decrypt(raw))

    def decrypt_sync_time(self, data):
        # Strip 4 leading and 2 trailing bytes before parsing the integer.
        raw = self.bf_in.decrypt(self._decode_hex(data), False)
        return int(raw[4:-2])

    def encrypt(self, data):
        return self._encode_hex(self.bf_out.encrypt(data))
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Home of the `Sequential` model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import warnings
from tensorflow.python import tf2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import functional
from tensorflow.python.keras.engine import input_layer
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.saving.saved_model import model_serialization
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.module import module
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
# Error raised whenever a layer added to (or called within) a Sequential
# model produces more than one output tensor.
SINGLE_LAYER_OUTPUT_ERROR_MSG = ('All layers in a Sequential model should have '
                                 'a single output tensor. For multi-output '
                                 'layers, use the functional API.')
@keras_export('keras.Sequential', 'keras.models.Sequential')
class Sequential(functional.Functional):
  """`Sequential` groups a linear stack of layers into a `tf.keras.Model`.

  `Sequential` provides training and inference features on this model.

  Examples:

  >>> # Optionally, the first layer can receive an `input_shape` argument:
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
  >>> # Afterwards, we do automatic shape inference:
  >>> model.add(tf.keras.layers.Dense(4))

  >>> # This is identical to the following:
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.Input(shape=(16,)))
  >>> model.add(tf.keras.layers.Dense(8))

  >>> # Note that you can also omit the `input_shape` argument.
  >>> # In that case the model doesn't have any weights until the first call
  >>> # to a training/evaluation method (since it isn't yet built):
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Dense(8))
  >>> model.add(tf.keras.layers.Dense(4))
  >>> # model.weights not created yet

  >>> # Whereas if you specify the input shape, the model gets built
  >>> # continuously as you are adding layers:
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Dense(8, input_shape=(16,)))
  >>> model.add(tf.keras.layers.Dense(4))
  >>> len(model.weights)
  4

  >>> # When using the delayed-build pattern (no input shape specified), you can
  >>> # choose to manually build your model by calling
  >>> # `build(batch_input_shape)`:
  >>> model = tf.keras.Sequential()
  >>> model.add(tf.keras.layers.Dense(8))
  >>> model.add(tf.keras.layers.Dense(4))
  >>> model.build((None, 16))
  >>> len(model.weights)
  4

  ```python
  # Note that when using the delayed-build pattern (no input shape specified),
  # the model gets built the first time you call `fit`, `eval`, or `predict`,
  # or the first time you call the model on some input data.
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(8))
  model.add(tf.keras.layers.Dense(1))
  model.compile(optimizer='sgd', loss='mse')
  # This builds the model for the first time:
  model.fit(x, y, batch_size=32, epochs=10)
  ```
  """

  @trackable.no_automatic_dependency_tracking
  def __init__(self, layers=None, name=None):
    """Creates a `Sequential` model instance.

    Args:
      layers: Optional list of layers to add to the model.
      name: Optional name for the model.
    """
    # Skip the init in FunctionalModel since model doesn't have input/output yet
    super(functional.Functional, self).__init__(  # pylint: disable=bad-super-call
        name=name, autocast=False)
    base_layer.keras_api_gauge.get_cell('Sequential').set(True)
    self.supports_masking = True
    self._compute_output_and_mask_jointly = True
    self._auto_track_sub_layers = False
    self._inferred_input_shape = None
    self._has_explicit_input_shape = False
    self._input_dtype = None
    self._layer_call_argspecs = {}
    self._created_nodes = set()
    # Flag that indicates whether the sequential network topology has been
    # created. It is false when there isn't any layer, or the layers don't
    # have an input shape.
    self._graph_initialized = False

    # Unfortunately some Sequential models using custom layers or FeatureColumn
    # layers have multiple inputs. This is fundamentally incompatible with
    # most of the Sequential API, and we have to disable a number of features
    # for such models.
    self._use_legacy_deferred_behavior = False

    # Add to the model any layers passed to the constructor.
    if layers:
      if not isinstance(layers, (list, tuple)):
        layers = [layers]
      for layer in layers:
        self.add(layer)

  @property
  def layers(self):
    # Historically, `sequential.layers` only returns layers that were added
    # via `add`, and omits the auto-generated `InputLayer` that comes at the
    # bottom of the stack.
    # `Trackable` manages the `_layers` attributes and does filtering
    # over it.
    layers = super(Sequential, self).layers
    if layers and isinstance(layers[0], input_layer.InputLayer):
      return layers[1:]
    return layers[:]

  @trackable.no_automatic_dependency_tracking
  def add(self, layer):
    """Adds a layer instance on top of the layer stack.

    Arguments:
        layer: layer instance.

    Raises:
        TypeError: If `layer` is not a layer instance.
        ValueError: In case the `layer` argument does not
            know its input shape.
        ValueError: In case the `layer` argument has
            multiple output tensors, or is already connected
            somewhere else (forbidden in `Sequential` models).
    """
    # If we are passed a Keras tensor created by keras.Input(), we can extract
    # the input layer from its keras history and use that without any loss of
    # generality.
    if hasattr(layer, '_keras_history'):
      origin_layer = layer._keras_history[0]
      if isinstance(origin_layer, input_layer.InputLayer):
        layer = origin_layer
        logging.warning(
            'Please add `keras.layers.InputLayer` instead of `keras.Input` to '
            'Sequential model. `keras.Input` is intended to be used by '
            'Functional model.')

    if isinstance(layer, module.Module):
      if not isinstance(layer, base_layer.Layer):
        # Wrap plain tf.Modules so they behave like Keras layers.
        layer = functional.ModuleWrapper(layer)
    else:
      raise TypeError('The added layer must be '
                      'an instance of class Layer. '
                      'Found: ' + str(layer))

    tf_utils.assert_no_legacy_layers([layer])
    if not self._is_layer_name_unique(layer):
      raise ValueError('All layers added to a Sequential model '
                       'should have unique names. Name "%s" is already the name'
                       ' of a layer in this model. Update the `name` argument '
                       'to pass a unique name.' % (layer.name,))

    self.built = False
    set_inputs = False
    self._maybe_create_attribute('_self_tracked_trackables', [])
    if not self._self_tracked_trackables:
      # This is the first layer in the model.
      if isinstance(layer, input_layer.InputLayer):
        # Case where the user passes an Input or InputLayer layer via `add`.
        set_inputs = True
      else:
        batch_shape, dtype = training_utils.get_input_shape_and_dtype(layer)
        if batch_shape:
          # Instantiate an input layer.
          x = input_layer.Input(
              batch_shape=batch_shape, dtype=dtype, name=layer.name + '_input')
          # This will build the current layer
          # and create the node connecting the current layer
          # to the input layer we just created.
          layer(x)
          set_inputs = True

      if set_inputs:
        outputs = nest.flatten(layer._inbound_nodes[-1].outputs)
        if len(outputs) != 1:
          raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
        self.outputs = outputs
        self.inputs = layer_utils.get_source_inputs(self.outputs[0])
        self.built = True
        self._has_explicit_input_shape = True

    elif self.outputs:
      # If the model is being built continuously on top of an input layer:
      # refresh its output.
      output_tensor = layer(self.outputs[0])
      if len(nest.flatten(output_tensor)) != 1:
        raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
      self.outputs = [output_tensor]
      self.built = True

    if set_inputs or self._graph_initialized:
      self._init_graph_network(self.inputs, self.outputs)
      self._graph_initialized = True
    else:
      # Deferred build: just track the layer until an input shape is known.
      self._self_tracked_trackables.append(layer)
      self._handle_deferred_layer_dependencies([layer])

    self._layer_call_argspecs[layer] = tf_inspect.getfullargspec(layer.call)

  @trackable.no_automatic_dependency_tracking
  def pop(self):
    """Removes the last layer in the model.

    Raises:
        TypeError: if there are no layers in the model.
    """
    if not self.layers:
      raise TypeError('There are no layers in the model.')

    layer = self._self_tracked_trackables.pop()
    self._layer_call_argspecs.pop(layer)
    if not self.layers:
      # Model is now empty: discard all inferred topology.
      self.outputs = None
      self.inputs = None
      self.built = False
      self._inferred_input_shape = None
      self._has_explicit_input_shape = False
      self._graph_initialized = False
    elif self._graph_initialized:
      # Re-wire the graph so the new last layer provides the outputs.
      self.layers[-1]._outbound_nodes = []
      self.outputs = [self.layers[-1].output]
      self._init_graph_network(self.inputs, self.outputs)
      self.built = True

  @trackable.no_automatic_dependency_tracking
  def _build_graph_network_for_inferred_shape(self,
                                              input_shape,
                                              input_dtype=None):
    """(Re)builds the internal graph network when a new input shape is seen."""
    if input_shape is None or not self.layers:
      return
    if not tf2.enabled() or not ops.executing_eagerly_outside_functions():
      # This behavior is disabled in V1 or when eager execution is disabled.
      return
    if (not self._has_explicit_input_shape and
        not self._use_legacy_deferred_behavior):
      # Determine whether the input shape is novel, i.e. whether the model
      # should be rebuilt.
      input_shape = tuple(input_shape)
      if self._inferred_input_shape is None:
        new_shape = input_shape
      else:
        new_shape = relax_input_shape(self._inferred_input_shape, input_shape)
      if (new_shape is not None and new_shape != self._inferred_input_shape):
        # A novel shape has been received: we need to rebuild the model.
        # In case we are inside a graph function, we step out of it.
        with ops.init_scope():
          inputs = input_layer.Input(
              batch_shape=new_shape,
              dtype=input_dtype,
              name=self.layers[0].name + '_input')
          layer_input = inputs
          created_nodes = set()
          for layer in self.layers:
            # Clear nodes previously created via this method. This prevents
            # node accumulation and ensures that e.g. `layer.output` is
            # always connected to `model.inputs`
            # (this is important e.g. for the feature extraction use case).
            # We don't just do `layer._inbound_nodes = []` in order
            # not to break shared layers added to Sequential models (which is
            # technically illegal as per the `add()` docstring,
            # but wasn't previously disabled).
            clear_previously_created_nodes(layer, self._created_nodes)
            try:
              # Create Functional API connection by calling the current layer
              layer_output = layer(layer_input)
            except:  # pylint:disable=bare-except
              # Functional API calls may fail for a number of reasons:
              # 1) The layer may be buggy. In this case it will be easier for
              # the user to debug if we fail on the first call on concrete data,
              # instead of our own call on a symbolic input.
              # 2) The layer is dynamic (graph-incompatible) and hasn't
              # overridden `compute_output_shape`. In this case, it is
              # impossible to build a graph network.
              # 3) The layer is otherwise incompatible with the Functional API
              # (e.g. this is the case for some probabilistic layers that rely
              # on hacks and that do not return tensors).
              # In all these cases, we should avoid creating a graph network
              # (or we simply can't).
              self._use_legacy_deferred_behavior = True
              return
            if len(nest.flatten(layer_output)) != 1:
              raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
            # Keep track of nodes just created above
            track_nodes_created_by_last_call(layer, created_nodes)
            layer_input = layer_output
            outputs = layer_output
          self._created_nodes = created_nodes
          try:
            # Initialize a graph Network. This call will never fail for
            # a stack of valid Keras layers.
            # However some users have layers that are fundamentally incompatible
            # with the Functional API, which do not return tensors. In this
            # case, we fall back to the legacy deferred behavior.
            # TODO(fchollet): consider raising here, as we should not be
            # supporting such layers.
            self._init_graph_network(inputs, outputs)
            self._graph_initialized = True
          except:  # pylint:disable=bare-except
            self._use_legacy_deferred_behavior = True
        self._inferred_input_shape = new_shape

  @generic_utils.default
  def build(self, input_shape=None):
    if self._graph_initialized:
      self._init_graph_network(self.inputs, self.outputs)
    else:
      if input_shape is None:
        raise ValueError('You must provide an `input_shape` argument.')
      self._build_graph_network_for_inferred_shape(input_shape)
      if not self.built:
        input_shape = tuple(input_shape)
        self._build_input_shape = input_shape
        super(Sequential, self).build(input_shape)
    self.built = True

  def call(self, inputs, training=None, mask=None):  # pylint: disable=redefined-outer-name
    # If applicable, update the static input shape of the model.
    if not self._has_explicit_input_shape:
      if not tensor_util.is_tensor(inputs) and not isinstance(
          inputs, np_arrays.ndarray):
        # This is a Sequential with multiple inputs. This is technically an
        # invalid use case of Sequential, but we tolerate it for backwards
        # compatibility.
        self._use_legacy_deferred_behavior = True
        self._build_input_shape = nest.map_structure(_get_shape_tuple, inputs)
        if tf2.enabled():
          logging.warning('Layers in a Sequential model should only have a '
                          'single input tensor, but we receive a %s input: %s'
                          '\nConsider rewriting this model with the Functional '
                          'API.' % (type(inputs), inputs))
      else:
        self._build_graph_network_for_inferred_shape(inputs.shape, inputs.dtype)

    if self._graph_initialized:
      if not self.built:
        self._init_graph_network(self.inputs, self.outputs)
      return super(Sequential, self).call(inputs, training=training, mask=mask)

    outputs = inputs  # handle the corner case where self.layers is empty
    for layer in self.layers:
      # During each iteration, `inputs` are the inputs to `layer`, and `outputs`
      # are the outputs of `layer` applied to `inputs`. At the end of each
      # iteration `inputs` is set to `outputs` to prepare for the next layer.
      kwargs = {}
      argspec = self._layer_call_argspecs[layer].args
      if 'mask' in argspec:
        kwargs['mask'] = mask
      if 'training' in argspec:
        kwargs['training'] = training

      outputs = layer(inputs, **kwargs)

      if len(nest.flatten(outputs)) != 1:
        raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
      # `outputs` will be the inputs to the next layer.
      inputs = outputs
      mask = getattr(outputs, '_keras_mask', None)
    return outputs

  def compute_output_shape(self, input_shape):
    # Thread the shape through each layer in order.
    shape = input_shape
    for layer in self.layers:
      shape = layer.compute_output_shape(shape)
    return shape

  def compute_mask(self, inputs, mask):
    # TODO(omalleyt): b/123540974 This function is not really safe to call
    # by itself because it will duplicate any updates and losses in graph
    # mode by `call`ing the Layers again.
    outputs = self.call(inputs, mask=mask)
    return getattr(outputs, '_keras_mask', None)

  def predict_proba(self, x, batch_size=32, verbose=0):
    """Generates class probability predictions for the input samples.

    The input samples are processed batch by batch.

    Arguments:
        x: input data, as a Numpy array or list of Numpy arrays
            (if the model has multiple inputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    Returns:
        A Numpy array of probability predictions.
    """
    warnings.warn('`model.predict_proba()` is deprecated and '
                  'will be removed after 2021-01-01. '
                  'Please use `model.predict()` instead.')
    preds = self.predict(x, batch_size, verbose)
    if preds.min() < 0. or preds.max() > 1.:
      logging.warning('Network returning invalid probability values. '
                      'The last layer might not normalize predictions '
                      'into probabilities '
                      '(like softmax or sigmoid would).')
    return preds

  def predict_classes(self, x, batch_size=32, verbose=0):
    """Generate class predictions for the input samples.

    The input samples are processed batch by batch.

    Arguments:
        x: input data, as a Numpy array or list of Numpy arrays
            (if the model has multiple inputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    Returns:
        A numpy array of class predictions.
    """
    warnings.warn('`model.predict_classes()` is deprecated and '
                  'will be removed after 2021-01-01. '
                  'Please use instead:'
                  '* `np.argmax(model.predict(x), axis=-1)`, '
                  '  if your model does multi-class classification '
                  '  (e.g. if it uses a `softmax` last-layer activation).'
                  '* `(model.predict(x) > 0.5).astype("int32")`, '
                  '  if your model does binary classification '
                  '  (e.g. if it uses a `sigmoid` last-layer activation).')
    proba = self.predict(x, batch_size=batch_size, verbose=verbose)
    if proba.shape[-1] > 1:
      return proba.argmax(axis=-1)
    else:
      return (proba > 0.5).astype('int32')

  def get_config(self):
    layer_configs = []
    for layer in super(Sequential, self).layers:
      # `super().layers` include the InputLayer if available (it is filtered out
      # of `self.layers`). Note that `self._self_tracked_trackables` is managed
      # by the tracking infrastructure and should not be used.
      layer_configs.append(generic_utils.serialize_keras_object(layer))
    config = {
        'name': self.name,
        'layers': copy.deepcopy(layer_configs)
    }
    if not self._is_graph_network and self._build_input_shape is not None:
      config['build_input_shape'] = self._build_input_shape
    return config

  @classmethod
  def from_config(cls, config, custom_objects=None):
    # Newer configs are dicts with name/layers keys; legacy configs are a
    # bare list of layer configs.
    if 'name' in config:
      name = config['name']
      build_input_shape = config.get('build_input_shape')
      layer_configs = config['layers']
    else:
      name = None
      build_input_shape = None
      layer_configs = config
    model = cls(name=name)
    for layer_config in layer_configs:
      layer = layer_module.deserialize(layer_config,
                                       custom_objects=custom_objects)
      model.add(layer)
    if (not model.inputs and build_input_shape and
        isinstance(build_input_shape, (tuple, list))):
      model.build(build_input_shape)
    return model

  @property
  def input_spec(self):
    # A manually-set spec takes precedence over the first layer's spec.
    if hasattr(self, '_manual_input_spec'):
      return self._manual_input_spec
    if self.layers and hasattr(self.layers[0], 'input_spec'):
      return self.layers[0].input_spec
    return None

  @input_spec.setter
  def input_spec(self, value):
    self._manual_input_spec = value

  @property
  def _trackable_saved_model_saver(self):
    return model_serialization.SequentialSavedModelSaver(self)

  def _is_layer_name_unique(self, layer):
    # True unless a *different* layer in the model already uses this name.
    for ref_layer in self.layers:
      if layer.name == ref_layer.name and ref_layer is not layer:
        return False
    return True

  def _assert_weights_created(self):
    if self._graph_initialized:
      return
    # When the graph has not been initialized, use the Model's implementation to
    # check if the weights have been created.
    super(functional.Functional, self)._assert_weights_created()  # pylint: disable=bad-super-call
def _get_shape_tuple(t):
if hasattr(t, 'shape'):
shape = t.shape
if isinstance(shape, tuple):
return shape
if shape.rank is not None:
return tuple(shape.as_list())
return None
return None
def relax_input_shape(shape_1, shape_2):
  """Merge two shapes, replacing any mismatched dimension with None."""
  if shape_1 is None or shape_2 is None or len(shape_1) != len(shape_2):
    return None
  return tuple(d1 if d1 == d2 else None for d1, d2 in zip(shape_1, shape_2))
def clear_previously_created_nodes(layer, created_nodes):
  """Remove nodes from `created_nodes` from the layer's inbound_nodes."""
  for node in layer._inbound_nodes:
    # Also detach the matching outbound nodes on each upstream layer.
    for inbound in nest.flatten(node.inbound_layers):
      inbound._outbound_nodes = [
          n for n in inbound._outbound_nodes if n not in created_nodes
      ]
  layer._inbound_nodes = [
      n for n in layer._inbound_nodes if n not in created_nodes
  ]
def track_nodes_created_by_last_call(layer, created_nodes):
  """Records in `created_nodes` the nodes produced by `layer`'s latest call."""
  if not layer._inbound_nodes:
    return
  last_node = layer._inbound_nodes[-1]
  created_nodes.add(last_node)
  # Also track the matching outbound node on each layer that fed this call.
  for inbound_layer in nest.flatten(last_node.inbound_layers):
    if inbound_layer._outbound_nodes:
      created_nodes.add(inbound_layer._outbound_nodes[-1])
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.random_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RandomNormalTest(test.TestCase):
  """Tests for random_ops.random_normal."""
  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    """Returns a closure that draws a [10, num] array of normal samples.

    Each invocation of the closure builds a fresh graph/session, so two
    invocations with seed=None produce independent sequences.
    """
    def func():
      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.random_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret
    return func
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True)
      x = sampler()
      y = sampler()
      # Number of positions where the two trials produced identical values.
      count = (x == y).sum()
      if count >= 10:
        print("x = ", x)
        print("y = ", y)
        print("count = ", count)
      self.assertTrue(count < 10)
  # Checks that the CPU and GPU implementations return the same results,
  # given the same random seed. (float16 gets a looser tolerance.)
  def testCPUGPUMatch(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      results = {}
      for use_gpu in [False, True]:
        sampler = self._Sampler(
            1000000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      if dt == dtypes.float16:
        self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
  def testSeed(self):
    """Same seed must reproduce the identical sample sequence."""
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      self.assertAllEqual(sx(), sy())
  def testNoCSE(self):
    """Two identical random ops must not be merged by common-subexpression elimination."""
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        shape = [2, 3, 4]
        rnd1 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
        rnd2 = random_ops.random_normal(shape, 0.0, 1.0, dtypes.float32)
        diff = rnd2 - rnd1
        self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class TruncatedNormalTest(test.TestCase):
  """Tests for random_ops.truncated_normal."""
  def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
    """Returns a closure that draws a [10, num] array of truncated-normal samples."""
    def func():
      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.truncated_normal(
            [num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret
    return func
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    # NOTE: TruncatedNormal on GPU is not supported.
    if not test.is_gpu_available():
      for dt in dtypes.float16, dtypes.float32, dtypes.float64:
        sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=False)
        x = sampler()
        y = sampler()
        # Number of positions where the two trials produced identical values.
        count = (x == y).sum()
        if count >= 10:
          print("x = ", x)
          print("y = ", y)
          print("count = ", count)
        self.assertTrue(count < 10)
  # Checks that the CPU and GPU implementations return the same results,
  # given the same random seed.
  def testCPUGPUMatch(self):
    # Skip the test if there is no GPU.
    if not test.is_gpu_available():
      return
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      results = {}
      for use_gpu in [False, True]:
        # We need a particular larger number of samples to test multiple rounds
        # on GPU.
        sampler = self._Sampler(
            1000000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      if dt == dtypes.float16:
        self.assertAllClose(results[False], results[True], rtol=1e-3, atol=1e-3)
      else:
        self.assertAllClose(results[False], results[True], rtol=1e-6, atol=1e-6)
  def testSeed(self):
    """Same seed must reproduce the identical sample sequence."""
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
      self.assertAllEqual(sx(), sy())
  # The effective standard deviation of truncated normal is 85% of the
  # requested one.
  def testStdDev(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      stddev = 3.0
      sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=True)
      x = sampler()
      print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
      self.assertTrue(abs(np.std(x) / stddev - 0.85) < 0.04)
  def testLargeShape(self):
    """Shape inference must handle dimensions exceeding 32 bits (2**33 rows)."""
    with self.test_session(use_gpu=True):
      v = variables.Variable(
          array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
      n = random_ops.truncated_normal(v.shape)
      self.assertEqual([8589934592, 1], n.shape.as_list())
  def testNoCSE(self):
    """Two identical random ops must not be merged by common-subexpression elimination."""
    with self.test_session(use_gpu=True):
      shape = [2, 3, 4]
      rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
      rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
      diff = rnd2 - rnd1
      self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
class RandomUniformTest(test.TestCase):
  """Tests for random_ops.random_uniform (float and integer dtypes)."""
  def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
    """Returns a closure that draws a [10, num] array of uniform samples in [minv, maxv)."""
    def func():
      with self.test_session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
        rng = random_ops.random_uniform(
            [num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
        ret = np.empty([10, num])
        for i in xrange(10):
          ret[i, :] = sess.run(rng)
      return ret
    return func
  def testRange(self):
    """Samples must lie in the half-open interval [minv, maxv)."""
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt, use_gpu=True)
      x = sampler()
      self.assertTrue(-2 <= np.min(x))
      self.assertTrue(np.max(x) < 8)
  # Asserts that different trials (1000 samples per trial) are unlikely
  # to see the same sequence of values. Will catch buggy
  # implementations which use the same random number seed.
  def testDistinct(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      maxv = 1.0 if dt.is_floating else 1 << 30
      sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt, use_gpu=True)
      x = sampler()
      y = sampler()
      # float16's coarse resolution makes exact collisions more likely,
      # so it gets a looser limit.
      count = (x == y).sum()
      count_limit = 50 if dt == dtypes.float16 else 10
      if count >= count_limit:
        print("x = ", x)
        print("y = ", y)
        print("count = ", count)
      self.assertTrue(count < count_limit)
  # Check that uniform ints actually follow a uniform distribution.
  def testUniformInts(self):
    minv = -2
    maxv = 15
    n = 100000
    p = 1 / (maxv - minv)
    # The counts should follow an (n, p) binomial distribution.
    mean = p * n
    std = np.sqrt(n * p * (1 - p))
    for dt in dtypes.int32, dtypes.int64:
      # Use a fixed seed here to make the test deterministic.
      # Without the fixed seed, the 5 * std bound will (very rarely) fail.
      sampler = self._Sampler(
          n // 10, minv=minv, maxv=maxv, dtype=dt, use_gpu=True, seed=17)
      # _Sampler returns 10 rows of n // 10 samples each, so n in total.
      x = sampler().ravel()
      self.assertEqual(x.shape, (n,))
      counts, _ = np.histogram(x, bins=maxv - minv)
      self.assertEqual(counts.shape, (maxv - minv,))
      self.assertEqual(counts.sum(), n)
      error = np.abs(counts - mean)
      self.assertLess(error.max(), 5 * std)
  # Checks that the CPU and GPU implementations return the same results,
  # given the same random seed.
  def testCPUGPUMatch(self):
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      maxv = 1.0 if dt.is_floating else 17
      results = {}
      for use_gpu in False, True:
        sampler = self._Sampler(
            1000000, minv=0, maxv=maxv, dtype=dt, use_gpu=use_gpu, seed=12345)
        results[use_gpu] = sampler()
      self.assertAllEqual(results[False], results[True])
  def testSeed(self):
    """Same seed must reproduce the sequence, including seeds beyond 64 bits."""
    for dt in dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64:
      for seed in [345, 2**100, -2**100]:
        sx = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
        sy = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
        self.assertAllEqual(sx(), sy())
  def testNoCSE(self):
    """Two identical random ops must not be merged by common-subexpression elimination."""
    shape = [2, 3, 4]
    for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
      with self.test_session(use_gpu=True):
        rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
        rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
        diff = (rnd2 - rnd1).eval()
        self.assertTrue(np.linalg.norm(diff) > 0.1)
class RandomShapeTest(test.TestCase):
  """Shape-inference tests for the random ops with static, partial, and unknown shapes."""
  def testTruncatedNormal(self):
    # Fully known shape.
    rnd1 = random_ops.truncated_normal([1, 2, 3])
    self.assertEqual([1, 2, 3], rnd1.get_shape())
    # Partially known shape: a rank-1 placeholder of length 3 fixes the output
    # rank at 3 but leaves every dimension unknown.
    rnd2 = random_ops.truncated_normal(
        array_ops.placeholder(
            dtypes.int32, shape=(3,)))
    self.assertEqual([None, None, None], rnd2.get_shape().as_list())
    # Unknown shape.
    rnd3 = random_ops.truncated_normal(array_ops.placeholder(dtypes.int32))
    self.assertIs(None, rnd3.get_shape().ndims)
  def testRandomNormal(self):
    # Fully known shape.
    rnd1 = random_ops.random_normal([1, 2, 3])
    self.assertEqual([1, 2, 3], rnd1.get_shape())
    # Partially known shape (rank known from the placeholder length only).
    rnd2 = random_ops.random_normal(
        array_ops.placeholder(
            dtypes.int32, shape=(3,)))
    self.assertEqual([None, None, None], rnd2.get_shape().as_list())
    # Unknown shape.
    rnd3 = random_ops.random_normal(array_ops.placeholder(dtypes.int32))
    self.assertIs(None, rnd3.get_shape().ndims)
  def testRandomUniform(self):
    # Fully known shape.
    rnd1 = random_ops.random_uniform([1, 2, 3])
    self.assertEqual([1, 2, 3], rnd1.get_shape())
    # Partially known shape (rank known from the placeholder length only).
    rnd2 = random_ops.random_uniform(
        array_ops.placeholder(
            dtypes.int32, shape=(3,)))
    self.assertEqual([None, None, None], rnd2.get_shape().as_list())
    # Unknown shape.
    rnd3 = random_ops.random_uniform(array_ops.placeholder(dtypes.int32))
    self.assertIs(None, rnd3.get_shape().ndims)
# Standard test-runner entry point.
if __name__ == "__main__":
  test.main()
| |
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# Copyright (c) 2021 Chris Reed
# Copyright (c) 2022 Clay McClure
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import (Callable, Dict, List, NamedTuple, Optional, Sequence, Tuple, TYPE_CHECKING, Union, overload)
from typing_extensions import Literal
from ..core import (exceptions, memory_interface)
from ..core.target import Target
from ..probe.debug_probe import DebugProbe
from ..probe.swj import SWJSequenceSender
from .ap import APSEL_APBANKSEL
from ..utility.sequencer import CallSequence
from ..utility.timeout import Timeout
if TYPE_CHECKING:
from .ap import (APAddressBase, AccessPort)
from ..core.session import Session
from ..utility.notification import Notification
LOG = logging.getLogger(__name__)
# Dedicated child logger for per-transfer tracing; disabled by default because
# its level is raised to CRITICAL here.
TRACE = LOG.getChild("trace")
TRACE.setLevel(logging.CRITICAL)
# DP register addresses. The DPBANKSEL value is encoded in bits [7:4].
DP_IDR = 0x00  # read-only
DP_IDR1 = 0x10  # read-only
DP_BASEPTR0 = 0x20  # read-only
DP_BASEPTR1 = 0x30  # read-only
DP_ABORT = 0x00  # write-only
DP_CTRL_STAT = 0x04  # read-write
DP_DLCR = 0x14  # read-write
DP_TARGETID = 0x24  # read-only
DP_DLPIDR = 0x34  # read-only
DP_EVENTSTAT = 0x44  # read-only
DP_SELECT1 = 0x54  # write-only
DP_SELECT = 0x8  # write-only
DP_RDBUFF = 0xC  # read-only
# Mask and shift for extracting DPBANKSEL from our DP register address constants. These are not
# related to the SELECT.DPBANKSEL bitfield.
DPADDR_MASK = 0x0f
DPADDR_DPBANKSEL_MASK = 0xf0
DPADDR_DPBANKSEL_SHIFT = 4
# DP IDR1 fields. ASIZE is bits [6:0]; note the literal below has a harmless
# extra leading hex digit — the value is 0x7f.
DPIDR1_ASIZE_MASK = 0x00000007f
DPIDR1_ERRMODE_MASK = 0x00000080
# DP BASEPTR0 fields (ADIv6 base system address pointer).
BASEPTR0_VALID_MASK = 0x00000001
BASEPTR0_PTR_MASK = 0xfffff000
BASEPTR0_PTR_SHIFT = 12
# DP ABORT register bit definitions.
ABORT_DAPABORT = 0x00000001
ABORT_STKCMPCLR = 0x00000002
ABORT_STKERRCLR = 0x00000004
ABORT_WDERRCLR = 0x00000008
ABORT_ORUNERRCLR = 0x00000010
# DP Control / Status Register bit definitions
CTRLSTAT_ORUNDETECT = 0x00000001
CTRLSTAT_STICKYORUN = 0x00000002
CTRLSTAT_STICKYCMP = 0x00000010
CTRLSTAT_STICKYERR = 0x00000020
CTRLSTAT_READOK = 0x00000040
CTRLSTAT_WDATAERR = 0x00000080
# DP SELECT register fields.
SELECT_DPBANKSEL_MASK = 0x0000000f
SELECT_APADDR_MASK = 0xfffffff0
# DP IDR register fields.
DPIDR_REVISION_MASK = 0xf0000000
DPIDR_REVISION_SHIFT = 28
DPIDR_PARTNO_MASK = 0x0ff00000
DPIDR_PARTNO_SHIFT = 20
DPIDR_MIN_MASK = 0x00010000
DPIDR_VERSION_MASK = 0x0000f000
DPIDR_VERSION_SHIFT = 12
# CTRL/STAT power request/acknowledge bits and transfer mode fields.
CSYSPWRUPACK = 0x80000000
CDBGPWRUPACK = 0x20000000
CSYSPWRUPREQ = 0x40000000
CDBGPWRUPREQ = 0x10000000
TRNNORMAL = 0x00000000
MASKLANE = 0x00000f00
## Arbitrary 5 second timeout for DP power up/down requests.
DP_POWER_REQUEST_TIMEOUT = 5.0
## @brief Class to hold fields from DP IDR register.
class DPIDR(NamedTuple):
    # Raw 32-bit IDR register value.
    idr: int
    # Part number field (IDR[27:20]).
    partno: int
    # DP architecture version field (IDR[15:12]).
    version: int
    # Revision field (IDR[31:28]).
    revision: int
    # Nonzero when the MINDP (minimal DP) bit is set.
    mindp: int
class ADIVersion(Enum):
    """@brief Supported versions of the Arm Debug Interface.

    The value of each member is the ADI major version number.
    """
    ADIv5 = 5
    ADIv6 = 6
class DPConnector:
    """@brief Establishes a connection to the DP for a given wire protocol.

    This class will ask the probe to connect using a given wire protocol. Then it makes multiple
    attempts at sending the SWJ sequence to select the wire protocol and read the DP IDR register.
    """
    def __init__(self, probe: DebugProbe) -> None:
        self._probe = probe
        # Placeholder IDR until a successful read in connect().
        self._idr = DPIDR(0, 0, 0, 0, 0)
        # Make sure we have a session, since we get the session from the probe and probes have their session set
        # after creation.
        assert probe.session is not None, "DPConnector requires the probe to have a session"
        self._session = probe.session
    @property
    def idr(self) -> DPIDR:
        """@brief DPIDR instance containing values read from the DP IDR register."""
        return self._idr
    def _get_protocol(self, protocol: Optional[DebugProbe.Protocol]) -> DebugProbe.Protocol:
        """@brief Resolve the protocol to use, validating probe support.

        @exception DebugError Raised when the probe does not support the requested protocol.
        """
        # Convert protocol from setting if not passed as parameter.
        if protocol is None:
            protocol_name = self._session.options.get('dap_protocol').strip().lower()
            protocol = DebugProbe.PROTOCOL_NAME_MAP[protocol_name]
        if protocol not in self._probe.supported_wire_protocols:
            raise exceptions.DebugError("requested wire protocol %s not supported by the debug probe" % protocol.name)
        return protocol
    def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None:
        """@brief Establish a connection to the DP.

        This method causes the debug probe to connect using the wire protocol.
        @param self
        @param protocol One of the @ref pyocd.probe.debug_probe.DebugProbe.Protocol
        "DebugProbe.Protocol" enums. If not provided, will default to the `dap_protocol` setting.
        @exception DebugError
        @exception TransferError
        """
        try:
            self._probe.lock()
            # Determine the requested wire protocol.
            protocol = self._get_protocol(protocol)
            # If this is not None then the probe is already connected.
            current_wire_protocol = self._probe.wire_protocol
            already_connected = current_wire_protocol is not None
            if already_connected:
                assert current_wire_protocol
                self._check_protocol(current_wire_protocol, protocol)
            else:
                self._connect_probe(protocol)
                # Re-read the protocol in case DEFAULT was requested and the
                # probe picked a concrete one.
                protocol = self._probe.wire_protocol
            assert protocol
            self._connect_dp(protocol)
        finally:
            self._probe.unlock()
    def _check_protocol(self, current_wire_protocol: DebugProbe.Protocol, protocol: DebugProbe.Protocol) -> None:
        """@brief Log whether the requested protocol matches an existing probe connection."""
        # Warn about mismatched current and requested wire protocols.
        if (protocol is not current_wire_protocol) and (protocol is not DebugProbe.Protocol.DEFAULT):
            LOG.warning("Cannot use %s; already connected with %s", protocol.name, current_wire_protocol.name)
        else:
            LOG.debug("Already connected with %s", current_wire_protocol.name)
    def _connect_probe(self, protocol: DebugProbe.Protocol) -> None:
        """@brief Ask the probe to connect with the given wire protocol."""
        # Debug log with the selected protocol.
        if protocol is not DebugProbe.Protocol.DEFAULT:
            LOG.debug("Using %s wire protocol", protocol.name)
        # Connect using the selected protocol.
        self._probe.connect(protocol)
        # Log the actual protocol if selected was default.
        if protocol is DebugProbe.Protocol.DEFAULT:
            actual_protocol = self._probe.wire_protocol
            assert actual_protocol
            LOG.debug("Default wire protocol selected; using %s", actual_protocol.name)
    def _connect_dp(self, protocol: DebugProbe.Protocol) -> None:
        """@brief Select the wire protocol via SWJ (if enabled) and read DP IDR, with retries."""
        # Get SWJ settings.
        use_dormant = self._session.options.get('dap_swj_use_dormant')
        send_swj = self._session.options.get('dap_swj_enable') \
                    and (DebugProbe.Capability.SWJ_SEQUENCE in self._probe.capabilities)
        # Create object to send SWJ sequences.
        swj = SWJSequenceSender(self._probe, use_dormant)
        # Multiple attempts to select protocol and read DP IDR.
        for attempt in range(4):
            try:
                if send_swj:
                    swj.select_protocol(protocol)
                # Attempt to read the DP IDR register.
                self._idr = self.read_idr()
                # Successful connection so exit the loop.
                break
            except exceptions.TransferError:
                # If not sending the SWJ sequence, just reraise; there's nothing more to do.
                if not send_swj:
                    raise
                # If the read of the DP IDCODE fails, retry SWJ sequence. The DP may have been
                # in a state where it thought the SWJ sequence was an invalid transfer. We also
                # try enabling use of dormant state if it wasn't already enabled.
                LOG.debug("DP IDCODE read failed; resending SWJ sequence (use dormant=%s)", use_dormant)
                if attempt == 1:
                    # If already using dormant mode, just raise, we don't need to retry the same mode.
                    if use_dormant:
                        raise
                    # After the second attempt, switch to enabling dormant mode.
                    swj.use_dormant = True
                elif attempt == 3:
                    # After 4 attempts, we let the exception propagate.
                    raise
    def read_idr(self) -> DPIDR:
        """@brief Read IDR register and extract its fields into a DPIDR."""
        dpidr = self._probe.read_dp(DP_IDR, now=True)
        dp_partno = (dpidr & DPIDR_PARTNO_MASK) >> DPIDR_PARTNO_SHIFT
        dp_version = (dpidr & DPIDR_VERSION_MASK) >> DPIDR_VERSION_SHIFT
        dp_revision = (dpidr & DPIDR_REVISION_MASK) >> DPIDR_REVISION_SHIFT
        is_mindp = (dpidr & DPIDR_MIN_MASK) != 0
        return DPIDR(dpidr, dp_partno, dp_version, dp_revision, is_mindp)
class DebugPort:
    """@brief Represents the Arm Debug Interface (ADI) Debug Port (DP).

    Handles DP connection, power control, register access with SELECT caching,
    and recovery after target reset.
    """
    ## Sleep for 50 ms between connection tests and reconnect attempts after a reset.
    _RESET_RECOVERY_SLEEP_INTERVAL = 0.05
    ## Number of times to try to read DP registers after hw reset before attempting reconnect.
    _RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT = 1
    def __init__(self, probe: DebugProbe, target: Target) -> None:
        """@brief Constructor.
        @param self The DebugPort object.
        @param probe The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" object. The probe is assumed to not
        have been opened yet.
        @param target An instance of @ref pyocd.core.soc_target.SoCTarget "SoCTarget". Assumed to not have been
        fully initialized.
        """
        self._probe = probe
        self.target = target
        assert target.session
        self._session = target.session
        self.valid_aps: Optional[List["APAddressBase"]] = None
        # Placeholder until _connect() reads the real IDR.
        self.dpidr = DPIDR(0, 0, 0, 0, 0)
        self.aps: Dict["APAddressBase", "AccessPort"] = {}
        # Monotonic counter used to number accesses (see next_access_number).
        self._access_number: int = 0
        # Cached DP SELECT value; None means unknown/invalid (forces a write).
        self._cached_dp_select: Optional[int] = None
        self._protocol: Optional[DebugProbe.Protocol] = None
        # Probe capability flags; filled in by _get_probe_capabilities().
        self._probe_managed_ap_select: bool = False
        self._probe_managed_dpbanksel: bool = False
        self._probe_supports_dpbanksel: bool = False
        self._probe_supports_apv2_addresses: bool = False
        self._have_probe_capabilities: bool = False
        self._did_check_version: bool = False
        self._log_dp_info: bool = True
        # DPv3 attributes
        self._is_dpv3: bool = False
        self._addr_size: int = -1
        self._addr_mask: int = -1
        # NOTE(review): declared here as `_errmode`, but _check_version() assigns
        # `_errmode_supported` — confirm which name downstream code reads.
        self._errmode: int = -1
        self._base_addr: int = -1
        self._apacc_mem_interface: Optional[APAccessMemoryInterface] = None
        # Subscribe to reset events.
        self._session.subscribe(self._reset_did_occur, (Target.Event.PRE_RESET, Target.Event.POST_RESET))
    @property
    def probe(self) -> DebugProbe:
        """@brief The @ref pyocd.probe.debug_probe.DebugProbe "DebugProbe" backing this DP."""
        return self._probe
    @property
    def session(self) -> "Session":
        """@brief The session associated with this DP's target."""
        return self._session
@property
def adi_version(self) -> ADIVersion:
return ADIVersion.ADIv6 if self._is_dpv3 else ADIVersion.ADIv5
    @property
    def base_address(self) -> int:
        """@brief Base address of the first component for an ADIv6 system.

        Remains -1 until _check_version() reads a valid BASEPTR register.
        """
        return self._base_addr
    @property
    def apacc_memory_interface(self) -> "APAccessMemoryInterface":
        """@brief Memory interface for performing APACC transactions.

        Created lazily on first access.
        """
        if self._apacc_mem_interface is None:
            self._apacc_mem_interface = APAccessMemoryInterface(self)
        return self._apacc_mem_interface
    @property
    def next_access_number(self) -> int:
        # Monotonically increasing sequence number; incremented on each read.
        self._access_number += 1
        return self._access_number
    def lock(self) -> None:
        """@brief Lock the DP from access by other threads.

        Delegates to the probe's lock.
        """
        self.probe.lock()
    def unlock(self) -> None:
        """@brief Unlock the DP.

        Delegates to the probe's unlock.
        """
        self.probe.unlock()
    def connect(self, protocol: Optional[DebugProbe.Protocol] = None) -> None:
        """@brief Connect to the target.
        This method causes the debug probe to connect using the selected wire protocol. The probe
        must have already been opened prior to this call.
        Unlike create_connect_sequence(), this method is intended to be used when manually constructing a
        DebugPort instance. It simply calls create_connect_sequence() and invokes the returned call sequence.
        @param self
        @param protocol One of the @ref pyocd.probe.debug_probe.DebugProbe.Protocol
        "DebugProbe.Protocol" enums. If not provided, will default to the `protocol` setting.
        """
        # Remember the protocol so _connect() (run from the sequence) can use it.
        self._protocol = protocol
        self.create_connect_sequence().invoke()
    def disconnect(self) -> None:
        """@brief Disconnect from target.
        DP debug is powered down. See power_down_debug().
        """
        self.power_down_debug()
def create_connect_sequence(self) -> CallSequence:
"""@brief Returns call sequence to connect to the target.
Returns a @ref pyocd.utility.sequence.CallSequence CallSequence that will connect to the
DP, power up debug and the system, check the DP version to identify whether the target uses
ADI v5 or v6, then clears sticky errors.
The probe must have already been opened prior to this method being called.
@param self
@return @ref pyocd.utility.sequence.CallSequence CallSequence
"""
seq: List[Tuple[str, Callable]] = [
('lock_probe', self.probe.lock),
]
if not self._have_probe_capabilities:
seq += [
('get_probe_capabilities', self._get_probe_capabilities),
]
seq += [
('connect', self._connect),
('clear_sticky_err', self.clear_sticky_err),
('power_up_debug', self.power_up_debug),
]
if not self._did_check_version:
seq += [
('check_version', self._check_version),
]
seq += [
('unlock_probe', self.probe.unlock),
]
return CallSequence(*seq)
def _get_probe_capabilities(self) -> None:
"""@brief Examine the probe's capabilities."""
caps = self._probe.capabilities
self._probe_managed_ap_select = (DebugProbe.Capability.MANAGED_AP_SELECTION in caps)
self._probe_managed_dpbanksel = (DebugProbe.Capability.MANAGED_DPBANKSEL in caps)
self._probe_supports_dpbanksel = (DebugProbe.Capability.BANKED_DP_REGISTERS in caps)
self._probe_supports_apv2_addresses = (DebugProbe.Capability.APv2_ADDRESSES in caps)
self._have_probe_capabilities = True
    def _connect(self) -> None:
        """@brief Connect to the DP via a DPConnector and record the resulting IDR."""
        # Attempt to connect.
        connector = DPConnector(self.probe)
        connector.connect(self._protocol)
        # Report on DP version. Logged at DEBUG level during reset recovery
        # (when _log_dp_info is False) to avoid repeated INFO messages.
        self.dpidr = connector.idr
        LOG.log(logging.INFO if self._log_dp_info else logging.DEBUG,
                "DP IDR = 0x%08x (v%d%s rev%d)", self.dpidr.idr, self.dpidr.version,
                " MINDP" if self.dpidr.mindp else "", self.dpidr.revision)
def _check_version(self) -> None:
self._is_dpv3 = (self.dpidr.version == 3)
if self._is_dpv3:
# Check that the probe will be able to access ADIv6 APs.
if self._probe_managed_ap_select and not self._probe_supports_apv2_addresses:
raise exceptions.ProbeError("connected to ADIv6 target with probe that does not support APv2 addresses")
idr1 = self.read_reg(DP_IDR1)
self._addr_size = idr1 & DPIDR1_ASIZE_MASK
self._addr_mask = (1 << self._addr_size) - 1
self._errmode_supported = (idr1 & DPIDR1_ERRMODE_MASK) != 0
LOG.debug("DP IDR1 = 0x%08x (addr size=%d, errmode=%d)", idr1, self._addr_size, self._errmode_supported)
# Read base system address.
baseptr0 = self.read_reg(DP_BASEPTR0)
valid = (baseptr0 & BASEPTR0_VALID_MASK) != 0
if valid:
base = (baseptr0 & BASEPTR0_PTR_MASK) >> BASEPTR0_PTR_SHIFT
if self._addr_size > 32:
baseptr1 = self.read_reg(DP_BASEPTR1)
base |= baseptr1 << 32
base &= self._addr_mask
self._base_addr = base
LOG.debug("DP BASEPTR = 0x%08x", self._base_addr)
else:
LOG.warning("DPv3 has no valid base address")
self._did_check_version = True
    def flush(self) -> None:
        """@brief Flush any queued probe transfers.

        Transfer errors are passed to _handle_error() and then re-raised.
        """
        try:
            self.probe.flush()
        except exceptions.TargetError as error:
            self._handle_error(error, self.next_access_number)
            raise
    # Typed overloads: `now=True` returns the value; `now=False` returns a
    # deferred-result callable.
    @overload
    def read_reg(self, addr: int) -> int:
        ...
    @overload
    def read_reg(self, addr: int, now: Literal[True] = True) -> int:
        ...
    @overload
    def read_reg(self, addr: int, now: Literal[False]) -> Callable[[], int]:
        ...
    @overload
    def read_reg(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
        ...
    def read_reg(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
        """@brief Read a DP register; alias for read_dp()."""
        return self.read_dp(addr, now)
    def write_reg(self, addr: int, data: int) -> None:
        """@brief Write a DP register; alias for write_dp()."""
        self.write_dp(addr, data)
def power_up_debug(self) -> bool:
"""@brief Assert DP power requests.
Request both debug and system power be enabled, and wait until the request is acked.
There is a timeout for the request.
@return Boolean indicating whether the power up request succeeded.
"""
# Send power up request for system and debug.
self.write_reg(DP_CTRL_STAT, CSYSPWRUPREQ | CDBGPWRUPREQ | MASKLANE | TRNNORMAL)
with Timeout(DP_POWER_REQUEST_TIMEOUT) as time_out:
while time_out.check():
r = self.read_reg(DP_CTRL_STAT)
if (r & (CDBGPWRUPACK | CSYSPWRUPACK)) == (CDBGPWRUPACK | CSYSPWRUPACK):
break
else:
return False
return True
def power_down_debug(self) -> bool:
"""@brief Deassert DP power requests.
ADIv6 says that we must not clear CSYSPWRUPREQ and CDBGPWRUPREQ at the same time.
ADIv5 says CSYSPWRUPREQ must not be set to 1 while CDBGPWRUPREQ is set to 0. So we
start with deasserting system power, then debug power. Each deassertion has its own
timeout.
@return Boolean indicating whether the power down request succeeded.
"""
# Power down system first.
self.write_reg(DP_CTRL_STAT, CDBGPWRUPREQ | MASKLANE | TRNNORMAL)
with Timeout(DP_POWER_REQUEST_TIMEOUT) as time_out:
while time_out.check():
r = self.read_reg(DP_CTRL_STAT)
if (r & (CDBGPWRUPACK | CSYSPWRUPACK)) == CDBGPWRUPACK:
break
else:
return False
# Now power down debug.
self.write_reg(DP_CTRL_STAT, MASKLANE | TRNNORMAL)
with Timeout(DP_POWER_REQUEST_TIMEOUT) as time_out:
while time_out.check():
r = self.read_reg(DP_CTRL_STAT)
if (r & (CDBGPWRUPACK | CSYSPWRUPACK)) == 0:
break
else:
return False
return True
    def _invalidate_cache(self) -> None:
        """@brief Invalidate cached DP registers.

        A None cache forces _write_dp_select() to rewrite SELECT on next use.
        """
        self._cached_dp_select = None
    def _reset_did_occur(self, notification: "Notification") -> None:
        """@brief Handles reset notifications to invalidate register cache.
        The cache is cleared on all resets just to be safe. On most devices, warm resets do not reset
        debug logic, but it does happen on some devices.
        """
        # `notification` is unused; the parameter is required by the subscriber
        # callback signature (see the subscribe() call in __init__).
        self._invalidate_cache()
    def post_reset_recovery(self) -> None:
        """@brief Wait for the target to recover from reset, with auto-reconnect if needed.

        Polls CTRL/STAT until the power-up request bits read back as set. After
        _RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT failed attempts, the DP is
        reconnected on each subsequent failure. The overall deadline comes from
        the `reset.dap_recover.timeout` session option.
        """
        # Check if we can access DP registers. If this times out, then reconnect the DP and retry.
        with Timeout(self.session.options.get('reset.dap_recover.timeout'),
                self._RESET_RECOVERY_SLEEP_INTERVAL) as time_out:
            attempt = 0
            while time_out.check():
                try:
                    # Try to read CTRL/STAT. If the power-up request bits are still set, then the DP
                    # connection was not lost and we can just return.
                    value = self.read_reg(DP_CTRL_STAT)
                    if (value & (CSYSPWRUPREQ | CDBGPWRUPREQ)) == (CSYSPWRUPREQ | CDBGPWRUPREQ):
                        return
                except exceptions.TransferError:
                    # Ignore errors caused by flushing.
                    try:
                        self.flush()
                    except exceptions.TransferError:
                        pass
                    if attempt == self._RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT:
                        LOG.info("DAP is not accessible after reset; attempting reconnect")
                    elif attempt > self._RESET_RECOVERY_ATTEMPTS_BEFORE_RECONNECT:
                        # Try reconnect. Temporarily demote the "DP IDR" log to debug level
                        # so repeated reconnects don't spam the info log.
                        try:
                            self._log_dp_info = False
                            self.connect()
                        finally:
                            self._log_dp_info = True
                    attempt += 1
            else:
                # while-else: only reached when the timeout expires without a successful read.
                LOG.error("DAP is not accessible after reset followed by attempted reconnect")
    def reset(self, *, send_notifications: bool = True) -> None:
        """@brief Hardware reset.
        Pre- and post-reset notifications are sent.
        This method can be called before the DebugPort is connected.
        @param self This object.
        @param send_notifications Optional keyword-only parameter used by higher-level reset methods so they can
        manage the sending of reset notifications themselves, in order to provide more context in the notification.
        @todo Should automatic recovery from a disconnected DAP be provided for these low-level hardware resets
        like is done for CortexM.reset()?
        """
        if send_notifications:
            self.session.notify(Target.Event.PRE_RESET, self)
        self.probe.reset()
        # Wait for the DAP to become accessible again, reconnecting if needed.
        self.post_reset_recovery()
        if send_notifications:
            self.session.notify(Target.Event.POST_RESET, self)
    def assert_reset(self, asserted: bool, *, send_notifications: bool = True) -> None:
        """@brief Assert or deassert the hardware reset signal.
        A pre-reset notification is sent before asserting reset, whereas a post-reset notification is sent
        after deasserting reset.
        This method can be called before the DebugPort is connected.
        @param self This object.
        @param asserted True if nRESET is to be driven low; False will drive nRESET high.
        @param send_notifications Optional keyword-only parameter used by higher-level reset methods so they can
        manage the sending of reset notifications themselves, in order to provide more context in the notification.
        """
        is_asserted = False
        if send_notifications:
            is_asserted = self.is_reset_asserted()
            # Only notify on an actual low transition.
            if asserted and not is_asserted:
                self.session.notify(Target.Event.PRE_RESET, self)
        self.probe.assert_reset(asserted)
        # Only notify on an actual high transition (reset released).
        if send_notifications and not asserted and is_asserted:
            self.session.notify(Target.Event.POST_RESET, self)
    def is_reset_asserted(self) -> bool:
        """@brief Returns the current state of the nRESET signal.
        This method can be called before the DebugPort is initialized.
        @retval True Reset is asserted; nRESET is low.
        @retval False Reset is not asserted; nRESET is high.
        """
        return self.probe.is_reset_asserted()
    def set_clock(self, frequency: float) -> None:
        """@brief Change the wire protocol's clock frequency.
        @param self This object.
        @param frequency New wire protocol frequency in Hertz.
        """
        self.probe.set_clock(frequency)
def _write_dp_select(self, mask: int, value: int) -> None:
"""@brief Modify part of the DP SELECT register and write if cache is stale.
The DP lock must already be acquired before calling this method.
"""
# Compute the new SELECT value and see if we need to write it.
if self._cached_dp_select is None:
select = value
else:
select = (self._cached_dp_select & ~mask) | value
if select == self._cached_dp_select:
return
# Update the SELECT register and cache.
self.write_dp(DP_SELECT, select)
self._cached_dp_select = select
def _set_dpbanksel(self, addr: int, is_write: bool) -> bool:
    """@brief Updates the DPBANKSEL field of the SELECT register as required.
    Several DP registers (most, actually) ignore DPBANKSEL. If one of those is being
    accessed, any value of DPBANKSEL can be used. Otherwise SELECT is updated if necessary
    and a lock acquired so another thread doesn't change DPBANKSEL until this transaction is
    complete.
    This method also handles the case where the debug probe manages DPBANKSEL on its own,
    such as with STLink.
    @param self This object.
    @param addr DP register address, including the DPBANKSEL field.
    @param is_write True for a write access, False for a read; affects which registers
        are treated as ignoring DPBANKSEL.
    @return Whether the access needs a lock on DP SELECT. When True, the caller is
        responsible for calling unlock() after the access completes.
    @exception exceptions.ProbeError Raised when a banked register is being accessed but the
        probe doesn't support DPBANKSEL.
    """
    # For DPv1-2, only address 0x4 (CTRL/STAT) honours DPBANKSEL.
    # For DPv3, SELECT and RDBUFF ignore DPBANKSEL for both reads and writes, while
    # ABORT ignores it only for writes (address 0 for reads is IDR).
    if self._is_dpv3 and not is_write:
        registers_ignoring_dpbanksel = (DP_SELECT, DP_RDBUFF)
    else:
        registers_ignoring_dpbanksel = (DP_ABORT, DP_SELECT, DP_RDBUFF)
    if (addr & DPADDR_MASK) not in registers_ignoring_dpbanksel:
        # Extract the DP bank number from the address.
        dpbanksel = (addr & DPADDR_DPBANKSEL_MASK) >> DPADDR_DPBANKSEL_SHIFT
        # Check if the probe handles DPBANKSEL for us (e.g. STLink).
        if self._probe_managed_dpbanksel:
            # If there is a nonzero DPBANKSEL and the probe doesn't support this,
            # then report an error.
            if dpbanksel and not self._probe_supports_dpbanksel:
                raise exceptions.ProbeError("probe does not support banked DP registers")
            else:
                # Probe manages the bank itself; no SELECT write and no lock needed.
                return False
        # Update the selected DP bank. Lock is held until the caller unlocks.
        self.lock()
        self._write_dp_select(SELECT_DPBANKSEL_MASK, dpbanksel)
        return True
    else:
        # Register ignores DPBANKSEL; no SELECT update or lock needed.
        return False
@overload
def read_dp(self, addr: int) -> int:
    ...
@overload
def read_dp(self, addr: int, now: Literal[True] = True) -> int:
    ...
@overload
def read_dp(self, addr: int, now: Literal[False]) -> Callable[[], int]:
    ...
@overload
def read_dp(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
    ...
def read_dp(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
    """@brief Read a DP register.
    @param self This object.
    @param addr Word-aligned DP register address, optionally including a DPBANKSEL field.
    @param now When True, the read result is returned directly. When False, a callable
        taking no arguments is returned; invoking it waits for and returns the result.
    @return The register value (now=True) or a result callback (now=False).
    @exception ValueError The address is not word aligned.
    """
    if (addr & DPADDR_MASK) % 4 != 0:
        raise ValueError("DP address must be word aligned")
    # Access number is used only to correlate trace messages with errors.
    num = self.next_access_number
    # Update DPBANKSEL if required. If this returns True, the DP lock is held and must
    # be released exactly once, when the result is consumed (see the finally below).
    did_lock = self._set_dpbanksel(addr, False)
    try:
        # Always issue the probe read deferred; the callback is invoked immediately
        # below when now=True.
        result_cb = self.probe.read_dp(addr & DPADDR_MASK, now=False)
    except exceptions.TargetError as error:
        self._handle_error(error, num)
        if did_lock:
            self.unlock()
        raise
    except Exception:
        # Non-target errors: still release the lock before propagating.
        if did_lock:
            self.unlock()
        raise
    # Read callback returned for async reads.
    def read_dp_cb() -> int:
        try:
            result = result_cb()
            TRACE.debug("read_dp:%06d %s(addr=0x%08x) -> 0x%08x", num, "" if now else "...", addr, result)
            return result
        except exceptions.TargetError as error:
            TRACE.debug("read_dp:%06d %s(addr=0x%08x) -> error (%s)", num, "" if now else "...", addr, error)
            self._handle_error(error, num)
            raise
        finally:
            # Lock taken by _set_dpbanksel is released here, once per access.
            if did_lock:
                self.unlock()
    if now:
        return read_dp_cb()
    else:
        TRACE.debug("read_dp:%06d (addr=0x%08x) -> ...", num, addr)
        return read_dp_cb
def write_dp(self, addr: int, data: int) -> None:
    """@brief Write a DP register.
    @param self This object.
    @param addr Word-aligned DP register address, optionally including a DPBANKSEL field.
    @param data 32-bit value to write.
    @exception ValueError The address is not word aligned.
    """
    if (addr & DPADDR_MASK) % 4:
        raise ValueError("DP address must be word aligned")
    access_num = self.next_access_number
    # Update DPBANKSEL if required; may take the DP lock.
    locked = self._set_dpbanksel(addr, True)
    # Write the DP register.
    try:
        TRACE.debug("write_dp:%06d (addr=0x%08x) = 0x%08x", access_num, addr, data)
        self.probe.write_dp(addr & DPADDR_MASK, data)
    except exceptions.TargetError as error:
        self._handle_error(error, access_num)
        raise
    finally:
        if locked:
            self.unlock()
def _select_ap(self, addr: int) -> bool:
    """@brief Write DP_SELECT to choose the given AP.
    Handles the case where the debug probe manages selecting an AP itself, in which case
    SELECT is never written directly.
    @param addr AP address, in the form appropriate for the active ADI version.
    @return Whether the access needs a lock on DP SELECT; when True the caller
        must call unlock() after the access completes.
    """
    # If the probe handles selecting the AP for us, there's nothing to do here.
    if self._probe_managed_ap_select:
        return False
    # Write DP SELECT to select the AP; hold the lock until the caller unlocks.
    self.lock()
    version = self.adi_version
    if version == ADIVersion.ADIv5:
        self._write_dp_select(APSEL_APBANKSEL, addr & APSEL_APBANKSEL)
    elif version == ADIVersion.ADIv6:
        self._write_dp_select(SELECT_APADDR_MASK, addr & SELECT_APADDR_MASK)
    else:
        self.unlock()
        assert False, "invalid ADI version"
    return True
def write_ap(self, addr: int, data: int) -> None:
    """@brief Write an AP register.
    @param self This object.
    @param addr AP register address.
    @param data 32-bit value to write.
    """
    assert isinstance(addr, int)
    access_num = self.next_access_number
    locked = False
    try:
        # Select the AP; may take the DP lock, released in the finally clause.
        locked = self._select_ap(addr)
        TRACE.debug("write_ap:%06d (addr=0x%08x) = 0x%08x", access_num, addr, data)
        self.probe.write_ap(addr, data)
    except exceptions.TargetError as error:
        self._handle_error(error, access_num)
        raise
    finally:
        if locked:
            self.unlock()
@overload
def read_ap(self, addr: int) -> int:
    ...
@overload
def read_ap(self, addr: int, now: Literal[True] = True) -> int:
    ...
@overload
def read_ap(self, addr: int, now: Literal[False]) -> Callable[[], int]:
    ...
@overload
def read_ap(self, addr: int, now: bool) -> Union[int, Callable[[], int]]:
    ...
def read_ap(self, addr: int, now: bool = True) -> Union[int, Callable[[], int]]:
    """@brief Read an AP register.
    @param self This object.
    @param addr AP register address.
    @param now When True, the read result is returned directly. When False, a callable
        taking no arguments is returned; invoking it waits for and returns the result.
    @return The register value (now=True) or a result callback (now=False).
    """
    assert isinstance(addr, int)
    # Access number is used only to correlate trace messages with errors.
    num = self.next_access_number
    did_lock = False
    try:
        # Select the AP. If this returns True, the DP lock is held and must be
        # released exactly once, when the result is consumed (see finally below).
        did_lock = self._select_ap(addr)
        # Always issue the probe read deferred; the callback is invoked immediately
        # below when now=True.
        result_cb = self.probe.read_ap(addr, now=False)
    except exceptions.TargetError as error:
        self._handle_error(error, num)
        if did_lock:
            self.unlock()
        raise
    except Exception:
        # Non-target errors: still release the lock before propagating.
        if did_lock:
            self.unlock()
        raise
    # Read callback returned for async reads.
    def read_ap_cb() -> int:
        try:
            result = result_cb()
            TRACE.debug("read_ap:%06d %s(addr=0x%08x) -> 0x%08x", num, "" if now else "...", addr, result)
            return result
        except exceptions.TargetError as error:
            TRACE.debug("read_ap:%06d %s(addr=0x%08x) -> error (%s)", num, "" if now else "...", addr, error)
            self._handle_error(error, num)
            raise
        finally:
            # Lock taken by _select_ap is released here, once per access.
            if did_lock:
                self.unlock()
    if now:
        return read_ap_cb()
    else:
        TRACE.debug("read_ap:%06d (addr=0x%08x) -> ...", num, addr)
        return read_ap_cb
def write_ap_multiple(self, addr: int, values: Sequence[int]) -> None:
    """@brief Write several values to the same AP register, back to back.
    @param self This object.
    @param addr AP register address.
    @param values Sequence of 32-bit values to write, in order.
    """
    assert isinstance(addr, int)
    access_num = self.next_access_number
    locked = False
    try:
        # Select the AP; may take the DP lock, released in the finally clause.
        locked = self._select_ap(addr)
        TRACE.debug("write_ap_multiple:%06d (addr=0x%08x) = (%i values)", access_num, addr, len(values))
        return self.probe.write_ap_multiple(addr, values)
    except exceptions.TargetError as error:
        self._handle_error(error, access_num)
        raise
    finally:
        if locked:
            self.unlock()
@overload
def read_ap_multiple(self, addr: int, count: int = 1) -> Sequence[int]:
    ...
@overload
def read_ap_multiple(self, addr: int, count: int, now: Literal[True] = True) -> Sequence[int]:
    ...
@overload
def read_ap_multiple(self, addr: int, count: int, now: Literal[False]) -> Callable[[], Sequence[int]]:
    ...
@overload
def read_ap_multiple(self, addr: int, count: int, now: bool) -> Union[Sequence[int], Callable[[], Sequence[int]]]:
    ...
def read_ap_multiple(self, addr: int, count: int = 1, now: bool = True) \
        -> Union[Sequence[int], Callable[[], Sequence[int]]]:
    """@brief Read the same AP register multiple times.
    @param self This object.
    @param addr AP register address.
    @param count Number of repeated reads.
    @param now When True, the results are returned directly. When False, a callable
        taking no arguments is returned; invoking it waits for and returns the results.
    @return Sequence of register values (now=True) or a result callback (now=False).
    """
    assert isinstance(addr, int)
    # Access number is used only to correlate trace messages with errors.
    num = self.next_access_number
    did_lock = False
    try:
        # Select the AP. If this returns True, the DP lock is held and must be
        # released exactly once, when the result is consumed (see finally below).
        did_lock = self._select_ap(addr)
        TRACE.debug("read_ap_multiple:%06d (addr=0x%08x, count=%i)", num, addr, count)
        result_cb = self.probe.read_ap_multiple(addr, count, now=False)
    except exceptions.TargetError as error:
        self._handle_error(error, num)
        if did_lock:
            self.unlock()
        raise
    except Exception:
        # Non-target errors: still release the lock before propagating.
        if did_lock:
            self.unlock()
        raise
    # Need to wrap the deferred callback to convert exceptions.
    def read_ap_multiple_cb() -> Sequence[int]:
        try:
            return result_cb()
        except exceptions.TargetError as error:
            TRACE.debug("read_ap_multiple:%06d %s(addr=0x%08x) -> error (%s)", num, "" if now else "...", addr, error)
            self._handle_error(error, num)
            raise
        finally:
            # Lock taken by _select_ap is released here, once per access.
            if did_lock:
                self.unlock()
    if now:
        return read_ap_multiple_cb()
    else:
        return read_ap_multiple_cb
def _handle_error(self, error: Exception, num: int) -> None:
    """@brief Common recovery actions for a failed DP/AP transfer.
    @param error The exception raised by the transfer.
    @param num Access number of the failed transfer, used only for tracing.
    """
    TRACE.debug("error:%06d %s", num, error)
    # Fault responses: clear the sticky error so later transfers can proceed.
    if isinstance(error, exceptions.TransferFaultError):
        self.clear_sticky_err()
        return
    # Timeouts caused by WAIT responses: set DAPABORT to abort the transfer.
    # This may put the AP that was aborted into an unpredictable state. Should
    # consider attempting to reset debug logic.
    if isinstance(error, exceptions.TransferTimeoutError):
        self.write_reg(DP_ABORT, ABORT_DAPABORT)
def clear_sticky_err(self) -> None:
    """@brief Clear the DP's sticky error flags.
    Uses the register appropriate for the active wire protocol, and invalidates
    the cached SELECT value first.
    """
    self._invalidate_cache()
    protocol = self.probe.wire_protocol
    if protocol == DebugProbe.Protocol.SWD:
        # SWD clears sticky flags via the ABORT register.
        self.write_reg(DP_ABORT,
                ABORT_ORUNERRCLR | ABORT_WDERRCLR | ABORT_STKERRCLR | ABORT_STKCMPCLR)
    elif protocol == DebugProbe.Protocol.JTAG:
        # JTAG clears sticky flags by writing CTRL/STAT with the flag bits set.
        self.write_reg(DP_CTRL_STAT,
                CSYSPWRUPREQ | CDBGPWRUPREQ | TRNNORMAL | MASKLANE
                | CTRLSTAT_STICKYERR | CTRLSTAT_STICKYCMP | CTRLSTAT_STICKYORUN)
    else:
        assert False
class APAccessMemoryInterface(memory_interface.MemoryInterface):
    """@brief Memory interface for performing simple APACC transactions.
    This class allows the caller to generate Debug APB transactions from a DPv3. It simply
    adapts the MemoryInterface to APACC transactions.
    By default, it passes memory transaction addresses unmodified to the DP. But an instance can be
    constructed by passing an APAddress object to the constructor that offsets transaction addresses
    so they are relative to the APAddress base.
    Only 32-bit transfers are supported.
    """
    def __init__(self, dp: DebugPort, ap_address: Optional["APAddressBase"] = None) -> None:
        """@brief Constructor.
        @param self
        @param dp The DebugPort object used to perform the underlying AP accesses.
        @param ap_address Optional instance of APAddress. If provided, all memory transaction
            addresses are offset by the base address of the APAddress.
        """
        self._dp = dp
        self._ap_address = ap_address
        # Cache the base offset so each access performs a single addition.
        if ap_address is not None:
            self._offset = ap_address.address
        else:
            self._offset = 0
    @property
    def dp(self) -> DebugPort:
        # The DebugPort through which APACC transactions are issued.
        return self._dp
    @property
    def short_description(self) -> str:
        # Human-readable name for this interface, including the AP address if set.
        if self._ap_address is None:
            return "Root Component"
        else:
            return "Root Component ({})".format(self._ap_address)
    def write_memory(self, addr: int, data: int, transfer_size: int = 32) -> None:
        """@brief Write a single memory location.
        By default the transfer size is a word.
        @exception exceptions.DebugError transfer_size is not 32.
        """
        if transfer_size != 32:
            raise exceptions.DebugError("unsupported transfer size")
        return self._dp.write_ap(self._offset + addr, data)
    @overload
    def read_memory(self, addr: int, transfer_size: int) -> int:
        ...
    @overload
    def read_memory(self, addr: int, transfer_size: int, now: Literal[True] = True) -> int:
        ...
    @overload
    def read_memory(self, addr: int, transfer_size: int, now: Literal[False]) -> Callable[[], int]:
        ...
    @overload
    def read_memory(self, addr: int, transfer_size: int, now: bool) -> Union[int, Callable[[], int]]:
        ...
    def read_memory(self, addr: int, transfer_size: int = 32, now: bool = True) -> Union[int, Callable[[], int]]:
        """@brief Read a memory location.
        By default, a word will be read.
        @exception exceptions.DebugError transfer_size is not 32.
        """
        if transfer_size != 32:
            raise exceptions.DebugError("unsupported transfer size")
        return self._dp.read_ap(self._offset + addr, now)
    def write_memory_block32(self, addr: int, data: Sequence[int]) -> None:
        """@brief Write an aligned block of 32-bit words, one AP write per word."""
        addr += self._offset
        for word in data:
            self._dp.write_ap(addr, word)
            addr += 4
    def read_memory_block32(self, addr: int, size: int) -> Sequence[int]:
        """@brief Read an aligned block of 32-bit words.
        Queues all reads as deferred accesses (now=False), then invokes the result
        callbacks in order.
        """
        addr += self._offset
        result_cbs = [self._dp.read_ap(addr + i * 4, now=False) for i in range(size)]
        result = [cb() for cb in result_cbs]
        return result
| |
#!/usr/bin/env python
#coding: utf-8
__author__ = 'Toshihiro Kamiya <kamiya@mbj.nifty.com>'
__status__ = 'experimental'
import re
import sys
import asm_manip as am
import ope_manip as om
import type_formatter as tf
import clonefile_manip as cm
import gen_ngram as ng
def extract_class_from_loc_linenum(L):
    """Parse a clone-linenum location line into (class, method signature).

    Expects lines shaped like '<path>.java : <num> > <num> // <method(...)>'.
    Returns None when the line does not match that shape. An inner-class
    method ('Inner.method(...)') is folded into the class name with '$'.
    """
    loc_pattern = re.compile(r'^\s*([\w_$/.]+)[.]java\s*:\s*(\d+)\s+>\s*(\d+)\s*//\s*(.+)$')
    match = loc_pattern.match(L)
    if not match:
        return None
    claz_like = match.group(1)
    method_like = match.group(4)
    paren_pos = method_like.find("(")
    assert paren_pos >= 0
    dot_pos = method_like[:paren_pos].find('.')
    if dot_pos >= 0:
        # Inner-class prefix before the '(' — move it onto the class name.
        claz_like = claz_like + "$" + method_like[:dot_pos]
        method_like = method_like[dot_pos + 1:]
    claz, sig = tf.scan_in_javap_comment_style(claz_like + "." + method_like)
    return claz, sig
class ParseError(ValueError):
    """Raised when a clone file section or location line cannot be parsed."""
    pass
def extract_poeseq_and_ngrams(opeseq, locs, method2claz2code, ngram_size, max_call_depth=-1):
    """Find every n-gram trace whose ope sequence equals opeseq.

    opeseq: target sequence of opes to match.
    locs: optional list of location lines restricting which methods are
        scanned; when None, all methods in method2claz2code are scanned.
    method2claz2code: mapping of method -> {class -> code}.
    ngram_size: expected length of opeseq, or -1 to derive it from opeseq.
    max_call_depth: passed through to ng.gen_code_ngrams().
    Returns the matching traces with consecutive duplicates removed.
    Raises ParseError on an empty sequence, a size mismatch, or a bad loc line.
    """
    # Bug fix: the original tested 'opeseq == 0', which is never true for a
    # sequence, so the empty-sequence check silently did nothing.
    if not opeseq:
        raise ParseError("empty ope sequence")
    if ngram_size == -1:
        ngram_size = len(opeseq)
    elif len(opeseq) != ngram_size:
        raise ParseError("length of ope sequence doesn't match to --ngram-size argument")
    pat_loc_index = re.compile(r'\s*([^,]+),\s*(\d+)\s*>(\d+)')
    target_claz_methods = None
    if locs is not None:
        seen = set()
        target_claz_methods = []
        for L in locs:
            cs = extract_class_from_loc_linenum(L)
            if cs:
                claz, sig = cs
            else:
                m = pat_loc_index.match(L)
                if not m:
                    raise ParseError("invalid loc line: %s" % L)
                claz, sig = tf.scan_in_javap_comment_style(m.group(1))
            # Renamed from 'cm', which shadowed the 'clonefile_manip as cm' import.
            claz_method = (claz, sig)
            if claz_method not in seen:
                seen.add(claz_method)
                target_claz_methods.append(claz_method)
        del seen
    target_opeseq = opeseq
    traces = []
    def collect_matches(claz, method):
        # Generate all n-grams for one method body and keep those whose ope
        # sequence equals the target.
        code_ngrams = ng.gen_code_ngrams(claz, method, method2claz2code,
                ngram_size, max_call_depth=max_call_depth)
        for ngram in code_ngrams:
            if [ope for ope, _, _ in ngram] == target_opeseq:
                traces.append(ngram)
    if target_claz_methods is None:
        # No loc restriction: scan every (method, class) pair deterministically.
        for method, claz2code in sorted(method2claz2code.iteritems()):
            for claz, code in sorted(claz2code.iteritems()):
                collect_matches(claz, method)
    else:
        for claz, method in target_claz_methods:
            c2c = method2claz2code.get(method)
            assert c2c is not None
            code = c2c.get(claz)
            assert code is not None
            collect_matches(claz, method)
    # Overlapping matches produce identical consecutive traces; drop them.
    nonduplicated_traces = [trace for trace, next_trace in
            zip(traces, traces[1:] + [None]) if trace != next_trace]
    return nonduplicated_traces
def extract_clat(ngrams):
    """Return the set of locations common to all traces (common-location-among-traces).

    Each ngram is a sequence of (ope, loc, depth) triples. Returns an empty set
    when there are no traces. (Previously the empty case returned a list while
    the non-empty case returned a set; callers only take len() of the result,
    so normalizing to a set is backward compatible.)
    """
    if not ngrams:
        return set()
    common_locs = set(loc for _, loc, _ in ngrams[0])
    for ngram in ngrams[1:]:
        # Keep only locations that also appear in this trace.
        common_locs.intersection_update(loc for _, loc, _ in ngram)
    return common_locs
def extract_max_depth(ngrams):
    """Return the largest call depth appearing in any trace, or 0 when empty.

    Each ngram is a sequence of (ope, loc, depth) triples.
    """
    if not ngrams:
        return 0
    all_depths = (depth for ngram in ngrams for _, _, depth in ngram)
    return max(all_depths)
def gen_argpsr():
    """Build the ArgumentParser for the loc-to-trace expansion tool.

    At least one of -t, -c, or -d must be chosen by the caller (enforced in
    main(), not here).
    """
    # Imported lazily so importing this module doesn't require argparse/_version_data.
    from argparse import ArgumentParser
    from _version_data import VERSION
    psr = ArgumentParser(description="Expand clone's each location to a trace")
    psr.add_argument('-a', '--asm-directory', action='store')
    psr.add_argument('clone_file', action='store',
            help="part of clone-index or clone-linenum file, which includes options and a clone to be expanded. specify '-' to read from stdin")
    psr.add_argument('-t', '--loc-to-trace', action='store_true',
            help='expand each clone location to trace')
    psr.add_argument('-c', '--add-metric-clat', action='store_true',
            help='add common-location-among-traces metric to each clone')
    psr.add_argument('-d', '--add-metric-max-depth', action='store_true',
            help='add max-depth metric to each clone')
    psr.add_argument('--version', action='version', version='%(prog)s ' + VERSION)
    return psr
def main(argv):
    """Command-line entry point (Python 2).

    Reads a clone file (OPTIONS section first), rebuilds the
    method -> class -> code tables from the disassembly directory using the same
    filter options the clone file was generated with, then for each clone
    section re-derives the matching traces and writes the requested output
    (traces and/or metrics) to stdout.
    """
    psr = gen_argpsr()
    args = psr.parse_args(argv[1:])
    asmdir = args.asm_directory
    clonefile = args.clone_file
    add_metric_clat = args.add_metric_clat
    add_metric_max_depth = args.add_metric_max_depth
    loc_to_trace = args.loc_to_trace
    if not (add_metric_clat or loc_to_trace):
        sys.exit("no action specfield. specify -t, -c, -d or mix of them")
    # Wrap the clone-file reader in a generator so the OPTIONS scan below and
    # the per-clone loop later share one pass over the file.
    def itfunc():
        for sec, data in cm.read_clone_file_iter(clonefile):
            yield sec, data
    clonefile_iter = itfunc()
    # The first OPTIONS section carries the generation parameters.
    for sec, data in clonefile_iter:
        if sec == cm.OPTIONS:
            clone_data_args = data
            break  # for sec
    else:
        sys.exit('clonefile missing option section')
    ngram_size = clone_data_args.ngram_size
    max_branch = clone_data_args.max_branch
    max_call_depth = clone_data_args.max_call_depth
    max_method_definition = clone_data_args.max_method_definition
    exclude = clone_data_args.exclude
    sig2code = {}
    #sig2exceptiontable = {}
    sig2linenumbertable = {}
    # Load method bodies (and line-number tables) from the disassembly directory.
    for asm_file, sig, code, etbl, ltbl in am.get_method_code_and_tables_iter(asmdir):
        sig2code[sig] = tuple(code)
        #sig2exceptiontable[sig] = etbl
        sig2linenumbertable[sig] = ltbl
    # Convert each method body to an ope list, then free the raw code table.
    sig2oplist = {}
    for sig, method_body in sorted(sig2code.iteritems()):
        ol = om.body_text_to_ope_list(method_body, sig)
        sig2oplist[sig] = ol
    del sig2code
    method2claz2code = ng.make_method2claz2code(sig2oplist)
    del sig2oplist
    # Apply the same filtering used when the clone file was generated.
    if exclude:
        ng.exclude_clazs(method2claz2code, exclude)
    ng.remove_empty_method_bodies(method2claz2code)
    if max_branch >= 0:
        ng.remove_too_many_branch_method_bodies(method2claz2code, max_branch)
    if max_method_definition > 0:
        ng.remove_too_many_definition_methods(method2claz2code, max_method_definition)
    if ngram_size < 0:
        clone_data_args.delete("ngram-size")
    # Echo the options section, followed by a blank separator line.
    for L in clone_data_args.format():
        sys.stdout.write("%s\n" % L)
    sys.stdout.write('\n')  # separator
    try:
        # Process the remaining clone sections from the shared iterator.
        for sec, data in clonefile_iter:
            if sec == cm.OPESEQ_LOCS:
                opeseq, locs = data
            elif sec == cm.OPESEQ_TRACES:
                opeseq, traces = data
                # First element of each trace is its location line.
                locs = [l[0] for l in traces]
            elif sec == cm.OPESEQ_SINGLE:
                opeseq, _ = data
                locs = None
            else:
                continue  # for sec
            ngrams = extract_poeseq_and_ngrams(opeseq, locs, method2claz2code,
                    ngram_size, max_call_depth=max_call_depth)
            # Python 2: output is written as UTF-8 encoded byte strings.
            if add_metric_clat:
                clat = extract_clat(ngrams)
                sys.stdout.write(('metric-clat= %d\n' % len(clat)).encode('utf-8'))
            if add_metric_max_depth:
                max_depth = extract_max_depth(ngrams)
                sys.stdout.write(('metric-max-depth= %d\n' % max_depth).encode('utf-8'))
            sys.stdout.write('ope:\n')
            for L in opeseq:
                sys.stdout.write((' %s\n' % L).encode('utf-8'))
            if loc_to_trace:
                for ngram in ngrams:
                    sys.stdout.write('trace:\n')
                    for _, loc, depth in ngram:
                        sys.stdout.write((' %s >%d\n' % (loc, depth)).encode('utf-8'))
            else:
                sys.stdout.write('loc:\n')
                for loc in locs:
                    sys.stdout.write((' %s\n' % loc).encode('utf-8'))
            sys.stdout.write('\n')
    except cm.CloneFileSyntaxError as e:
        sys.exit(unicode(e).encode('utf-8'))
    except ParseError as e:
        sys.exit(unicode(e).encode('utf-8'))
| |
__author__ = 'andreasveit'
__version__ = '1.3'
# Interface for evaluating with the COCO-Text dataset.
# COCO-Text is a large dataset designed for text detection and recognition.
# This is a Python API that assists in evaluating text detection and recognition results
# on COCO-Text. The format of the COCO-Text annotations is described on
# the project website http://vision.cornell.edu/se3/coco-text/. In addition to this evaluation API, please download
# the COCO-Text tool API, both the COCO images and annotations.
# This dataset is based on Microsoft COCO. Please visit http://mscoco.org/
# for more information on COCO, including for the image data, object annotations
# and caption annotations.
# The following functions are defined:
# getDetections - Compute TP, FN and FP
# evaluateAttribute - Evaluates accuracy for classifying text attributes
# evaluateTranscription - Evaluates accuracy of transcriptions
# area, intersect, iou_score, decode, inter - small helper functions
# printDetailedResults - Prints detailed results as reported in COCO-Text paper
# COCO-Text Evaluation Toolbox. Version 1.3
# Data, Data API and paper available at: http://vision.cornell.edu/se3/coco-text/
# Code written by Andreas Veit, 2016.
# Licensed under the Simplified BSD License [see bsd.txt]
import editdistance
import copy
import re
# Compute detections
def getDetections(groundtruth, evaluation, imgIds = None, annIds = [], detection_threshold = 0.5):
    """Compute true positives, false negatives and false positives.

    A ground-truth box and an evaluation box match iff their
    intersection-over-union score is >= detection_threshold; each ground-truth
    box is greedily paired with the highest-IoU still-unmatched evaluation box.

    Params
    ------
    groundtruth, evaluation : annotation objects with .anns and .imgToAnns
        dicts (groundtruth additionally provides .val for the default set).
    imgIds : images to evaluate. Defaults to the validation set, or — when
        that is empty — the images annotated in both inputs.
    annIds : unused; retained for interface compatibility.
    Returns a dict with 'true_positives' ([{gt_id, eval_id}]),
    'false_negatives' ([{gt_id}]) and 'false_positives' ([{eval_id}]).
    """
    # Results are lists of dicts {gt_id: xxx, eval_id: yyy}.
    detectRes = {'true_positives': [], 'false_negatives': [], 'false_positives': []}
    # The default is set to evaluate on the validation set.
    # (Fixed: original used '== None'; identity comparison is the correct test.)
    if imgIds is None:
        imgIds = groundtruth.val
    imgIds = imgIds if len(imgIds) > 0 else inter(groundtruth.imgToAnns.keys(), evaluation.imgToAnns.keys())
    for cocoid in imgIds:
        gt_bboxes = groundtruth.imgToAnns[cocoid] if cocoid in groundtruth.imgToAnns else []
        # Copy so matched boxes can be removed without mutating the input.
        eval_bboxes = copy.copy(evaluation.imgToAnns[cocoid]) if cocoid in evaluation.imgToAnns else []
        for gt_box_id in gt_bboxes:
            gt_box = groundtruth.anns[gt_box_id]['bbox']
            max_iou = 0.0
            match = None
            for eval_box_id in eval_bboxes:
                eval_box = evaluation.anns[eval_box_id]['bbox']
                iou = iou_score(gt_box, eval_box)
                if iou >= detection_threshold and iou > max_iou:
                    max_iou = iou
                    match = eval_box_id
            if match is not None:
                detectRes['true_positives'].append({'gt_id': gt_box_id, 'eval_id': match})
                # A detection can match at most one ground-truth box.
                eval_bboxes.remove(match)
            else:
                detectRes['false_negatives'].append({'gt_id': gt_box_id})
        # Any evaluation boxes left unmatched in this image are false positives.
        if len(eval_bboxes) > 0:
            detectRes['false_positives'].extend([{'eval_id': eval_box_id} for eval_box_id in eval_bboxes])
    return detectRes
def evaluateAttribute(groundtruth, evaluation, resultDict, attributes):
    '''
    Evaluate classification accuracy for text attributes on matched boxes.
    Input:
    groundtruth, evaluation: annotation objects with an .anns dict
    resultDict: dict, output from getDetections (uses 'true_positives')
    attributes : list of strings, attribute categories (not 'utf8_string')
    -----
    Output:
    {attribute: {'attribute', 'correct', 'incorrect', 'accuracy'}}
    '''
    assert 'utf8_string' not in attributes, 'there is a separate function for utf8_string'
    res = {}
    for attribute in attributes:
        correct = []
        incorrect = []
        for detection in resultDict['true_positives']:
            gt_val = groundtruth.anns[detection['gt_id']][attribute]
            eval_val = evaluation.anns[detection['eval_id']][attribute]
            if gt_val == eval_val:
                correct.append(detection)
            elif gt_val != 'na':
                # 'na' ground truth means the attribute is unannotated; don't penalize.
                incorrect.append(detection)
        total = len(correct) + len(incorrect)
        # Robustness fix: guard the division so an attribute with no scorable
        # detections yields accuracy 0.0 instead of raising ZeroDivisionError.
        accuracy = len(correct) * 1.0 / total if total > 0 else 0.0
        res[attribute] = {'attribute': attribute, 'correct': len(correct),
                'incorrect': len(incorrect), 'accuracy': accuracy}
    return res
def evaluateEndToEnd(groundtruth, evaluation, imgIds = None, annIds = [], detection_threshold = 0.5):
    """
    End-to-end evaluation: localization plus transcription.
    A box is a match iff the intersection of union score is >= detection_threshold;
    among candidates above the threshold, the search ends early at a box whose
    decoded transcription matches the ground truth exactly.
    Params
    ------
    Input dicts have the format of annotation dictionaries.
    Returns {'exact': {...}, 'distance1': {...}}, each with correct/incorrect/
    ignore detection lists and an accuracy value (exact match and edit
    distance <= 1 respectively).
    """
    #parameters
    detectRes = {}
    # results are lists of dicts {gt_id: xxx, eval_id: yyy}
    detectRes['true_positives'] = []
    detectRes['false_negatives'] = []
    detectRes['false_positives'] = []
    # the default is set to evaluate on the validation set
    # NOTE(review): '== None' should be 'is None'; works for the None default
    # but relies on __eq__ for other argument values.
    if imgIds == None:
        imgIds = groundtruth.val
    imgIds = imgIds if len(imgIds)>0 else inter(groundtruth.imgToAnns.keys(), evaluation.imgToAnns.keys())
    for cocoid in imgIds:
        gt_bboxes = groundtruth.imgToAnns[cocoid] if cocoid in groundtruth.imgToAnns else []
        # Copy so matched boxes can be removed as they are claimed.
        eval_bboxes = copy.copy(evaluation.imgToAnns[cocoid]) if cocoid in evaluation.imgToAnns else []
        for gt_box_id in gt_bboxes:
            gt_box = groundtruth.anns[gt_box_id]['bbox']
            # Ground-truth boxes without a transcription are skipped entirely.
            if 'utf8_string' not in groundtruth.anns[gt_box_id]:
                continue
            gt_val = decode(groundtruth.anns[gt_box_id]['utf8_string'])
            max_iou = 0.0
            match = None
            for eval_box_id in eval_bboxes:
                eval_box = evaluation.anns[eval_box_id]['bbox']
                iou = iou_score(gt_box,eval_box)
                if iou >=detection_threshold and iou > max_iou:
                    max_iou = iou
                    match = eval_box_id
                    if 'utf8_string' in evaluation.anns[eval_box_id]:
                        eval_val = decode(evaluation.anns[eval_box_id]['utf8_string'])
                        # Stop at the first candidate whose transcription matches exactly.
                        if editdistance.eval(gt_val, eval_val)==0:
                            break
            if match is not None:
                detectRes['true_positives'].append({'gt_id': gt_box_id, 'eval_id': match})
                eval_bboxes.remove(match)
            else:
                detectRes['false_negatives'].append({'gt_id': gt_box_id})
        if len(eval_bboxes)>0:
            detectRes['false_positives'].extend([{'eval_id': eval_box_id} for eval_box_id in eval_bboxes])
    resultDict = detectRes
    res = {}
    # Score transcriptions of the matched boxes at two tolerance settings.
    for setting, threshold in zip(['exact', 'distance1'],[0,1]):
        correct = []
        incorrect = []
        ignore = []
        for detection in resultDict['true_positives']:
            if 'utf8_string' not in groundtruth.anns[detection['gt_id']]:
                ignore.append(detection)
                continue
            gt_val = decode(groundtruth.anns[detection['gt_id']]['utf8_string'])
            # Very short ground-truth strings (under 3 characters) are not scored.
            if len(gt_val)<3:
                ignore.append(detection)
                continue
            if 'utf8_string' not in evaluation.anns[detection['eval_id']]:
                incorrect.append(detection)
                continue
            eval_val = decode(evaluation.anns[detection['eval_id']]['utf8_string'])
            # Annotate the detection dicts with the compared strings (mutates input).
            detection['gt_string'] = gt_val
            detection['eval_string'] = eval_val
            if editdistance.eval(gt_val, eval_val)<=threshold:
                correct.append(detection)
            else:
                incorrect.append(detection)
        # NOTE(review): raises ZeroDivisionError when there are no scorable detections.
        res[setting] = {'setting': setting, 'correct':correct, 'incorrect':incorrect, 'ignore':ignore, 'accuracy':len(correct)*1.0/len(correct+incorrect)}
    return res
def area(bbox):
    """Area of a [left, top, width, height] box, as a float."""
    width, height = bbox[2], bbox[3]
    return width * 1.0 * height
def intersect(bboxA, bboxB):
    """Return the intersection of two [left, top, width, height] boxes,
    or None when they do not overlap (touching edges do not count).
    """
    left = max(bboxA[0], bboxB[0])
    top = max(bboxA[1], bboxB[1])
    right = min(bboxA[0] + bboxA[2], bboxB[0] + bboxB[2])
    bottom = min(bboxA[1] + bboxA[3], bboxB[1] + bboxB[3])
    # Guard-style inversion of the original overlap test.
    if right <= left or bottom <= top:
        return None
    return [left, top, right - left, bottom - top]
def iou_score(bboxA, bboxB):
    """Returns the Intersection-over-Union score, defined as the area of
    the intersection divided by the area of the union of the two bounding
    boxes. This measure is symmetric. Returns 0 when the union area is not
    positive.
    """
    # Compute the intersection once (the original called intersect() twice).
    overlap = intersect(bboxA, bboxB)
    intersection_area = area(overlap) if overlap else 0
    union_area = area(bboxA) + area(bboxB) - intersection_area
    if union_area > 0:
        return float(intersection_area) / float(union_area)
    else:
        return 0
def decode(trans):
    """Normalize a transcription string for comparison.

    Drops non-ASCII characters, replaces newlines with spaces, removes any
    character outside the allowed set, and lowercases the result.
    """
    # Bug fix: on Python 3, str.encode() returns bytes, so the subsequent
    # str-argument replace()/re.sub() calls raised TypeError. Decoding back to
    # str works on both Python 2 and 3 and preserves the ASCII-only filtering.
    trans = trans.encode("ascii", 'ignore').decode("ascii")
    trans = trans.replace('\n', ' ')
    trans2 = re.sub('[^a-zA-Z0-9!?@\_\-\+\*\:\&\/ \.]', '', trans)
    return trans2.lower()
def inter(list1, list2):
    """Return the elements common to both iterables, as a list (order unspecified)."""
    common = set(list1) & set(list2)
    return list(common)
def printDetailedResults(c_text, detection_results, transcription_results, name):
    """Print the detailed result table as reported in the COCO-Text paper.

    c_text: COCO-Text API object providing getAnnIds() with category filters.
    detection_results: output of getDetections().
    transcription_results: output of evaluateEndToEnd() ('exact'/'distance1').
    name: label printed as the header of the report.
    Prints per-category recall, overall precision/recall/f-score, transcription
    accuracy, and end-to-end scores; returns None.
    """
    print(name)
    # Detected coco-text ann ids, split by detection outcome.
    found = [x['gt_id'] for x in detection_results['true_positives']]
    n_found = [x['gt_id'] for x in detection_results['false_negatives']]
    fp = [x['eval_id'] for x in detection_results['false_positives']]
    # Annotation id sets per legibility / language / class category.
    leg_eng_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('language','english'),('class','machine printed')], areaRng=[])
    leg_eng_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('language','english'),('class','handwritten')], areaRng=[])
    leg_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','machine printed')], areaRng=[])
    ileg_mp = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','machine printed')], areaRng=[])
    leg_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','handwritten')], areaRng=[])
    ileg_hw = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','handwritten')], areaRng=[])
    leg_ot = c_text.getAnnIds(imgIds=[], catIds=[('legibility','legible'),('class','others')], areaRng=[])
    ileg_ot = c_text.getAnnIds(imgIds=[], catIds=[('legibility','illegible'),('class','others')], areaRng=[])
    # Detection: per-category recall = found-in-category / annotated-in-category,
    # each guarded against an empty category.
    print()
    print("Detection")
    print("Recall")
    if (len(inter(found+n_found, leg_mp)))>0:
        lm = "%.2f"%(100*len(inter(found, leg_mp))*1.0/(len(inter(found+n_found, leg_mp))))
    else:
        lm = 0
    print('legible & machine printed: ', lm)
    if (len(inter(found+n_found, leg_hw)))>0:
        lh = "%.2f"%(100*len(inter(found, leg_hw))*1.0/(len(inter(found+n_found, leg_hw))))
    else:
        lh = 0
    print('legible & handwritten: ', lh)
    if (len(inter(found+n_found, leg_ot)))>0:
        lo = "%.2f"%(100*len(inter(found, leg_ot))*1.0/(len(inter(found+n_found, leg_ot))))
    else:
        lo = 0
    # print 'legible & others: ', lo
    if (len(inter(found+n_found, leg_mp+leg_hw)))>0:
        lto = "%.2f"%(100*len(inter(found, leg_mp+leg_hw))*1.0/(len(inter(found+n_found, leg_mp+leg_hw))))
    else:
        lto = 0
    print('legible overall: ', lto)
    if (len(inter(found+n_found, ileg_mp)))>0:
        ilm = "%.2f"%(100*len(inter(found, ileg_mp))*1.0/(len(inter(found+n_found, ileg_mp))))
    else:
        ilm = 0
    print('illegible & machine printed: ', ilm)
    if (len(inter(found+n_found, ileg_hw)))>0:
        ilh = "%.2f"%(100*len(inter(found, ileg_hw))*1.0/(len(inter(found+n_found, ileg_hw))))
    else:
        ilh = 0
    print('illegible & handwritten: ', ilh)
    if (len(inter(found+n_found, ileg_ot)))>0:
        ilo = "%.2f"%(100*len(inter(found, ileg_ot))*1.0/(len(inter(found+n_found, ileg_ot))))
    else:
        ilo = 0
    # print 'illegible & others: ', ilo
    if (len(inter(found+n_found, ileg_mp+ileg_hw)))>0:
        ilto = "%.2f"%(100*len(inter(found, ileg_mp+ileg_hw))*1.0/(len(inter(found+n_found, ileg_mp+ileg_hw))))
    else:
        ilto = 0
    print('illegible overall: ', ilto)
    # Total recall is restricted to the machine-printed + handwritten categories.
    #total = "%.1f"%(100*len(found)*1.0/(len(found)+len(n_found)))
    t_recall = 100*len(found)*1.0/(len(inter(found+n_found, leg_mp+leg_hw+ileg_mp+ileg_hw)))
    total = "%.1f"%(t_recall)
    print('total recall: ', total)
    print("Precision")
    t_precision = 100*len(found)*1.0/(len(found+fp))
    precision = "%.2f"%(t_precision)
    print('total precision: ', precision)
    print("f-score")
    f_score = "%.2f"%(2 * t_recall * t_precision / (t_recall + t_precision)) if (t_recall + t_precision)>0 else 0
    print('f-score localization: ', f_score)
    print()
    print("Transcription")
    transAcc = "%.2f"%(100*transcription_results['exact']['accuracy'])
    transAcc1 = "%.2f"%(100*transcription_results['distance1']['accuracy'])
    print('accuracy for exact matches: ', transAcc)
    print('accuracy for matches with edit distance<=1: ', transAcc1)
    print()
    print('End-to-end')
    # End-to-end counts: detections of legible English text, reweighted by
    # the exact-transcription accuracy.
    TP_new = len(inter(found, leg_eng_mp+leg_eng_hw)) * transcription_results['exact']['accuracy']
    FP_new = len(fp) + len(inter(found, leg_eng_mp+leg_eng_hw))*(1-transcription_results['exact']['accuracy'])
    FN_new = len(inter(n_found, leg_eng_mp+leg_eng_hw)) + len(inter(found, leg_eng_mp+leg_eng_hw))*(1-transcription_results['exact']['accuracy'])
    t_recall_new = 100 * TP_new / (TP_new + FN_new)
    t_precision_new = 100 * TP_new / (TP_new + FP_new) if (TP_new + FP_new)>0 else 0
    fscore = "%.2f"%(2 * t_recall_new * t_precision_new / (t_recall_new + t_precision_new)) if (t_recall_new + t_precision_new)>0 else 0
    recall_new = "%.2f"%(t_recall_new)
    precision_new = "%.2f"%(t_precision_new)
    print('recall: ', recall_new, )
    print('precision: ', precision_new)
    print('End-to-end f-score: ', fscore)
    print()
    # Summary row matching the table layout in the COCO-Text paper.
    #print lm, ' & ', lh, ' & ', lto, ' & ', ilm, ' & ', ilh, ' & ', ilto, '&', total, ' & ', precision, ' & ', transAcc, ' & ', transAcc1, ' & ', fscore
    print(lm, ' & ', lh, ' & ', ilm, ' & ', ilh, '&', total, ' & ', precision, ' & ', f_score, ' & ', transAcc, ' & ', recall_new, ' & ', precision_new, ' & ', fscore)
    print()
| |
"""
MI logging can be configured using a combination of two of four files.
there is first a "base" configuration, and then a "local" set of overrides.
the base configuration is from the file specified in the environment variable MI_LOGGING_CONFIG
or res/config/mi-logging.yml (ie, users can set MI-specific configuration for drivers run from pycc container)
or config/logging.yml from within the MI egg (default to use if no mi-logging.yml was created)
then the local override may be res/config/mi-logging.local.yml (for overrides specific to MI),
or if this is not found, then res/config/logging.local.yml,
or if this is not found then no overrides.
The get_logger function is obsolete but kept to simplify transition to the ooi.logging code.
USAGE:
to configure logging from the standard MI configuration files:
from mi.core.log import LoggerManager
LoggerManager()
to create a logger automatically scoped with the calling package and ready to use:
from ooi.logging import log # no longer need get_logger at all
"""
import inspect
import logging
import os
import sys
import yaml
import pkg_resources
from types import FunctionType
from functools import wraps
from mi.core.common import Singleton
from ooi.logging import config, log
LOGGING_CONFIG_ENVIRONMENT_VARIABLE="MI_LOGGING_CONFIG"
LOGGING_PRIMARY_FROM_FILE='res/config/mi-logging.yml'
LOGGING_PRIMARY_FROM_EGG='mi-logging.yml'
LOGGING_MI_OVERRIDE='res/config/mi-logging.local.yml'
LOGGING_CONTAINER_OVERRIDE='res/config/logging.local.yml'
"""Basic pyon logging (with or without container)
NOTE: the functionality of this module has moved to ooi.logging.config.
currently this module is maintained for API compatibility, but is implemented using the new package.
"""
import logging
from ooi.logging import config
DEFAULT_LOGGING_PATHS = ['res/config/logging.yml', 'res/config/logging.local.yml']
logging_was_configured = False
def configure_logging(logging_conf_paths, logging_config_override=None):
    """
    Public call to configure and initialize logging.
    @param logging_conf_paths List of paths to logging config YML files (in read order)
    @param logging_config_override Dict with config entries overriding files read
    """
    global logging_was_configured
    logging_was_configured = True

    # best-effort: a bad or missing file only emits a warning; the remaining
    # configuration files are still applied
    for path in logging_conf_paths:
        try:
            config.add_configuration(path)
        except Exception as e:
            print('WARNING: could not load logging configuration file %s: %s' % (path, e))
    if logging_config_override:
        try:
            config.add_configuration(logging_config_override)
        except Exception as e:
            # fixed: the format string used '%e' (a float conversion), which
            # raised TypeError on the exception object instead of warning
            print('WARNING: failed to apply logging override %r: %s' % (logging_config_override, e))
    # direct warnings mechanism to loggers
    logging.captureWarnings(True)
def is_logging_configured():
    """Report whether logging was already configured in this container.

    True either when configure_logging() has run here, or when the shared
    ooi.logging configuration is non-empty (its truthy value is returned
    directly in that case, matching the historical contract).
    """
    # module-level flag set by configure_logging(); reading it needs no
    # `global` declaration
    if logging_was_configured:
        return True
    return config.get_configuration()
class LoggerManager(Singleton):
    """
    Logger Manager. Provides an interface to configure logging at runtime.
    """
    def init(self, debug=False):
        """Initialize logging for MI. Because this is a singleton it will only be initialized once.

        Base configuration resolution order:
          1. the file named by $MI_LOGGING_CONFIG, if it exists
          2. res/config/mi-logging.yml
          3. mi-logging.yml packaged inside the 'mi' egg
        A local override is then layered on from res/config/mi-logging.local.yml,
        or failing that res/config/logging.local.yml.
        """
        path = os.environ[LOGGING_CONFIG_ENVIRONMENT_VARIABLE] if LOGGING_CONFIG_ENVIRONMENT_VARIABLE in os.environ else None
        haveenv = path and os.path.isfile(path)
        if path and not haveenv:
            # fixed: was 'print >> os.stderr' -- the os module has no stderr
            # attribute, so this raised AttributeError exactly when the
            # warning was supposed to be shown
            print >> sys.stderr, 'WARNING: %s was set but %s was not found (using default configuration files instead)' % (LOGGING_CONFIG_ENVIRONMENT_VARIABLE, path)
        if path and haveenv:
            config.replace_configuration(path)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from ' + path
        elif os.path.isfile(LOGGING_PRIMARY_FROM_FILE):
            config.replace_configuration(LOGGING_PRIMARY_FROM_FILE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' configured logging from ' + LOGGING_PRIMARY_FROM_FILE
        else:
            # fall back to the default config shipped inside the 'mi' egg
            logconfig = pkg_resources.resource_string('mi', LOGGING_PRIMARY_FROM_EGG)
            # NOTE(review): yaml.load on a packaged resource; prefer
            # yaml.safe_load if the egg contents are ever not fully trusted
            parsed = yaml.load(logconfig)
            config.replace_configuration(parsed)
            if debug:
                # fixed: message previously named LOGGING_PRIMARY_FROM_FILE,
                # which is not the file loaded on this branch
                print >> sys.stderr, str(os.getpid()) + ' configured logging from egg resource ' + LOGGING_PRIMARY_FROM_EGG
        if os.path.isfile(LOGGING_MI_OVERRIDE):
            config.add_configuration(LOGGING_MI_OVERRIDE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' supplemented logging from ' + LOGGING_MI_OVERRIDE
        elif os.path.isfile(LOGGING_CONTAINER_OVERRIDE):
            config.add_configuration(LOGGING_CONTAINER_OVERRIDE)
            if debug:
                print >> sys.stderr, str(os.getpid()) + ' supplemented logging from ' + LOGGING_CONTAINER_OVERRIDE
class LoggingMetaClass(type):
    """Metaclass that wraps every plain method of a class with entry/exit
    logging at the level given by ``_log_level``."""
    _log_level = 'trace'

    def __new__(mcs, class_name, bases, class_dict):
        marker = '__wrapped'
        decorate = log_method(class_name=class_name, log_level=mcs._log_level)
        # names already wrapped (e.g. by a parent metaclass pass) are skipped
        already_wrapped = class_dict.get(marker, set())
        updated_dict = {}
        for name, value in class_dict.items():
            if name not in already_wrapped and type(value) == FunctionType:
                value = decorate(value)
                already_wrapped.add(name)
            updated_dict[name] = value
        # record what was wrapped so a re-application stays idempotent
        updated_dict[marker] = already_wrapped
        return type.__new__(mcs, class_name, bases, updated_dict)
class DebugLoggingMetaClass(LoggingMetaClass):
    """LoggingMetaClass variant that logs method entry/exit at debug level."""
    _log_level = 'debug'
class InfoLoggingMetaClass(DebugLoggingMetaClass):
    """LoggingMetaClass variant that logs method entry/exit at info level."""
    _log_level = 'info'
class WarnLoggingMetaClass(InfoLoggingMetaClass):
    """LoggingMetaClass variant that logs method entry/exit at warn level."""
    _log_level = 'warn'
class ErrorLoggingMetaClass(WarnLoggingMetaClass):
    """LoggingMetaClass variant that logs method entry/exit at error level."""
    _log_level = 'error'
def get_logging_metaclass(log_level='trace'):
    """Return the method-logging metaclass for *log_level*.

    Unknown level names (and 'trace') map to the base LoggingMetaClass.
    """
    if log_level == 'debug':
        return DebugLoggingMetaClass
    if log_level == 'info':
        return InfoLoggingMetaClass
    if log_level == 'warn':
        return WarnLoggingMetaClass
    if log_level == 'error':
        return ErrorLoggingMetaClass
    return LoggingMetaClass
def log_method(class_name=None, log_level='trace'):
    """Build a decorator that logs function entry (args) and exit (return).

    The logger is named after the first module on the call stack outside
    mi.core.log, so records appear to originate from the caller's module.
    When *class_name* is given, logged names are qualified as
    ``ClassName.method``.
    """
    module_name = "UNKNOWN_MODULE_NAME"
    # walk up the stack until we leave mi.core.log
    for frame_info in inspect.stack():
        mod = inspect.getmodule(frame_info[0])
        if mod is None:
            continue
        module_name = mod.__name__
        if module_name != 'mi.core.log':
            break
    logger = logging.getLogger(module_name)

    def decorator(func):
        if class_name is None:
            qualified = func.__name__
        else:
            qualified = '%s.%s' % (class_name, func.__name__)

        @wraps(func)
        def wrapped(*args, **kwargs):
            emit = getattr(logger, log_level)
            emit('entered %s | args: %r | kwargs: %r', qualified, args, kwargs)
            result = func(*args, **kwargs)
            emit('exiting %s | returning %r', qualified, result)
            return result
        return wrapped
    return decorator
def get_logger():
    """Obsolete shim kept for API compatibility: returns the shared
    ooi.logging ``log`` object (callers should import it directly)."""
    return log
| |
#!/usr/bin/env python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import os
import yaml
from fabric.api import task, run, sudo, put, task, \
parallel, execute, env
from cuisine import file_exists, file_write, file_append, \
text_strip_margin, mode_sudo, file_update, ssh_keygen, \
ssh_authorize, dir_ensure
@task
def status():
    """ Check the status of the Hadoop daemons on every configured host. """
    # Read configuration file (fabfile/ dir -> sibling ymlfile/hadoop.yml)
    cfg_dir = os.path.dirname(__file__).replace('fabfile', 'ymlfile')
    cfg_file = cfg_dir + '/hadoop.yml'
    # fixed: use a context manager so the handle is closed even if the
    # YAML parse raises
    with open(cfg_file) as f:
        cfg = yaml.safe_load(f)
    # Set ssh user and have ssh not check .ssh/known_hosts
    env.user = cfg['admin_user']
    env.disable_known_hosts = True
    # Collect every host's address
    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])
    # Execute check_status on the hosts.
    execute(check_status, hosts=hosts)
def check_status():
    # `jps` lists the running JVM processes; run it as the hdfs user so the
    # Hadoop daemons (NameNode, DataNode, ...) show up
    sudo('jps', user='hdfs')
@task
def install():
    """ Install Hadoop Cluster """
    # Load the cluster description (fabfile/ dir -> sibling ymlfile/hadoop.yml)
    cfg_dir = os.path.dirname(__file__).replace('fabfile', 'ymlfile')
    cfg_file = cfg_dir + '/hadoop.yml'
    # fixed: context manager closes the file even if yaml parsing raises
    with open(cfg_file) as f:
        cfg = yaml.safe_load(f)
    env.user = cfg['admin_user']
    env.disable_known_hosts = True
    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])
    # Install packages everywhere, then push cluster-wide configuration
    execute(pkg_install, hosts=hosts)
    execute(update_etc_hosts, cfg_hosts=cfg['hosts'], hosts=hosts)
    execute(update_roles, cfg_hosts=cfg['hosts'], hosts=hosts)
    sites = ['core-site',
             'hdfs-site',
             'mapred-site']
    for site in sites:
        execute(update_config, cfg_name=site, cfg_list=cfg[site], hosts=hosts)
    execute(update_env_sh, hosts=hosts)
    # Generate an ssh key for hdfs on the admin node and authorize it on
    # every host so the master can reach the slaves without a password
    admin_node = cfg['admin_node']
    admin_node_ip = cfg['hosts'][admin_node]['ipaddr']
    output = execute(create_hdfs_sshkey, hosts=[admin_node_ip])
    key = output[admin_node_ip]
    execute(update_authorized_keys, key=key, hosts=hosts)
    execute(update_dir, cfg['update_dir_list'], hosts=hosts)
@parallel
def update_dir(update_dir_list):
    """Ensure each configured directory exists with its owner and mode."""
    with mode_sudo():
        # renamed loop variable: the original shadowed the builtin `dir`
        for path in update_dir_list:
            attrs = update_dir_list[path]
            dir_ensure(path, mode=attrs['mode'], owner=attrs['owner'])
@parallel
def update_authorized_keys(key):
    # Authorize the admin node's hdfs public key on every host so the
    # master can start daemons over passwordless ssh
    with mode_sudo():
        ssh_authorize(user='hdfs',key=key)
def create_hdfs_sshkey():
    # Generate (idempotently) an rsa keypair for the hdfs user on the admin
    # node and return the public key text for distribution to the slaves
    with mode_sudo():
        ssh_keygen(user='hdfs',keytype='rsa')
    key = sudo('cat /usr/lib/hadoop/.ssh/id_rsa.pub')
    return key
@parallel
def update_env_sh():
    """ Update /usr/lib/hadoop/conf/hadoop-env.sh """
    # Rewrites the commented-out JAVA_HOME line to point at the Oracle JDK
    file = '/usr/lib/hadoop/conf/hadoop-env.sh'
    with mode_sudo():
        file_update(file, _update_env_sh_like_this)
def _update_env_sh_like_this(text):
res = []
for line in text.split('\n'):
if line.strip().startswith("# export JAVA_HOME"):
res.append("export JAVA_HOME=/usr/lib/jvm/java-7-oracle")
else:
res.append(line)
return '\n'.join(res) + '\n'
@parallel
def update_config(cfg_name, cfg_list):
    """ Update xml files """
    # Build a Hadoop-style XML configuration file from the key/value pairs
    # in cfg_list and write it to /usr/lib/hadoop/conf/<cfg_name>.xml.
    # text_strip_margin removes everything up to and including the '|' margin.
    lines = []
    header = text_strip_margin(
        """
        |<?xml version="1.0"?>
        |<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
        |
        |<!-- Put site-specific property overrides in this file. -->
        |
        |<configuration>
        |""")
    lines.append(header)
    # one <property> element per configuration entry
    # NOTE(review): the local name `property` shadows the builtin
    for entry in cfg_list:
        property = text_strip_margin(
            """
            | <property>
            |     <name>{0}</name>
            |     <value>{1}</value>
            | </property>
            |""".format(entry, cfg_list[entry]))
        lines.append(property)
    footer = '</configuration>\n'
    lines.append(footer)
    file = '/usr/lib/hadoop/conf/' + cfg_name + '.xml'
    text = '\n'.join(lines) + '\n'
    file_write(file, text, sudo=True)
@parallel
def update_etc_hosts(cfg_hosts):
    """Rewrite /etc/hosts with localhost plus every cluster node."""
    entries = ["127.0.0.1 localhost"]
    # one "<ipaddr> <hostname>" line per configured node
    for name in cfg_hosts:
        entries.append("{0} {1}".format(cfg_hosts[name]['ipaddr'], name))
    contents = '\n'.join(entries) + '\n'
    file_write('/etc/hosts', contents, sudo=True)
@parallel
def update_roles(cfg_hosts):
    """ Update /usr/lib/hadoop/conf/[masters/slaves] """
    conf_dir = '/usr/lib/hadoop/conf/'
    # bucket hostnames by their configured group
    groups = {'masters': [], 'slaves': []}
    for name in cfg_hosts:
        group = cfg_hosts[name]['group']
        if group in groups:
            groups[group].append(name)
    # write one hostname per line into the masters and slaves files
    for role in ('masters', 'slaves'):
        contents = '\n'.join(groups[role]) + '\n'
        file_write(conf_dir + role, contents, sudo=True)
@parallel
def pkg_install():
    ''':hostname - Install Hadoop Master'''
    # The Oracle JDK install directory is used as a sentinel: if it exists
    # we assume a previous run already installed Java and Hadoop
    file_name = '/usr/lib/jvm/java-7-oracle'
    if not file_exists(file_name):
        sudo('add-apt-repository -y ppa:webupd8team/java')
        sudo('add-apt-repository -y ppa:hadoop-ubuntu/stable')
        sudo('apt-get update && sudo apt-get -y upgrade')
        # pre-accept the Oracle license so the installer runs unattended
        sudo('echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections')
        sudo('echo debconf shared/accepted-oracle-license-v1-1 seen true | sudo debconf-set-selections')
        sudo('apt-get -y install oracle-java7-installer')
        sudo('apt-get -y install hadoop')
    else:
        print '{0} exists. Oracle Java is already installed.'.format(file_name)
@task
@parallel
def check_online():
    """Verify every configured host is reachable and usable over ssh."""
    # fixed: the original used rstrip(r'\py|\pyc'), but rstrip strips a
    # *character set* (\, p, y, |, c), not a suffix -- it only worked by
    # accident. splitext swaps the extension safely.
    yml_path = os.path.splitext(__file__.replace('fabfile', 'ymlfile'))[0] + '.yml'
    with open(yml_path) as f:
        cfg = yaml.safe_load(f)
    env.user = cfg['admin_user']
    env.disable_known_hosts = True
    hosts = []
    for host in cfg['hosts']:
        hosts.append(cfg['hosts'][host]['ipaddr'])
    execute(hello, hosts=hosts)
@parallel
def hello():
    # Cheap liveness probe: succeeds only if ssh login and remote command
    # execution both work on the host
    run('hostname && id && echo hello')
| |
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# author: Reza Hosseini
""" Functions to find first entry points in time-stamped data
This includes functions to find previous event/usage (entry point)
before given conditions hold: FindPrev
Also it finds the entry point to the next time the condition holds
This is useful if we want to compare the very first entry point to
the later entry point
"""
## finds the previous event given in "usageCol"
# previous to the first time the conditions given by
# condCols, condValues hold
# it will only accept an event as a previous event
# if it is not separated by more than the time gap
# if the conditions do not hold, or the gap is too big,
# it will return None
def FindPrev0(df,
              condCols,
              condValues,
              usageCol='cond',
              userCol='user_id',
              timeCol='ts',
              timeGap=10 * 60):
  """Find the event immediately preceding the first row matching the condition.

  Sorts df by timeCol, locates the first row where each condCols[i] equals
  condValues[i] (None values mean "no constraint" on that column), and returns
  a dict with:
    'prev': the usageCol value of the row just before the match, or 'BLANK'
            when that row is more than timeGap seconds earlier
    'df':   the two-row frame (previous row, matching row)
    'ts':   the timestamp of the matching row
  Returns None when no row matches or the match is the very first row.
  NOTE: userCol is accepted but unused here; filtering to one user is done
  by the caller (FindPrev).
  """
  df = df.reset_index(drop=True)
  df = df.sort_values([timeCol])
  df = df.reset_index(drop=True)
  # build {column: [value]} for the non-None conditions
  condDict = {}
  for i in range(len(condCols)):
    col = condCols[i]
    if condValues[i] is not None:
      condDict[col] = [condValues[i]]
  dfCond = df.copy()
  if len(condDict) > 0:
    # assumes SubDf_withCond keeps the original (sorted) index -- needed to
    # locate the matching row's position below; TODO confirm
    dfCond = SubDf_withCond(df=df, condDict=condDict, resetIndex=False)
  outDict = {'prev': None, 'df': None, 'ts': None}
  if len(dfCond) == 0:
    return None
  # position of the first matching row in the sorted frame
  ind0 = list(dfCond.index)[0]
  if ind0 == 0:
    # no earlier event exists
    return None
  # the previous row and the matching row
  indR = range(ind0 - 1, ind0 + 1)
  dfClose = df.iloc[indR]
  times = dfClose[timeCol].values
  conds = dfClose[usageCol].values
  delta = times[1] - times[0]
  # elapsed seconds between previous event and the match
  secs = delta / np.timedelta64(1, 's')
  if secs < timeGap:
    prev = conds[0]
  else:
    # previous event is too old to count as the entry point
    prev = 'BLANK'
  outDict['prev'] = prev
  outDict['df'] = dfClose
  outDict['ts'] = times[1]
  return outDict
'''
df = GenUsageDf_forTesting()
df = df[df['user_id'] == '1']
df = df.sort_values(['time'])
Mark(df)
condCols = ['prod', 'form_factor']
condValues = [browsingFeat, None]
FindPrev0(
df,
condCols=condCols,
condValues=condValues,
usageCol='prod',
userCol='user_id',
timeCol='time',
timeGap=10*600)
'''
## find previous activity for a user given conditions:
# condValues for columns: condCols e.g. (product, form_factor)
# it can also find the previous event for a second occurrence of the conditions too
# this happens if secondUsage == True
# then it finds the entry point to the second occurrence
# it requires at least a secondUsageGap for considering the event
# a second usage
def FindPrev(df,
             user,
             condCols,
             condValues,
             userCol='user_id',
             timeCol='ts',
             timeGap=10 * 60,
             secondUsage=False,
             secondUsageGap=3600):
  """Find a single user's entry point before the condition first holds.

  The condition columns are concatenated into one "cond" key and FindPrev0
  locates the event just before its first occurrence. When secondUsage is
  True, the entry point to the *second* occurrence (at least secondUsageGap
  seconds after the first) is also computed.

  Returns a dict with keys prev/df/ts for the first occurrence and
  prev2/df2/ts2 for the second; unavailable entries stay None.
  """
  outDict = {
      'prev': None,
      'df': None,
      'ts': None,
      'prev2': None,
      'df2': None,
      'ts2': None}
  dfUser = df[df[userCol] == user]
  # collapse the condition columns into a single "cond" column, e.g. "prod-ff"
  dfUser = Concat_stringColsDf(
      df=dfUser, cols=condCols, colName="cond", sepStr='-')
  out = FindPrev0(
      df=dfUser,
      condCols=condCols,
      condValues=condValues,
      usageCol='cond',
      timeCol=timeCol,
      timeGap=timeGap)
  if out is None:  # fixed: identity comparison with None (was '== None')
    return outDict
  outDict['prev'] = out['prev']
  outDict['df'] = out['df']
  outDict['ts'] = out['ts']
  if secondUsage:
    cond = '-'.join(condValues)
    t1 = outDict['ts']
    # only activity at least secondUsageGap seconds after the first entry
    # qualifies as a distinct second usage
    t2 = t1 + np.timedelta64(secondUsageGap, 's')
    dfUser2 = dfUser[dfUser[timeCol] > t2]
    out2 = None
    if len(dfUser2) > 0:
      conds = dfUser2['cond'].values
      # index of the first later event that is NOT the condition itself
      # (-1 when every later event still matches the condition)
      first = next((i for i, v in enumerate(conds) if v != cond), -1)
      if first < len(dfUser2):
        dfUser2 = dfUser2.iloc[first:]
      else:
        return outDict
      out2 = FindPrev0(
          df=dfUser2,
          condCols=condCols,
          condValues=condValues,
          usageCol='cond',
          userCol=userCol,
          timeCol=timeCol,
          timeGap=timeGap)
    if out2 is None:  # fixed: identity comparison with None (was '== None')
      return outDict
    outDict['prev2'] = out2['prev']
    outDict['ts2'] = out2['ts']
    outDict['df2'] = out2['df']
  return outDict
'''
df = GenUsageDf_forTesting()
FindPrev(df=df,
user='1',
condCols=['prod', 'form_factor'],
condValues=['locFeat', 'COMP'],
userCol='user_id',
timeCol='time',
timeGap=10 * 60,
secondUsage=True,
secondUsageGap=1)
'''
## this finds the previous activity of users satisfying a condition
# e.g. (product, form_factor)
def FindPrevUsers(
    users,
    dfDetails,
    condCols,
    condValues,
    userCol,
    timeCol,
    timeGap,
    secondUsage,
    secondUsageGap=3600):
  """Collect first (and optionally second) entry points for each user.

  Runs FindPrev once per user and gathers the 'prev'/'prev2' fields.
  Returns {'prev': ..., 'prev2': ...}; empty lists for an empty user set,
  otherwise numpy arrays aligned with *users*.
  """
  if len(users) == 0:
    # historical contract: plain empty lists when there is nothing to do
    return {'prev': [], 'prev2': []}
  records = []
  for u in users:
    res = FindPrev(
        df=dfDetails,
        user=u,
        condCols=condCols,
        condValues=condValues,
        userCol=userCol,
        timeCol=timeCol,
        timeGap=timeGap,
        secondUsage=secondUsage,
        secondUsageGap=secondUsageGap)
    records.append({'prev': res['prev'], 'prev2': res['prev2']})
  frame = pd.DataFrame(records)
  return {'prev': frame['prev'].values, 'prev2': frame['prev2'].values}
'''
df = GenUsageDf_forTesting()
Mark(df[df['user_id'].isin(['0', '1', '2'])].sort_values(['user_id', 'time']))
FindPrevUsers(
users=map(lambda x: str(x), range(10)),
dfDetails=df,
condCols=['prod', 'form_factor'],
condValues=['PresFeat', 'PHN'],
userCol='user_id',
timeCol='time',
timeGap=6000,
secondUsage=True,
secondUsageGap=3)
'''
## this first segments the users who satisfy conditions (condValues)
# using two datetime variables to ex and new users
# then it finds out the previous activity for each user
# it returns the previous activity in the same format as the conditions
def FindPrevControlTreat(
    dfSummary,
    dfDetails,
    dt1,
    dt2,
    userCol,
    condCols,
    condValues,
    timeColSumm,
    timeColDet,
    timeGap,
    secondUsage,
    secondUsageGap=3600,
    limit_exUsersNum=200):
  """Segment matching users into new (>= dt1) and existing (<= dt2) cohorts,
  then tabulate the previous activity (entry point) for each cohort.

  Returns a dict with value-count tables under 'new'/'ex' (and 'new2'/'ex2'
  when secondUsage is True) plus sample sizes under 'ss'.
  """
  # this subsets a data frame using conditions and then segments it using datetimes
  def SegmentNewUsage(df, dt1, dt2, condCols, condValues, timeCol):
    condDict = {}
    for i in range(len(condCols)):
      col = condCols[i]
      condDict[col] = [condValues[i]]
    ind = BuildCondInd(df, condDict=condDict)
    # boolean multiplication acts as an elementwise AND of the two masks
    dfNew = df[(df[timeCol] >= dt1) * ind]
    dfEx = df[(df[timeCol] <= dt2) * ind]
    return {'new': dfNew, 'ex': dfEx}
  ## segment the summary data (dfSummary) using dt1, dt2
  dfDict = SegmentNewUsage(
      df=dfSummary,
      dt1=dt1,
      dt2=dt2,
      condCols=condCols,
      condValues=condValues,
      timeCol=timeColSumm)
  dfNew = dfDict['new']
  dfEx = dfDict['ex']
  newUsers = list(set(dfNew[userCol].values))
  exUsers = list(set(dfEx[userCol].values))
  ## limiting the number of ex-users
  # fixed: the threshold was hard-coded to 200, silently ignoring any other
  # limit_exUsersNum value passed by the caller
  if (limit_exUsersNum is not None) and (len(exUsers) > limit_exUsersNum):
    exUsers = exUsers[:limit_exUsersNum]
  new = FindPrevUsers(
      users=newUsers,
      dfDetails=dfDetails,
      condCols=condCols,
      condValues=condValues,
      userCol=userCol,
      timeCol=timeColDet,
      timeGap=timeGap,
      secondUsage=secondUsage,
      secondUsageGap=secondUsageGap)
  ex = FindPrevUsers(
      users=exUsers,
      dfDetails=dfDetails,
      condCols=condCols,
      condValues=condValues,
      userCol=userCol,
      timeCol=timeColDet,
      timeGap=timeGap,
      secondUsage=secondUsage,
      secondUsageGap=secondUsageGap)
  outDict = {'new': None, 'ex': None, 'new2': None, 'ex2': None, 'ss': None}
  # drop users with no detectable entry point before tabulating
  prevProdNew = [j for j in new['prev'] if j is not None]
  prevProdEx = [j for j in ex['prev'] if j is not None]
  tabNew = pd.Series(prevProdNew).value_counts()
  tabEx = pd.Series(prevProdEx).value_counts()
  ss = {'new': len(prevProdNew), 'ex': len(prevProdEx)}
  outDict['new'] = tabNew
  outDict['ex'] = tabEx
  if secondUsage:
    prevProdNew2 = [j for j in new['prev2'] if j is not None]
    prevProdEx2 = [j for j in ex['prev2'] if j is not None]
    outDict['new2'] = pd.Series(prevProdNew2).value_counts()
    outDict['ex2'] = pd.Series(prevProdEx2).value_counts()
    ss['new2'] = len(prevProdNew2)
    ss['ex2'] = len(prevProdEx2)
  outDict['ss'] = ss
  return outDict
## compare the entry points for various users
def CompareEntryPoints(
    dfSummary,
    dfDetails,
    dt1,
    dt2,
    userCol,
    condCols,
    condValues,
    timeColSumm,
    timeColDet,
    timeGap,
    treat,
    base,
    otherArms=[],
    colorListOther=[],
    secondUsageGap=3600,
    includePvalue=True,
    removeCols=None,
    removeValues=None):
  """Compare entry-point distributions between two user cohorts.

  Tabulates previous-activity frequencies for the *treat* and *base* arms
  (any of 'new', 'ex', 'new2', 'ex2'), merges them, and plots the proportions
  side by side (optionally with a (1-pvalue)% series). Rows whose values in
  removeCols appear in removeValues are dropped from dfDetails first.

  Returns {'tab': merged table, 'plot': plot object or None}.
  """
  # optionally drop black-listed rows before any entry-point search
  if removeCols is not None:  # fixed: identity comparison (was '!= None')
    for i in range(len(removeCols)):
      col = removeCols[i]
      values = removeValues[i]
      dfDetails = dfDetails[~dfDetails[col].isin(values)]
  dfDetails = dfDetails.reset_index(drop=True)
  dfSummary = dfSummary.reset_index(drop=True)
  # second-usage statistics are only computed when an arm refers to them
  secondUsage = False
  allArms = [base, treat] + otherArms
  if ('new2' in allArms) or ('ex2' in allArms):
    secondUsage = True
  res = FindPrevControlTreat(
      dfSummary=dfSummary,
      dfDetails=dfDetails,
      dt1=dt1,
      dt2=dt2,
      userCol=userCol,
      condCols=condCols,
      condValues=condValues,
      timeColSumm=timeColSumm,
      timeColDet=timeColDet,
      timeGap=timeGap,
      secondUsage=secondUsage)
  tabTreat = res[treat]
  tabBase = res[base]
  ss = res['ss']
  tabDict = {treat: tabTreat, base: tabBase}
  for i in range(len(otherArms)):
    tabDict[otherArms[i]] = res[otherArms[i]]
  tab = MergeTablesDict(tabDict)
  condName = '-'.join(condValues)
  p = None
  # only test and plot when both arms have at least one sample
  if (ss[base] * ss[treat]) > 0:
    tab2 = TabComplPvalue(
        tab=tab,
        categCol='categ',
        freqCols=['freq_' + treat, 'freq_' + base])
    tab['(1-pvalue)%'] = 100.0 - 100.0 * tab2['p-value']
    # keep only categories where either arm's share exceeds 2%
    ind = ((tab['prop_' + treat] > 2) + (tab['prop_' + base] > 2)) > 0
    tab = tab[ind]
    def ExtraFcn1(ax):
      # horizontal significance guide lines (vertical plot orientation)
      plt.axhline(y=95, alpha=0.3, color='gray', linestyle='--')
      plt.axhline(y=90, alpha=0.3, color='gray', linestyle='--')
      ax.text(0, 95, '95%', fontsize=10)
      ax.text(0, 90, '90%', fontsize=10)
    def ExtraFcn2(ax):
      # vertical significance guide lines (horizontal plot orientation)
      plt.axvline(x=95, alpha=0.3, color='gray', linestyle='--')
      plt.axvline(x=90, alpha=0.3, color='gray', linestyle='--')
      ax.text(95, 0, '95%', fontsize=10)
      ax.text(90, 0, '90%', fontsize=10)
    cols = ['prop_' + treat, 'prop_' + base]
    for i in range(len(otherArms)):
      cols = cols + [('prop_' + otherArms[i])]
    colorList = ['g', 'r']
    alphaList = [0.6] * 2
    if (len(otherArms) > 0):
      alphaList = alphaList + [0.3] * len(otherArms)
    if (len(colorListOther) == 0):
      colorListOther = ['gray'] * len(otherArms)
    colorList = colorList + colorListOther
    if (includePvalue):
      cols = cols + ['(1-pvalue)%']
      colorList = colorList + ['y']
      alphaList = alphaList + [0.2]
    p = PltCols_wrtIndex(
        df=tab,
        cols=cols,
        categCol='categ',
        pltTitle=('' + condName + '; ss: ' + treat + ':' + str(ss[treat]) + ', '
                  + base + ':' + str(ss[base])),
        colorList=colorList,
        alphaList=alphaList,
        ymax=100,
        ExtraFcn=ExtraFcn2,
        orient='h')
  return {'tab': tab, 'plot': p}
## plots the bar plot of the time difference between two time columns
def Plt_timeDiff_barPlot(df, col1, col2, pltTitle=None):
  """Bar-plot the distribution of (col2 - col1), binned in whole days.

  Both columns must be datetime-like; the default title is "col2 - col1".
  """
  x = df[col2] - df[col1]
  # .dt.days truncates each timedelta to whole days
  y = x.dt.days
  if pltTitle is None:  # fixed: identity comparison with None (was '== None')
    pltTitle = col2 + ' - ' + col1
  CutBarPlot(y, method='uniform', num=5, pltTitle=pltTitle)
| |
"""Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
# Public names exported by ``from sympy.physics.quantum.state import *``.
__all__ = [
    'KetBase',
    'BraBase',
    'StateBase',
    'State',
    'Ket',
    'Bra',
    'TimeDepState',
    'TimeDepBra',
    'TimeDepKet',
    'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u"\N{MATHEMATICAL LEFT ANGLE BRACKET}"
_rbracket_ucode = u"\N{MATHEMATICAL RIGHT ANGLE BRACKET}"
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u"\N{LIGHT VERTICAL BAR}"
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
    """Abstract base class for general abstract states in quantum mechanics.
    All other state classes defined will need to inherit from this class. It
    carries the basic structure for all other states such as dual, _eval_adjoint
    and label.
    This is an abstract base class and you should not instantiate it directly,
    instead use State.
    """
    @classmethod
    def _operators_to_state(self, ops, **options):
        """ Returns the eigenstate instance for the passed operators.
        This method should be overridden in subclasses. It will handle being
        passed either an Operator instance or set of Operator instances. It
        should return the corresponding state INSTANCE or simply raise a
        NotImplementedError. See cartesian.py for an example.
        """
        raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
    def _state_to_operators(self, op_classes, **options):
        """ Returns the operators which this state instance is an eigenstate
        of.
        This method should be overridden in subclasses. It will be called on
        state instances and be passed the operator classes that we wish to make
        into instances. The state instance will then transform the classes
        appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples,
        """
        raise NotImplementedError(
            "Cannot map this state to operators. Method not implemented!")
    @property
    def operators(self):
        """Return the operator(s) that this state is an eigenstate of"""
        from .operatorset import state_to_operators # import internally to avoid circular import errors
        return state_to_operators(self)
    def _enumerate_state(self, num_states, **options):
        # Subclasses may override to generate num_states distinct states.
        raise NotImplementedError("Cannot enumerate this state!")
    def _represent_default_basis(self, **options):
        # Represent in the basis of the operators this state belongs to.
        return self._represent(basis=self.operators)
    #-------------------------------------------------------------------------
    # Dagger/dual
    #-------------------------------------------------------------------------
    @property
    def dual(self):
        """Return the dual state of this one."""
        # _new_rawargs skips validation since self.args are already valid
        return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
    @classmethod
    def dual_class(self):
        """Return the class used to construct the dual."""
        raise NotImplementedError(
            'dual_class must be implemented in a subclass'
        )
    def _eval_adjoint(self):
        """Compute the dagger of this state using the dual."""
        return self.dual
    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------
    def _pretty_brackets(self, height, use_unicode=True):
        # Return pretty printed brackets for the state
        # Ideally, this could be done by pform.parens but it does not support the angled < and >
        # Setup for unicode vs ascii
        if use_unicode:
            lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
            slash, bslash, vert = u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}', \
                                  u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}', \
                                  u'\N{BOX DRAWINGS LIGHT VERTICAL}'
        else:
            lbracket, rbracket = self.lbracket, self.rbracket
            slash, bslash, vert = '/', '\\', '|'
        # If height is 1, just return brackets
        if height == 1:
            return stringPict(lbracket), stringPict(rbracket)
        # Make height even
        # (an angled bracket is drawn from equal-length upper and lower halves)
        height += (height % 2)
        brackets = []
        for bracket in lbracket, rbracket:
            # Create left bracket
            if bracket in {_lbracket, _lbracket_ucode}:
                bracket_args = [ ' ' * (height//2 - i - 1) +
                                 slash for i in range(height // 2)]
                bracket_args.extend(
                    [ ' ' * i + bslash for i in range(height // 2)])
            # Create right bracket
            elif bracket in {_rbracket, _rbracket_ucode}:
                bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
                bracket_args.extend([ ' ' * (
                    height//2 - i - 1) + slash for i in range(height // 2)])
            # Create straight bracket
            elif bracket in {_straight_bracket, _straight_bracket_ucode}:
                bracket_args = [vert for i in range(height)]
            else:
                raise ValueError(bracket)
            brackets.append(
                stringPict('\n'.join(bracket_args), baseline=height//2))
        return brackets
    def _sympystr(self, printer, *args):
        # Plain-string form: lbracket + label + rbracket, e.g. |psi>
        contents = self._print_contents(printer, *args)
        return '%s%s%s' % (self.lbracket, contents, self.rbracket)
    def _pretty(self, printer, *args):
        from sympy.printing.pretty.stringpict import prettyForm
        # Get brackets
        pform = self._print_contents_pretty(printer, *args)
        lbracket, rbracket = self._pretty_brackets(
            pform.height(), printer._use_unicode)
        # Put together state
        pform = prettyForm(*pform.left(lbracket))
        pform = prettyForm(*pform.right(rbracket))
        return pform
    def _latex(self, printer, *args):
        contents = self._print_contents_latex(printer, *args)
        # The extra {} brackets are needed to get matplotlib's latex
        # rendered to render this properly.
        return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
    """Base class for Kets.
    This class defines the dual property and the brackets for printing. This is
    an abstract base class and you should not instantiate it directly, instead
    use Ket.
    """
    # printing delimiters: a ket renders as |label>
    lbracket = _straight_bracket
    rbracket = _rbracket
    lbracket_ucode = _straight_bracket_ucode
    rbracket_ucode = _rbracket_ucode
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle '
    @classmethod
    def default_args(self):
        # label used when a Ket is constructed with no arguments
        return ("psi",)
    @classmethod
    def dual_class(self):
        return BraBase
    def __mul__(self, other):
        """KetBase*other"""
        from sympy.physics.quantum.operator import OuterProduct
        # |k><b| forms an outer product; anything else defers to Expr
        if isinstance(other, BraBase):
            return OuterProduct(self, other)
        else:
            return Expr.__mul__(self, other)
    def __rmul__(self, other):
        """other*KetBase"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        # <b|k> forms an inner product; anything else defers to Expr
        if isinstance(other, BraBase):
            return InnerProduct(other, self)
        else:
            return Expr.__rmul__(self, other)
    #-------------------------------------------------------------------------
    # _eval_* methods
    #-------------------------------------------------------------------------
    def _eval_innerproduct(self, bra, **hints):
        """Evaluate the inner product between this ket and a bra.
        This is called to compute <bra|ket>, where the ket is ``self``.
        This method will dispatch to sub-methods having the format::
            ``def _eval_innerproduct_BraClass(self, **hints):``
        Subclasses should define these methods (one for each BraClass) to
        teach the ket how to take inner products with bras.
        """
        return dispatch_method(self, '_eval_innerproduct', bra, **hints)
    def _apply_operator(self, op, **options):
        """Apply an Operator to this Ket.
        This method will dispatch to methods having the format::
            ``def _apply_operator_OperatorName(op, **options):``
        Subclasses should define these methods (one for each OperatorName) to
        teach the Ket how operators act on it.
        Parameters
        ==========
        op : Operator
            The Operator that is acting on the Ket.
        options : dict
            A dict of key/value pairs that control how the operator is applied
            to the Ket.
        """
        return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
    """Base class for Bras.
    This class defines the dual property and the brackets for printing. This
    is an abstract base class and you should not instantiate it directly,
    instead use Bra.
    """
    # printing delimiters: a bra renders as <label|
    lbracket = _lbracket
    rbracket = _straight_bracket
    lbracket_ucode = _lbracket_ucode
    rbracket_ucode = _straight_bracket_ucode
    lbracket_latex = r'\left\langle '
    rbracket_latex = r'\right|'
    @classmethod
    def _operators_to_state(self, ops, **options):
        # delegate to the ket side, then take the dual of the result
        state = self.dual_class().operators_to_state(ops, **options)
        return state.dual
    def _state_to_operators(self, op_classes, **options):
        # delegate to the dual ket's implementation
        return self.dual._state_to_operators(op_classes, **options)
    def _enumerate_state(self, num_states, **options):
        # enumerate as kets, then dualize each to get bras
        dual_states = self.dual._enumerate_state(num_states, **options)
        return [x.dual for x in dual_states]
    @classmethod
    def default_args(self):
        # same default label as the dual ket class
        return self.dual_class().default_args()
    @classmethod
    def dual_class(self):
        return KetBase
    def __mul__(self, other):
        """BraBase*other"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        # <b|k> forms an inner product; anything else defers to Expr
        if isinstance(other, KetBase):
            return InnerProduct(self, other)
        else:
            return Expr.__mul__(self, other)
    def __rmul__(self, other):
        """other*BraBase"""
        from sympy.physics.quantum.operator import OuterProduct
        # |k><b| forms an outer product; anything else defers to Expr
        if isinstance(other, KetBase):
            return OuterProduct(other, self)
        else:
            return Expr.__rmul__(self, other)
    def _represent(self, **options):
        """A default represent that uses the Ket's version."""
        from sympy.physics.quantum.dagger import Dagger
        # a bra's representation is the conjugate transpose of its ket's
        return Dagger(self.dual._represent(**options))
class State(StateBase):
    """General abstract quantum state used as a base class for Ket and Bra."""
    # Marker base class: all concrete behavior lives in StateBase and the
    # Ket/Bra base classes.
    pass
class Ket(State, KetBase):
    """A general time-independent Ket in quantum mechanics.
    Inherits from State and KetBase. This class should be used as the base
    class for all physical, time-independent Kets in a system. This class
    and its subclasses will be the main classes that users will use for
    expressing Kets in Dirac notation [1]_.
    Parameters
    ==========
    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.
    Examples
    ========
    Create a simple Ket and looking at its properties::
        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> k = Ket('psi')
        >>> k
        |psi>
        >>> k.hilbert_space
        H
        >>> k.is_commutative
        False
        >>> k.label
        (psi,)
    Ket's know about their associated bra::
        >>> k.dual
        <psi|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.Bra'>
    Take a linear combination of two kets::
        >>> k0 = Ket(0)
        >>> k1 = Ket(1)
        >>> 2*I*k0 - 4*k1
        2*I*|0> - 4*|1>
    Compound labels are passed as tuples::
        >>> n, m = symbols('n,m')
        >>> k = Ket(n,m)
        >>> k
        |nm>
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """
    @classmethod
    def dual_class(self):
        # the dual of a time-independent Ket is the corresponding Bra
        return Bra
class Bra(State, BraBase):
    """A general time-independent Bra in quantum mechanics.
    Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
    class and its subclasses will be the main classes that users will use for
    expressing Bras in Dirac notation.
    Parameters
    ==========
    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.
    Examples
    ========
    Create a simple Bra and look at its properties::
        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> b = Bra('psi')
        >>> b
        <psi|
        >>> b.hilbert_space
        H
        >>> b.is_commutative
        False
    Bra's know about their dual Ket's::
        >>> b.dual
        |psi>
        >>> b.dual_class()
        <class 'sympy.physics.quantum.state.Ket'>
    Like Kets, Bras can have compound labels and be manipulated in a similar
    manner::
        >>> n, m = symbols('n,m')
        >>> b = Bra(n,m) - I*Bra(m,n)
        >>> b
        -I*<mn| + <nm|
    Symbols in a Bra can be substituted using ``.subs``::
        >>> b.subs(n,m)
        <mm| - I*<mm|
    References
    ==========
    .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
    """
    @classmethod
    def dual_class(self):
        # the dual of a time-independent Bra is the corresponding Ket
        return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
    """Base class for a general time-dependent quantum state.

    This class is used as a base class for any time-dependent state. The main
    difference between this class and the time-independent state is that this
    class takes a second argument that is the time in addition to the usual
    label argument.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.
    """

    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------

    @classmethod
    def default_args(cls):
        """Default label and time used when no arguments are given."""
        # Conventional ``cls`` receiver for a classmethod (was ``self``).
        return ("psi", "t")

    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------

    @property
    def label(self):
        """The label of the state."""
        # All args except the last one (which is the time).
        return self.args[:-1]

    @property
    def time(self):
        """The time of the state."""
        # The time is always passed as the final argument.
        return self.args[-1]

    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------

    def _print_time(self, printer, *args):
        return printer._print(self.time, *args)

    _print_time_repr = _print_time
    _print_time_latex = _print_time

    def _print_time_pretty(self, printer, *args):
        pform = printer._print(self.time, *args)
        return pform

    def _print_contents(self, printer, *args):
        # Printed as "label;time".
        label = self._print_label(printer, *args)
        time = self._print_time(printer, *args)
        return '%s;%s' % (label, time)

    def _print_label_repr(self, printer, *args):
        label = self._print_sequence(self.label, ',', printer, *args)
        time = self._print_time_repr(printer, *args)
        return '%s,%s' % (label, time)

    def _print_contents_pretty(self, printer, *args):
        label = self._print_label_pretty(printer, *args)
        time = self._print_time_pretty(printer, *args)
        return printer._print_seq((label, time), delimiter=';')

    def _print_contents_latex(self, printer, *args):
        label = self._print_sequence(
            self.label, self._label_separator, printer, *args)
        time = self._print_time_latex(printer, *args)
        return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
    """General time-dependent Ket in quantum mechanics.

    This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
    that should be used for Kets that vary with time. Its dual is a
    ``TimeDepBra``.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

    Create a TimeDepKet and look at its attributes::

        >>> from sympy.physics.quantum import TimeDepKet
        >>> k = TimeDepKet('psi', 't')
        >>> k
        |psi;t>
        >>> k.time
        t
        >>> k.label
        (psi,)
        >>> k.hilbert_space
        H

    TimeDepKets know about their dual bra::

        >>> k.dual
        <psi;t|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.TimeDepBra'>
    """

    @classmethod
    def dual_class(cls):
        """Return the class of this ket's dual: TimeDepBra."""
        # Conventional ``cls`` receiver for a classmethod (was ``self``).
        return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
    """General time-dependent Bra in quantum mechanics.

    This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

        >>> from sympy.physics.quantum import TimeDepBra
        >>> from sympy import symbols, I
        >>> b = TimeDepBra('psi', 't')
        >>> b
        <psi;t|
        >>> b.time
        t
        >>> b.label
        (psi,)
        >>> b.hilbert_space
        H
        >>> b.dual
        |psi;t>
    """

    @classmethod
    def dual_class(cls):
        """Return the class of this bra's dual: TimeDepKet."""
        # Conventional ``cls`` receiver for a classmethod (was ``self``).
        # Docstring fixed: the dual of a TimeDepBra is a TimeDepKet.
        return TimeDepKet
class Wavefunction(Function):
    """Class for representations in continuous bases

    This class takes an expression and coordinates in its constructor. It can
    be used to easily calculate normalizations and probabilities.

    Parameters
    ==========

    expr : Expr
        The expression representing the functional form of the w.f.

    coords : Symbol or tuple
        The coordinates to be integrated over, and their bounds

    Examples
    ========

    Particle in a box, specifying bounds in the more primitive way of using
    Piecewise:

        >>> from sympy import Symbol, Piecewise, pi, N
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x = Symbol('x', real=True)
        >>> n = 1
        >>> L = 1
        >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
        >>> f = Wavefunction(g, x)
        >>> f.norm
        1
        >>> f.is_normalized
        True
        >>> p = f.prob()
        >>> p(0)
        0
        >>> p(L)
        0
        >>> p(0.5)
        2
        >>> p(0.85*L)
        2*sin(0.85*pi)**2
        >>> N(p(0.85*L))
        0.412214747707527

    Additionally, you can specify the bounds of the function and the indices in
    a more compact way:

        >>> from sympy import symbols, pi, diff
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        1
        >>> f(L+1)
        0
        >>> f(L-1)
        sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
        >>> f(-1)
        0
        >>> f(0.85)
        sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
        >>> f(0.85, n=1, L=1)
        sqrt(2)*sin(0.85*pi)
        >>> f.is_commutative
        False

    All arguments are automatically sympified, so you can define the variables
    as strings rather than symbols:

        >>> expr = x**2
        >>> f = Wavefunction(expr, 'x')
        >>> type(f.variables[0])
        <class 'sympy.core.symbol.Symbol'>

    Derivatives of Wavefunctions will return Wavefunctions:

        >>> diff(f, x)
        Wavefunction(2*x, x)
    """

    def __new__(cls, *args, **options):
        # Any passed tuples for coordinates and their bounds need to be
        # converted to Tuples before Function's constructor is called, to
        # avoid errors from calling is_Float in the constructor.
        new_args = tuple(
            Tuple(*arg) if isinstance(arg, tuple) else arg for arg in args
        )
        # Deliberately skip Function.__new__ (hence super(Function, cls)):
        # Function's constructor cannot handle the coordinate arguments.
        return super(Function, cls).__new__(cls, *new_args, **options)

    def __call__(self, *args, **options):
        """Evaluate the functional form at *args*, returning 0 outside bounds.

        Keyword options substitute free symbols by name, e.g.
        ``f(2, 4, m=1, n=1)``.
        """
        var = self.variables
        if len(args) != len(var):
            raise NotImplementedError(
                "Incorrect number of arguments to function!")
        # If a passed value is outside the specified bounds, return 0.
        # BUGFIX: the original loop used a manual counter that was not
        # incremented when the ``continue`` below fired, desynchronizing
        # args from variables for multi-coordinate wavefunctions.
        for ct, v in enumerate(var):
            lower, upper = self.limits[v]
            # Do the comparison to limits only if the passed symbol is actually
            # a symbol present in the limits;
            # Had problems with a comparison of x > L
            if isinstance(args[ct], Expr) and \
                    not (lower in args[ct].free_symbols
                         or upper in args[ct].free_symbols):
                continue
            if (args[ct] < lower) == True or (args[ct] > upper) == True:
                return 0
        expr = self.expr
        # Allows user to make a call like f(2, 4, m=1, n=1)
        for symbol in list(expr.free_symbols):
            if str(symbol) in options.keys():
                val = options[str(symbol)]
                expr = expr.subs(symbol, val)
        return expr.subs(zip(var, args))

    def _eval_derivative(self, symbol):
        # Differentiate only the functional form, preserving coordinates.
        expr = self.expr
        deriv = expr._eval_derivative(symbol)
        return Wavefunction(deriv, *self.args[1:])

    def _eval_conjugate(self):
        return Wavefunction(conjugate(self.expr), *self.args[1:])

    def _eval_transpose(self):
        return self

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    @property
    def is_commutative(self):
        """
        Override Function's is_commutative so that order is preserved in
        represented expressions
        """
        return False

    @classmethod
    def eval(cls, *args):
        # Never auto-evaluate: Wavefunction instances stay unevaluated.
        # Conventional ``cls`` receiver for a classmethod (was ``self``).
        return None

    @property
    def variables(self):
        """
        Return the coordinates which the wavefunction depends on

        Examples
        ========

        >>> from sympy.physics.quantum.state import Wavefunction
        >>> from sympy import symbols
        >>> x,y = symbols('x,y')
        >>> f = Wavefunction(x*y, x, y)
        >>> f.variables
        (x, y)
        >>> g = Wavefunction(x*y, x)
        >>> g.variables
        (x,)
        """
        var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
        return tuple(var)

    @property
    def limits(self):
        """
        Return the limits of the coordinates which the w.f. depends on If no
        limits are specified, defaults to ``(-oo, oo)``.

        Examples
        ========

        >>> from sympy.physics.quantum.state import Wavefunction
        >>> from sympy import symbols
        >>> x, y = symbols('x, y')
        >>> f = Wavefunction(x**2, (x, 0, 1))
        >>> f.limits
        {x: (0, 1)}
        >>> f = Wavefunction(x**2, x)
        >>> f.limits
        {x: (-oo, oo)}
        >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
        >>> f.limits
        {x: (-oo, oo), y: (-1, 2)}
        """
        limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
                  for g in self._args[1:]]
        return dict(zip(self.variables, tuple(limits)))

    @property
    def expr(self):
        """
        Return the expression which is the functional form of the Wavefunction

        Examples
        ========

        >>> from sympy.physics.quantum.state import Wavefunction
        >>> from sympy import symbols
        >>> x, y = symbols('x, y')
        >>> f = Wavefunction(x**2, x)
        >>> f.expr
        x**2
        """
        return self._args[0]

    @property
    def is_normalized(self):
        """
        Returns true if the Wavefunction is properly normalized

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.is_normalized
        True
        """
        return (self.norm == 1.0)

    @property
    @cacheit
    def norm(self):
        """
        Return the normalization of the specified functional form.

        This function integrates over the coordinates of the Wavefunction, with
        the bounds specified.

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        1
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        sqrt(2)*sqrt(L)/2
        """
        # Integrate |psi|^2 over every coordinate within its bounds.
        exp = self.expr*conjugate(self.expr)
        var = self.variables
        limits = self.limits
        for v in var:
            curr_limits = limits[v]
            exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
        return sqrt(exp)

    def normalize(self):
        """
        Return a normalized version of the Wavefunction

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x = symbols('x', real=True)
        >>> L = symbols('L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.normalize()
        Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
        """
        const = self.norm
        if const == oo:
            raise NotImplementedError("The function is not normalizable!")
        else:
            return Wavefunction((const)**(-1)*self.expr, *self.args[1:])

    def prob(self):
        r"""
        Return the absolute magnitude of the w.f., `|\psi(x)|^2`

        Examples
        ========

        >>> from sympy import symbols, pi
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', real=True)
        >>> n = symbols('n', integer=True)
        >>> g = sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.prob()
        Wavefunction(sin(pi*n*x/L)**2, x)
        """
        # Raw docstring above avoids the invalid ``\p`` escape warning.
        return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
| |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import time
from copy import deepcopy
from functools import wraps
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from gcp.gcp import GCPError, GCPHttpError, GoogleCloudPlatform
from gcp.gcp import check_response
from gcp.gcp import is_missing_resource_error, is_resource_used_error
from gcp.compute import constants
def get_item_from_gcp_response(key_field, key_name, items):
    """
    Get item from GCP REST response JSON list by name.
    items = [{ 'key_field': 'key_name', 'key_field_value': 'value'}]

    :param key_field: item dictionary key
    :param key_name: item dictionary value to match (docstring fixed:
        was documented as ``key_value``)
    :param items: GCP response dict holding a list under 'items'
    :return: item if found in collection, None otherwise
    """
    return next(
        (item for item in items.get('items', [])
         if item.get(key_field) == key_name),
        None)
def get_gcp_resource_name(name):
    """
    Create GCP accepted name of resource. From GCP specification:
    "Specifically, the name must be 1-63 characters long and match the regular
    expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must
    be a lowercase letter, and all following characters must be a dash,
    lowercase letter, or digit, except the last character,
    which cannot be a dash."

    :param name: name of resource to be given
    :return: GCP accepted instance name
    """
    # replace underscores with hyphens
    final_name = name.replace('_', '-')
    # remove all non-alphanumeric characters except hyphens
    final_name = re.sub(r'[^a-zA-Z0-9-]+', '', final_name)
    # assure the name is non-empty and its first character is alpha
    # (fix: an all-symbol input used to raise IndexError on final_name[0])
    if not final_name or not final_name[0].isalpha():
        final_name = '{0}{1}'.format('a', final_name)
    # trim to the length limit, keeping the hash suffix of the id intact
    if len(final_name) > constants.MAX_GCP_NAME:
        remain_len = constants.MAX_GCP_NAME - len(final_name)
        final_name = '{0}{1}'.format(
            final_name[:remain_len - constants.ID_HASH_CONST],
            final_name[-constants.ID_HASH_CONST:])
    # convert string to lowercase
    return final_name.lower()
def should_use_external_resource():
    """True when the node is configured to reuse an existing GCP resource."""
    node_properties = ctx.node.properties
    return node_properties.get(constants.USE_EXTERNAL_RESOURCE, False)
def assure_resource_id_correct():
    """Validate the node's configured resource id and return it.

    Raises NonRecoverableError when the id is missing or is not already a
    valid GCP resource name.
    """
    resource_id = ctx.node.properties.get(constants.RESOURCE_ID)
    if not resource_id:
        raise NonRecoverableError('Resource id is missing.')
    if get_gcp_resource_name(resource_id) != resource_id:
        raise NonRecoverableError('{} cannot be used as resource id.'
                                  .format(resource_id))
    return resource_id
def get_final_resource_name(name):
    """Resolve the effective resource name.

    External resources use the validated configured id; otherwise the given
    name, falling back to a GCP-safe name derived from the instance id.
    """
    if should_use_external_resource():
        return assure_resource_id_correct()
    return name or get_gcp_resource_name(ctx.instance.id)
def create_resource(func):
    """Decorator honoring Cloudify's "use external resource" property.

    For externally managed resources, adopt the existing GCP resource
    instead of calling *func*; fail if it does not exist. Otherwise
    delegate to *func* unchanged.
    """
    def _adopt_or_create(resource, *args, **kwargs):
        if not should_use_external_resource():
            return func(resource, *args, **kwargs)
        try:
            resource.body = resource.get()
            resource.update_model()
        except GCPHttpError as error:
            if not is_missing_resource_error(error):
                raise error
            name = ctx.node.properties.get(constants.RESOURCE_ID)
            raise NonRecoverableError(
                'Resource {0} defined as external, but does not exist. Error: {1}'.
                format(name, str(error)))
    return wraps(func)(_adopt_or_create)
@create_resource
def create(resource):
    """Create *resource* in GCP (or adopt it when marked external — see
    the create_resource decorator)."""
    return resource.create()
def delete_if_not_external(resource):
    """Delete *resource* unless it is an externally managed resource."""
    if should_use_external_resource():
        return
    resource.delete()
def sync_operation(func):
    """Decorator: run *func* and block until the GCP operation it started
    completes, polling once per second. Returns the operation's final
    response."""
    def _blocking_call(resource, *args, **kwargs):
        operation = response_to_operation(
            func(resource, *args, **kwargs),
            resource.config,
            resource.logger)
        while not operation.has_finished():
            time.sleep(1)
        return operation.last_response
    return wraps(func)(_blocking_call)
def retry_on_failure(msg, delay=constants.RETRY_DEFAULT_DELAY):
    """Decorator factory: ask Cloudify to retry the operation when GCP
    reports the resource is still in use; re-raise any other HTTP error."""
    def _retry_on_failure(func):
        def _guarded(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except GCPHttpError as error:
                if not is_resource_used_error(error):
                    raise error
                ctx.operation.retry(msg, delay)
        return wraps(func)(_guarded)
    return _retry_on_failure
def get_firewall_rule_name(network, firewall):
    """
    Prefix firewall rule name with network name

    :param network: network to which the firewall rule belongs
    :param firewall: firewall for which the name is created
    :return: network prefixed firewall rule name
    """
    prefixed = '{0}-{1}'.format(network, firewall)
    return get_gcp_resource_name(prefixed)
def throw_cloudify_exceptions(func):
    """Decorator: convert any GCPError into Cloudify's NonRecoverableError."""
    def _decorator(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except GCPError as e:
            # str(e) instead of the deprecated BaseException.message
            # attribute, which was removed in Python 3.
            raise NonRecoverableError(str(e))
    return wraps(func)(_decorator)
def get_gcp_config():
    """Return the GCP configuration for this operation.

    Prefers the node (or relationship source node) properties; falls back
    to a copy of the provider-context config. The zone override from
    runtime properties is always applied.
    """
    def _config_from_properties():
        try:
            return ctx.node.properties[constants.GCP_CONFIG]
        except NonRecoverableError:
            # Relationship context has no ctx.node; use the source node.
            return ctx.source.node.properties[constants.GCP_CONFIG]

    gcp_config = _config_from_properties()
    if not gcp_config:
        provider_config = ctx.provider_context['resources'][constants.GCP_CONFIG]
        gcp_config = deepcopy(provider_config)
    return update_zone(gcp_config)
def update_zone(gcp_config):
    """Apply a zone override from runtime properties to *gcp_config*,
    when one is present, and return the config."""
    def _zone_from_runtime_properties():
        try:
            return ctx.instance.runtime_properties.get(constants.GCP_ZONE)
        except NonRecoverableError:
            # Relationship context: consult both source and target instances.
            source_props = ctx.source.instance.runtime_properties
            target_props = ctx.target.instance.runtime_properties
            return (source_props.get(constants.GCP_ZONE)
                    or target_props.get(constants.GCP_ZONE))

    zone_override = _zone_from_runtime_properties()
    if zone_override:
        gcp_config['zone'] = zone_override
    return gcp_config
def get_manager_provider_config():
    """Extract the security-group entries from the manager's provider
    context, defaulting each to an empty dict."""
    resources = ctx.provider_context.get('resources', {})
    return {
        'agents_security_group': resources.get('agents_security_group', {}),
        'manager_security_group':
            resources.get('manager_agent_security_group', {}),
    }
def create_firewall_structure_from_rules(network, rules):
    """Build a GCP firewall body dict from a list of security rules.

    Tags are normalized to GCP-safe names and de-duplicated; allowed
    protocol/port pairs and source CIDR ranges are accumulated per rule.
    """
    def _append_normalized_tags(raw_tags, tag_list):
        # Normalize each tag name and keep only previously unseen ones.
        for raw_tag in raw_tags:
            normalized = get_gcp_resource_name(raw_tag)
            if normalized not in tag_list:
                tag_list.append(normalized)

    firewall = {
        'name': get_firewall_rule_name(network, ctx.instance.id),
        'allowed': [],
        constants.SOURCE_TAGS: [],
        'sourceRanges': [],
        constants.TARGET_TAGS: [],
    }
    for rule in rules:
        _append_normalized_tags(rule.get('source_tags', []),
                                firewall[constants.SOURCE_TAGS])
        _append_normalized_tags(rule.get('target_tags', []),
                                firewall[constants.TARGET_TAGS])
        firewall['allowed'].append({'IPProtocol': rule.get('ip_protocol'),
                                    'ports': [rule.get('port', [])]})
        cidr = rule.get('cidr_ip')
        if cidr and cidr not in firewall['sourceRanges']:
            firewall['sourceRanges'].append(cidr)
    return firewall
def get_key_user_string(user, key):
    """Return the '<user>:<key>' string GCP expects for SSH key metadata."""
    return '%s:%s' % (user, key)
def get_agent_ssh_key_string():
    """Return the Cloudify agent's public SSH key from the provider context,
    or '' when the key is not published yet (manager bootstrap)."""
    try:
        agent_resource = ctx.provider_context['resources']['cloudify-agent']
        return agent_resource['public-key']
    except KeyError:
        # Missing key means that we are bootstrapping the manager.
        return ''
def response_to_operation(response, config, logger):
    """Wrap a GCP async-operation response in the matching Operation class.

    Zone-scoped operations get a ZoneOperation; region operations are not
    supported; everything else is treated as a global operation.
    """
    operation_name = response['name']
    if 'zone' in response:
        return ZoneOperation(config, logger, operation_name)
    if 'region' in response:
        raise NonRecoverableError('RegionOperation is not implemented')
    return GlobalOperation(config, logger, operation_name)
class GlobalOperation(GoogleCloudPlatform):
    """Polls a global GCP asynchronous operation until it reports DONE."""

    def __init__(self, config, logger, name):
        super(GlobalOperation, self).__init__(config, logger, name)
        # Most recent poll result and its status, cached between polls.
        self.last_response = None
        self.last_status = None

    def has_finished(self):
        """Poll once (unless already DONE) and report completion."""
        if self.last_status == constants.GCP_OP_DONE:
            return True
        self.get()
        return self.last_status == constants.GCP_OP_DONE

    @check_response
    def get(self):
        """Fetch the operation's current state and cache response/status."""
        response = self._get()
        self.last_response = response
        self.last_status = response['status']
        return response

    def _get(self):
        # Global operations endpoint; subclasses override for other scopes.
        return self.discovery.globalOperations().get(
            project=self.project,
            operation=self.name).execute()
class ZoneOperation(GlobalOperation):
    # Same polling behavior as GlobalOperation, but queries the
    # zone-scoped operations endpoint using the configured zone.
    def _get(self):
        return self.discovery.zoneOperations().get(
            project=self.project,
            zone=self.zone,
            operation=self.name).execute()
| |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
from collections import defaultdict
from datetime import datetime
"""Performance test to make sure rule keys are unaffected by absolute paths.
The general algorithm is:
- Build all targets
- Rename directory being tested
- Build all targets, check to ensure everything pulls from dir cache
- Buck build all targets to verify no-op build works.
"""
def createArgParser():
    """Build the command-line parser for the buck performance test."""
    parser = argparse.ArgumentParser(
        description="Run the buck performance test")

    def _store(flag, value_type, help_text):
        # All simple options share action="store"; only flag/type/help vary.
        parser.add_argument(flag, action="store", type=value_type,
                            help=help_text)

    _store("--perftest_id", str, "The identifier of this performance test")
    _store("--revisions_to_go_back", int,
           "The maximum number of revisions to go back when testing")
    _store("--iterations_per_diff", int,
           "The number of iterations to run on diff")
    # Repeatable option: each occurrence appends to the list.
    parser.add_argument(
        "--targets_to_build", action="append", type=str,
        help="The targets to build")
    _store("--repo_under_test", str, "Path to the repo under test")
    _store("--project_under_test", str,
           "Path to the project folder being tested under repo")
    _store("--path_to_buck", str, "The path to the buck binary")
    _store("--old_buck_revision", str, "The original buck revision")
    _store("--new_buck_revision", str, "The new buck revision")
    return parser
def log(message):
    """Print *message* prefixed with a timestamp and flush immediately."""
    timestamp = str(datetime.now())
    print("%s\t%s" % (timestamp, message))
    sys.stdout.flush()
def timedelta_total_seconds(timedelta):
    """Return the total duration of *timedelta* in seconds as a float.

    Kept for backward compatibility with existing callers; the manual
    microsecond arithmetic it used to do is exactly what
    datetime.timedelta.total_seconds() (Python 2.7+) computes.
    """
    return timedelta.total_seconds()
class BuildResult:
    """Container for the outcome of one buck build invocation."""
    def __init__(self, time_delta, cache_results, rule_key_map):
        # Wall-clock duration of the build (a datetime.timedelta).
        self.time_delta = time_delta
        # Mapping: cache result type -> list of rule-info dicts
        # (rule_name / rule_key / rule_key_debug), as parsed by build_maps.
        self.cache_results = cache_results
        # Mapping: rule name -> (rule key, rule key debug string).
        self.rule_key_map = rule_key_map
def clean(cwd):
    """Remove all untracked files from the hg working copy at *cwd*."""
    log("Running hg purge.")
    subprocess.check_call(["hg", "purge", "--all"], cwd=cwd)
def reset(revision, cwd):
    """Revert the hg working copy at *cwd* to *revision* (hg revert -a -r)."""
    subprocess.check_call(["hg", "revert", "-a", "-r", revision], cwd=cwd)
def ant_clean_build(buck_repo):
    """Run a clean ant build of buck itself in *buck_repo*."""
    log("Running ant clean default.")
    subprocess.check_call(["ant", "clean", "default"], cwd=buck_repo)
def buck_clean(args, cwd):
    """Run `buck clean` in *cwd* using the buck binary from *args*."""
    log("Running buck clean.")
    subprocess.check_call([args.path_to_buck, "clean"], cwd=cwd)
# Per-rule result lines in buck-out/bin/build.log, e.g.
# "BuildRuleFinished(//foo:bar): SUCCESS DIR_HIT BUILT_LOCALLY <rule key>".
BUILD_RESULT_LOG_LINE = re.compile(
    r"BuildRuleFinished\((?P<rule_name>[\w_\-:#\/,]+)\): (?P<result>[A-Z_]+) "
    r"(?P<cache_result>[A-Z_]+) (?P<success_type>[A-Z_]+) "
    r"(?P<rule_key>[0-9a-f]*)"
)
# Rule-key debug lines in the captured "buck build -v 5" console output.
RULEKEY_LINE = re.compile(
    r"^INFO: RuleKey (?P<rule_key>[0-9a-f]*)=" r"(?P<rule_key_debug>.*)$"
)
# Same rule-key debug info in buck-out/log/buck-0.log
# (java.util.logging format, used by newer buck versions).
BUCK_LOG_RULEKEY_LINE = re.compile(
    r".*\[[\w ]+\](?:\[command:[0-9a-f-]+\])?\[tid:\d+\]"
    r"\[com.facebook.buck.rules.keys.RuleKey[\$\.]?Builder\] "
    r"RuleKey (?P<rule_key>[0-9a-f]+)="
    r"(?P<rule_key_debug>.*)$"
)
def buck_build_target(args, cwd, targets, log_as_perftest=True):
    """Build *targets* with buck and return a BuildResult.

    Enables verbose (FINER) rule-key logging, runs
    `buck build --deep ... -v 5` with output captured to a temp file,
    then parses the logs for cache results and rule keys.

    :param args: parsed command line args (path_to_buck, perftest_id)
    :param cwd: repository checkout to build in
    :param targets: list of fully qualified build targets
    :param log_as_perftest: when True, tag the build as a perftest run
    """
    log("Running buck build %s." % " ".join(targets))
    bucklogging_properties_path = os.path.join(cwd, ".bucklogging.local.properties")
    with open(bucklogging_properties_path, "w") as bucklogging_properties:
        # The default configuration has the root logger and FileHandler
        # discard anything below FINE level.
        #
        # We need RuleKey logging, which uses FINER (verbose), so the
        # root logger and file handler both need to be reconfigured
        # to enable verbose logging.
        bucklogging_properties.write(
            """.level=FINER
            java.util.logging.FileHandler.level=FINER"""
        )
    env = os.environ.copy()
    # Force buck to pretend its repo is clean.
    env.update({"BUCK_REPOSITORY_DIRTY": "0"})
    if log_as_perftest:
        # NOTE(review): opened relative to the script's own CWD, not *cwd*;
        # confirm that is intentional before changing it.
        with open(".buckjavaargs.local", "a") as f:
            f.write("-Dbuck.perftest_id=%s\n" % (args.perftest_id,))
            f.write("-Dbuck.perftest_side=new\n")
    start = datetime.now()
    # Fix: the TemporaryFile used to be left open (fd leak); scope it.
    with tempfile.TemporaryFile() as tmpFile:
        try:
            subprocess.check_call(
                [
                    args.path_to_buck,
                    "build",
                    "--deep",
                    # t16296463
                    "--config",
                    "project.glob_handler=",
                    "--config",
                    "cache._exp_propagation=false",
                ]
                + targets
                + ["-v", "5"],
                stdout=tmpFile,
                stderr=tmpFile,
                cwd=cwd,
                env=env,
            )
        except:
            # Bare except is deliberate: log captured output for ANY
            # failure (including KeyboardInterrupt), then re-raise.
            tmpFile.seek(0)
            log("Buck build failed: %s" % tmpFile.read())
            raise
        tmpFile.seek(0)
        finish = datetime.now()
        (cache_results, rule_key_map) = build_maps(cwd, tmpFile)
    result = BuildResult(finish - start, cache_results, rule_key_map)
    cache_counts = {}
    # Fix: .items() instead of the Python 2-only .iteritems().
    for key, value in result.cache_results.items():
        cache_counts[key] = len(value)
    log(
        "Test Build Finished! Elapsed Seconds: %d, Cache Counts: %s"
        % (timedelta_total_seconds(result.time_delta), repr(cache_counts))
    )
    return result
def build_maps(cwd, tmpFile):
    """Parse buck's logs into (cache_results, rule_key_map).

    Rule-key debug info is read either from buck-out/log/buck-0.log
    (newer buck) or from the captured `-v 5` console output in *tmpFile*,
    then joined against buck-out/bin/build.log.

    :return: (cache_result_type -> list of rule-info dicts,
              rule_name -> (rule_key, rule_key_debug))
    :raises Exception: when build.log mentions a rule key absent from
        the verbose output.
    """
    java_utils_log_path = os.path.join(cwd, "buck-out", "log", "buck-0.log")
    opened_here = None
    if os.path.exists(java_utils_log_path):
        pattern = BUCK_LOG_RULEKEY_LINE
        # Fix: this handle used to be leaked; it is closed below.
        opened_here = open(java_utils_log_path)
        build_output_file = opened_here
    else:
        pattern = RULEKEY_LINE
        build_output_file = tmpFile
    rule_debug_map = {}
    try:
        for line in build_output_file:
            match = pattern.match(line)
            if match:
                rule_debug_map[match.group("rule_key")] = match.group(
                    "rule_key_debug")
    finally:
        # Close only the file we opened; tmpFile belongs to the caller.
        if opened_here is not None:
            opened_here.close()
    logfile_path = os.path.join(cwd, "buck-out", "bin", "build.log")
    cache_results = defaultdict(list)
    rule_key_map = {}
    with open(logfile_path, "r") as logfile:
        for line in logfile.readlines():
            line = line.strip()
            match = BUILD_RESULT_LOG_LINE.search(line)
            if match:
                rule_name = match.group("rule_name")
                rule_key = match.group("rule_key")
                if rule_key not in rule_debug_map:
                    raise Exception(
                        """ERROR: build.log contains an entry
                        which was not found in buck build -v 5 output.
                        Rule: {0}, rule key: {1}""".format(
                            rule_name, rule_key
                        )
                    )
                cache_results[match.group("cache_result")].append(
                    {
                        "rule_name": rule_name,
                        "rule_key": rule_key,
                        "rule_key_debug": rule_debug_map[rule_key],
                    }
                )
                rule_key_map[rule_name] = (
                    rule_key,
                    rule_debug_map[rule_key],
                )
    return (cache_results, rule_key_map)
def set_cache_settings(args, cwd, cache_mode, dir_cache_only=True):
    """Write .buckconfig.local and .buckversion into *cwd*.

    :param cache_mode: value for the [cache] dir_mode setting
        (e.g. "readonly" / "readwrite")
    :param dir_cache_only: when True, restrict buck to the directory cache
    """
    log("Reconfiguring cache settings:")
    buckconfig_contents = """[cache]
    %s
    dir = buck-cache
    dir_mode = %s
  [build]
    # Some repositories set this to a lower value, which breaks an assumption
    # in this test: that all rules with correct rule keys will get hits.
    artifact_cache_size_limit = 2000000000
""" % (
        "mode = dir" if dir_cache_only else "",
        cache_mode,
    )
    log(buckconfig_contents)
    # Fix: the explicit truncate() calls after write() were redundant —
    # opening with mode "w" already truncates the file.
    buckconfig_path = os.path.join(cwd, ".buckconfig.local")
    with open(buckconfig_path, "w") as buckconfig:
        buckconfig.write(buckconfig_contents)
    buckversion_path = os.path.join(cwd, ".buckversion")
    with open(buckversion_path, "w") as buckversion:
        buckversion.write(args.new_buck_revision + os.linesep)
def build_all_targets(
    args, cwd, cache_mode, run_clean=True, dir_cache_only=True, log_as_perftest=True
):
    """Configure the cache, optionally clean, and build every requested
    target (comma-separated lists are flattened)."""
    set_cache_settings(args, cwd, cache_mode, dir_cache_only=dir_cache_only)
    targets = [
        target
        for target_str in args.targets_to_build
        for target in target_str.split(",")
    ]
    if run_clean:
        buck_clean(args, cwd)
    return buck_build_target(args, cwd, targets, log_as_perftest=log_as_perftest)
def check_cache_results(result, expected_keys, message, exception_message, last_result):
    """Raise Exception(exception_message) when *result* contains any cache
    result type outside *expected_keys*, logging old/new rule keys for
    every offending rule first."""
    unexpected_types = [
        key for key in result.cache_results.keys() if key not in expected_keys
    ]
    if not unexpected_types:
        return
    log(message)
    for result_type in unexpected_types:
        for rule in result.cache_results[result_type]:
            name = rule["rule_name"]
            new_key, new_key_debug = result.rule_key_map[name]
            old_key, old_key_debug = last_result.rule_key_map[name]
            log("Rule %s, result %s." % (name, result_type))
            log("\tOld Rule Key (%s): %s." % (old_key, old_key_debug))
            log("\tNew Rule Key (%s): %s." % (new_key, new_key_debug))
    raise Exception(exception_message)
def get_buck_repo_root(path):
    """Walk up from *path* to the nearest directory containing .buckconfig.

    Returns the containing directory, or None when no ancestor has a
    .buckconfig. Fix: the original looped forever when no .buckconfig
    existed, because os.path.dirname("/") == "/" so *path* never became
    None; we now stop when the parent equals the path (filesystem root).
    """
    while path is not None and not os.path.exists(os.path.join(path, ".buckconfig")):
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without finding .buckconfig.
            return None
        path = parent
    return path
def move_mount(from_mount, to_mount):
    """Move a mount point and rewrite symlinks that resolve into the old
    location so they point at the new one."""
    subprocess.check_call("sync")
    subprocess.check_call(["mount", "--move", from_mount, to_mount])
    old_prefix = from_mount + "/"
    new_prefix = to_mount + "/"
    for dirpath, _dirnames, filenames in os.walk(to_mount):
        for filename in filenames:
            link_path = os.path.join(dirpath, filename)
            if not os.path.islink(link_path):
                continue
            resolved = os.path.realpath(link_path)
            if resolved.startswith(old_prefix):
                # Re-point the symlink at the moved location.
                os.unlink(link_path)
                os.symlink(resolved.replace(old_prefix, new_prefix), link_path)
def main():
    """Warm the buck cache, rename/re-mount the repo, and verify that a
    rebuild reuses the cache (i.e. rule keys contain no absolute paths)."""
    args = createArgParser().parse_args()
    log("Running Performance Test!")
    ant_clean_build(get_buck_repo_root(args.path_to_buck))
    clean(args.repo_under_test)
    log("=== Warming up cache ===")
    cwd = os.path.join(args.repo_under_test, args.project_under_test)
    # First build populates the cache; not logged as a perftest run.
    last_result = build_all_targets(
        args, cwd, "readwrite", dir_cache_only=False, log_as_perftest=False
    )
    log("=== Cache Warm! Running tests ===")
    new_directory_name = os.path.basename(args.repo_under_test) + "_test_iteration_"
    # Rename the directory to flesh out any cache problems.
    cwd_root = os.path.join(os.path.dirname(args.repo_under_test), new_directory_name)
    cwd = os.path.join(cwd_root, args.project_under_test)
    log("Renaming %s to %s" % (args.repo_under_test, cwd_root))
    # Detect whether the repo is itself a mount point (Linux only);
    # mounted repos must be moved with `mount --move`, not os.rename.
    if not os.path.isfile("/proc/mounts"):
        is_mounted = False
    else:
        with open("/proc/mounts", "r") as mounts:
            # grab the second element (mount point) from /proc/mounts
            lines = [l.strip().split() for l in mounts.read().splitlines()]
            lines = [l[1] for l in lines if len(l) >= 2]
            is_mounted = args.repo_under_test in lines
    if is_mounted:
        if not os.path.exists(cwd_root):
            os.makedirs(cwd_root)
        move_mount(args.repo_under_test, cwd_root)
    else:
        # If cwd_root exists, it means that a previous attempt to run
        # this script wasn't able to clean up that folder properly.
        # In this case, we clean up that folder.
        shutil.rmtree(cwd_root, ignore_errors=True)
        os.rename(args.repo_under_test, cwd_root)
    try:
        log("== Checking for problems with absolute paths ==")
        # Build from the renamed location; everything should come from cache.
        result = build_all_targets(args, cwd, "readonly")
        check_cache_results(
            result,
            ["DIR_HIT", "IGNORED", "LOCAL_KEY_UNCHANGED_HIT"],
            "Building was unable to reuse the cache from a "
            "previous run. This suggests one of the rule keys "
            "contains an absolute path.",
            "Failed to reuse cache across directories!!!",
            last_result,
        )
        log("== Ensure noop build does nothing. ==")
        result = build_all_targets(args, cwd, "readonly", run_clean=False)
        check_cache_results(
            result,
            ["LOCAL_KEY_UNCHANGED_HIT"],
            "Doing a noop build not hit all of its keys.",
            "Doing a noop build not hit all of its keys.",
            last_result,
        )
    finally:
        # Always restore the repo to its original name/mount.
        log("Renaming %s to %s" % (cwd_root, args.repo_under_test))
        if is_mounted:
            move_mount(cwd_root, args.repo_under_test)
            shutil.rmtree(cwd_root)
        else:
            os.rename(cwd_root, args.repo_under_test)
# Script entry point.
if __name__ == "__main__":
    main()
| |
"""Support for Wireless Sensor Tags."""
import logging
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from wirelesstagpy import NotificationConfig as NC, WirelessTags, WirelessTagsException
from homeassistant import util
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_VOLTAGE,
CONF_PASSWORD,
CONF_USERNAME,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
VOLT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
# Strength of signal in dBm
ATTR_TAG_SIGNAL_STRENGTH = "signal_strength"
# Indicates if tag is out of range or not
ATTR_TAG_OUT_OF_RANGE = "out_of_range"
# Number in percents from max power of tag receiver
ATTR_TAG_POWER_CONSUMPTION = "power_consumption"
# Identifiers for the persistent notification shown on setup failure.
NOTIFICATION_ID = "wirelesstag_notification"
NOTIFICATION_TITLE = "Wireless Sensor Tag Setup"
DOMAIN = "wirelesstag"
DEFAULT_ENTITY_NAMESPACE = "wirelesstag"
# Template for signal - first parameter is tag_id,
# second, tag manager mac address
SIGNAL_TAG_UPDATE = "wirelesstag.tag_info_updated_{}_{}"
# Template for signal - tag_id, sensor type and
# tag manager mac address
SIGNAL_BINARY_EVENT_UPDATE = "wirelesstag.binary_event_updated_{}_{}_{}"
# Integration configuration: cloud username/password are required,
# any extra keys are passed through untouched.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
class WirelessTagPlatform:
    """Principal object to manage all registered in HA tags."""

    def __init__(self, hass, api):
        """Designated initializer for wirelesstags platform.

        :param hass: Home Assistant core object.
        :param api: wirelesstagpy.WirelessTags client instance.
        """
        self.hass = hass
        self.api = api
        self.tags = {}
        # Lazily computed base URL of this HA instance (see local_base_url).
        self._local_base_url = None

    @property
    def tag_manager_macs(self):
        """Return list of tag managers mac addresses in user account."""
        return self.api.mac_addresses

    def load_tags(self):
        """Load tags from remote server and cache them on self.tags."""
        self.tags = self.api.load_tags()
        return self.tags

    def arm(self, switch):
        """Arm entity sensor monitoring.

        BUGFIX: getattr() is called with a None default. Without it a
        missing ``arm_<sensor_type>`` method on the API would raise
        AttributeError, making the ``is not None`` guard dead code.
        """
        arm_func = getattr(self.api, f"arm_{switch.sensor_type}", None)
        if arm_func is not None:
            arm_func(switch.tag_id, switch.tag_manager_mac)

    def disarm(self, switch):
        """Disarm entity sensor monitoring.

        Same getattr-default fix as in arm(): silently skip sensor types
        the API does not support instead of raising AttributeError.
        """
        disarm_func = getattr(self.api, f"disarm_{switch.sensor_type}", None)
        if disarm_func is not None:
            disarm_func(switch.tag_id, switch.tag_manager_mac)

    def make_notifications(self, binary_sensors, mac):
        """Create configurations for push notifications.

        Builds one config per binary-sensor event plus a single
        update-event config for the tag manager identified by *mac*.
        """
        _LOGGER.info("Creating configurations for push notifications")
        configs = []

        bi_url = self.binary_event_callback_url
        for bi_sensor in binary_sensors:
            configs.extend(bi_sensor.event.build_notifications(bi_url, mac))

        update_url = self.update_callback_url
        update_config = NC.make_config_for_update_event(update_url, mac)
        configs.append(update_config)

        return configs

    def install_push_notifications(self, binary_sensors):
        """Register local push notification from tag manager.

        On failure a persistent notification is created so the user can
        see that local push is unavailable (polling still works).
        """
        _LOGGER.info("Registering local push notifications")
        for mac in self.tag_manager_macs:
            configs = self.make_notifications(binary_sensors, mac)
            # install notifications for all tags in tag manager
            # specified by mac
            result = self.api.install_push_notification(0, configs, True, mac)
            if not result:
                self.hass.components.persistent_notification.create(
                    "Error: failed to install local push notifications <br />",
                    title="Wireless Sensor Tag Setup Local Push Notifications",
                    notification_id="wirelesstag_failed_push_notification",
                )
            else:
                _LOGGER.info(
                    "Installed push notifications for all tags in %s",
                    mac,
                )

    @property
    def local_base_url(self):
        """Define base url of hass in local network.

        Computed once and cached; includes the API port when configured.
        """
        if self._local_base_url is None:
            self._local_base_url = f"http://{util.get_local_ip()}"

            port = self.hass.config.api.port
            if port is not None:
                self._local_base_url += f":{port}"
        return self._local_base_url

    @property
    def update_callback_url(self):
        """Return url for local push notifications(update event)."""
        return f"{self.local_base_url}/api/events/wirelesstag_update_tags"

    @property
    def binary_event_callback_url(self):
        """Return url for local push notifications(binary event)."""
        return f"{self.local_base_url}/api/events/wirelesstag_binary_event"

    def handle_update_tags_event(self, event):
        """Handle push event from wireless tag manager.

        Re-dispatches the event to the entity listening on the
        per-tag/per-manager SIGNAL_TAG_UPDATE signal.
        """
        _LOGGER.info("Push notification for update arrived: %s", event)
        try:
            tag_id = event.data.get("id")
            mac = event.data.get("mac")
            dispatcher_send(self.hass, SIGNAL_TAG_UPDATE.format(tag_id, mac), event)
        except Exception as ex:  # pylint: disable=broad-except
            # Broad catch: a malformed payload must never kill the event loop.
            _LOGGER.error(
                "Unable to handle tag update event:\
                %s error: %s",
                str(event),
                str(ex),
            )

    def handle_binary_event(self, event):
        """Handle push notifications for binary (on/off) events.

        Re-dispatches to the binary sensor listening on the
        tag/type/manager-specific SIGNAL_BINARY_EVENT_UPDATE signal.
        """
        _LOGGER.info("Push notification for binary event arrived: %s", event)
        try:
            tag_id = event.data.get("id")
            event_type = event.data.get("type")
            mac = event.data.get("mac")
            dispatcher_send(
                self.hass,
                SIGNAL_BINARY_EVENT_UPDATE.format(tag_id, event_type, mac),
                event,
            )
        except Exception as ex:  # pylint: disable=broad-except
            # Broad catch: a malformed payload must never kill the event loop.
            _LOGGER.error(
                "Unable to handle tag binary event:\
                %s error: %s",
                str(event),
                str(ex),
            )
def setup(hass, config):
    """Set up the Wireless Sensor Tag component."""
    conf = config[DOMAIN]
    try:
        # Authenticate against wirelesstag.net and pull the initial tag list.
        api = WirelessTags(
            username=conf.get(CONF_USERNAME),
            password=conf.get(CONF_PASSWORD),
        )
        platform = WirelessTagPlatform(hass, api)
        platform.load_tags()
        hass.data[DOMAIN] = platform
    except (ConnectTimeout, HTTPError, WirelessTagsException) as ex:
        _LOGGER.error("Unable to connect to wirelesstag.net service: %s", str(ex))
        hass.components.persistent_notification.create(
            f"Error: {ex}<br />Please restart hass after fixing this.",
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID,
        )
        return False

    # Wire the platform's handlers up to the custom events fired by the
    # local push-notification HTTP endpoints.
    hass.bus.listen("wirelesstag_update_tags", platform.handle_update_tags_event)
    hass.bus.listen("wirelesstag_binary_event", platform.handle_binary_event)
    return True
class WirelessTagBaseSensor(Entity):
    """Base class for HA implementation for Wireless Sensor Tag."""

    def __init__(self, api, tag):
        """Initialize a base sensor for Wireless Sensor Tag platform.

        :param api: wirelesstagpy client used for polling updates.
        :param tag: wirelesstagpy SensorTag this entity represents.
        """
        self._api = api
        self._tag = tag
        self._uuid = self._tag.uuid
        self.tag_id = self._tag.tag_id
        self.tag_manager_mac = self._tag.tag_manager_mac
        self._name = self._tag.name
        self._state = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def principal_value(self):
        """Return base value.

        Subclasses need override based on type of sensor.
        """
        return 0

    def updated_state_value(self):
        """Return formatted value.

        The default implementation formats principal value.
        """
        return self.decorate_value(self.principal_value)

    # pylint: disable=no-self-use
    def decorate_value(self, value):
        """Decorate input value to be well presented for end user."""
        return f"{value:.1f}"

    @property
    def available(self):
        """Return True if entity is available."""
        return self._tag.is_alive

    def update(self):
        """Update state from the cloud API (polling entities only)."""
        if not self.should_poll:
            return

        updated_tags = self._api.load_tags()
        # BUGFIX: use dict.get() — plain indexing would raise KeyError for a
        # tag that disappeared from the account, making the None check below
        # dead code.
        updated_tag = updated_tags.get(self._uuid)
        if updated_tag is None:
            _LOGGER.error('Unable to update tag: "%s"', self.name)
            return

        self._tag = updated_tag
        self._state = self.updated_state_value()

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_BATTERY_LEVEL: int(self._tag.battery_remaining * 100),
            ATTR_VOLTAGE: f"{self._tag.battery_volts:.2f}{VOLT}",
            ATTR_TAG_SIGNAL_STRENGTH: f"{self._tag.signal_strength}{SIGNAL_STRENGTH_DECIBELS_MILLIWATT}",
            ATTR_TAG_OUT_OF_RANGE: not self._tag.is_in_range,
            ATTR_TAG_POWER_CONSUMPTION: f"{self._tag.power_consumption:.2f}{PERCENTAGE}",
        }
| |
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import time
import random
import shutil
import contextlib
import tempfile
import binascii
import platform
import select
import datetime
import unittest
from contextlib import ContextDecorator
from unittest import mock
from io import BytesIO
from subprocess import Popen, PIPE
from dateutil.tz import tzlocal
import botocore.loaders
import botocore.session
from botocore.awsrequest import AWSResponse
from botocore.compat import (
parse_qs, six, urlparse, HAS_CRT
)
from botocore import utils
from botocore import credentials
from botocore.stub import Stubber
_LOADER = botocore.loaders.Loader()
def skip_unless_has_memory_collection(cls):
    """Class decorator to skip tests that require memory collection.

    Any test that uses memory collection (such as the resource leak tests)
    can decorate their class with skip_unless_has_memory_collection to
    indicate that if the platform does not support memory collection
    the tests should be skipped.
    """
    supported = platform.system() in ('Darwin', 'Linux')
    if supported:
        return cls
    return unittest.skip('Memory tests only supported on mac/linux.')(cls)
def skip_if_windows(reason):
    """Decorator to skip tests that should not be run on windows.

    Example usage:

        @skip_if_windows("Not valid")
        def test_some_non_windows_stuff(self):
            self.assertEqual(...)
    """
    def decorator(func):
        # Anything that isn't mac or linux is treated as "windows" here.
        on_unsupported_platform = platform.system() not in ['Darwin', 'Linux']
        return unittest.skipIf(on_unsupported_platform, reason)(func)
    return decorator
def requires_crt(reason=None):
    """Decorator factory that skips the test unless awscrt is installed."""
    skip_reason = "Test requires awscrt to be installed" if reason is None else reason

    def decorator(func):
        return unittest.skipIf(not HAS_CRT, skip_reason)(func)

    return decorator
def random_chars(num_chars):
    """Returns random hex characters.

    Useful for creating resources with random names.
    """
    # Each random byte hex-encodes to two characters.
    byte_count = int(num_chars / 2)
    return binascii.hexlify(os.urandom(byte_count)).decode('ascii')
def create_session(**kwargs):
    """Create a botocore Session backed by the shared module-level loader."""
    # Reusing _LOADER means all tests share the same cached service models.
    session = botocore.session.Session(**kwargs)
    session.register_component('data_loader', _LOADER)
    # Point credentials at a path that never exists so the real environment
    # cannot leak into the tests.
    session.set_config_variable('credentials_file', 'noexist/foo/botocore')
    return session
@contextlib.contextmanager
def temporary_file(mode):
    """This is a cross platform temporary file creation.

    tempfile.NamedTemporary file on windows creates a secure temp file
    that can't be read by other processes and can't be opened a second time.
    For tests, we generally *want* them to be read multiple times.
    The test fixture writes the temp file contents, the test reads the
    temp file.
    """
    tmpdir = tempfile.mkdtemp()
    filename = 'tmpfile-%s-%s' % (int(time.time()), random.randint(1, 1000))
    full_path = os.path.join(tmpdir, filename)
    # Pre-create the file so read modes ('r', 'rb') can open it as well.
    open(full_path, 'w').close()
    try:
        with open(full_path, mode) as f:
            yield f
    finally:
        shutil.rmtree(tmpdir)
class BaseEnvVar(unittest.TestCase):
    """Test case that swaps ``os.environ`` for an empty dict.

    ``self.environ`` is the fake environment; mutate it freely in tests.
    The real environment is restored automatically in ``tearDown``.
    """

    def setUp(self):
        self.environ = {}
        self.environ_patch = mock.patch('os.environ', self.environ)
        self.environ_patch.start()

    def tearDown(self):
        self.environ_patch.stop()
class BaseSessionTest(BaseEnvVar):
    """Base class used to provide credentials.

    This class can be used as a base class that want to use a real
    session class but want to be completely isolated from the
    external environment (including environment variables).
    This class will also set credential vars so you can make fake
    requests to services.
    """

    def setUp(self, **environ):
        super(BaseSessionTest, self).setUp()
        # Seed fake credentials, then let caller-supplied vars override them.
        fake_environ = {
            'AWS_ACCESS_KEY_ID': 'access_key',
            'AWS_SECRET_ACCESS_KEY': 'secret_key',
            'AWS_CONFIG_FILE': 'no-exist-foo',
        }
        fake_environ.update(environ)
        self.environ.update(fake_environ)
        self.session = create_session()
        self.session.config_filename = 'no-exist-foo'
@skip_unless_has_memory_collection
class BaseClientDriverTest(unittest.TestCase):
    """Test base that manages a ClientDriver child process per test."""

    # Subclasses flip this on to run the child with fake AWS credentials.
    INJECT_DUMMY_CREDS = False

    def setUp(self):
        self.driver = ClientDriver()
        if self.INJECT_DUMMY_CREDS:
            env = {'AWS_ACCESS_KEY_ID': 'foo',
                   'AWS_SECRET_ACCESS_KEY': 'bar'}
        else:
            env = None
        self.driver.start(env=env)

    def tearDown(self):
        self.driver.stop()

    def cmd(self, *args):
        self.driver.cmd(*args)

    def send_cmd(self, *args):
        self.driver.send_cmd(*args)

    def record_memory(self):
        self.driver.record_memory()

    @property
    def memory_samples(self):
        return self.driver.memory_samples
class ClientDriver(object):
    """Manage the external ``cmd-runner`` helper process used in tests."""

    CLIENT_SERVER = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        'cmd-runner'
    )

    def __init__(self):
        self._popen = None
        self.memory_samples = []

    def _get_memory_with_ps(self, pid):
        # It would be better to eventually switch to psutil,
        # which should allow us to test on windows, but for now
        # we'll just use ps and run on POSIX platforms.
        ps_command = ['ps', '-p', str(pid), '-o', 'rss']
        process = Popen(ps_command, stdout=PIPE)
        output = process.communicate()[0]
        if process.returncode != 0:
            raise RuntimeError("Could not retrieve memory")
        # Skip the "RSS" header line; ps reports the value in kilobytes.
        rss_kilobytes = int(output.splitlines()[1].split()[0])
        return rss_kilobytes * 1024

    def record_memory(self):
        """Sample the child's current RSS and append it to memory_samples."""
        self.memory_samples.append(self._get_memory_with_ps(self._popen.pid))

    def start(self, env=None):
        """Start up the command runner process."""
        self._popen = Popen([sys.executable, self.CLIENT_SERVER],
                            stdout=PIPE, stdin=PIPE, env=env)

    def stop(self):
        """Shutdown the command runner process."""
        self.cmd('exit')
        self._popen.wait()

    def send_cmd(self, *cmd):
        """Send a command and return immediately.

        This is a lower level method than cmd().
        This method will instruct the cmd-runner process
        to execute a command, but this method will
        immediately return. You will need to use
        ``is_cmd_finished()`` to check that the command
        is finished.

        This method is useful if you want to record attributes
        about the process while an operation is occurring. For
        example, if you want to instruct the cmd-runner process
        to upload a 1GB file to S3 and you'd like to record
        the memory during the upload process, you can use
        send_cmd() instead of cmd().
        """
        payload = (' '.join(cmd) + '\n').encode('utf-8')
        self._popen.stdin.write(payload)
        self._popen.stdin.flush()

    def is_cmd_finished(self):
        """Return True once the child has produced output to read."""
        readable, _, _ = select.select(
            [self._popen.stdout.fileno()], [], [], 0.01)
        return bool(readable)

    def cmd(self, *cmd):
        """Send a command and block until it finishes.

        This method will send a command to the cmd-runner process
        to run. It will block until the cmd-runner process is
        finished executing the command and sends back a status
        response.
        """
        self.send_cmd(*cmd)
        status_line = self._popen.stdout.readline().strip()
        if status_line != b'OK':
            raise RuntimeError(
                "Error from command '%s': %s" % (cmd, status_line))
# This is added to this file because it's used in both
# the functional and unit tests for cred refresh.
class IntegerRefresher(credentials.RefreshableCredentials):
    """Refreshable credentials to help with testing.

    This class makes testing refreshable credentials easier.
    It has the following functionality:

        * A counter, self.refresh_counter, to indicate how many
          times refresh was called.
        * A way to specify how many seconds to make credentials
          valid.
        * Configurable advisory/mandatory refresh.
        * An easy way to check consistency. Each time creds are
          refreshed, all the cred values are set to the next
          incrementing integer. Frozen credentials should always
          have this value.
    """

    _advisory_refresh_timeout = 2
    _mandatory_refresh_timeout = 1
    _credentials_expire = 3

    def __init__(self, creds_last_for=_credentials_expire,
                 advisory_refresh=_advisory_refresh_timeout,
                 mandatory_refresh=_mandatory_refresh_timeout,
                 refresh_function=None):
        if refresh_function is None:
            refresh_function = self._do_refresh
        expiration = (
            self._current_datetime() +
            datetime.timedelta(seconds=creds_last_for))
        super(IntegerRefresher, self).__init__(
            '0', '0', '0', expiration,
            refresh_function, 'INTREFRESH')
        self.creds_last_for = creds_last_for
        self.refresh_counter = 0
        self._advisory_refresh_timeout = advisory_refresh
        self._mandatory_refresh_timeout = mandatory_refresh

    def _do_refresh(self):
        self.refresh_counter += 1
        # Every refresh bumps all credential values to the next integer.
        next_id = str(int(self._access_key) + 1)
        return {
            'access_key': next_id,
            'secret_key': next_id,
            'token': next_id,
            'expiry_time': self._seconds_later(self.creds_last_for),
        }

    def _seconds_later(self, num_seconds):
        # We need to guarantee at *least* num_seconds.
        # Because this doesn't handle subsecond precision
        # we'll round up to the next second.
        rounded_up = num_seconds + 1
        future = self._current_datetime() + datetime.timedelta(seconds=rounded_up)
        return self._to_timestamp(future)

    def _to_timestamp(self, datetime_obj):
        aware = utils.parse_to_aware_datetime(datetime_obj)
        return aware.strftime('%Y-%m-%dT%H:%M:%SZ')

    def _current_timestamp(self):
        return self._to_timestamp(self._current_datetime())

    def _current_datetime(self):
        return datetime.datetime.now(tzlocal())
def _urlparse(url):
    """Parse *url*, first decoding bytes input to text.

    Uses the builtin ``bytes`` check instead of ``six.binary_type``
    (they are the same type on Python 3), removing the dependency on six.
    """
    if isinstance(url, bytes):
        # Not really necessary, but it helps to reduce noise on Python 2.x
        url = url.decode('utf8')
    return urlparse(url)
def assert_url_equal(url1, url2):
    """Assert two URLs are equivalent, ignoring query-parameter order."""
    parts1 = _urlparse(url1)
    parts2 = _urlparse(url2)

    # Because the query string ordering isn't relevant, we have to parse
    # every single part manually and then handle the query string.
    for component in ('scheme', 'netloc', 'path', 'params', 'fragment',
                      'username', 'password', 'hostname', 'port'):
        assert getattr(parts1, component) == getattr(parts2, component)
    assert parse_qs(parts1.query) == parse_qs(parts2.query)
class HTTPStubberException(Exception):
    """Raised by a strict HTTP stubber when no queued response remains."""
    pass
class RawResponse(BytesIO):
    # TODO: There's a few objects similar to this in various tests, let's
    # try and consolidate to this one in a future commit.
    def stream(self, **kwargs):
        """Yield the remaining body in successive read() chunks."""
        while True:
            chunk = self.read()
            if not chunk:
                break
            yield chunk
class BaseHTTPStubber(object):
    """Intercept the 'before-send' hook to serve canned HTTP responses."""

    def __init__(self, obj_with_event_emitter, strict=True):
        self.reset()
        self._strict = strict
        self._obj_with_event_emitter = obj_with_event_emitter

    def reset(self):
        """Drop all recorded requests and queued responses."""
        self.requests = []
        self.responses = []

    def add_response(self, url='https://example.com', status=200, headers=None,
                     body=b''):
        """Queue a canned AWSResponse to hand out on the next request."""
        if headers is None:
            headers = {}
        raw_body = RawResponse(body)
        self.responses.append(AWSResponse(url, status, headers, raw_body))

    @property
    def _events(self):
        # Subclasses decide where the event emitter lives.
        raise NotImplementedError('_events')

    def start(self):
        self._events.register('before-send', self)

    def stop(self):
        self._events.unregister('before-send', self)

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()

    def __call__(self, request, **kwargs):
        # Record the outgoing request, then serve the next queued response.
        self.requests.append(request)
        if not self.responses:
            if self._strict:
                raise HTTPStubberException('Insufficient responses')
            return None
        next_response = self.responses.pop(0)
        if isinstance(next_response, Exception):
            raise next_response
        return next_response
class ClientHTTPStubber(BaseHTTPStubber):
    """HTTP stubber wired to a single client's event emitter."""
    @property
    def _events(self):
        # Clients expose their emitter via client.meta.events.
        return self._obj_with_event_emitter.meta.events
class SessionHTTPStubber(BaseHTTPStubber):
    """HTTP stubber wired to a whole session's event emitter."""
    @property
    def _events(self):
        # Sessions expose their emitter as a registered component.
        return self._obj_with_event_emitter.get_component('event_emitter')
class ConsistencyWaiterException(Exception):
    """Raised when a consistency check fails to stabilize in time."""
    pass
class ConsistencyWaiter(object):
    """
    A waiter class for some check to reach a consistent state.

    :type min_successes: int
    :param min_successes: The minimum number of successful check calls to
    treat the check as stable. Default of 1 success.

    :type max_attempts: int
    :param max_attempts: The maximum number of times to attempt calling
    the check. Default of 20 attempts.

    :type delay: int
    :param delay: The number of seconds to delay the next API call after a
    failed check call. Default of 5 seconds.
    """
    # DOCFIX: the second parameter entry was mislabeled ':param min_successes:'
    # even though it documents max_attempts.

    def __init__(self, min_successes=1, max_attempts=20, delay=5,
                 delay_initial_poll=False):
        self.min_successes = min_successes
        self.max_attempts = max_attempts
        self.delay = delay
        self.delay_initial_poll = delay_initial_poll

    def wait(self, check, *args, **kwargs):
        """
        Wait until the check succeeds the configured number of times

        :type check: callable
        :param check: A callable that returns True or False to indicate
        if the check succeeded or failed.

        :type args: list
        :param args: Any ordered arguments to be passed to the check.

        :type kwargs: dict
        :param kwargs: Any keyword arguments to be passed to the check.
        """
        attempts = 0
        successes = 0
        if self.delay_initial_poll:
            time.sleep(self.delay)
        while attempts < self.max_attempts:
            attempts += 1
            if check(*args, **kwargs):
                successes += 1
                if successes >= self.min_successes:
                    return
            else:
                # Only delay after a *failed* check; consecutive successes
                # are polled back-to-back.
                time.sleep(self.delay)
        fail_msg = self._fail_message(attempts, successes)
        raise ConsistencyWaiterException(fail_msg)

    def _fail_message(self, attempts, successes):
        # Summarize how far the waiter got before giving up.
        format_args = (attempts, successes)
        return 'Failed after %s attempts, only had %s successes' % format_args
class StubbedSession(botocore.session.Session):
    """Session that hands out stubbed clients, cached one per service."""

    def __init__(self, *args, **kwargs):
        super(StubbedSession, self).__init__(*args, **kwargs)
        self._cached_clients = {}
        self._client_stubs = {}

    def create_client(self, service_name, *args, **kwargs):
        # Cache one stubbed client per service so tests and stubs share it.
        if service_name not in self._cached_clients:
            self._cached_clients[service_name] = self._create_stubbed_client(
                service_name, *args, **kwargs)
        return self._cached_clients[service_name]

    def _create_stubbed_client(self, service_name, *args, **kwargs):
        client = super(StubbedSession, self).create_client(
            service_name, *args, **kwargs)
        self._client_stubs[service_name] = Stubber(client)
        return client

    def stub(self, service_name, *args, **kwargs):
        """Return the Stubber for a service, creating the client if needed."""
        if service_name not in self._client_stubs:
            self.create_client(service_name, *args, **kwargs)
        return self._client_stubs[service_name]

    def activate_stubs(self):
        for stubber in self._client_stubs.values():
            stubber.activate()

    def verify_stubs(self):
        for stubber in self._client_stubs.values():
            stubber.assert_no_pending_responses()
class FreezeTime(ContextDecorator):
    """
    Context manager for mocking out datetime in arbitrary modules when creating
    performing actions like signing which require point in time specificity.

    :type module: module
    :param module: reference to imported module to patch (e.g. botocore.auth.datetime)
    :type date: datetime.datetime
    :param date: datetime object specifying the output for utcnow()
    """

    def __init__(self, module, date=None):
        self.date = datetime.datetime.utcnow() if date is None else date
        self.datetime_patcher = mock.patch.object(
            module, 'datetime',
            mock.Mock(wraps=datetime.datetime)
        )

    def __enter__(self, *args, **kwargs):
        # Named 'patched' to avoid shadowing the ``mock`` module.
        patched = self.datetime_patcher.start()
        patched.utcnow.return_value = self.date

    def __exit__(self, *args, **kwargs):
        self.datetime_patcher.stop()
| |
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_http_methods
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from django.contrib import messages
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.views.generic.list import ListView
from userena.contrib.umessages.models import Message, MessageRecipient, MessageContact
from userena.contrib.umessages.forms import ComposeForm
from userena.utils import get_datetime_now, get_user_model
from userena import settings as userena_settings
class MessageListView(ListView):
    """
    Returns the message list for this user. This is a list contacts
    which at the top has the user that the last conversation was with. This is
    an imitation of the iPhone SMS functionality.
    """
    page = 1
    paginate_by = 50
    template_name = 'umessages/message_list.html'
    extra_context = {}
    context_object_name = 'message_list'

    def get_context_data(self, **kwargs):
        # Merge any caller-supplied extra context on top of the defaults.
        ctx = super(MessageListView, self).get_context_data(**kwargs)
        ctx.update(self.extra_context)
        return ctx

    def get_queryset(self):
        return MessageContact.objects.get_contacts_for(self.request.user)
class MessageDetailListView(MessageListView):
    """
    Returns a conversation between two users
    """
    template_name = 'umessages/message_detail.html'

    def get_context_data(self, **kwargs):
        ctx = super(MessageDetailListView, self).get_context_data(**kwargs)
        ctx['recipient'] = self.recipient
        return ctx

    def get_queryset(self):
        # Resolve the other party first; unknown usernames 404.
        self.recipient = get_object_or_404(
            get_user_model(), username__iexact=self.kwargs['username'])
        conversation = Message.objects.get_conversation_between(
            self.request.user, self.recipient)
        self._update_unread_messages(conversation)
        return conversation

    def _update_unread_messages(self, queryset):
        # Everything the requesting user hasn't read yet is marked read now.
        unread_list = MessageRecipient.objects.filter(
            message__in=[m.pk for m in queryset],
            user=self.request.user,
            read_at__isnull=True)
        unread_list.update(read_at=get_datetime_now())
@login_required
def message_compose(request, recipients=None, compose_form=ComposeForm,
                    success_url=None, template_name="umessages/message_form.html",
                    recipient_filter=None, extra_context=None):
    """
    Compose a new message

    :param recipients:
        String containing the usernames to whom the message is send to. Can be
        multiple username by seperating them with a ``+`` sign.

    :param compose_form:
        The form that is used for getting neccesary information. Defaults to
        :class:`ComposeForm`.

    :param success_url:
        String containing the named url which to redirect to after successfull
        sending a message. Defaults to ``userena_umessages_list`` if there are
        multiple recipients. If there is only one recipient, will redirect to
        ``userena_umessages_detail`` page, showing the conversation.

    :param template_name:
        String containing the name of the template that is used.

    :param recipient_filter:
        A list of :class:`User` that don"t want to receive any messages.

    :param extra_context:
        Dictionary with extra variables supplied to the template.

    **Context**

    ``form``
        The form that is used.
    """
    initial_data = dict()

    if recipients:
        username_list = [r.strip() for r in recipients.split("+")]
        recipients = [u for u in get_user_model().objects.filter(username__in=username_list)]
        initial_data["to"] = recipients

    form = compose_form(initial=initial_data)
    if request.method == "POST":
        form = compose_form(request.POST)
        if form.is_valid():
            form.save(request.user)
            recipients = form.cleaned_data['to']

            if userena_settings.USERENA_USE_MESSAGES:
                messages.success(request, _('Message is sent.'),
                                 fail_silently=True)

            # BUGFIX: ``next`` was previously fetched twice — once with a
            # hard-coded "next" and once with REDIRECT_FIELD_NAME (which is
            # "next"), the second shadowing the first. A single lookup with
            # REDIRECT_FIELD_NAME is equivalent and unambiguous.
            requested_redirect = request.REQUEST.get(REDIRECT_FIELD_NAME,
                                                     False)

            # Redirect mechanism
            redirect_to = reverse('userena_umessages_list')
            if requested_redirect:
                redirect_to = requested_redirect
            elif success_url:
                redirect_to = success_url
            elif len(recipients) == 1:
                redirect_to = reverse('userena_umessages_detail',
                                      kwargs={'username': recipients[0].username})
            return redirect(redirect_to)

    if not extra_context:
        extra_context = dict()
    extra_context["form"] = form
    extra_context["recipients"] = recipients
    return render(request, template_name, extra_context)
@login_required
@require_http_methods(["POST"])
def message_remove(request, undo=False):
    """
    A ``POST`` to remove messages.

    :param undo:
        A Boolean that if ``True`` unremoves messages.

    POST can have the following keys:

    ``message_pks``
        List of message id's that should be deleted.

    ``next``
        String containing the URI which to redirect to after the keys are
        removed. Redirect defaults to the inbox view.

    The ``next`` value can also be supplied in the URI with ``?next=<value>``.
    """
    message_pks = request.POST.getlist('message_pks')
    redirect_to = request.REQUEST.get('next', False)

    if message_pks:
        # Keep only the pks that parse as integers.
        valid_pks = set()
        for raw_pk in message_pks:
            try:
                valid_pks.add(int(raw_pk))
            except (TypeError, ValueError):
                pass

        # Delete (or restore) the messages that belong to the user.
        timestamp = get_datetime_now()
        changed = set()
        for pk in valid_pks:
            message = get_object_or_404(Message, pk=pk)

            # The sender "deletes" by stamping sender_deleted_at.
            if message.sender == request.user:
                message.sender_deleted_at = None if undo else timestamp
                message.save()
                changed.add(message.pk)

            # Each recipient keeps an independent deletion stamp.
            if request.user in message.recipients.all():
                mr = message.messagerecipient_set.get(user=request.user,
                                                      message=message)
                mr.deleted_at = None if undo else timestamp
                mr.save()
                changed.add(message.pk)

        # Flash a (pluralized) status message when anything changed.
        if changed and userena_settings.USERENA_USE_MESSAGES:
            if undo:
                notification = ungettext('Message is succesfully restored.',
                                         'Messages are succesfully restored.',
                                         len(changed))
            else:
                notification = ungettext('Message is successfully removed.',
                                         'Messages are successfully removed.',
                                         len(changed))
            messages.success(request, notification, fail_silently=True)

    if redirect_to:
        return redirect(redirect_to)
    return redirect(reverse('userena_umessages_list'))
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import mock
import st2tests.config as tests_config
tests_config.parse_args()
from unittest2 import TestCase
from st2actions.container.service import RunnerContainerService
from st2actions.runners import localrunner
from st2common.constants import action as action_constants
from st2tests.fixturesloader import FixturesLoader
from st2tests.fixturesloader import get_fixtures_base_path
from st2common.util.api import get_full_public_api_url
from st2common.constants.runners import LOCAL_RUNNER_DEFAULT_ACTION_TIMEOUT
class LocalShellCommandRunnerTestCase(TestCase):
    """Tests for the local shell runner when executing inline shell commands.

    Each test builds a runner via ``_get_runner`` against a fixture action,
    runs it, and asserts on the resulting status / captured stdout.
    """
    fixtures_loader = FixturesLoader()

    def test_shell_command_action_basic(self):
        """A trivial ``echo`` command succeeds and its stdout is captured."""
        models = self.fixtures_loader.load_models(
            fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
        action_db = models['actions']['local.yaml']
        runner = self._get_runner(action_db, cmd='echo 10')
        runner.pre_run()
        status, result, _ = runner.run({})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        # NOTE: the runner JSON-deserializes stdout where possible, so the
        # value is the integer 10, not the string '10'.
        self.assertEqual(result['stdout'], 10)

    def test_shell_script_action(self):
        """A script entry point runs and produces the requested output size."""
        models = self.fixtures_loader.load_models(
            fixtures_pack='localrunner_pack', fixtures_dict={'actions': ['text_gen.yml']})
        action_db = models['actions']['text_gen.yml']
        entry_point = self.fixtures_loader.get_fixture_file_path_abs(
            'localrunner_pack', 'actions', 'text_gen.py')
        runner = self._get_runner(action_db, entry_point=entry_point)
        runner.pre_run()
        status, result, _ = runner.run({'chars': 1000})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertEqual(len(result['stdout']), 1000)

    def test_timeout(self):
        """A command exceeding the runner timeout ends in TIMED_OUT status."""
        models = self.fixtures_loader.load_models(
            fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
        action_db = models['actions']['local.yaml']
        # smaller timeout == faster tests.
        runner = self._get_runner(action_db, cmd='sleep 10', timeout=0.01)
        runner.pre_run()
        status, result, _ = runner.run({})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_TIMED_OUT)

    def test_large_stdout(self):
        """Large (1 MB) stdout is captured in full without truncation."""
        models = self.fixtures_loader.load_models(
            fixtures_pack='localrunner_pack', fixtures_dict={'actions': ['text_gen.yml']})
        action_db = models['actions']['text_gen.yml']
        entry_point = self.fixtures_loader.get_fixture_file_path_abs(
            'localrunner_pack', 'actions', 'text_gen.py')
        runner = self._get_runner(action_db, entry_point=entry_point)
        runner.pre_run()
        char_count = 10 ** 6  # Note 10^7 succeeds but ends up being slow.
        status, result, _ = runner.run({'chars': char_count})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertEqual(len(result['stdout']), char_count)

    def test_common_st2_env_vars_are_available_to_the_action(self):
        """ST2_ACTION_API_URL and ST2_ACTION_AUTH_TOKEN are exported to actions."""
        models = self.fixtures_loader.load_models(
            fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
        action_db = models['actions']['local.yaml']
        runner = self._get_runner(action_db, cmd='echo $ST2_ACTION_API_URL')
        runner.pre_run()
        status, result, _ = runner.run({})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertEqual(result['stdout'].strip(), get_full_public_api_url())
        runner = self._get_runner(action_db, cmd='echo $ST2_ACTION_AUTH_TOKEN')
        runner.pre_run()
        status, result, _ = runner.run({})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertEqual(result['stdout'].strip(), 'mock-token')

    def test_sudo_and_env_variable_preservation(self):
        """Environment variables survive a sudo invocation.

        Note: This test will fail if the SETENV option is not present in the
        sudoers file.
        """
        models = self.fixtures_loader.load_models(
            fixtures_pack='generic', fixtures_dict={'actions': ['local.yaml']})
        action_db = models['actions']['local.yaml']
        cmd = 'echo `whoami` ; echo ${VAR1}'
        env = {'VAR1': 'poniesponies'}
        runner = self._get_runner(action_db, cmd=cmd, sudo=True, env=env)
        runner.pre_run()
        status, result, _ = runner.run({})
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertEqual(result['stdout'].strip(), 'root\nponiesponies')

    @staticmethod
    def _get_runner(action_db,
                    entry_point=None,
                    cmd=None,
                    on_behalf_user=None,
                    user=None,
                    kwarg_op=localrunner.DEFAULT_KWARG_OP,
                    timeout=LOCAL_RUNNER_DEFAULT_ACTION_TIMEOUT,
                    sudo=False,
                    env=None):
        """Build a fully-populated LocalShellRunner for the given action.

        A mocked auth token ('mock-token') is attached so tests can assert
        on the ST2_ACTION_AUTH_TOKEN environment variable.

        NOTE(review): ``on_behalf_user`` is currently unused; the ``user``
        argument is what gets mapped to RUNNER_ON_BEHALF_USER below. The
        parameter is kept for signature compatibility — confirm intent.
        """
        runner = localrunner.LocalShellRunner(uuid.uuid4().hex)
        runner.container_service = RunnerContainerService()
        runner.action = action_db
        runner.action_name = action_db.name
        runner.liveaction_id = uuid.uuid4().hex
        runner.entry_point = entry_point
        runner.runner_parameters = {localrunner.RUNNER_COMMAND: cmd,
                                    localrunner.RUNNER_SUDO: sudo,
                                    localrunner.RUNNER_ENV: env,
                                    localrunner.RUNNER_ON_BEHALF_USER: user,
                                    localrunner.RUNNER_KWARG_OP: kwarg_op,
                                    localrunner.RUNNER_TIMEOUT: timeout}
        runner.context = dict()
        runner.callback = dict()
        runner.libs_dir_path = None
        runner.auth_token = mock.Mock()
        runner.auth_token.token = 'mock-token'
        return runner
class LocalShellScriptRunner(TestCase):
    """Tests for the local shell runner when executing script entry points."""
    fixtures_loader = FixturesLoader()

    def test_script_with_parameters_parameter_serialization(self):
        """Action parameters of every JSON type are serialized into env vars.

        Runs the fixture script three times: with all parameter types set,
        with a false boolean, and with empty/None values, asserting on the
        PARAM_* lines the script echoes back.
        """
        models = self.fixtures_loader.load_models(
            fixtures_pack='generic', fixtures_dict={'actions': ['local_script_with_params.yaml']})
        action_db = models['actions']['local_script_with_params.yaml']
        entry_point = os.path.join(get_fixtures_base_path(),
                                   'generic/actions/local_script_with_params.sh')

        # 1. All supported parameter types populated.
        action_parameters = {
            'param_string': 'test string',
            'param_integer': 1,
            'param_float': 2.55,
            'param_boolean': True,
            'param_list': ['a', 'b', 'c'],
            'param_object': {'foo': 'bar'}
        }
        runner = self._get_runner(action_db=action_db, entry_point=entry_point)
        runner.pre_run()
        status, result, _ = runner.run(action_parameters=action_parameters)
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertIn('PARAM_STRING=test string', result['stdout'])
        self.assertIn('PARAM_INTEGER=1', result['stdout'])
        self.assertIn('PARAM_FLOAT=2.55', result['stdout'])
        # Booleans serialize to 1 / 0, not True / False.
        self.assertIn('PARAM_BOOLEAN=1', result['stdout'])
        self.assertIn('PARAM_LIST=a,b,c', result['stdout'])
        self.assertIn('PARAM_OBJECT={"foo": "bar"}', result['stdout'])

        # 2. False boolean serializes to 0.
        action_parameters = {
            'param_string': 'test string',
            'param_integer': 1,
            'param_float': 2.55,
            'param_boolean': False,
            'param_list': ['a', 'b', 'c'],
            'param_object': {'foo': 'bar'}
        }
        runner = self._get_runner(action_db=action_db, entry_point=entry_point)
        runner.pre_run()
        status, result, _ = runner.run(action_parameters=action_parameters)
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertIn('PARAM_BOOLEAN=0', result['stdout'])

        # 3. Empty string and None values serialize to empty env values.
        action_parameters = {
            'param_string': '',
            'param_integer': None,
            'param_float': None,
        }
        runner = self._get_runner(action_db=action_db, entry_point=entry_point)
        runner.pre_run()
        status, result, _ = runner.run(action_parameters=action_parameters)
        runner.post_run(status, result)
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertIn('PARAM_STRING=\n', result['stdout'])
        self.assertIn('PARAM_INTEGER=\n', result['stdout'])
        self.assertIn('PARAM_FLOAT=\n', result['stdout'])

    def _get_runner(self, action_db, entry_point):
        """Build a LocalShellRunner for a script action with empty runner params."""
        runner = localrunner.LocalShellRunner(uuid.uuid4().hex)
        runner.container_service = RunnerContainerService()
        runner.action = action_db
        runner.action_name = action_db.name
        runner.liveaction_id = uuid.uuid4().hex
        runner.entry_point = entry_point
        runner.runner_parameters = {}
        runner.context = dict()
        runner.callback = dict()
        runner.libs_dir_path = None
        runner.auth_token = mock.Mock()
        runner.auth_token.token = 'mock-token'
        return runner