from django.views.generic import CreateView, TemplateView
from django.shortcuts import redirect
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.utils.html import strip_tags
from django.contrib.gis.geos import GEOSGeometry
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from braces.views import LoginRequiredMixin
from geokey.core.decorators import (
handle_exceptions_for_ajax, handle_exceptions_for_admin
)
from geokey.core.exceptions import Unauthenticated
from geokey.users.serializers import UserSerializer
from geokey.users.models import User
from geokey.categories.models import Category
from geokey.contributions.models import Comment, MediaFile
from .base import STATUS
from .models import Project, Admins
from .forms import ProjectCreateForm
from .serializers import ProjectSerializer
# ############################################################################
#
# Administration views
#
# ############################################################################
class ProjectCreate(LoginRequiredMixin, CreateView):
"""
Displays the create project page
`/admin/projects/new`
"""
form_class = ProjectCreateForm
template_name = 'projects/project_create.html'
def form_valid(self, form):
"""
Creates the project and redirects to the project overview page
Parameters
----------
form : geokey.projects.forms.ProjectCreateForm
Represents the user input
"""
data = form.cleaned_data
project = Project.create(
strip_tags(data.get('name')),
strip_tags(data.get('description')),
data.get('isprivate'),
data.get('everyone_contributes'),
self.request.user
)
messages.success(self.request, "The project has been created.")
return redirect('admin:project_overview', project_id=project.id)
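# Example URL wiring (sketch only; the project's real urls.py is not shown
# here, so the exact patterns and namespace are assumptions). The
# `admin:project_overview` name used in form_valid suggests entries roughly
# like:
#
#     url(r'^admin/projects/new/$',
#         ProjectCreate.as_view(), name='project_create'),
#     url(r'^admin/projects/(?P<project_id>[0-9]+)/$',
#         ProjectOverview.as_view(), name='project_overview'),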
class ProjectsInvolved(LoginRequiredMixin, TemplateView):
"""
Displays a list of all projects the user is involved in
"""
template_name = 'projects/projects_involved.html'
def get_context_data(self):
"""
Returns the context to render the view. Overwrites the method to add
the list of projects to the context.
Returns
-------
dict
context
"""
projects = Project.objects.get_list(self.request.user).exclude(
admins=self.request.user)
project_list = []
for project in projects:
project_list.append({
'name': project.name,
'role': project.get_role(self.request.user),
'contributions': project.observations.filter(
creator=self.request.user).count(),
})
return {
'projects': project_list
}
class ProjectOverview(LoginRequiredMixin, TemplateView):
"""
Displays the project overview page
`/admin/projects/:project_id`
"""
model = Project
template_name = 'projects/project_overview.html'
@handle_exceptions_for_admin
def get_context_data(self, project_id):
"""
Returns the context to render the view. Overwrites the method to add
the project, number of contributions and number of user contributions
to the context.
Parameters
----------
project_id : int
identifies the project in the database
Returns
-------
dict
context
"""
user = self.request.user
project = Project.objects.as_admin(user, project_id)
contributions = project.observations.all()
comments = Comment.objects.filter(commentto=contributions).count()
files = MediaFile.objects.filter(contribution=contributions).count()
return {
'project': project,
'allcontributions': contributions.count(),
'contributions': contributions.filter(
creator=self.request.user).count(),
'comments': comments,
'files': files
}
class ProjectExtend(LoginRequiredMixin, TemplateView):
"""
Displays the page to edit the geographic extent of the project
"""
template_name = 'projects/project_extend.html'
@handle_exceptions_for_admin
def get_context_data(self, project_id):
"""
Returns the context to render the view. Overwrites the method to add
the project to the context.
Parameters
----------
project_id : int
identifies the project in the database
Returns
-------
dict
context
"""
project = Project.objects.as_admin(self.request.user, project_id)
context = super(ProjectExtend, self).get_context_data()
context['project'] = project
return context
def post(self, request, project_id):
"""
Adds or updates the geographic extent of the project.
Parameters
----------
request : django.http.HttpRequest
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
django.http.HttpResponse
Rendered template
"""
data = request.POST
context = self.get_context_data(project_id)
project = context.pop('project', None)
geometry = data.get('geometry')
if project is not None:
if geometry is not None and len(geometry) > 0:
project.geographic_extend = GEOSGeometry(data.get('geometry'))
else:
project.geographic_extend = None
project.save()
messages.success(
self.request,
'The geographic extent has been updated successfully.'
)
context['project'] = project
return self.render_to_response(context)
class ProjectSettings(LoginRequiredMixin, TemplateView):
"""
Displays the project settings page
`/admin/projects/:project_id/settings/`
"""
model = Project
template_name = 'projects/project_settings.html'
@handle_exceptions_for_admin
def get_context_data(self, project_id):
"""
Returns the context to render the view. Overwrites the method to add
the project and status types to the context.
Parameters
----------
project_id : int
identifies the project in the database
Returns
-------
dict
context
"""
project = Project.objects.as_admin(self.request.user, project_id)
return {
'project': project,
'status_types': STATUS
}
def post(self, request, project_id):
"""
Updates the project settings
Parameters
----------
request : django.http.HttpRequest
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
django.http.HttpResponse
Rendered template
"""
context = self.get_context_data(project_id)
project = context.pop('project')
if project is not None:
data = request.POST
project.name = strip_tags(data.get('name'))
project.description = strip_tags(data.get('description'))
project.everyone_contributes = data.get('everyone_contributes')
project.save()
messages.success(self.request, "The project has been updated.")
context['project'] = project
return self.render_to_response(context)
class ProjectDelete(LoginRequiredMixin, TemplateView):
"""
Deletes a project
"""
template_name = 'base.html'
@handle_exceptions_for_admin
def get_context_data(self, project_id, **kwargs):
"""
Returns the context to render the view. Overwrites the method to add
the project and status types to the context.
Parameters
----------
project_id : int
identifies the project in the database
Returns
-------
dict
context
"""
project = Project.objects.as_admin(self.request.user, project_id)
return super(ProjectDelete, self).get_context_data(
project=project, **kwargs)
def get(self, request, project_id):
"""
Deletes the project
Parameters
----------
request : django.http.HttpRequest
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
django.http.HttpResponseRedirect
redirecting to the dashboard
django.http.HttpResponse
If user is not administrator of the project, the error message is
rendered.
"""
context = self.get_context_data(project_id)
project = context.pop('project', None)
if project is not None:
project.delete()
messages.success(self.request, "The project has been deleted.")
return redirect('admin:dashboard')
return self.render_to_response(context)
# ############################################################################
#
# AJAX API views
#
# ############################################################################
class ProjectUpdate(APIView):
"""
AJAX Endpoint for a project update.
/ajax/projects/:project_id
"""
@handle_exceptions_for_ajax
def put(self, request, project_id):
"""
Updates a project.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Response containing the serialised project or an error message
"""
project = Project.objects.as_admin(request.user, project_id)
serializer = ProjectSerializer(
project, data=request.DATA, partial=True,
fields=(
'id', 'name', 'description', 'status', 'isprivate',
'everyone_contributes'
)
)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
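# Example request (sketch, not part of the module): a client would PUT a
# partial JSON body such as
#
#     {"name": "Updated name", "isprivate": false}
#
# to /ajax/projects/<project_id> and get back the serialised fields listed
# above, or a 400 response carrying serializer.errors if validation fails.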
class ProjectAdmins(APIView):
"""
AJAX Endpoint for project administrators.
/ajax/projects/:project_id/admins
"""
@handle_exceptions_for_ajax
def post(self, request, project_id):
"""
Adds a user to the admin group.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Response containing the serialised list of admins or an error
message.
"""
project = Project.objects.as_admin(request.user, project_id)
user_id = request.DATA.get('userId')
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
return Response(
'The user you are trying to add to the user group does ' +
'not exist',
status=status.HTTP_400_BAD_REQUEST
)
Admins.objects.create(project=project, user=user)
serializer = UserSerializer(project.admins.all(), many=True)
return Response(
{'users': serializer.data}, status=status.HTTP_201_CREATED)
class ProjectAdminsUser(APIView):
"""
AJAX Endpoint for a single project administrator.
/ajax/projects/:project_id/admins/:user_id
"""
@handle_exceptions_for_ajax
def delete(self, request, project_id, user_id):
"""
Removes a user from the admin group.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Empty response if successful or response containing an error
message.
"""
project = Project.objects.as_admin(request.user, project_id)
user = project.admins.get(pk=user_id)
Admins.objects.get(project=project, user=user).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class CategoriesReorderView(APIView):
"""
AJAX Endpoint to re-order categories in a project.
/ajax/projects/:project_id/categories/re-order
"""
@handle_exceptions_for_ajax
def post(self, request, project_id):
"""
Reorders the categories in the project.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Contains the serialised project or an error message
"""
project = Project.objects.as_admin(request.user, project_id)
try:
project.re_order_categories(request.DATA.get('order'))
serializer = ProjectSerializer(
project,
fields=(
'id', 'name', 'description', 'status', 'isprivate',
'everyone_contributes'
)
)
return Response(serializer.data)
except Category.DoesNotExist:
return Response(
{'error': 'One or more category IDs were not found.'},
status=status.HTTP_400_BAD_REQUEST
)
# ############################################################################
#
# Public API views
#
# ############################################################################
class Projects(APIView):
"""
API Endpoint for project list in the public API.
/api/projects/
"""
@handle_exceptions_for_ajax
def get(self, request):
"""
Returns a list of all projects accessible to the user.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
Returns
-------
rest_framework.response.Response
Contains serialised list of projects
"""
projects = Project.objects.get_list(
request.user).filter(status='active')
serializer = ProjectSerializer(
projects, many=True, context={'user': request.user},
fields=('id', 'name', 'description', 'user_info')
)
return Response(serializer.data)
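# Example response (sketch): GET /api/projects/ returns a JSON list whose
# entries carry only the fields requested above, e.g.
#
#     [{"id": 1, "name": "Example", "description": "...", "user_info": ...}]
#
# The exact shape of "user_info" is defined by ProjectSerializer, which is
# not shown here.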
class SingleProject(APIView):
"""
API Endpoint for single project in the public API.
/api/projects/:project_id/
"""
@handle_exceptions_for_ajax
def get(self, request, project_id):
"""
Returns a single project.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Contains the serialised project
Raises
------
PermissionDenied
if the project is inactive, is handled in the
handle_exceptions_for_ajax decorator
"""
project = Project.objects.get_single(request.user, project_id)
if project.status == 'active':
serializer = ProjectSerializer(
project, context={'user': request.user}
)
return Response(serializer.data)
raise PermissionDenied('The project is inactive and therefore '
'not accessible through the public API.')
class ProjectContactAdmins(APIView):
"""
API Endpoint to contact the admins of a project in the public API.
/api/projects/:project_id/get-in-touch/
"""
@handle_exceptions_for_ajax
def post(self, request, project_id):
"""
Sends an email to all admins that are contact persons for the given
project.
Parameters
----------
request : rest_framework.request.Request
Object representing the request.
project_id : int
identifies the project in the database
Returns
-------
rest_framework.response.Response
Empty response indicating success
Raises
------
Unauthenticated
if the user is anonymous; is handled in the
handle_exceptions_for_ajax decorator
"""
user = request.user
if user.is_anonymous():
raise Unauthenticated('Unauthenticated users can not contact the '
'administrators of the project.')
email_text = self.request.DATA.get('email_text')
project = Project.objects.get_single(request.user, project_id)
project.contact_admins(user, email_text)
return Response(status=status.HTTP_204_NO_CONTENT)
import pytz
from django.contrib.auth.models import User
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.http import Http404
from django.utils.datastructures import SortedDict
from django.utils.html import conditional_escape
from django.utils.translation import ugettext_lazy as _
from djblets.datagrid.grids import Column, DateTimeColumn, \
DateTimeSinceColumn, DataGrid
from djblets.util.templatetags.djblets_utils import ageid
from reviewboard.accounts.models import Profile
from reviewboard.reviews.models import Group, ReviewRequest
from reviewboard.reviews.templatetags.reviewtags import render_star
from reviewboard.site.urlresolvers import local_site_reverse
class StarColumn(Column):
"""
A column used to indicate whether the object is "starred" or watched.
The star is interactive, allowing the user to star or unstar the object.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.image_url = static("rb/images/star_on.png")
self.image_width = 16
self.image_height = 15
self.image_alt = _("Starred")
self.detailed_label = _("Starred")
self.shrink = True
self.all_starred = {}
def render_data(self, obj):
obj.starred = self.all_starred.get(obj.id, False)
return render_star(self.datagrid.request.user, obj)
class ReviewGroupStarColumn(StarColumn):
"""
A specialization of StarColumn that augments the SQL query to include
the starred calculation for review groups.
"""
def augment_queryset(self, queryset):
user = self.datagrid.request.user
if user.is_anonymous():
return queryset
try:
profile = user.get_profile()
except Profile.DoesNotExist:
return queryset
pks = profile.starred_groups.filter(
pk__in=self.datagrid.id_list).values_list('pk', flat=True)
self.all_starred = {}
for pk in pks:
self.all_starred[pk] = True
return queryset
class ReviewRequestStarColumn(StarColumn):
"""
A specialization of StarColumn that augments the SQL query to include
the starred calculation for review requests.
"""
def augment_queryset(self, queryset):
user = self.datagrid.request.user
if user.is_anonymous():
return queryset
try:
profile = user.get_profile()
except Profile.DoesNotExist:
return queryset
pks = profile.starred_review_requests.filter(
pk__in=self.datagrid.id_list).values_list('pk', flat=True)
self.all_starred = {}
for pk in pks:
self.all_starred[pk] = True
return queryset
class ShipItColumn(Column):
"""
A column used to indicate whether someone has marked this review request
as "Ship It!"
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.image_url = static("rb/images/shipit.png")
self.image_width = 16
self.image_height = 16
self.image_alt = _("Ship It!")
self.detailed_label = _("Ship It!")
self.db_field = "shipit_count"
self.sortable = True
self.shrink = True
def render_data(self, review_request):
if review_request.shipit_count > 0:
return '<span class="shipit-count">' \
'<img src="%s" width="9" height="8" alt="%s" ' \
'title="%s" /> %s' \
'</span>' % \
(static("rb/images/shipit_checkmark.png"),
self.image_alt, self.image_alt, review_request.shipit_count)
return ""
class MyCommentsColumn(Column):
"""
A column meant to represent the status of the logged-in user's
comments on the review.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.image_url = static("rb/images/comment-draft-small.png")
self.image_width = 16
self.image_height = 16
self.image_alt = _("My Comments")
self.detailed_label = _("My Comments")
self.shrink = True
# XXX It'd be nice to be able to sort on this, but datagrids currently
# can only sort based on stored (in the DB) values, not computed values.
def augment_queryset(self, queryset):
user = self.datagrid.request.user
if user.is_anonymous():
return queryset
query_dict = {
'user_id': str(user.id),
}
return queryset.extra(select={
'mycomments_my_reviews': """
SELECT COUNT(1)
FROM reviews_review
WHERE reviews_review.user_id = %(user_id)s
AND reviews_review.review_request_id =
reviews_reviewrequest.id
""" % query_dict,
'mycomments_private_reviews': """
SELECT COUNT(1)
FROM reviews_review
WHERE reviews_review.user_id = %(user_id)s
AND reviews_review.review_request_id =
reviews_reviewrequest.id
AND NOT reviews_review.public
""" % query_dict,
'mycomments_shipit_reviews': """
SELECT COUNT(1)
FROM reviews_review
WHERE reviews_review.user_id = %(user_id)s
AND reviews_review.review_request_id =
reviews_reviewrequest.id
AND reviews_review.ship_it
""" % query_dict,
})
def render_data(self, review_request):
user = self.datagrid.request.user
if user.is_anonymous() or review_request.mycomments_my_reviews == 0:
return ""
image_url = None
image_alt = None
# Priority is ranked in the following order:
#
# 1) Non-public (draft) reviews
# 2) Public reviews marked "Ship It"
# 3) Public reviews not marked "Ship It"
if review_request.mycomments_private_reviews > 0:
image_url = self.image_url
image_alt = _("Comments drafted")
else:
if review_request.mycomments_shipit_reviews > 0:
image_url = static("rb/images/comment-shipit-small.png")
image_alt = _("Comments published. Ship it!")
else:
image_url = static("rb/images/comment-small.png")
image_alt = _("Comments published")
return '<img src="%s" width="%s" height="%s" alt="%s" ' \
'title="%s" />' % \
(image_url, self.image_width, self.image_height,
image_alt, image_alt)
class ToMeColumn(Column):
"""
A column used to indicate whether the current logged-in user is targeted
by the review request.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.label = u"\u00BB" # this is »
self.detailed_label = u"\u00BB To Me"
self.shrink = True
def render_data(self, review_request):
user = self.datagrid.request.user
if (user.is_authenticated() and
review_request.target_people.filter(pk=user.pk).exists()):
return '<div title="%s"><b>»</b></div>' % \
(self.detailed_label)
return ""
class NewUpdatesColumn(Column):
"""
A column used to indicate whether the review request has any new updates
since the user last saw it.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.image_url = static("rb/images/convo.png")
self.image_width = 18
self.image_height = 16
self.image_alt = "New Updates"
self.detailed_label = "New Updates"
self.shrink = True
def render_data(self, review_request):
if review_request.new_review_count > 0:
return '<img src="%s" width="%s" height="%s" alt="%s" ' \
'title="%s" />' % \
(self.image_url, self.image_width, self.image_height,
self.image_alt, self.image_alt)
return ""
class SummaryColumn(Column):
"""
A column used to display a summary of the review request, along with
labels indicating if it's a draft or if it's submitted.
"""
def __init__(self, label=_("Summary"), *args, **kwargs):
Column.__init__(self, label=label, *args, **kwargs)
self.sortable = True
def augment_queryset(self, queryset):
user = self.datagrid.request.user
if user.is_anonymous():
return queryset
return queryset.extra(select={
'draft_summary': """
SELECT reviews_reviewrequestdraft.summary
FROM reviews_reviewrequestdraft
WHERE reviews_reviewrequestdraft.review_request_id =
reviews_reviewrequest.id
"""
})
def render_data(self, review_request):
summary = conditional_escape(review_request.summary)
labels = {}
if not summary:
summary = ' <i>%s</i>' % _('No Summary')
if review_request.submitter_id == self.datagrid.request.user.id:
if review_request.draft_summary is not None:
summary = conditional_escape(review_request.draft_summary)
labels.update({_('Draft'): 'label-draft'})
elif (not review_request.public and
review_request.status == ReviewRequest.PENDING_REVIEW):
labels.update({_('Draft'): 'label-draft'})
if review_request.status == ReviewRequest.SUBMITTED:
labels.update({_('Submitted'): 'label-submitted'})
elif review_request.status == ReviewRequest.DISCARDED:
labels.update({_('Discarded'): 'label-discarded'})
display_data = ''
for label in labels:
display_data += u'<span class="%s">[%s] </span>' % (
labels[label], label)
display_data += u'%s' % summary
return display_data
class SubmitterColumn(Column):
def __init__(self, *args, **kwargs):
Column.__init__(self, _("Submitter"), db_field="submitter__username",
shrink=True, sortable=True, link=True,
*args, **kwargs)
def augment_queryset(self, queryset):
return queryset.select_related('submitter')
class RepositoryColumn(Column):
def __init__(self, *args, **kwargs):
Column.__init__(self, _("Repository"), db_field="repository__name",
shrink=True, sortable=True, link=False,
css_class='repository-column',
*args, **kwargs)
def augment_queryset(self, queryset):
return queryset.select_related('repository')
class PendingCountColumn(Column):
"""
A column used to show the pending number of review requests for a
group or user.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
def render_data(self, obj):
return str(getattr(obj, self.field_name).filter(public=True,
status='P').count())
class PeopleColumn(Column):
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.label = _("People")
self.detailed_label = _("Target People")
self.sortable = False
self.shrink = False
def render_data(self, review_request):
people = review_request.target_people.all()
return reduce(lambda a, d: a + d.username + ' ', people, '')
class GroupsColumn(Column):
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.label = _("Groups")
self.detailed_label = _("Target Groups")
self.sortable = False
self.shrink = False
def render_data(self, review_request):
groups = review_request.target_groups.all()
return reduce(lambda a, d: a + d.name + ' ', groups, '')
class GroupMemberCountColumn(Column):
"""
A column used to show the number of users that registered for a group.
"""
def __init__(self, *args, **kwargs):
Column.__init__(self, *args, **kwargs)
self.link = True
self.link_func = self.link_to_object
def render_data(self, group):
return str(group.users.count())
def link_to_object(self, group, value):
return local_site_reverse('group_members',
request=self.datagrid.request,
args=[group.name])
class ReviewCountColumn(Column):
"""
A column showing the number of reviews for a review request.
"""
def __init__(self, label=_("Reviews"),
detailed_label=_("Number of Reviews"),
*args, **kwargs):
Column.__init__(self, label=label, detailed_label=detailed_label,
*args, **kwargs)
self.shrink = True
self.link = True
self.link_func = self.link_to_object
def render_data(self, review_request):
return str(review_request.publicreviewcount_count)
def augment_queryset(self, queryset):
return queryset.extra(select={
'publicreviewcount_count': """
SELECT COUNT(*)
FROM reviews_review
WHERE reviews_review.public
AND reviews_review.base_reply_to_id is NULL
AND reviews_review.review_request_id =
reviews_reviewrequest.id
"""
})
def link_to_object(self, review_request, value):
return "%s#last-review" % review_request.get_absolute_url()
class DiffUpdatedColumn(DateTimeColumn):
"""
A column indicating the date and time the diff was last updated.
"""
def __init__(self, *args, **kwargs):
DateTimeColumn.__init__(self, _("Diff Updated"),
db_field="diffset_history__last_diff_updated",
field_name='last_diff_updated', sortable=True, link=False,
*args, **kwargs)
def augment_queryset(self, queryset):
return queryset.select_related('diffset_history')
def render_data(self, obj):
if obj.diffset_history.last_diff_updated:
return DateTimeColumn.render_data(self, obj.diffset_history)
else:
return ""
class DiffUpdatedSinceColumn(DateTimeSinceColumn):
"""
A column indicating the elapsed time since the diff was last
updated.
"""
def __init__(self, *args, **kwargs):
DateTimeSinceColumn.__init__(self, _("Diff Updated"),
db_field="diffset_history__last_diff_updated",
field_name='last_diff_updated', sortable=True, link=False,
*args, **kwargs)
def augment_queryset(self, queryset):
return queryset.select_related('diffset_history')
def render_data(self, obj):
if obj.diffset_history.last_diff_updated:
return DateTimeSinceColumn.render_data(self, obj.diffset_history)
else:
return ""
class ReviewRequestDataGrid(DataGrid):
"""
A datagrid that displays a list of review requests.
This datagrid accepts the show_submitted parameter in the URL, allowing
submitted review requests to be filtered out or displayed.
"""
star = ReviewRequestStarColumn()
ship_it = ShipItColumn()
summary = SummaryColumn(expand=True, link=True, css_class="summary")
submitter = SubmitterColumn()
branch = Column(_("Branch"), db_field="branch",
shrink=True, sortable=True, link=False)
bugs_closed = Column(_("Bugs"), db_field="bugs_closed",
shrink=True, sortable=False, link=False)
repository = RepositoryColumn()
time_added = DateTimeColumn(_("Posted"),
detailed_label=_("Posted Time"),
format="F jS, Y, P", shrink=True,
css_class=lambda r: ageid(r.time_added))
last_updated = DateTimeColumn(_("Last Updated"),
format="F jS, Y, P", shrink=True,
db_field="last_updated",
field_name="last_updated",
css_class=lambda r: ageid(r.last_updated))
diff_updated = DiffUpdatedColumn(format="F jS, Y, P", shrink=True,
css_class=lambda r: ageid(r.diffset_history.last_diff_updated))
time_added_since = DateTimeSinceColumn(_("Posted"),
detailed_label=_("Posted Time (Relative)"),
field_name="time_added", shrink=True,
css_class=lambda r: ageid(r.time_added))
last_updated_since = DateTimeSinceColumn(_("Last Updated"),
detailed_label=_("Last Updated (Relative)"), shrink=True,
db_field="last_updated",
field_name="last_updated",
css_class=lambda r: ageid(r.last_updated))
diff_updated_since = DiffUpdatedSinceColumn(
detailed_label=_("Diff Updated (Relative)"), shrink=True,
css_class=lambda r: ageid(r.diffset_history.last_diff_updated))
review_count = ReviewCountColumn()
target_groups = GroupsColumn()
target_people = PeopleColumn()
to_me = ToMeColumn()
review_id = Column(_("Review ID"),
shrink=True, sortable=True, link=True)
def __init__(self, *args, **kwargs):
self.local_site = kwargs.pop('local_site', None)
if self.local_site:
review_id_field = 'local_id'
else:
review_id_field = 'pk'
self.review_id = Column(_("Review ID"),
field_name=review_id_field,
shrink=True, sortable=True, link=True)
DataGrid.__init__(self, *args, **kwargs)
self.listview_template = 'reviews/review_request_listview.html'
self.profile_sort_field = 'sort_review_request_columns'
self.profile_columns_field = 'review_request_columns'
self.show_submitted = True
self.submitter_url_name = "user"
self.default_sort = ["-last_updated"]
self.default_columns = [
"star", "summary", "submitter", "time_added", "last_updated_since"
]
# Add local timezone info to the columns
user = self.request.user
if user.is_authenticated():
self.timezone = pytz.timezone(user.get_profile().timezone)
self.time_added.timezone = self.timezone
self.last_updated.timezone = self.timezone
self.diff_updated.timezone = self.timezone
def load_extra_state(self, profile):
if profile:
self.show_submitted = profile.show_submitted
try:
self.show_submitted = \
int(self.request.GET.get('show_submitted',
self.show_submitted)) != 0
except ValueError:
# do nothing
pass
if self.show_submitted:
# There are only three states: Pending, Submitted and Discarded.
# We want the first two, but it's faster to just search for not
# discarded.
self.queryset = self.queryset.exclude(status='D')
else:
self.queryset = self.queryset.filter(status='P')
self.queryset = self.queryset.filter(local_site=self.local_site)
if profile and self.show_submitted != profile.show_submitted:
profile.show_submitted = self.show_submitted
return True
return False
def post_process_queryset(self, queryset):
q = queryset.with_counts(self.request.user)
return super(ReviewRequestDataGrid, self).post_process_queryset(q)
def link_to_object(self, obj, value):
if value and isinstance(value, User):
return local_site_reverse("user", request=self.request,
args=[value])
return obj.get_absolute_url()
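# Example usage (sketch; the actual dashboard/list views live elsewhere).
# Mirroring the DataGrid constructor used by SubmitterDataGrid below, a view
# could build this grid from any ReviewRequest queryset, e.g.
#
#     grid = ReviewRequestDataGrid(
#         request,
#         ReviewRequest.objects.to_user(request.user, request.user),
#         _("All Incoming Review Requests"))
#
# Rendering then goes through djblets' datagrid machinery using the
# listview_template set in __init__.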
class DashboardDataGrid(ReviewRequestDataGrid):
"""
A version of the ReviewRequestDataGrid that displays additional fields
useful in the dashboard. It also displays a different set of data
depending on the view that was passed.
"""
new_updates = NewUpdatesColumn()
my_comments = MyCommentsColumn()
def __init__(self, *args, **kwargs):
local_site = kwargs.pop('local_site', None)
ReviewRequestDataGrid.__init__(self, *args, **kwargs)
self.listview_template = 'datagrid/listview.html'
self.profile_sort_field = 'sort_dashboard_columns'
self.profile_columns_field = 'dashboard_columns'
self.default_view = "incoming"
self.show_submitted = False
self.default_sort = ["-last_updated"]
self.default_columns = [
"new_updates", "star", "summary", "submitter",
"time_added", "last_updated_since"
]
self.counts = {}
group = self.request.GET.get('group', None)
view = self.request.GET.get('view', None)
extra_query = []
if view:
extra_query.append("view=%s" % view)
if group:
extra_query.append("group=%s" % group)
self.extra_context['extra_query'] = "&".join(extra_query)
self.local_site = local_site
def load_extra_state(self, profile):
group = self.request.GET.get('group', '')
view = self.request.GET.get('view', self.default_view)
user = self.request.user
if view == 'outgoing':
self.queryset = ReviewRequest.objects.from_user(
user, user, local_site=self.local_site)
self.title = _(u"All Outgoing Review Requests")
elif view == 'mine':
self.queryset = ReviewRequest.objects.from_user(
user, user, None, local_site=self.local_site)
self.title = _(u"All My Review Requests")
elif view == 'to-me':
self.queryset = ReviewRequest.objects.to_user_directly(
user, user, local_site=self.local_site)
self.title = _(u"Incoming Review Requests to Me")
elif view == 'to-group':
if group != "":
# to-group is special because we want to make sure that the
# group exists and show a 404 if it doesn't. Otherwise, we'll
# show an empty datagrid with the name.
if not Group.objects.filter(name=group,
local_site=self.local_site).exists():
raise Http404
self.queryset = ReviewRequest.objects.to_group(
group, self.local_site, user)
self.title = _(u"Incoming Review Requests to %s") % group
else:
self.queryset = ReviewRequest.objects.to_user_groups(
user, user, local_site=self.local_site)
self.title = _(u"All Incoming Review Requests to My Groups")
elif view == 'starred':
profile = user.get_profile()
self.queryset = profile.starred_review_requests.public(
user, local_site=self.local_site)
self.title = _(u"Starred Review Requests")
elif view == 'incoming':
self.queryset = ReviewRequest.objects.to_user(
user, user, local_site=self.local_site)
self.title = _(u"All Incoming Review Requests")
else:
raise Http404
# Pre-load all querysets for the sidebar.
self.counts = get_sidebar_counts(user, self.local_site)
return False
class SubmitterDataGrid(DataGrid):
"""
A datagrid showing a list of submitters.
"""
username = Column(_("Username"), link=True, sortable=True)
fullname = Column(_("Full Name"), field_name="get_full_name",
link=True, expand=True)
pending_count = PendingCountColumn(_("Pending Reviews"),
field_name="directed_review_requests",
shrink=True)
def __init__(self, request,
queryset=User.objects.filter(is_active=True),
title=_("All submitters"),
local_site=None):
if local_site:
qs = queryset.filter(local_site=local_site)
else:
qs = queryset
DataGrid.__init__(self, request, qs, title)
self.default_sort = ["username"]
self.profile_sort_field = 'sort_submitter_columns'
self.profile_columns_field = 'submitter_columns'
self.default_columns = [
"username", "fullname", "pending_count"
]
def link_to_object(self, obj, value):
return local_site_reverse("user", request=self.request,
args=[obj.username])
class GroupDataGrid(DataGrid):
"""
A datagrid showing a list of review groups.
"""
star = ReviewGroupStarColumn()
name = Column(_("Group ID"), link=True, sortable=True)
displayname = Column(_("Group Name"), field_name="display_name",
link=True, expand=True)
pending_count = PendingCountColumn(_("Pending Reviews"),
field_name="review_requests",
link=True,
shrink=True)
member_count = GroupMemberCountColumn(_("Members"),
field_name="members",
shrink=True)
def __init__(self, request, title=_("All groups"), *args, **kwargs):
local_site = kwargs.pop('local_site', None)
queryset = Group.objects.accessible(request.user, local_site=local_site)
DataGrid.__init__(self, request, queryset=queryset, title=title,
*args, **kwargs)
self.profile_sort_field = 'sort_group_columns'
self.profile_columns_field = 'group_columns'
self.default_sort = ["name"]
self.default_columns = [
"star", "name", "displayname", "pending_count"
]
@staticmethod
def link_to_object(obj, value):
return obj.get_absolute_url()
class WatchedGroupDataGrid(GroupDataGrid):
"""
A special version of GroupDataGrid that shows a list of watched groups,
linking to a dashboard view of them. This is meant for display in the
dashboard.
"""
def __init__(self, request, title=_("Watched groups"), *args, **kwargs):
local_site = kwargs.pop('local_site', None)
GroupDataGrid.__init__(self, request, title=title, *args, **kwargs)
user = request.user
profile = user.get_profile()
self.queryset = profile.starred_groups.all()
self.queryset = self.queryset.filter(local_site=local_site)
# Pre-load all querysets for the sidebar.
self.counts = get_sidebar_counts(user, local_site)
def link_to_object(self, group, value):
return ".?view=to-group&group=%s" % group.name
def get_sidebar_counts(user, local_site):
"""Returns counts used for the Dashboard sidebar."""
profile = user.get_profile()
site_profile, is_new = user.get_profile().site_profiles.get_or_create(
local_site=local_site,
user=user,
profile=profile)
if is_new:
site_profile.save()
counts = {
'outgoing': site_profile.pending_outgoing_request_count,
'incoming': site_profile.total_incoming_request_count,
'to-me': site_profile.direct_incoming_request_count,
'starred': site_profile.starred_public_request_count,
'mine': site_profile.total_outgoing_request_count,
'groups': SortedDict(),
'starred_groups': SortedDict(),
}
for group in Group.objects.filter(
users=user, local_site=local_site).order_by('name'):
counts['groups'][group.name] = group.incoming_request_count
for group in Group.objects.filter(
starred_by=user, local_site=local_site).order_by('name'):
counts['starred_groups'][group.name] = group.incoming_request_count
return counts
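# Example (sketch): DashboardDataGrid stores this result on the grid
# (self.counts = get_sidebar_counts(user, self.local_site)), so a sidebar
# template can show counts['incoming'] next to "Incoming", counts['to-me']
# next to "To Me", and iterate counts['groups'].items() /
# counts['starred_groups'].items() for the per-group totals.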
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
class Bunch(dict):
""" Container object for datasets: dictionnary-like object that
exposes its keys as attributes.
"""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
class BunchConst(Bunch):
"""Class to prevent us from re-defining constants (DRY)"""
def __setattr__(self, attr, val):
if attr != '__dict__' and hasattr(self, attr):
raise AttributeError('Attribute "%s" already set' % attr)
super(BunchConst, self).__setattr__(attr, val)
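# Example (sketch): keys behave as attributes, and BunchConst refuses to
# overwrite a constant once it has been set.
#
#     b = Bunch(a=1)
#     b.a            # -> 1
#     c = BunchConst()
#     c.x = 1
#     c.x = 2        # raises AttributeError: Attribute "x" already set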
FIFF = BunchConst()
#
# FIFF version number in use
#
FIFF.FIFFC_MAJOR_VERSION = 1
FIFF.FIFFC_MINOR_VERSION = 3
FIFF.FIFFC_VERSION = FIFF.FIFFC_MAJOR_VERSION << 16 | FIFF.FIFFC_MINOR_VERSION
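# The packed version can be decomposed again with a shift and a mask, e.g.
#     FIFF.FIFFC_VERSION >> 16       # -> 1 (major)
#     FIFF.FIFFC_VERSION & 0xFFFF    # -> 3 (minor)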
#
# Blocks
#
FIFF.FIFFB_ROOT = 999
FIFF.FIFFB_MEAS = 100
FIFF.FIFFB_MEAS_INFO = 101
FIFF.FIFFB_RAW_DATA = 102
FIFF.FIFFB_PROCESSED_DATA = 103
FIFF.FIFFB_EVOKED = 104
FIFF.FIFFB_ASPECT = 105
FIFF.FIFFB_SUBJECT = 106
FIFF.FIFFB_ISOTRAK = 107
FIFF.FIFFB_HPI_MEAS = 108
FIFF.FIFFB_HPI_RESULT = 109
FIFF.FIFFB_HPI_COIL = 110
FIFF.FIFFB_PROJECT = 111
FIFF.FIFFB_CONTINUOUS_DATA = 112
FIFF.FIFFB_VOID = 114
FIFF.FIFFB_EVENTS = 115
FIFF.FIFFB_INDEX = 116
FIFF.FIFFB_DACQ_PARS = 117
FIFF.FIFFB_REF = 118
FIFF.FIFFB_SMSH_RAW_DATA = 119
FIFF.FIFFB_SMSH_ASPECT = 120
FIFF.FIFFB_HPI_SUBSYSTEM = 121
FIFF.FIFFB_SPHERE = 300 # Concentric sphere model related
FIFF.FIFFB_BEM = 310 # Boundary-element method
FIFF.FIFFB_BEM_SURF = 311 # Boundary-element method surfaces
FIFF.FIFFB_CONDUCTOR_MODEL = 312 # One conductor model definition
FIFF.FIFFB_PROJ = 313
FIFF.FIFFB_PROJ_ITEM = 314
FIFF.FIFFB_MRI = 200
FIFF.FIFFB_MRI_SET = 201
FIFF.FIFFB_MRI_SLICE = 202
FIFF.FIFFB_MRI_SCENERY = 203 # These are for writing unrelated 'slices'
FIFF.FIFFB_MRI_SCENE = 204 # Which are actually 3D scenes...
FIFF.FIFFB_MRI_SEG = 205 # MRI segmentation data
FIFF.FIFFB_MRI_SEG_REGION = 206 # One MRI segmentation region
FIFF.FIFFB_PROCESSING_HISTORY = 900
FIFF.FIFFB_PROCESSING_RECORD = 901
FIFF.FIFFB_DATA_CORRECTION = 500
FIFF.FIFFB_CHANNEL_DECOUPLER = 501
FIFF.FIFFB_SSS_INFO = 502
FIFF.FIFFB_SSS_CAL = 503
FIFF.FIFFB_SSS_ST_INFO = 504
FIFF.FIFFB_SSS_BASES = 505
FIFF.FIFFB_SMARTSHIELD = 510
#
# Of general interest
#
FIFF.FIFF_FILE_ID = 100
FIFF.FIFF_DIR_POINTER = 101
FIFF.FIFF_BLOCK_ID = 103
FIFF.FIFF_BLOCK_START = 104
FIFF.FIFF_BLOCK_END = 105
FIFF.FIFF_FREE_LIST = 106
FIFF.FIFF_FREE_BLOCK = 107
FIFF.FIFF_NOP = 108
FIFF.FIFF_PARENT_FILE_ID = 109
FIFF.FIFF_PARENT_BLOCK_ID = 110
FIFF.FIFF_BLOCK_NAME = 111
FIFF.FIFF_BLOCK_VERSION = 112
FIFF.FIFF_CREATOR = 113 # Program that created the file (string)
FIFF.FIFF_MODIFIER = 114 # Program that modified the file (string)
FIFF.FIFF_REF_ROLE = 115
FIFF.FIFF_REF_FILE_ID = 116
FIFF.FIFF_REF_FILE_NUM = 117
FIFF.FIFF_REF_FILE_NAME = 118
#
# Megacq saves the parameters in these tags
#
FIFF.FIFF_DACQ_PARS = 150
FIFF.FIFF_DACQ_STIM = 151
FIFF.FIFF_NCHAN = 200
FIFF.FIFF_SFREQ = 201
FIFF.FIFF_DATA_PACK = 202
FIFF.FIFF_CH_INFO = 203
FIFF.FIFF_MEAS_DATE = 204
FIFF.FIFF_SUBJECT = 205
FIFF.FIFF_COMMENT = 206
FIFF.FIFF_NAVE = 207
FIFF.FIFF_FIRST_SAMPLE = 208 # The first sample of an epoch
FIFF.FIFF_LAST_SAMPLE = 209 # The last sample of an epoch
FIFF.FIFF_ASPECT_KIND = 210
FIFF.FIFF_REF_EVENT = 211
FIFF.FIFF_EXPERIMENTER = 212
FIFF.FIFF_DIG_POINT = 213
FIFF.FIFF_CH_POS = 214
FIFF.FIFF_HPI_SLOPES = 215
FIFF.FIFF_HPI_NCOIL = 216
FIFF.FIFF_REQ_EVENT = 217
FIFF.FIFF_REQ_LIMIT = 218
FIFF.FIFF_LOWPASS = 219
FIFF.FIFF_BAD_CHS = 220
FIFF.FIFF_ARTEF_REMOVAL = 221
FIFF.FIFF_COORD_TRANS = 222
FIFF.FIFF_HIGHPASS = 223
FIFF.FIFF_CH_CALS = 22 # This will not occur in new files
FIFF.FIFF_HPI_BAD_CHS = 225 # List of channels considered to be bad in hpi
FIFF.FIFF_HPI_CORR_COEFF = 226 # Hpi curve fit correlations
FIFF.FIFF_EVENT_COMMENT = 227 # Comment about the events used in averaging
FIFF.FIFF_NO_SAMPLES = 228 # Number of samples in an epoch
FIFF.FIFF_FIRST_TIME = 229 # Time scale minimum
FIFF.FIFF_SUBAVE_SIZE = 230 # Size of a subaverage
FIFF.FIFF_SUBAVE_FIRST = 231 # The first epoch # contained in the subaverage
FIFF.FIFF_NAME = 233 # Intended to be a short name.
FIFF.FIFF_DESCRIPTION = FIFF.FIFF_COMMENT # (Textual) Description of an object
FIFF.FIFF_DIG_STRING = 234 # String of digitized points
FIFF.FIFF_LINE_FREQ = 235 # Line frequency
#
# HPI fitting program tags
#
FIFF.FIFF_HPI_COIL_FREQ = 236 # HPI coil excitation frequency
FIFF.FIFF_HPI_COIL_MOMENTS = 240 # Estimated moment vectors for the HPI coil magnetic dipoles
FIFF.FIFF_HPI_FIT_GOODNESS = 241 # Three floats indicating the goodness of fit
FIFF.FIFF_HPI_FIT_ACCEPT = 242 # Bitmask indicating acceptance (see below)
FIFF.FIFF_HPI_FIT_GOOD_LIMIT = 243 # Limit for the goodness-of-fit
FIFF.FIFF_HPI_FIT_DIST_LIMIT = 244 # Limit for the coil distance difference
FIFF.FIFF_HPI_COIL_NO = 245 # Coil number listed by HPI measurement
FIFF.FIFF_HPI_COILS_USED = 246 # List of coils finally used when the transformation was computed
FIFF.FIFF_HPI_DIGITIZATION_ORDER = 247 # Which Isotrak digitization point corresponds to each of the coils energized
#
# Pointers
#
FIFF.FIFFV_NEXT_SEQ = 0
FIFF.FIFFV_NEXT_NONE = -1
#
# Channel types
#
FIFF.FIFFV_MEG_CH = 1
FIFF.FIFFV_REF_MEG_CH = 301
FIFF.FIFFV_EEG_CH = 2
FIFF.FIFFV_MCG_CH = 201
FIFF.FIFFV_STIM_CH = 3
FIFF.FIFFV_EOG_CH = 202
FIFF.FIFFV_EMG_CH = 302
FIFF.FIFFV_ECG_CH = 402
FIFF.FIFFV_MISC_CH = 502
FIFF.FIFFV_RESP_CH = 602 # Respiration monitoring
FIFF.FIFFV_SEEG_CH = 802 # stereotactic EEG
FIFF.FIFFV_SYST_CH = 900 # some system status information (on Triux systems only)
FIFF.FIFFV_IAS_CH = 910 # Internal Active Shielding data (maybe on Triux only)
FIFF.FIFFV_EXCI_CH = 920 # flux excitation channel used to be a stimulus channel
#
# Quaternion channels for head position monitoring
#
FIFF.FIFFV_QUAT_0 = 700 # Quaternion param q0 obsolete for unit quaternion
FIFF.FIFFV_QUAT_1 = 701 # Quaternion param q1 rotation
FIFF.FIFFV_QUAT_2 = 702 # Quaternion param q2 rotation
FIFF.FIFFV_QUAT_3 = 703 # Quaternion param q3 rotation
FIFF.FIFFV_QUAT_4 = 704 # Quaternion param q4 translation
FIFF.FIFFV_QUAT_5 = 705 # Quaternion param q5 translation
FIFF.FIFFV_QUAT_6 = 706 # Quaternion param q6 translation
FIFF.FIFFV_HPI_G = 707 # Goodness-of-fit in continuous hpi
FIFF.FIFFV_HPI_ERR = 708 # Estimation error in continuous hpi
FIFF.FIFFV_HPI_MOV = 709 # Estimated head movement speed in continuous hpi
#
# Coordinate frames
#
FIFF.FIFFV_COORD_UNKNOWN = 0
FIFF.FIFFV_COORD_DEVICE = 1
FIFF.FIFFV_COORD_ISOTRAK = 2
FIFF.FIFFV_COORD_HPI = 3
FIFF.FIFFV_COORD_HEAD = 4
FIFF.FIFFV_COORD_MRI = 5
FIFF.FIFFV_COORD_MRI_SLICE = 6
FIFF.FIFFV_COORD_MRI_DISPLAY = 7
FIFF.FIFFV_COORD_DICOM_DEVICE = 8
FIFF.FIFFV_COORD_IMAGING_DEVICE = 9
#
# Needed for raw and evoked-response data
#
FIFF.FIFF_DATA_BUFFER = 300 # Buffer containing measurement data
FIFF.FIFF_DATA_SKIP = 301 # Data skip in buffers
FIFF.FIFF_EPOCH = 302 # Buffer containing one epoch and channel
FIFF.FIFF_DATA_SKIP_SAMP = 303 # Data skip in samples
#
# Info on subject
#
FIFF.FIFF_SUBJ_ID = 400 # Subject ID
FIFF.FIFF_SUBJ_FIRST_NAME = 401 # First name of the subject
FIFF.FIFF_SUBJ_MIDDLE_NAME = 402 # Middle name of the subject
FIFF.FIFF_SUBJ_LAST_NAME = 403 # Last name of the subject
FIFF.FIFF_SUBJ_BIRTH_DAY = 404 # Birthday of the subject
FIFF.FIFF_SUBJ_SEX = 405 # Sex of the subject
FIFF.FIFF_SUBJ_HAND = 406 # Handedness of the subject
FIFF.FIFF_SUBJ_WEIGHT = 407 # Weight of the subject
FIFF.FIFF_SUBJ_HEIGHT = 408 # Height of the subject
FIFF.FIFF_SUBJ_COMMENT = 409 # Comment about the subject
FIFF.FIFF_SUBJ_HIS_ID = 410 # ID used in the Hospital Information System
FIFF.FIFF_PROJ_ID = 500
FIFF.FIFF_PROJ_NAME = 501
FIFF.FIFF_PROJ_AIM = 502
FIFF.FIFF_PROJ_PERSONS = 503
FIFF.FIFF_PROJ_COMMENT = 504
FIFF.FIFF_EVENT_CHANNELS = 600 # Event channel numbers
FIFF.FIFF_EVENT_LIST = 601 # List of events (integers: <sample before after>
FIFF.FIFF_EVENT_CHANNEL = 602 # Event channel
FIFF.FIFF_EVENT_BITS = 603 # Event bits array
#
# Tags used in saving SQUID characteristics etc.
#
FIFF.FIFF_SQUID_BIAS = 701
FIFF.FIFF_SQUID_OFFSET = 702
FIFF.FIFF_SQUID_GATE = 703
#
# Aspect values used to save characteristic curves of SQUIDs. (mjk)
#
FIFF.FIFFV_ASPECT_IFII_LOW = 1100
FIFF.FIFFV_ASPECT_IFII_HIGH = 1101
FIFF.FIFFV_ASPECT_GATE = 1102
#
# Values for file references
#
FIFF.FIFFV_ROLE_PREV_FILE = 1
FIFF.FIFFV_ROLE_NEXT_FILE = 2
#
# References
#
FIFF.FIFF_REF_PATH = 1101
#
# Different aspects of data
#
FIFF.FIFFV_ASPECT_AVERAGE = 100 # Normal average of epochs
FIFF.FIFFV_ASPECT_STD_ERR = 101 # Std. error of mean
FIFF.FIFFV_ASPECT_SINGLE = 102 # Single epoch cut out from the continuous data
FIFF.FIFFV_ASPECT_SUBAVERAGE = 103
FIFF.FIFFV_ASPECT_ALTAVERAGE = 104 # Alternating subaverage
FIFF.FIFFV_ASPECT_SAMPLE = 105 # A sample cut out by graph
FIFF.FIFFV_ASPECT_POWER_DENSITY = 106 # Power density spectrum
FIFF.FIFFV_ASPECT_DIPOLE_WAVE = 200 # Dipole amplitude curve
#
# BEM surface IDs
#
FIFF.FIFFV_BEM_SURF_ID_UNKNOWN = -1
FIFF.FIFFV_BEM_SURF_ID_BRAIN = 1
FIFF.FIFFV_BEM_SURF_ID_SKULL = 3
FIFF.FIFFV_BEM_SURF_ID_HEAD = 4
FIFF.FIFF_BEM_SURF_ID = 3101 # int surface number
FIFF.FIFF_BEM_SURF_NAME = 3102 # string surface name
FIFF.FIFF_BEM_SURF_NNODE = 3103 # int number of nodes on a surface
FIFF.FIFF_BEM_SURF_NTRI = 3104 # int number of triangles on a surface
FIFF.FIFF_BEM_SURF_NODES = 3105 # float surface nodes (nnode,3)
FIFF.FIFF_BEM_SURF_TRIANGLES = 3106 # int surface triangles (ntri,3)
FIFF.FIFF_BEM_SURF_NORMALS = 3107 # float surface node normal unit vectors
FIFF.FIFF_BEM_POT_SOLUTION = 3110 # float ** The solution matrix
FIFF.FIFF_BEM_APPROX = 3111 # int approximation method, see below
FIFF.FIFF_BEM_COORD_FRAME = 3112 # The coordinate frame of the model
FIFF.FIFF_BEM_SIGMA = 3113 # Conductivity of a compartment
FIFF.FIFFV_BEM_APPROX_CONST = 1 # The constant potential approach
FIFF.FIFFV_BEM_APPROX_LINEAR = 2 # The linear potential approach
#
# More of those defined in MNE
#
FIFF.FIFFV_MNE_SURF_UNKNOWN = -1
FIFF.FIFFV_MNE_SURF_LEFT_HEMI = 101
FIFF.FIFFV_MNE_SURF_RIGHT_HEMI = 102
FIFF.FIFFV_MNE_SURF_MEG_HELMET = 201 # Use this irrespective of the system
#
# These relate to the Isotrak data
#
FIFF.FIFFV_POINT_CARDINAL = 1
FIFF.FIFFV_POINT_HPI = 2
FIFF.FIFFV_POINT_EEG = 3
FIFF.FIFFV_POINT_ECG = FIFF.FIFFV_POINT_EEG
FIFF.FIFFV_POINT_EXTRA = 4
FIFF.FIFFV_POINT_LPA = 1
FIFF.FIFFV_POINT_NASION = 2
FIFF.FIFFV_POINT_RPA = 3
#
# SSP
#
FIFF.FIFF_PROJ_ITEM_KIND = 3411
FIFF.FIFF_PROJ_ITEM_TIME = 3412
FIFF.FIFF_PROJ_ITEM_NVEC = 3414
FIFF.FIFF_PROJ_ITEM_VECTORS = 3415
FIFF.FIFF_PROJ_ITEM_DEFINITION = 3416
FIFF.FIFF_PROJ_ITEM_CH_NAME_LIST = 3417
#
# MRIs
#
FIFF.FIFF_MRI_SOURCE_PATH = FIFF.FIFF_REF_PATH
FIFF.FIFF_MRI_SOURCE_FORMAT = 2002
FIFF.FIFF_MRI_PIXEL_ENCODING = 2003
FIFF.FIFF_MRI_PIXEL_DATA_OFFSET = 2004
FIFF.FIFF_MRI_PIXEL_SCALE = 2005
FIFF.FIFF_MRI_PIXEL_DATA = 2006
FIFF.FIFF_MRI_PIXEL_OVERLAY_ENCODING = 2007
FIFF.FIFF_MRI_PIXEL_OVERLAY_DATA = 2008
FIFF.FIFF_MRI_BOUNDING_BOX = 2009
FIFF.FIFF_MRI_WIDTH = 2010
FIFF.FIFF_MRI_WIDTH_M = 2011
FIFF.FIFF_MRI_HEIGHT = 2012
FIFF.FIFF_MRI_HEIGHT_M = 2013
FIFF.FIFF_MRI_DEPTH = 2014
FIFF.FIFF_MRI_DEPTH_M = 2015
FIFF.FIFF_MRI_THICKNESS = 2016
FIFF.FIFF_MRI_SCENE_AIM = 2017
FIFF.FIFF_MRI_ORIG_SOURCE_PATH = 2020
FIFF.FIFF_MRI_ORIG_SOURCE_FORMAT = 2021
FIFF.FIFF_MRI_ORIG_PIXEL_ENCODING = 2022
FIFF.FIFF_MRI_ORIG_PIXEL_DATA_OFFSET = 2023
FIFF.FIFF_MRI_VOXEL_DATA = 2030
FIFF.FIFF_MRI_VOXEL_ENCODING = 2031
FIFF.FIFF_MRI_MRILAB_SETUP = 2100
FIFF.FIFF_MRI_SEG_REGION_ID = 2200
#
FIFF.FIFFV_MRI_PIXEL_UNKNOWN = 0
FIFF.FIFFV_MRI_PIXEL_BYTE = 1
FIFF.FIFFV_MRI_PIXEL_WORD = 2
FIFF.FIFFV_MRI_PIXEL_SWAP_WORD = 3
FIFF.FIFFV_MRI_PIXEL_FLOAT = 4
FIFF.FIFFV_MRI_PIXEL_BYTE_INDEXED_COLOR = 5
FIFF.FIFFV_MRI_PIXEL_BYTE_RGB_COLOR = 6
FIFF.FIFFV_MRI_PIXEL_BYTE_RLE_RGB_COLOR = 7
FIFF.FIFFV_MRI_PIXEL_BIT_RLE = 8
#
# These are the MNE fiff definitions (range 350-390 reserved for MNE)
#
FIFF.FIFFB_MNE = 350
FIFF.FIFFB_MNE_SOURCE_SPACE = 351
FIFF.FIFFB_MNE_FORWARD_SOLUTION = 352
FIFF.FIFFB_MNE_PARENT_MRI_FILE = 353
FIFF.FIFFB_MNE_PARENT_MEAS_FILE = 354
FIFF.FIFFB_MNE_COV = 355
FIFF.FIFFB_MNE_INVERSE_SOLUTION = 356
FIFF.FIFFB_MNE_NAMED_MATRIX = 357
FIFF.FIFFB_MNE_ENV = 358
FIFF.FIFFB_MNE_BAD_CHANNELS = 359
FIFF.FIFFB_MNE_VERTEX_MAP = 360
FIFF.FIFFB_MNE_EVENTS = 361
FIFF.FIFFB_MNE_MORPH_MAP = 362
FIFF.FIFFB_MNE_SURFACE_MAP = 363
FIFF.FIFFB_MNE_SURFACE_MAP_GROUP = 364
#
# CTF compensation data
#
FIFF.FIFFB_MNE_CTF_COMP = 370
FIFF.FIFFB_MNE_CTF_COMP_DATA = 371
FIFF.FIFFB_MNE_DERIVATIONS = 372
FIFF.FIFFB_MNE_EPOCHS = 373
FIFF.FIFFB_MNE_ICA = 374
#
# Fiff tags associated with MNE computations (3500...)
#
#
# 3500... Bookkeeping
#
FIFF.FIFF_MNE_ROW_NAMES = 3502
FIFF.FIFF_MNE_COL_NAMES = 3503
FIFF.FIFF_MNE_NROW = 3504
FIFF.FIFF_MNE_NCOL = 3505
FIFF.FIFF_MNE_COORD_FRAME = 3506 # Coordinate frame employed. Defaults:
# FIFFB_MNE_SOURCE_SPACE FIFFV_COORD_MRI
# FIFFB_MNE_FORWARD_SOLUTION FIFFV_COORD_HEAD
# FIFFB_MNE_INVERSE_SOLUTION FIFFV_COORD_HEAD
FIFF.FIFF_MNE_CH_NAME_LIST = 3507
FIFF.FIFF_MNE_FILE_NAME = 3508 # This removes the collision with fiff_file.h (used to be 3501)
#
# 3510... 3590... Source space or surface
#
FIFF.FIFF_MNE_SOURCE_SPACE_POINTS = 3510 # The vertices
FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS = 3511 # The vertex normals
FIFF.FIFF_MNE_SOURCE_SPACE_NPOINTS = 3512 # How many vertices
FIFF.FIFF_MNE_SOURCE_SPACE_SELECTION = 3513 # Which are selected to the source space
FIFF.FIFF_MNE_SOURCE_SPACE_NUSE = 3514 # How many are in use
FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST = 3515 # Nearest source space vertex for all vertices
FIFF.FIFF_MNE_SOURCE_SPACE_NEAREST_DIST = 3516 # Distance to the Nearest source space vertex for all vertices
FIFF.FIFF_MNE_SOURCE_SPACE_ID = 3517 # Identifier
FIFF.FIFF_MNE_SOURCE_SPACE_TYPE = 3518 # Surface or volume
FIFF.FIFF_MNE_SOURCE_SPACE_VERTICES = 3519 # List of vertices (zero based)
FIFF.FIFF_MNE_SOURCE_SPACE_VOXEL_DIMS = 3596 # Voxel space dimensions in a volume source space
FIFF.FIFF_MNE_SOURCE_SPACE_INTERPOLATOR = 3597 # Matrix to interpolate a volume source space into a mri volume
FIFF.FIFF_MNE_SOURCE_SPACE_MRI_FILE = 3598 # MRI file used in the interpolation
FIFF.FIFF_MNE_SOURCE_SPACE_NTRI = 3590 # Number of triangles
FIFF.FIFF_MNE_SOURCE_SPACE_TRIANGLES = 3591 # The triangulation
FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI = 3592 # Number of triangles corresponding to the number of vertices in use
FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES = 3593 # The triangulation of the used vertices in the source space
FIFF.FIFF_MNE_SOURCE_SPACE_NNEIGHBORS = 3594 # Number of neighbors for each source space point (used for volume source spaces)
FIFF.FIFF_MNE_SOURCE_SPACE_NEIGHBORS = 3595 # Neighbors for each source space point (used for volume source spaces)
FIFF.FIFF_MNE_SOURCE_SPACE_DIST = 3599 # Distances between vertices in use (along the surface)
FIFF.FIFF_MNE_SOURCE_SPACE_DIST_LIMIT = 3600 # If distance is above this limit (in the volume) it has not been calculated
FIFF.FIFF_MNE_SURFACE_MAP_DATA = 3610 # Surface map data
FIFF.FIFF_MNE_SURFACE_MAP_KIND = 3611 # Type of map
#
# 3520... Forward solution
#
FIFF.FIFF_MNE_FORWARD_SOLUTION = 3520
FIFF.FIFF_MNE_SOURCE_ORIENTATION = 3521 # Fixed or free
FIFF.FIFF_MNE_INCLUDED_METHODS = 3522
FIFF.FIFF_MNE_FORWARD_SOLUTION_GRAD = 3523
#
# 3530... Covariance matrix
#
FIFF.FIFF_MNE_COV_KIND = 3530 # What kind of a covariance matrix
FIFF.FIFF_MNE_COV_DIM = 3531 # Matrix dimension
FIFF.FIFF_MNE_COV = 3532 # Full matrix in packed representation (lower triangle)
FIFF.FIFF_MNE_COV_DIAG = 3533 # Diagonal matrix
FIFF.FIFF_MNE_COV_EIGENVALUES = 3534 # Eigenvalues and eigenvectors of the above
FIFF.FIFF_MNE_COV_EIGENVECTORS = 3535
FIFF.FIFF_MNE_COV_NFREE = 3536 # Number of degrees of freedom
FIFF.FIFF_MNE_COV_METHOD = 3537 # The estimator used
FIFF.FIFF_MNE_COV_SCORE = 3538 # Negative log-likelihood
#
# 3540... Inverse operator
#
# We store the inverse operator as the eigenleads, eigenfields,
# and weights
#
FIFF.FIFF_MNE_INVERSE_LEADS = 3540 # The eigenleads
FIFF.FIFF_MNE_INVERSE_LEADS_WEIGHTED = 3546 # The eigenleads (already weighted with R^0.5)
FIFF.FIFF_MNE_INVERSE_FIELDS = 3541 # The eigenfields
FIFF.FIFF_MNE_INVERSE_SING = 3542 # The singular values
FIFF.FIFF_MNE_PRIORS_USED = 3543 # Which kind of priors have been used for the source covariance matrix
FIFF.FIFF_MNE_INVERSE_FULL = 3544 # Inverse operator as one matrix
# This matrix includes the whitening operator as well
# The regularization is applied
FIFF.FIFF_MNE_INVERSE_SOURCE_ORIENTATIONS = 3545 # Contains the orientation of one source per row
# The source orientations must be expressed in the coordinate system
# given by FIFF_MNE_COORD_FRAME
FIFF.FIFF_MNE_INVERSE_SOURCE_UNIT = 3547 # Are the sources given in Am or Am/m^2 ?
#
# 3550... Saved environment info
#
FIFF.FIFF_MNE_ENV_WORKING_DIR = 3550 # Working directory where the file was created
FIFF.FIFF_MNE_ENV_COMMAND_LINE = 3551 # The command used to create the file
FIFF.FIFF_MNE_EXTERNAL_BIG_ENDIAN = 3552 # Reference to an external binary file (big-endian)
FIFF.FIFF_MNE_EXTERNAL_LITTLE_ENDIAN = 3553 # Reference to an external binary file (little-endian)
#
# 3560... Miscellaneous
#
FIFF.FIFF_MNE_PROJ_ITEM_ACTIVE = 3560 # Is this projection item active?
FIFF.FIFF_MNE_EVENT_LIST = 3561 # An event list (for STI 014)
FIFF.FIFF_MNE_HEMI = 3562 # Hemisphere association for general purposes
FIFF.FIFF_MNE_DATA_SKIP_NOP = 3563 # A data skip turned off in the raw data
FIFF.FIFF_MNE_ORIG_CH_INFO = 3564 # Channel information before any changes
FIFF.FIFF_MNE_EVENT_TRIGGER_MASK = 3565 # Mask applied to the trigger channel values
FIFF.FIFF_MNE_EVENT_COMMENTS = 3566 # Event comments merged into one long string
FIFF.FIFF_MNE_CUSTOM_REF = 3567 # Whether a custom reference was applied to the data
FIFF.FIFF_MNE_BASELINE_MIN = 3568 # Time of baseline beginning
FIFF.FIFF_MNE_BASELINE_MAX = 3569 # Time of baseline end
#
# 3570... Morphing maps
#
FIFF.FIFF_MNE_MORPH_MAP = 3570 # Mapping of closest vertices on the sphere
FIFF.FIFF_MNE_MORPH_MAP_FROM = 3571 # Which subject is this map from
FIFF.FIFF_MNE_MORPH_MAP_TO = 3572 # Which subject is this map to
#
# 3580... CTF compensation data
#
FIFF.FIFF_MNE_CTF_COMP_KIND = 3580 # What kind of compensation
FIFF.FIFF_MNE_CTF_COMP_DATA = 3581 # The compensation data itself
FIFF.FIFF_MNE_CTF_COMP_CALIBRATED = 3582 # Are the coefficients calibrated?
FIFF.FIFF_MNE_DERIVATION_DATA = 3585 # Used to store information about EEG and other derivations
#
# 3601... values associated with ICA decomposition
#
FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS = 3601 # ICA interface parameters
FIFF.FIFF_MNE_ICA_CHANNEL_NAMES = 3602 # ICA channel names
FIFF.FIFF_MNE_ICA_WHITENER = 3603 # ICA whitener
FIFF.FIFF_MNE_ICA_PCA_COMPONENTS = 3604 # PCA components
FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR = 3605 # PCA explained variance
FIFF.FIFF_MNE_ICA_PCA_MEAN = 3606 # PCA mean
FIFF.FIFF_MNE_ICA_MATRIX = 3607 # ICA unmixing matrix
FIFF.FIFF_MNE_ICA_BADS = 3608 # ICA bad sources
FIFF.FIFF_MNE_ICA_MISC_PARAMS = 3609 # ICA misc params
#
# Maxfilter tags
#
FIFF.FIFF_SSS_FRAME = 263
FIFF.FIFF_SSS_JOB = 264
FIFF.FIFF_SSS_ORIGIN = 265
FIFF.FIFF_SSS_ORD_IN = 266
FIFF.FIFF_SSS_ORD_OUT = 267
FIFF.FIFF_SSS_NMAG = 268
FIFF.FIFF_SSS_COMPONENTS = 269
FIFF.FIFF_SSS_CAL_CHANS = 270
FIFF.FIFF_SSS_CAL_CORRS = 271
FIFF.FIFF_SSS_ST_CORR = 272
FIFF.FIFF_SSS_NFREE = 278
FIFF.FIFF_SSS_ST_LENGTH = 279
FIFF.FIFF_DECOUPLER_MATRIX = 800
#
# Fiff values associated with MNE computations
#
FIFF.FIFFV_MNE_UNKNOWN_ORI = 0
FIFF.FIFFV_MNE_FIXED_ORI = 1
FIFF.FIFFV_MNE_FREE_ORI = 2
FIFF.FIFFV_MNE_MEG = 1
FIFF.FIFFV_MNE_EEG = 2
FIFF.FIFFV_MNE_MEG_EEG = 3
FIFF.FIFFV_MNE_PRIORS_NONE = 0
FIFF.FIFFV_MNE_PRIORS_DEPTH = 1
FIFF.FIFFV_MNE_PRIORS_LORETA = 2
FIFF.FIFFV_MNE_PRIORS_SULCI = 3
FIFF.FIFFV_MNE_UNKNOWN_COV = 0
FIFF.FIFFV_MNE_SENSOR_COV = 1
FIFF.FIFFV_MNE_NOISE_COV = 1 # This is what it should have been called
FIFF.FIFFV_MNE_SOURCE_COV = 2
FIFF.FIFFV_MNE_FMRI_PRIOR_COV = 3
FIFF.FIFFV_MNE_SIGNAL_COV = 4 # This will be potentially employed in beamformers
FIFF.FIFFV_MNE_DEPTH_PRIOR_COV = 5 # The depth weighting prior
FIFF.FIFFV_MNE_ORIENT_PRIOR_COV = 6 # The orientation prior
FIFF.FIFFV_MNE_PROJ_ITEM_EEG_AVREF = 10 # Linear projection related to EEG average reference
#
# Output map types
#
FIFF.FIFFV_MNE_MAP_UNKNOWN = -1 # Unspecified
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT = 1 # Scalar current value
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_SIZE = 2 # Absolute value of the above
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT = 3 # Current vector components
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_SIZE = 4 # Vector current size
FIFF.FIFFV_MNE_MAP_T_STAT = 5 # Student's t statistic
FIFF.FIFFV_MNE_MAP_F_STAT = 6 # F statistic
FIFF.FIFFV_MNE_MAP_F_STAT_SQRT = 7 # Square root of the F statistic
FIFF.FIFFV_MNE_MAP_CHI2_STAT = 8 # (Approximate) chi^2 statistic
FIFF.FIFFV_MNE_MAP_CHI2_STAT_SQRT = 9 # Square root of the (approximate) chi^2 statistic
FIFF.FIFFV_MNE_MAP_SCALAR_CURRENT_NOISE = 10 # Current noise approximation (scalar)
FIFF.FIFFV_MNE_MAP_VECTOR_CURRENT_NOISE = 11 # Current noise approximation (vector)
#
# Source space types (values of FIFF_MNE_SOURCE_SPACE_TYPE)
#
FIFF.FIFFV_MNE_SPACE_UNKNOWN = -1
FIFF.FIFFV_MNE_SPACE_SURFACE = 1
FIFF.FIFFV_MNE_SPACE_VOLUME = 2
FIFF.FIFFV_MNE_SPACE_DISCRETE = 3
#
# Covariance matrix channel classification
#
FIFF.FIFFV_MNE_COV_CH_UNKNOWN = -1 # No idea
FIFF.FIFFV_MNE_COV_CH_MEG_MAG = 0 # Axial gradiometer or magnetometer [T]
FIFF.FIFFV_MNE_COV_CH_MEG_GRAD = 1 # Planar gradiometer [T/m]
FIFF.FIFFV_MNE_COV_CH_EEG = 2 # EEG [V]
#
# Projection item kinds
#
FIFF.FIFFV_PROJ_ITEM_NONE = 0
FIFF.FIFFV_PROJ_ITEM_FIELD = 1
FIFF.FIFFV_PROJ_ITEM_DIP_FIX = 2
FIFF.FIFFV_PROJ_ITEM_DIP_ROT = 3
FIFF.FIFFV_PROJ_ITEM_HOMOG_GRAD = 4
FIFF.FIFFV_PROJ_ITEM_HOMOG_FIELD = 5
#
# Additional coordinate frames
#
FIFF.FIFFV_MNE_COORD_TUFTS_EEG = 300 # For Tufts EEG data
FIFF.FIFFV_MNE_COORD_CTF_DEVICE = 1001 # CTF device coordinates
FIFF.FIFFV_MNE_COORD_CTF_HEAD = 1004 # CTF head coordinates
FIFF.FIFFV_MNE_COORD_DIGITIZER = FIFF.FIFFV_COORD_ISOTRAK # Original (Polhemus) digitizer coordinates
FIFF.FIFFV_MNE_COORD_SURFACE_RAS = FIFF.FIFFV_COORD_MRI # The surface RAS coordinates
FIFF.FIFFV_MNE_COORD_MRI_VOXEL = 2001 # The MRI voxel coordinates
FIFF.FIFFV_MNE_COORD_RAS = 2002 # Surface RAS coordinates with non-zero origin
FIFF.FIFFV_MNE_COORD_MNI_TAL = 2003 # MNI Talairach coordinates
FIFF.FIFFV_MNE_COORD_FS_TAL_GTZ = 2004 # FreeSurfer Talairach coordinates (MNI z > 0)
FIFF.FIFFV_MNE_COORD_FS_TAL_LTZ = 2005 # FreeSurfer Talairach coordinates (MNI z < 0)
FIFF.FIFFV_MNE_COORD_FS_TAL = 2006 # FreeSurfer Talairach coordinates
#
# 4D and KIT use the same head coordinate system definition as CTF
#
FIFF.FIFFV_MNE_COORD_4D_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD
FIFF.FIFFV_MNE_COORD_KIT_HEAD = FIFF.FIFFV_MNE_COORD_CTF_HEAD
#
# KIT system coil types
#
FIFF.FIFFV_COIL_KIT_GRAD = 6001
FIFF.FIFFV_COIL_KIT_REF_MAG = 6002
#
# CTF coil and channel types
#
FIFF.FIFFV_COIL_CTF_GRAD = 5001
FIFF.FIFFV_COIL_CTF_REF_MAG = 5002
FIFF.FIFFV_COIL_CTF_REF_GRAD = 5003
FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD = 5004
#
# Magnes reference sensors
#
FIFF.FIFFV_COIL_MAGNES_REF_MAG = 4003
FIFF.FIFFV_COIL_MAGNES_REF_GRAD = 4004
FIFF.FIFFV_COIL_MAGNES_OFFDIAG_REF_GRAD = 4005
#
# BabySQUID sensors
#
FIFF.FIFFV_COIL_BABY_GRAD = 7001
FIFF.FIFFV_COIL_BABY_MAG = 7002
FIFF.FIFFV_COIL_BABY_REF_MAG = 7003
#
# FWD Types
#
FIFF.FWD_COIL_UNKNOWN = 0
FIFF.FWD_COILC_UNKNOWN = 0
FIFF.FWD_COILC_EEG = 1000
FIFF.FWD_COILC_MAG = 1
FIFF.FWD_COILC_AXIAL_GRAD = 2
FIFF.FWD_COILC_PLANAR_GRAD = 3
FIFF.FWD_COILC_AXIAL_GRAD2 = 4
FIFF.FWD_COIL_ACCURACY_POINT = 0
FIFF.FWD_COIL_ACCURACY_NORMAL = 1
FIFF.FWD_COIL_ACCURACY_ACCURATE = 2
FIFF.FWD_BEM_UNKNOWN = -1
FIFF.FWD_BEM_CONSTANT_COLL = 1
FIFF.FWD_BEM_LINEAR_COLL = 2
FIFF.FWD_BEM_IP_APPROACH_LIMIT = 0.1
FIFF.FWD_BEM_LIN_FIELD_SIMPLE = 1
FIFF.FWD_BEM_LIN_FIELD_FERGUSON = 2
FIFF.FWD_BEM_LIN_FIELD_URANKAR = 3
#
# Data types
#
FIFF.FIFFT_VOID = 0
FIFF.FIFFT_BYTE = 1
FIFF.FIFFT_SHORT = 2
FIFF.FIFFT_INT = 3
FIFF.FIFFT_FLOAT = 4
FIFF.FIFFT_DOUBLE = 5
FIFF.FIFFT_JULIAN = 6
FIFF.FIFFT_USHORT = 7
FIFF.FIFFT_UINT = 8
FIFF.FIFFT_ULONG = 9
FIFF.FIFFT_STRING = 10
FIFF.FIFFT_LONG = 11
FIFF.FIFFT_DAU_PACK13 = 13
FIFF.FIFFT_DAU_PACK14 = 14
FIFF.FIFFT_DAU_PACK16 = 16
FIFF.FIFFT_COMPLEX_FLOAT = 20
FIFF.FIFFT_COMPLEX_DOUBLE = 21
FIFF.FIFFT_OLD_PACK = 23
FIFF.FIFFT_CH_INFO_STRUCT = 30
FIFF.FIFFT_ID_STRUCT = 31
FIFF.FIFFT_DIR_ENTRY_STRUCT = 32
FIFF.FIFFT_DIG_POINT_STRUCT = 33
FIFF.FIFFT_CH_POS_STRUCT = 34
FIFF.FIFFT_COORD_TRANS_STRUCT = 35
FIFF.FIFFT_DIG_STRING_STRUCT = 36
FIFF.FIFFT_STREAM_SEGMENT_STRUCT = 37
#
# Units of measurement
#
FIFF.FIFF_UNIT_NONE = -1
#
# SI base units
#
FIFF.FIFF_UNIT_M = 1
FIFF.FIFF_UNIT_KG = 2
FIFF.FIFF_UNIT_SEC = 3
FIFF.FIFF_UNIT_A = 4
FIFF.FIFF_UNIT_K = 5
FIFF.FIFF_UNIT_MOL = 6
#
# SI Supplementary units
#
FIFF.FIFF_UNIT_RAD = 7
FIFF.FIFF_UNIT_SR = 8
#
# SI base candela
#
FIFF.FIFF_UNIT_CD = 9
#
# SI derived units
#
FIFF.FIFF_UNIT_HZ = 101
FIFF.FIFF_UNIT_N = 102
FIFF.FIFF_UNIT_PA = 103
FIFF.FIFF_UNIT_J = 104
FIFF.FIFF_UNIT_W = 105
FIFF.FIFF_UNIT_C = 106
FIFF.FIFF_UNIT_V = 107
FIFF.FIFF_UNIT_F = 108
FIFF.FIFF_UNIT_OHM = 109
FIFF.FIFF_UNIT_MHO = 110
FIFF.FIFF_UNIT_WB = 111
FIFF.FIFF_UNIT_T = 112
FIFF.FIFF_UNIT_H = 113
FIFF.FIFF_UNIT_CEL = 114
FIFF.FIFF_UNIT_LM = 115
FIFF.FIFF_UNIT_LX = 116
#
# Others we need
#
FIFF.FIFF_UNIT_T_M = 201 # T/m
FIFF.FIFF_UNIT_AM = 202 # Am
FIFF.FIFF_UNIT_AM_M2 = 203 # Am/m^2
FIFF.FIFF_UNIT_AM_M3 = 204 # Am/m^3
#
# Multipliers
#
FIFF.FIFF_UNITM_E = 18
FIFF.FIFF_UNITM_PET = 15
FIFF.FIFF_UNITM_T = 12
FIFF.FIFF_UNITM_MEG = 6
FIFF.FIFF_UNITM_K = 3
FIFF.FIFF_UNITM_H = 2
FIFF.FIFF_UNITM_DA = 1
FIFF.FIFF_UNITM_NONE = 0
FIFF.FIFF_UNITM_D = -1
FIFF.FIFF_UNITM_C = -2
FIFF.FIFF_UNITM_M = -3
FIFF.FIFF_UNITM_MU = -6
FIFF.FIFF_UNITM_N = -9
FIFF.FIFF_UNITM_P = -12
FIFF.FIFF_UNITM_F = -15
FIFF.FIFF_UNITM_A = -18
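#
# Editorial sketch (not part of the original FIFF definitions): the FIFF_UNITM_*
# values above are base-10 exponents (e.g. FIFF_UNITM_K = 3 for kilo,
# FIFF_UNITM_F = -15 for femto), so a hypothetical helper for turning a
# multiplier code into a scale factor is a single power of ten:
def _fiff_multiplier_to_factor(unit_mul):
    """Return the scale factor for a FIFF unit multiplier, e.g. -15 -> 1e-15."""
    return 10.0 ** unit_mul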
#
# Coil types
#
FIFF.FIFFV_COIL_NONE = 0 # The location info contains no data
FIFF.FIFFV_COIL_EEG = 1 # EEG electrode position in r0
FIFF.FIFFV_COIL_NM_122 = 2 # Neuromag 122 coils
FIFF.FIFFV_COIL_NM_24 = 3 # Old 24 channel system in HUT
FIFF.FIFFV_COIL_NM_MCG_AXIAL = 4 # The axial devices in the HUCS MCG system
FIFF.FIFFV_COIL_EEG_BIPOLAR = 5 # Bipolar EEG lead
FIFF.FIFFV_COIL_DIPOLE = 200 # Time-varying dipole definition
# The coil info contains dipole location (r0) and
# direction (ex)
FIFF.FIFFV_COIL_MCG_42 = 1000 # For testing the MCG software
FIFF.FIFFV_COIL_POINT_MAGNETOMETER = 2000 # Simple point magnetometer
FIFF.FIFFV_COIL_AXIAL_GRAD_5CM = 2001 # Generic axial gradiometer
FIFF.FIFFV_COIL_POINT_MAGNETOMETER_X = 2002 # Simple point magnetometer, x-direction
FIFF.FIFFV_COIL_POINT_MAGNETOMETER_Y = 2003 # Simple point magnetometer, y-direction
FIFF.FIFFV_COIL_VV_PLANAR_W = 3011 # VV prototype wirewound planar sensor
FIFF.FIFFV_COIL_VV_PLANAR_T1 = 3012 # Vectorview SQ20483N planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T2 = 3013 # Vectorview SQ20483N-A planar gradiometer
FIFF.FIFFV_COIL_VV_PLANAR_T3 = 3014 # Vectorview SQ20950N planar gradiometer
FIFF.FIFFV_COIL_VV_MAG_W = 3021 # VV prototype wirewound magnetometer
FIFF.FIFFV_COIL_VV_MAG_T1 = 3022 # Vectorview SQ20483N magnetometer
FIFF.FIFFV_COIL_VV_MAG_T2 = 3023 # Vectorview SQ20483-A magnetometer
FIFF.FIFFV_COIL_VV_MAG_T3 = 3024 # Vectorview SQ20950N magnetometer
FIFF.FIFFV_COIL_MAGNES_MAG = 4001 # Magnes WH magnetometer
FIFF.FIFFV_COIL_MAGNES_GRAD = 4002 # Magnes WH gradiometer
FIFF.FIFFV_COIL_MAGNES_R_MAG = 4003 # Magnes WH reference magnetometer
FIFF.FIFFV_COIL_MAGNES_R_GRAD_DIA = 4004 # Magnes WH reference diagonal gradiometer
FIFF.FIFFV_COIL_MAGNES_R_GRAD_OFF = 4005 # Magnes WH reference off-diagonal gradiometer
# MNE RealTime
FIFF.FIFF_MNE_RT_COMMAND = 3700 # realtime command
FIFF.FIFF_MNE_RT_CLIENT_ID = 3701 # realtime client
# MNE epochs bookkeeping
FIFF.FIFFB_MNE_EPOCHS_SELECTION = 3800 # the epochs selection
FIFF.FIFFB_MNE_EPOCHS_DROP_LOG = 3801 # the drop log
|
|
import os
import shutil
import unittest
from mcl.logging.tools import dump_to_csv
from mcl.logging.tools import dump_to_list
from mcl.logging.tools import dump_to_array
_DIRNAME = os.path.dirname(__file__)
TMP_PATH = os.path.join(_DIRNAME, 'tmp')
LOG_PATH = os.path.join(_DIRNAME, 'dataset')
SPT_PATH = os.path.join(_DIRNAME, 'dataset_split')
# -----------------------------------------------------------------------------
# Contents of log files
# -----------------------------------------------------------------------------
log_data = [{'data': 0.00, 'name': 'UnitTestMessageA', 'timestamp': 0.00},
{'data': 0.01, 'name': 'UnitTestMessageA', 'timestamp': 0.01},
{'data': 0.02, 'name': 'UnitTestMessageA', 'timestamp': 0.02},
{'data': 0.03, 'name': 'UnitTestMessageA', 'timestamp': 0.03},
{'data': 0.04, 'name': 'UnitTestMessageA', 'timestamp': 0.04},
{'data': 0.05, 'name': 'UnitTestMessageA', 'timestamp': 0.05},
{'data': 0.06, 'name': 'UnitTestMessageA', 'timestamp': 0.06},
{'data': 0.07, 'name': 'UnitTestMessageA', 'timestamp': 0.07},
{'data': 0.08, 'name': 'UnitTestMessageA', 'timestamp': 0.08},
{'data': 0.09, 'name': 'UnitTestMessageA', 'timestamp': 0.09},
{'data': 0.1, 'name': 'UnitTestMessageB', 'timestamp': 0.1},
{'data': 0.2, 'name': 'UnitTestMessageB', 'timestamp': 0.2},
{'data': 0.3, 'name': 'UnitTestMessageB', 'timestamp': 0.3},
{'data': 0.4, 'name': 'UnitTestMessageB', 'timestamp': 0.4},
{'data': 0.5, 'name': 'UnitTestMessageB', 'timestamp': 0.5},
{'data': 0.6, 'name': 'UnitTestMessageB', 'timestamp': 0.6},
{'data': 0.7, 'name': 'UnitTestMessageB', 'timestamp': 0.7},
{'data': 0.8, 'name': 'UnitTestMessageB', 'timestamp': 0.8},
{'data': 0.9, 'name': 'UnitTestMessageB', 'timestamp': 0.9},
{'data': 1.0, 'name': 'UnitTestMessageB', 'timestamp': 1.0}]
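# Note (inferred from the assertions below, not from separate documentation): with
# metadata=True, each item returned by dump_to_list() is expected to be a dict of
# the form {'topic': <str>, 'elapsed_time': <float>, 'payload': <message dict>},
# where the payload matches an entry of `log_data`; with metadata=False only the
# payload dictionaries are expected.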
# -----------------------------------------------------------------------------
# dump_to_list()
# -----------------------------------------------------------------------------
class DumpListTests(unittest.TestCase):
def load_list(self, path, metadata):
"""Test dump_to_list() can load data."""
# Load logged data into a list.
lst = dump_to_list(path, metadata=metadata)
# Ensure loaded data is valid.
self.assertEqual(len(lst), len(log_data))
for i, item in enumerate(lst):
# Validate metadata.
if metadata:
self.assertEqual(item['topic'], '')
self.assertAlmostEqual(item['elapsed_time'],
log_data[i]['timestamp'])
item = item['payload']
self.assertEqual(item['name'], log_data[i]['name'])
self.assertAlmostEqual(item['data'], log_data[i]['data'])
self.assertAlmostEqual(item['timestamp'], log_data[i]['timestamp'])
def test_bad_args(self):
"""Test dump_to_list() catches bad input."""
# Validate log file inputs.
with self.assertRaises(IOError):
dump_to_list('does_not_exist.log')
# Ensure 'metadata' is a boolean.
with self.assertRaises(TypeError):
dump_to_list(os.path.join(LOG_PATH, 'UnitTestMessageA.log'),
metadata='bool')
def test_load(self):
"""Test dump_to_list() can load data."""
self.load_list(LOG_PATH, True)
def test_load_no_meta(self):
"""Test dump_to_list() can ignore metadata."""
        self.load_list(LOG_PATH, False)
def test_load_time(self):
"""Test dump_to_list() can load a time range of data."""
# Load logged data into a list.
min_time = 0.005
max_time = 0.095
lst = dump_to_list(LOG_PATH, min_time=min_time, max_time=max_time)
# Ensure time range of data is valid.
for i, item in enumerate(lst):
self.assertAlmostEqual(item['payload']['timestamp'],
log_data[i + 1]['timestamp'])
# -----------------------------------------------------------------------------
# dump_to_array()
# -----------------------------------------------------------------------------
class DumpArrayTests(unittest.TestCase):
def test_bad_input(self):
"""Test dump_to_array() bad input."""
bad_path = os.path.join(LOG_PATH, 'UnitTestMessageC.log')
log_file = os.path.join(LOG_PATH, 'UnitTestMessageA.log')
# Catch invalid paths.
with self.assertRaises(IOError):
dump_to_array(bad_path, ['data'])
# Catch non iterable keys.
with self.assertRaises(TypeError):
dump_to_array(log_file, 5)
# Catch non-string key.
with self.assertRaises(TypeError):
dump_to_array(log_file, ['data', 5])
# Catch non-existent key
with self.assertRaises(KeyError):
dump_to_array(log_file, ['error'])
def test_mixed_type(self):
"""Test dump_to_array() mixed types."""
with self.assertRaises(TypeError):
dump_to_array(LOG_PATH, ['timestamp', 'data'])
def test_non_numeric(self):
"""Test dump_to_array() throws an exception on non-numeric types."""
with self.assertRaises(TypeError):
dump_to_array(os.path.join(LOG_PATH, 'UnitTestMessageA.log'),
['name'])
def test_no_data(self):
"""Test dump_to_array() no data."""
log_path = os.path.join(LOG_PATH, 'UnitTestMessageA.log')
array = dump_to_array(log_path, ['timestamp'], min_time=30.0)
self.assertEqual(array, None)
def test_load(self):
"""Test dump_to_array() can load data."""
# Load UnitTestMessageA data.
data = [msg for msg in log_data if msg['name'] == 'UnitTestMessageA']
# Load logged data into a list.
pth = os.path.join(LOG_PATH, 'UnitTestMessageA.log')
arr = dump_to_array(pth, ['timestamp', 'data'])
# Ensure loaded data is valid.
self.assertEqual(arr.ndim, 2)
self.assertEqual(len(arr), len(data))
for i, item in enumerate(data):
self.assertAlmostEqual(arr[i, 0], item['data'])
self.assertAlmostEqual(arr[i, 1], item['timestamp'])
def test_load_time(self):
"""Test dump_to_array() can load a time range of data."""
# Load UnitTestMessageB.
data = [msg for msg in log_data if msg['name'] == 'UnitTestMessageB']
# Load logged data into a list.
pth = os.path.join(LOG_PATH, 'UnitTestMessageB.log')
min_time = 0.15
max_time = 0.95
arr = dump_to_array(pth, ['timestamp'],
min_time=min_time,
max_time=max_time)
# Ensure loaded data is valid.
self.assertEqual(arr.ndim, 2)
self.assertEqual(len(arr), 8)
for i, item in enumerate(arr):
            self.assertAlmostEqual(arr[i, 0], data[i + 1]['timestamp'])
# -----------------------------------------------------------------------------
# dump_to_csv()
# -----------------------------------------------------------------------------
class DumpCSVTests(unittest.TestCase):
def setUp(self):
"""Create logging path if it does not exist."""
# Make temporary directory for CSV data.
if not os.path.exists(TMP_PATH):
os.makedirs(TMP_PATH)
with open(os.path.join(TMP_PATH, 'README'), 'w') as f:
f.write('This directory was created automatically\n')
f.write('for unit-testing & can be safely deleted.\n')
def tearDown(self):
"""Delete files created for test logging."""
# Remove temporary directory.
if os.path.exists(TMP_PATH):
shutil.rmtree(TMP_PATH)
def test_bad_input(self):
"""Test dump_to_csv() bad input."""
bad_path = os.path.join(LOG_PATH, 'UnitTestMessageC.log')
log_file = os.path.join(LOG_PATH, 'UnitTestMessageA.log')
csv_file = os.path.join(TMP_PATH, 'data.csv')
# Catch invalid path.
with self.assertRaises(IOError):
dump_to_csv(bad_path, csv_file, ['timestamp', 'data'])
# Catch non iterable keys.
with self.assertRaises(TypeError):
dump_to_csv(log_file, csv_file, 5)
# Catch non-string key.
with self.assertRaises(TypeError):
dump_to_csv(log_file, csv_file, ['error', 5])
# Catch non-existent key
with self.assertRaises(KeyError):
dump_to_csv(log_file, csv_file, ['error'])
def test_mixed_type(self):
"""Test dump_to_csv() mixed types."""
csv_file = os.path.join(TMP_PATH, 'data.csv')
with self.assertRaises(TypeError):
dump_to_csv(LOG_PATH, csv_file, ['timestamp', 'data'])
def test_dump(self):
"""Test dump_to_csv() can write data to CSV file."""
# Create paths to files.
log_file = os.path.join(LOG_PATH, 'UnitTestMessageA.log')
csv_file = os.path.join(TMP_PATH, 'data.csv')
# Dump data to CSV file.
keys = ['name', 'data', 'timestamp']
dump_to_csv(log_file, csv_file, keys)
# Read data from CSV file and reference.
with open(csv_file, 'r') as f:
write_data = f.read()
with open(os.path.join(LOG_PATH, 'UnitTestMessageA.csv'), 'r') as f:
expected_data = f.read()
# Ensure CSV data is in the expected format.
self.assertEqual(write_data, expected_data)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains TF-Slim code for training models.
This script contains various functions for training models. These include
manipulating gradients, creating a `train_op` (an operation that computes the
loss and applies the gradients) and a training loop function. The training loop
allows the user to pass in the `train_op` and runs the optimization according
to user-specified arguments. Note that the training loop uses the tf.Supervisor
and its managed_session in its implementation to ensure the ability of worker
processes to recover from failures.
************************************
* A simple working training script *
************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Run training.
slim.learning.train(train_op, my_log_dir)
*************************
* Creating the train_op *
*************************
In order to train, TF-Slim's train loop needs a train_op: an `Operation` that
(a) computes the loss, (b) applies the gradients to update the weights and
(c) returns the value of the loss. slim.learning.create_train_op creates
such an `Operation`. This function also provides the ability to manipulate
the gradients using a few arguments:
# Create the train_op and clip the gradient norms:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=4)
# Create the train_op and scale the gradients by providing a map from variable
# name (or variable) to a scaling coefficient:
gradient_multipliers = {
'conv0/weights': 1.2,
'fc8/weights': 3.4,
}
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
gradient_multipliers=gradient_multipliers)
****************************************************************
* Performing additional (non-gradient) updates during training *
****************************************************************
Many networks utilize modules, like BatchNorm, that require performing a series
of non-gradient updates during training. slim.learning.create_train_op allows
a user to pass in a list of update_ops to call along with the gradient updates.
train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops)
By default, slim.learning.create_train_op includes all update ops that are
part of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, TF-Slim's
slim.batch_norm function adds the moving mean and moving variance updates to
this collection. Consequently, users who want to use slim.batch_norm will not
need to take any additional steps in order to have the moving mean and moving
variance updates be computed.
However, users with additional, specialized updates can either override the
default update ops or simply add additional update ops to the
`tf.GraphKeys.UPDATE_OPS` collection:
# Force TF-Slim NOT to use ANY update_ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=[])
# Use an alternative set of update ops:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=my_other_update_ops)
# Use an alternative set of update ops in addition to the default updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)
train_op = slim.learning.create_train_op(
total_loss,
optimizer)
# Which is the same as:
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))
******************************************
* Initializing a model from a checkpoint *
******************************************
It is common to want to 'warm-start' a model from a pre-trained checkpoint.
TF-Slim provides a convenient mechanism for doing so:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the initial assignment op
checkpoint_path = '/path/to/old_model_checkpoint'
variables_to_restore = slim.get_model_variables()
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
***************************************************************************
* Initializing a model from a checkpoint whose variable names don't match *
***************************************************************************
At times, a user may want to initialize a new model with values from a
checkpoint whose variable names do not match those of the current model. In this
case, one needs to create a mapping from the checkpoint variable names to the
current model variables. This requires only a small modification of the code
above:
...
# Creates a model with two variables, var0 and var1
predictions = MyModel(images)
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Create the mapping:
variables_to_restore = {
'name_var_0_in_checkpoint': slim.get_unique_variable('var0'),
'name_var_1_in_checkpoint': slim.get_unique_variable('var1')
}
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
*************************************************
* Fine-Tuning Part of a model from a checkpoint *
*************************************************
Rather than initializing all of the weights of a given model, we sometimes
only want to restore some of the weights from a checkpoint. To do this, one
need only filter those variables to initialize as follows:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
checkpoint_path = '/path/to/old_model_checkpoint'
# Specify the variables to restore via a list of inclusion or exclusion
# patterns:
variables_to_restore = slim.get_variables_to_restore(
include=["conv"], exclude=["fc8", "fc9])
# or
variables_to_restore = slim.get_variables_to_restore(exclude=["conv"])
init_assign_op, init_feed_dict = slim.assign_from_checkpoint(
checkpoint_path, variables_to_restore)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
******************************************************
* Initializing model variables from values in memory *
******************************************************
One may want to initialize the weights of a model from values from an arbitrary
source (a text document, matlab file, etc). While this is technically feasible
using plain TensorFlow, it also results in the values of your weights being
stored in the graph. For large models, this becomes prohibitively large. TF-Slim
allows you to perform this initial assignment without having to store the values
of the initial model in the graph itself by using placeholders and a feed
dictionary:
...
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Create the mapping from variable names to values:
var0_initial_value = ReadFromDisk(...)
var1_initial_value = ReadFromDisk(...)
var_names_to_values = {
'var0': var0_initial_value,
'var1': var1_initial_value,
}
init_assign_op, init_feed_dict = slim.assign_from_values(var_names_to_values)
# Create an initial assignment function.
def InitAssignFn(sess):
sess.run(init_assign_op, init_feed_dict)
# Run training.
slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.training.python.training import training
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import timeline
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as tf_optimizer
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'add_gradients_summaries', 'clip_gradient_norms', 'multiply_gradients',
'create_train_op', 'train_step', 'train'
]
def clip_gradient_norms(gradients_to_variables, max_norm):
"""Clips the gradients by the given value.
Args:
gradients_to_variables: A list of gradient to variable pairs (tuples).
max_norm: the maximum norm value.
Returns:
A list of clipped gradient to variable pairs.
"""
clipped_grads_and_vars = []
for grad, var in gradients_to_variables:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
tmp = clip_ops.clip_by_norm(grad.values, max_norm)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad = clip_ops.clip_by_norm(grad, max_norm)
clipped_grads_and_vars.append((grad, var))
return clipped_grads_and_vars
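# Example sketch (illustrative, not part of the original module): the helper above
# operates on the (gradient, variable) pairs produced by an optimizer, e.g.:
#
#   grads_and_vars = optimizer.compute_gradients(total_loss)
#   clipped = clip_gradient_norms(grads_and_vars, max_norm=4.0)
#   train_op = optimizer.apply_gradients(clipped)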
def multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
gradient_multipliers: A map from either `Variables` or `Variable` op names
to the coefficient by which the associated gradient should be scaled.
Returns:
The updated list of gradient to variable pairs.
Raises:
ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`
is empty or None or if `gradient_multipliers` is not a dictionary.
"""
if not isinstance(grads_and_vars, list):
raise ValueError('`grads_and_vars` must be a list.')
if not gradient_multipliers:
raise ValueError('`gradient_multipliers` is empty.')
if not isinstance(gradient_multipliers, dict):
raise ValueError('`gradient_multipliers` must be a dict.')
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if var in gradient_multipliers or var.op.name in gradient_multipliers:
key = var if var in gradient_multipliers else var.op.name
if grad is None:
raise ValueError('Requested multiple of `None` gradient.')
if isinstance(grad, ops.IndexedSlices):
tmp = grad.values * constant_op.constant(
gradient_multipliers[key], dtype=grad.dtype)
grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)
else:
grad *= constant_op.constant(
gradient_multipliers[key], dtype=grad.dtype)
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
def add_gradients_summaries(grads_and_vars):
"""Add summaries to gradients.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The list of created summaries.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(
summary.histogram(var.op.name + '/gradient', grad_values))
summaries.append(
summary.scalar(var.op.name + '/gradient_norm',
clip_ops.global_norm([grad_values])))
else:
logging.info('Var %s has no gradient', var.op.name)
return summaries
_USE_GLOBAL_STEP = 0
def create_train_op(total_loss,
optimizer,
global_step=_USE_GLOBAL_STEP,
update_ops=None,
variables_to_train=None,
clip_gradient_norm=0,
summarize_gradients=False,
gate_gradients=tf_optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
gradient_multipliers=None):
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
global_step: A `Tensor` representing the global step variable. If left as
`_USE_GLOBAL_STEP`, then slim.variables.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
`tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but
it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,
a warning will be displayed.
variables_to_train: an optional list of variables to train. If None, it will
default to all tf.trainable_variables().
clip_gradient_norm: If greater than 0 then the gradients would be clipped
by it.
    summarize_gradients: Whether or not to add summaries for each gradient.
gate_gradients: How to gate the computation of gradients. See tf.Optimizer.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: Whether or not to try colocating the gradients
with the ops that generated them.
gradient_multipliers: A dictionary of either `Variables` or `Variable` op
names to the coefficient by which the associated gradient should be
scaled.
Returns:
A `Tensor` that when evaluated, computes the gradients and returns the total
loss value.
"""
def transform_grads_fn(grads):
if gradient_multipliers:
with ops.name_scope('multiply_grads'):
grads = multiply_gradients(grads, gradient_multipliers)
# Clip gradients.
if clip_gradient_norm > 0:
with ops.name_scope('clip_grads'):
grads = clip_gradient_norms(grads, clip_gradient_norm)
return grads
return training.create_train_op(
total_loss=total_loss,
optimizer=optimizer,
global_step=global_step,
update_ops=update_ops,
variables_to_train=variables_to_train,
transform_grads_fn=transform_grads_fn,
summarize_gradients=summarize_gradients,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops)
def _wait_for_step(sess, global_step, step):
"""Wait till the global step has reached at least 'step'.
Args:
sess: A session.
global_step: A Tensor.
step: Int. The global step to reach.
"""
while True:
if training_util.global_step(sess, global_step) >= step:
break
time.sleep(1.0)
def train_step(sess, train_op, global_step, train_step_kwargs):
"""Function that takes a gradient step and specifies whether to stop.
Args:
sess: The current session.
train_op: An `Operation` that evaluates the gradients and returns the
total loss.
global_step: A `Tensor` representing the global training step.
    train_step_kwargs: A dictionary of keyword arguments. Keys recognized by the
      default implementation are 'should_stop', 'should_log', 'should_trace',
      'logdir' and 'summary_writer'.
Returns:
The total loss and a boolean indicating whether or not to stop training.
Raises:
ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.
"""
start_time = time.time()
trace_run_options = None
run_metadata = None
if 'should_trace' in train_step_kwargs:
if 'logdir' not in train_step_kwargs:
raise ValueError('logdir must be present in train_step_kwargs when '
'should_trace is present')
if sess.run(train_step_kwargs['should_trace']):
trace_run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
total_loss, np_global_step = sess.run([train_op, global_step],
options=trace_run_options,
run_metadata=run_metadata)
time_elapsed = time.time() - start_time
if run_metadata is not None:
tl = timeline.Timeline(run_metadata.step_stats)
trace = tl.generate_chrome_trace_format()
trace_filename = os.path.join(train_step_kwargs['logdir'],
'tf_trace-%d.json' % np_global_step)
logging.info('Writing trace to %s', trace_filename)
file_io.write_string_to_file(trace_filename, trace)
if 'summary_writer' in train_step_kwargs:
train_step_kwargs['summary_writer'].add_run_metadata(run_metadata,
'run_metadata-%d' %
np_global_step)
if 'should_log' in train_step_kwargs:
if sess.run(train_step_kwargs['should_log']):
logging.info('global step %d: loss = %.4f (%.2f sec/step)',
np_global_step, total_loss, time_elapsed)
# TODO(nsilberman): figure out why we can't put this into sess.run. The
# issue right now is that the stop check depends on the global step. The
  # increment of global step often happens via the train op, which is
  # typically created using optimizer.apply_gradients.
#
# Since running `train_op` causes the global step to be incremented, one
  # would expect that using a control dependency would allow the
# should_stop check to be run in the same session.run call:
#
# with ops.control_dependencies([train_op]):
# should_stop_op = ...
#
# However, this actually seems not to work on certain platforms.
if 'should_stop' in train_step_kwargs:
should_stop = sess.run(train_step_kwargs['should_stop'])
else:
should_stop = False
return total_loss, should_stop
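# Example sketch (illustrative only): a custom `train_step_fn` handed to `train()`
# below must accept the same four arguments and return (total_loss, should_stop),
# for instance by delegating to the default implementation:
#
#   def my_train_step(sess, train_op, global_step, train_step_kwargs):
#       loss, should_stop = train_step(sess, train_op, global_step, train_step_kwargs)
#       # ... custom per-step bookkeeping could go here ...
#       return loss, should_stop
#
#   slim.learning.train(train_op, my_log_dir, train_step_fn=my_train_step)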
_USE_DEFAULT = 0
def train(train_op,
logdir,
train_step_fn=train_step,
train_step_kwargs=_USE_DEFAULT,
log_every_n_steps=1,
graph=None,
master='',
is_chief=True,
global_step=None,
number_of_steps=None,
init_op=_USE_DEFAULT,
init_feed_dict=None,
local_init_op=_USE_DEFAULT,
init_fn=None,
ready_op=_USE_DEFAULT,
summary_op=_USE_DEFAULT,
save_summaries_secs=600,
summary_writer=_USE_DEFAULT,
startup_delay_steps=0,
saver=None,
save_interval_secs=600,
sync_optimizer=None,
session_config=None,
trace_every_n_steps=None):
"""Runs a training loop using a TensorFlow supervisor.
When the sync_optimizer is supplied, gradient updates are applied
  synchronously. Otherwise, gradient updates are applied asynchronously.
Args:
train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step are logged.
graph: The graph to pass to the supervisor. If no graph is supplied the
default graph is used.
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training.
If the value is left as None, training proceeds indefinitely.
init_op: The initialization operation. If left to its default value, then
the session is initialized by calling `tf.global_variables_initializer()`.
init_feed_dict: A feed dictionary to use when executing the `init_op`.
local_init_op: The local initialization operation. If left to its default
value, then the session is initialized by calling
`tf.local_variables_initializer()` and `tf.tables_initializer()`.
init_fn: An optional callable to be executed after `init_op` is called. The
callable must accept one argument, the session being initialized.
ready_op: Operation to check if the model is ready to use. If left to its
default value, then the session checks for readiness by calling
`tf.report_uninitialized_variables()`.
summary_op: The summary operation.
save_summaries_secs: How often, in seconds, to save summaries.
summary_writer: `SummaryWriter` to use. Can be `None`
to indicate that no summaries should be written. If unset, we
create a SummaryWriter.
startup_delay_steps: The number of steps to wait for before beginning. Note
that this must be 0 if a sync_optimizer is supplied.
saver: Saver to save checkpoints. If None, a default one will be created
and used.
save_interval_secs: How often, in seconds, to save the model to `logdir`.
sync_optimizer: an instance of tf.train.SyncReplicasOptimizer. If the
argument is supplied, gradient updates will be synchronous. If left as
`None`, gradient updates will be asynchronous.
session_config: An instance of `tf.ConfigProto` that will be used to
configure the `Session`. If left as `None`, the default will be used.
trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
and add it to the summaries every `trace_every_n_steps`. If None, no trace
information will be produced or saved.
Returns:
the value of the loss function after training.
Raises:
ValueError: if `train_op` is empty or if `startup_delay_steps` is
non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
provided.
"""
if train_op is None:
raise ValueError('train_op cannot be None.')
if logdir is None:
if summary_op != _USE_DEFAULT:
raise ValueError('Cannot provide summary_op because logdir=None')
if saver is not None:
raise ValueError('Cannot provide saver because logdir=None')
if trace_every_n_steps is not None:
raise ValueError('Cannot provide trace_every_n_steps because '
'logdir=None')
if sync_optimizer is not None and startup_delay_steps > 0:
raise ValueError(
'startup_delay_steps must be zero when sync_optimizer is supplied.')
if number_of_steps is not None and number_of_steps <= 0:
raise ValueError(
'`number_of_steps` must be either None or a positive number.')
graph = graph or ops.get_default_graph()
with graph.as_default():
if global_step is None:
global_step = variables.get_or_create_global_step()
saver = saver or tf_saver.Saver()
with ops.name_scope('init_ops'):
if init_op == _USE_DEFAULT:
init_op = tf_variables.global_variables_initializer()
if ready_op == _USE_DEFAULT:
ready_op = tf_variables.report_uninitialized_variables()
if local_init_op == _USE_DEFAULT:
local_init_op = control_flow_ops.group(
tf_variables.local_variables_initializer(),
data_flow_ops.tables_initializer())
if sync_optimizer is not None and isinstance(
sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
with ops.control_dependencies([local_init_op] if local_init_op is
not None else []):
if is_chief:
local_init_op = sync_optimizer.chief_init_op
else:
local_init_op = sync_optimizer.local_step_init_op
ready_for_local_init_op = sync_optimizer.ready_for_local_init_op
else:
ready_for_local_init_op = None
if summary_op == _USE_DEFAULT:
summary_op = summary.merge_all()
if summary_writer == _USE_DEFAULT:
summary_writer = supervisor.Supervisor.USE_DEFAULT
cleanup_op = None
if is_chief and sync_optimizer is not None:
if not isinstance(sync_optimizer,
(sync_replicas_optimizer.SyncReplicasOptimizer)):
raise ValueError(
'`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')
# Need to create these BEFORE the supervisor finalizes the graph:
init_tokens_op = sync_optimizer.get_init_tokens_op()
chief_queue_runner = sync_optimizer.get_chief_queue_runner()
if isinstance(sync_optimizer,
sync_replicas_optimizer.SyncReplicasOptimizer):
cleanup_op = sync_optimizer.get_clean_up_op()
if train_step_kwargs == _USE_DEFAULT:
with ops.name_scope('train_step'):
train_step_kwargs = {}
if number_of_steps:
should_stop_op = math_ops.greater_equal(global_step, number_of_steps)
else:
should_stop_op = constant_op.constant(False)
train_step_kwargs['should_stop'] = should_stop_op
train_step_kwargs['should_log'] = math_ops.equal(
math_ops.mod(global_step, log_every_n_steps), 0)
if is_chief and trace_every_n_steps is not None:
train_step_kwargs['should_trace'] = math_ops.equal(
math_ops.mod(global_step, trace_every_n_steps), 0)
train_step_kwargs['logdir'] = logdir
sv = supervisor.Supervisor(
graph=graph,
is_chief=is_chief,
logdir=logdir,
init_op=init_op,
init_feed_dict=init_feed_dict,
local_init_op=local_init_op,
ready_for_local_init_op=ready_for_local_init_op,
ready_op=ready_op,
summary_op=summary_op,
summary_writer=summary_writer,
global_step=global_step,
saver=saver,
save_summaries_secs=save_summaries_secs,
save_model_secs=save_interval_secs,
init_fn=init_fn)
if summary_writer is not None:
train_step_kwargs['summary_writer'] = sv.summary_writer
should_retry = True
while should_retry:
try:
should_retry = False
with sv.managed_session(
master, start_standard_services=False, config=session_config) as sess:
logging.info('Starting Session.')
if is_chief:
if logdir:
sv.start_standard_services(sess)
elif startup_delay_steps > 0:
_wait_for_step(sess, global_step,
min(startup_delay_steps, number_of_steps or
sys.maxint))
sv.start_queue_runners(sess)
logging.info('Starting Queues.')
if is_chief and sync_optimizer is not None:
sv.start_queue_runners(sess, [chief_queue_runner])
sess.run(init_tokens_op)
try:
try:
while not sv.should_stop():
total_loss, should_stop = train_step_fn(
sess, train_op, global_step, train_step_kwargs)
if should_stop:
logging.info('Stopping Training.')
break
except errors.OutOfRangeError:
# OutOfRangeError is thrown when epoch limit per
# tf.train.limit_epochs is reached.
logging.info('Caught OutOfRangeError. Stopping Training.')
if logdir and sv.is_chief:
logging.info('Finished training! Saving model to disk.')
sv.saver.save(sess, sv.save_path, global_step=sv.global_step)
except:
if sv.is_chief and cleanup_op is not None:
logging.info('About to execute sync_clean_up_op!')
sess.run(cleanup_op)
raise
except errors.AbortedError:
# Always re-run on AbortedError as it indicates a restart of one of the
# distributed tensorflow servers.
logging.info('Retrying training!')
should_retry = True
return total_loss
|
|
import sys
from os import path
from datetime import datetime
from fabric.api import sudo, put, env, run, settings, prompt, task, hide, puts, show, warn, cd
from fabric.contrib.files import upload_template
from ezjailremote.utils import kwargs2commandline, jexec, get_flavour, is_ip
EZJAIL_JAILDIR = '/usr/jails'
EZJAIL_RC = '/usr/local/etc/rc.d/ezjail'
EZJAIL_ADMIN = '/usr/local/bin/ezjail-admin'
env['shell'] = '/bin/sh -c'
# output['output'] = False
# output['running'] = False
@task
def bootstrap(admin=None,
keyfile=None,
primary_ip=None):
""" assuming we have ssh access as root sets up permanent ssh access, creates the admin user with
ssh access and sudo privileges, then shuts out root login again.
admin: username for the admin account, defaults to your local username
keyfile: full path to your public SSH key, defaults to ~/.ssh/identity.pub
primary_ip: the IP address for which to configure the jailhost, can be omitted if the host is given
as an IP address with the -H parameter
"""
# force user to root
orig_user = env['user']
env['user'] = 'root'
# check for admin user and key:
if admin is None:
admin = env['local_user']
if keyfile is None:
keyfile = path.expanduser("~/.ssh/identity.pub")
if not path.exists(keyfile):
sys.exit("No such keyfile '%s'" % keyfile)
pkg_info = run("pkg_info")
with settings(hide("everything"), warn_only=True):
user_info = run("pw usershow %s" % admin)
if primary_ip is None and is_ip.match(env['host']):
primary_ip = env['host']
if primary_ip is None:
warn("No primary IP address specified!")
else:
run("grep -v ListenAddress /etc/ssh/sshd_config > /etc/ssh/sshd_config.tmp")
run("echo 'ListenAddress %s' >> /etc/ssh/sshd_config.tmp" % primary_ip)
run("mv /etc/ssh/sshd_config /etc/ssh/sshd_config.bak")
run("mv /etc/ssh/sshd_config.tmp /etc/ssh/sshd_config")
# prevent syslogd listening on any addresses (to avoid warnings at jail startup)
run("echo syslogd_flags='-ss' >> /etc/rc.conf")
# enable ezjail
run("echo ezjail_enable='YES' >> /etc/rc.conf")
# create admin user
if "sudo" not in pkg_info:
puts("Installing sudo")
run("pkg_add -r sudo")
if "no such user" in user_info:
puts("Creating admin user %s" % admin)
run("pw useradd -n %(admin)s -u 1001 -m -d /home/%(admin)s -G wheel" % dict(admin=admin))
ssh_config = path.join('/', 'usr', 'home', admin, '.ssh')
run("mkdir -p %s" % ssh_config)
run("chown -R %s %s" % (admin, ssh_config))
remote_keyfile = path.join(ssh_config, 'authorized_keys')
put(keyfile, remote_keyfile)
run("echo '%wheel ALL=(ALL) NOPASSWD: ALL' >> /usr/local/etc/sudoers")
else:
puts("Not touching existing user %s" % admin)
# disable root login
puts("Setting up ssh login")
run("grep -v PermitRootLogin /etc/ssh/sshd_config > /etc/ssh/sshd_config.tmp")
run("echo 'PermitRootLogin no' >> /etc/ssh/sshd_config.tmp")
run("mv /etc/ssh/sshd_config /etc/ssh/sshd_config.bak")
run("mv /etc/ssh/sshd_config.tmp /etc/ssh/sshd_config")
run("echo sshd_enable='YES' >> /etc/rc.conf")
run("/etc/rc.d/sshd restart")
puts("You now should be able to login with `ssh %s`" % primary_ip)
env['user'] = orig_user
@task
def install(source='pkg_add', jailzfs=None, **kw):
""" assuming bootstrap has been run, install ezjail and run ezjail-admin install.
    if `source` is 'pkg_add' it installs a binary package, if it's 'cvs' it installs from the current CVS.
    if `jailzfs` is set, assume ZFS is used and set the jailzfs path in ezjail's configuration.
all **kw are passed to `ezjail-admin install`. i.e. to install with ports (`-p`):
ezjail-remote install:p=True
"""
# install ezjail
pkg_info = run("pkg_info")
if "ezjail" not in pkg_info:
puts("Installing ezjail (this could take a while")
if source == 'cvs':
run("cvs -d :pserver:anoncvs@cvs.erdgeist.org:/home/cvsroot co ezjail")
with cd("ezjail"):
sudo("make install")
else:
sudo("pkg_add -r ezjail")
sudo("cp /usr/local/etc/ezjail.conf.sample /usr/local/etc/ezjail.conf")
if jailzfs:
sudo("""echo 'ezjail_use_zfs="YES"' >> /usr/local/etc/ezjail.conf""")
sudo("""echo 'ezjail_use_zfs_for_jails="YES"' >> /usr/local/etc/ezjail.conf""")
sudo("""echo 'ezjail_jailzfs="%s"' >> /usr/local/etc/ezjail.conf""" % jailzfs)
# run ezjail's install command
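        # (Illustrative note; the exact flag rendering depends on kwargs2commandline):
        # with e.g. kw={'p': True} the command built below is roughly
        # "/usr/local/bin/ezjail-admin install -p".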
install_basejail = "%s install%s" % (EZJAIL_ADMIN, kwargs2commandline(kw,
boolflags=['p', 'P', 'm', 'M', 's', 'S']))
sudo(install_basejail)
sudo("echo 'ezjail_enable=YES' >> /etc/rc.conf")
else:
puts('ezjail already installed. not doing anything!')
@task
def create(name,
ip,
admin=None,
keyfile=None,
flavour=None,
ctype=None,
sshd=True,
**kw):
"""<name>,<ip>(,<admin>,<keyfile>,flavour)
Create a jail instance with the given name and IP address.
Configures ssh access for the given admin user and ssh key.
admin: defaults to the current user
keyfile: defaults to ~/.ssh/identity.pub
flavour: defaults to 'basic' and refers to a LOCAL flavour, NOT any on the host
    ctype: defaults to None and refers to the `-c` flag, meaning you can set it to `simple`, `bde`, `eli` or `zfs`.
sshd: if True, enable sshd, upload and enable a public key
any additional keyword arguments are passed to the flavour
"""
if admin is None:
admin = env['local_user']
if keyfile is None:
keyfile = path.expanduser("~/.ssh/identity.pub")
if not path.exists(keyfile) and sshd:
sys.exit("No such keyfile '%s'" % keyfile)
print("name: %s, ip: %s, flavour: %s" % (name, ip, flavour))
from ezjailremote.flavours import basic
local_flavour_path = path.abspath(path.dirname(basic.__file__))
with settings(show("output"), warn_only=True):
tmp_flavour = 'basic-%s' % datetime.now().strftime('%Y%m%d%H%M%s')
remote_flavour_path = path.join(EZJAIL_JAILDIR, 'flavours', tmp_flavour)
sudo("mkdir -p %s" % remote_flavour_path)
sudo("chown %s %s" % (env['user'], remote_flavour_path))
put("%s/*" % local_flavour_path, remote_flavour_path)
local_flavour_script = path.join(local_flavour_path, 'ezjail.flavour')
upload_template(local_flavour_script,
path.join(remote_flavour_path, 'ezjail.flavour'),
context=locals(), backup=False)
# create the jail using the uploaded flavour
if ctype:
ctype = ' -c %s' % ctype
else:
ctype = ''
create_jail = sudo("%s create -f %s%s %s %s" % (EZJAIL_ADMIN, tmp_flavour, ctype, name, ip))
if create_jail.succeeded:
jail_path = path.join(EZJAIL_JAILDIR, name)
# copy resolv.conf from host
sudo("cp /etc/resolv.conf %s" % path.join(jail_path, 'etc', 'resolv.conf'))
# configure sshd
if sshd:
# copy the key file into flavour
ssh_config = path.join(jail_path, 'home', admin, '.ssh')
sudo("mkdir -p %s" % ssh_config)
remote_keyfile = path.join(ssh_config, 'authorized_keys')
put(keyfile, remote_keyfile)
sudo("chown -R %s %s" % (admin, ssh_config))
sudo("""echo 'sshd_enable="YES"' >> %s""" % path.join(jail_path, 'etc', 'rc.conf'))
sudoers = path.join(jail_path, 'usr', 'local', 'etc', 'sudoers')
sudo("chown 0 %s" % sudoers)
sudo("chmod 0440 %s" % sudoers)
# start up the jail:
sudo("%s start %s" % (EZJAIL_RC, name))
# perform any additional setup the flavour may provide
if flavour is not None:
jexec(ip, apply_flavour, flavour, **kw)
sudo("rm -rf %s" % remote_flavour_path)
@task
def apply_flavour(flavour, *args, **kwargs):
flavour_module = get_flavour(flavour)
if hasattr(flavour_module, 'setup'):
flavour_module.setup(*args, **kwargs)
@task
def show_info():
with settings(show("output"), warn_only=True):
run("hostname")
# run("ifconfig")
@task
def destroy(name):
"""<name>"""
really = prompt('Are you ABSOLUTELY sure you want to destroy the jail %s?\n'
'The jail will be stopped if running, deleted from ezjail and on the filesystem!!\n'
'Type YES to continue:' % name)
if really != 'YES':
sys.exit("Glad I asked...!")
sudo("%s delete -fw %s" % (EZJAIL_ADMIN, name))
@task(default=True, aliases=['archive', 'config', 'console', 'delete', 'list', 'restore', 'update', 'start', 'stop'])
def usage(*xargs, **kw):
"""(passed directly to ezjail-admin)"""
command = env.get('command')
if command == 'usage':
command = '--help'
args_string = ''
for item in kw.items():
args_string += '%s %s ' % item
with show('output'):
sudo("%s %s %s %s" % (EZJAIL_ADMIN, command, args_string, ' '.join(xargs)))
@task
def jls():
with show('output'):
run("jls")
|
|
# -*- coding: utf-8 -*-
import os
import six
import sys
import time
import traceback
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.backends import utils
from django.utils.six import PY3
from django_extensions_shell.management.shells import import_objects
from django_extensions_shell.management.utils import signalcommand
def use_vi_mode():
editor = os.environ.get('EDITOR')
if not editor:
return False
editor = os.path.basename(editor)
return editor.startswith('vi') or editor.endswith('vim')
class Command(BaseCommand):
help = "Like the 'shell' command but autoloads the models of all installed Django apps."
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not BPython nor IPython.')
parser.add_argument(
'--bpython', action='store_true', dest='bpython',
help='Tells Django to use BPython, not IPython.')
parser.add_argument(
'--ptpython', action='store_true', dest='ptpython',
help='Tells Django to use PTPython, not IPython.')
parser.add_argument(
'--ptipython', action='store_true', dest='ptipython',
help='Tells Django to use PT-IPython, not IPython.')
parser.add_argument(
'--ipython', action='store_true', dest='ipython',
help='Tells Django to use IPython, not BPython.')
parser.add_argument(
'--notebook', action='store_true', dest='notebook',
help='Tells Django to use IPython Notebook.')
parser.add_argument(
'--kernel', action='store_true', dest='kernel',
help='Tells Django to start an IPython Kernel.')
        parser.add_argument(
            '--connection-file', action='store', dest='connection_file',
            help='Specifies the connection file to use if using the --kernel option')
parser.add_argument(
'--use-pythonrc', action='store_true', dest='use_pythonrc',
help='Tells Django to execute PYTHONSTARTUP file '
                 '(BE CAREFUL WITH THIS!)')
parser.add_argument(
'--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed")
parser.add_argument(
'--dont-load', action='append', dest='dont_load', default=[],
help='Ignore autoloading of some apps/models. Can be used '
'several times.')
parser.add_argument(
'--quiet-load', action='store_true', default=False,
dest='quiet_load', help='Do not display loaded models messages')
parser.add_argument(
'--vi', action='store_true', default=use_vi_mode(), dest='vi_mode',
help='Load Vi key bindings (for --ptpython and --ptipython)')
parser.add_argument(
'--no-browser', action='store_true', default=False,
dest='no_browser',
help='Don\'t open the notebook in a browser after startup.')
def get_ipython_arguments(self, options):
return getattr(settings, 'IPYTHON_ARGUMENTS', [])
def get_notebook_arguments(self, options):
return getattr(settings, 'NOTEBOOK_ARGUMENTS', [])
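    # Example (illustrative): both hooks read optional lists from the Django
    # settings module, e.g.
    #   IPYTHON_ARGUMENTS = ['--profile=myprofile']
    #   NOTEBOOK_ARGUMENTS = ['--no-browser', '--port=8888']
    # and fall back to [] when the settings are absent.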
@signalcommand
def handle(self, *args, **options):
use_kernel = options.get('kernel', False)
use_notebook = options.get('notebook', False)
use_ipython = options.get('ipython', False)
use_bpython = options.get('bpython', False)
use_plain = options.get('plain', False)
use_ptpython = options.get('ptpython', False)
use_ptipython = options.get('ptipython', False)
use_pythonrc = options.get('use_pythonrc', True)
no_browser = options.get('no_browser', False)
verbosity = int(options.get('verbosity', 1))
print_sql = getattr(settings, 'SHELL_PLUS_PRINT_SQL', False)
if options.get("print_sql", False) or print_sql:
# Code from http://gist.github.com/118990
sqlparse = None
try:
import sqlparse
except ImportError:
pass
class PrintQueryWrapper(utils.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
execution_time = time.time() - starttime
raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params)
if sqlparse:
print(sqlparse.format(raw_sql, reindent=True))
else:
print(raw_sql)
print("")
print('Execution time: %.6fs [Database: %s]' % (execution_time, self.db.alias))
print("")
utils.CursorDebugWrapper = PrintQueryWrapper
def get_kernel():
try:
from IPython import release
if release.version_info[0] < 2:
print(self.style.ERROR("--kernel requires at least IPython version 2.0"))
return
from IPython import start_kernel
except ImportError:
return traceback.format_exc()
def run_kernel():
imported_objects = import_objects(options, self.style)
kwargs = dict(
argv=[],
user_ns=imported_objects,
)
connection_file = options.get('connection_file')
if connection_file:
kwargs['connection_file'] = connection_file
start_kernel(**kwargs)
return run_kernel
def get_notebook():
from IPython import release
try:
from notebook.notebookapp import NotebookApp
except ImportError:
try:
from IPython.html.notebookapp import NotebookApp
except ImportError:
if release.version_info[0] >= 3:
raise
try:
from IPython.frontend.html.notebook import notebookapp
NotebookApp = notebookapp.NotebookApp
except ImportError:
return traceback.format_exc()
def install_kernel_spec(app, display_name, ipython_arguments):
"""install an IPython >= 3.0 kernelspec that loads django extensions"""
ksm = app.kernel_spec_manager
try_spec_names = getattr(settings, 'NOTEBOOK_KERNEL_SPEC_NAMES', [
'python3' if PY3 else 'python2',
'python',
])
if isinstance(try_spec_names, six.string_types):
try_spec_names = [try_spec_names]
ks = None
for spec_name in try_spec_names:
try:
ks = ksm.get_kernel_spec(spec_name)
break
except:
continue
if not ks:
raise CommandError("No notebook (Python) kernel specs found")
ks.argv.extend(ipython_arguments)
ks.display_name = display_name
manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
if manage_py == 'manage.py' and os.path.isdir(manage_py_dir) and manage_py_dir != os.getcwd():
pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if manage_py_dir not in pythonpath:
pythonpath.append(manage_py_dir)
ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath))
kernel_dir = os.path.join(ksm.user_kernel_dir, 'django_extensions_shell')
if not os.path.exists(kernel_dir):
os.makedirs(kernel_dir)
with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f:
f.write(ks.to_json())
def run_notebook():
app = NotebookApp.instance()
# Treat IPYTHON_ARGUMENTS from settings
ipython_arguments = self.get_ipython_arguments(options)
if 'django_extensions_shell.management.notebook_extension' not in ipython_arguments:
ipython_arguments.extend(['--ext', 'django_extensions_shell.management.notebook_extension'])
# Treat NOTEBOOK_ARGUMENTS from settings
notebook_arguments = self.get_notebook_arguments(options)
if no_browser and '--no-browser' not in notebook_arguments:
notebook_arguments.append('--no-browser')
if '--notebook-dir' not in notebook_arguments:
notebook_arguments.extend(['--notebook-dir', '.'])
# IPython < 3 passes through kernel args from notebook CLI
if release.version_info[0] < 3:
notebook_arguments.extend(ipython_arguments)
app.initialize(notebook_arguments)
# IPython >= 3 uses kernelspecs to specify kernel CLI args
if release.version_info[0] >= 3:
display_name = getattr(settings, 'IPYTHON_KERNEL_DISPLAY_NAME', "Django Shell-Plus")
install_kernel_spec(app, display_name, ipython_arguments)
app.start()
return run_notebook
def get_plain():
# Using normal Python shell
import code
imported_objects = import_objects(options, self.style)
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if use_pythonrc:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
global_ns = {}
with open(pythonrc) as rcfile:
try:
six.exec_(compile(rcfile.read(), pythonrc, 'exec'), global_ns)
imported_objects.update(global_ns)
except NameError:
pass
# This will import .pythonrc.py as a side-effect
try:
import user # NOQA
except ImportError:
pass
def run_plain():
code.interact(local=imported_objects)
return run_plain
def get_bpython():
try:
from bpython import embed
except ImportError:
return traceback.format_exc()
def run_bpython():
imported_objects = import_objects(options, self.style)
embed(imported_objects)
return run_bpython
def get_ipython():
try:
from IPython import start_ipython
def run_ipython():
imported_objects = import_objects(options, self.style)
ipython_arguments = self.get_ipython_arguments(options)
start_ipython(argv=ipython_arguments, user_ns=imported_objects)
return run_ipython
except ImportError:
str_exc = traceback.format_exc()
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
# Notebook not supported for IPython < 0.11.
try:
from IPython.Shell import IPShell
except ImportError:
return str_exc + "\n" + traceback.format_exc()
def run_ipython():
imported_objects = import_objects(options, self.style)
shell = IPShell(argv=[], user_ns=imported_objects)
shell.mainloop()
return run_ipython
def get_ptpython():
try:
from ptpython.repl import embed, run_config
except ImportError:
tb = traceback.format_exc()
try: # prompt_toolkit < v0.27
from prompt_toolkit.contrib.repl import embed, run_config
except ImportError:
return tb
def run_ptpython():
imported_objects = import_objects(options, self.style)
history_filename = os.path.expanduser('~/.ptpython_history')
embed(globals=imported_objects, history_filename=history_filename,
vi_mode=options.get('vi_mode', False), configure=run_config)
return run_ptpython
def get_ptipython():
try:
from ptpython.repl import run_config
from ptpython.ipython import embed
except ImportError:
tb = traceback.format_exc()
try: # prompt_toolkit < v0.27
from prompt_toolkit.contrib.repl import run_config
from prompt_toolkit.contrib.ipython import embed
except ImportError:
return tb
def run_ptipython():
imported_objects = import_objects(options, self.style)
history_filename = os.path.expanduser('~/.ptpython_history')
embed(user_ns=imported_objects, history_filename=history_filename,
vi_mode=options.get('vi_mode', False), configure=run_config)
return run_ptipython
def set_application_name():
"""Set the application_name on PostgreSQL connection
Use the fallback_application_name to let the user override
it with PGAPPNAME env variable
http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa
"""
supported_backends = ['django.db.backends.postgresql_psycopg2']
opt_name = 'fallback_application_name'
default_app_name = 'django_shell'
app_name = default_app_name
dbs = getattr(settings, 'DATABASES', [])
            # look up every configured database entry
for db in dbs.keys():
if dbs[db]['ENGINE'] in supported_backends:
try:
options = dbs[db]['OPTIONS']
except KeyError:
options = {}
                    # do not override a value that is already defined
if opt_name in options.keys():
app_name = dbs[db]['OPTIONS'][opt_name]
else:
dbs[db].setdefault('OPTIONS', {}).update({opt_name: default_app_name})
app_name = default_app_name
return app_name
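        # Illustrative sketch (not executed here): with a PostgreSQL entry in
        # settings.DATABASES, set_application_name() effectively injects a
        # fallback name, e.g.
        #
        #   DATABASES = {
        #       'default': {
        #           'ENGINE': 'django.db.backends.postgresql_psycopg2',
        #           'NAME': 'mydb',  # hypothetical database name
        #           'OPTIONS': {'fallback_application_name': 'django_shell'},
        #       },
        #   }
        #
        # so the shell session shows up under that name in pg_stat_activity
        # unless PGAPPNAME or an explicit OPTIONS value overrides it.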
shells = (
('ptipython', get_ptipython),
('ptpython', get_ptpython),
('bpython', get_bpython),
('ipython', get_ipython),
('plain', get_plain),
)
SETTINGS_SHELL_PLUS = getattr(settings, 'SHELL_PLUS', None)
shell = None
shell_name = "any"
set_application_name()
if use_kernel:
shell = get_kernel()
shell_name = "IPython Kernel"
elif use_notebook:
shell = get_notebook()
shell_name = "IPython Notebook"
elif use_plain:
shell = get_plain()
shell_name = "plain"
elif use_ipython:
shell = get_ipython()
shell_name = "IPython"
elif use_bpython:
shell = get_bpython()
shell_name = "BPython"
elif use_ptpython:
shell = get_ptpython()
shell_name = "ptpython"
elif use_ptipython:
shell = get_ptipython()
shell_name = "ptipython"
elif SETTINGS_SHELL_PLUS:
shell_name = SETTINGS_SHELL_PLUS
shell = dict(shells)[shell_name]()
else:
for shell_name, func in shells:
shell = func()
if callable(shell):
if verbosity > 1:
print(self.style.NOTICE("Using shell %s." % shell_name))
break
if not callable(shell):
if shell:
print(shell)
print(self.style.ERROR("Could not load %s interactive Python environment." % shell_name))
return
shell()
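        # Typical invocations for the cascade above (flag names inferred from
        # the option variables handled in this command; illustrative only):
        #
        #   python manage.py shell_plus                 # first importable shell
        #   python manage.py shell_plus --plain         # stdlib code.interact
        #   python manage.py shell_plus --ipython       # force IPython
        #   python manage.py shell_plus --notebook --no-browser
        #
        # Setting SHELL_PLUS = "bpython" (or any key of `shells`) in Django
        # settings pins the choice without a command-line flag.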
|
|
import sys
from math import sqrt
import numpy as np
from scikits.audiolab import Sndfile, Format
try:
import libxmp
LIBXMP = True
except:
LIBXMP = False
from rawtrack import RawTrack
from fade import Fade
from segment import Segment
from volume import Volume
# Song and Speech are referenced by build() when adjust_dynamics=True
from song import Song
from speech import Speech
from ..utils import equal_power, RMS_energy, segment_array, wav_to_mp3
import radiotool.utils
class Composition(object):
"""
Create a composition made up of bits of different audio
tracks.
"""
def __init__(self, tracks=None, channels=2, segments=None, dynamics=None,
labels=None):
"""Initialize a composition with optional starting tracks/segments.
:param tracks: Initial tracks in the composition
:type tracks: list of :py:class:`radiotool.composer.Track`
:param channels: Number of channels in the composition
:type channels: integer
:param segments: Initial segments in the composition
:type segments: list of :py:class:`radiotool.composer.Segment`
:param dynamics: Initial dynamics in the composition
:type dynamics: list of :py:class:`radiotool.composer.Dynamic`
:returns: A new composition
:rtype: Composition
"""
if tracks is None:
self.tracks = set()
else:
self.tracks = set(tracks)
if segments is None:
self.segments = []
else:
self.segments = list(segments)
if dynamics is None:
self.dynamics = []
else:
self.dynamics = list(dynamics)
if labels is None:
self.labels = []
else:
self.labels = list(labels)
self.channels = channels
@property
def duration(self):
"""Get duration of composition
"""
return max([x.comp_location + x.duration
for x in self.segments])
def add_track(self, track):
"""Add track to the composition
:param track: Track to add to composition
:type track: :py:class:`radiotool.composer.Track`
"""
self.tracks.add(track)
def add_tracks(self, tracks):
"""Add a list of tracks to the composition
:param tracks: Tracks to add to composition
:type tracks: list of :py:class:`radiotool.composer.Track`
"""
self.tracks.update(tracks)
def add_segment(self, segment):
"""Add a segment to the composition
:param segment: Segment to add to composition
:type segment: :py:class:`radiotool.composer.Segment`
"""
self.tracks.add(segment.track)
self.segments.append(segment)
def add_segments(self, segments):
"""Add a list of segments to the composition
:param segments: Segments to add to composition
:type segments: list of :py:class:`radiotool.composer.Segment`
"""
self.tracks.update([seg.track for seg in segments])
self.segments.extend(segments)
def add_dynamic(self, dyn):
"""Add a dynamic to the composition
:param dyn: Dynamic to add to composition
:type dyn: :py:class:`radiotool.composer.Dynamic`
"""
self.dynamics.append(dyn)
def add_dynamics(self, dyns):
"""Add a list of dynamics to the composition
:param dyns: Dynamics to add to composition
:type dyns: list of :py:class:`radiotool.composer.Dynamic`
"""
self.dynamics.extend(dyns)
def add_label(self, label):
"""Add a label to the composition
:param label: Label to add to composition
:type label: :py:class:`radiotool.composer.Label`
"""
self.labels.append(label)
def add_labels(self, labels):
"""Add a list of labels to the composition
:param labels: List of labels to add to composition
        :type labels: list of :py:class:`radiotool.composer.Label`
"""
self.labels.extend(labels)
def fade_in(self, segment, duration, fade_type="linear"):
"""Adds a fade in to a segment in the composition
:param segment: Segment to fade in to
:type segment: :py:class:`radiotool.composer.Segment`
:param duration: Duration of fade-in (in seconds)
:type duration: float
:returns: The fade that has been added to the composition
:rtype: :py:class:`Fade`
"""
f = Fade(segment.track, segment.comp_location_in_seconds,
duration, 0.0, 1.0, fade_type=fade_type)
self.add_dynamic(f)
return f
def fade_out(self, segment, duration, fade_type="linear"):
"""Adds a fade out to a segment in the composition
:param segment: Segment to fade out
:type segment: :py:class:`radiotool.composer.Segment`
:param duration: Duration of fade-out (in seconds)
:type duration: float
:returns: The fade that has been added to the composition
:rtype: :py:class:`Fade`
"""
score_loc_in_seconds = segment.comp_location_in_seconds +\
segment.duration_in_seconds - duration
f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0,
fade_type=fade_type)
# bug fixing... perhaps
f.comp_location = segment.comp_location + segment.duration -\
int(duration * segment.track.samplerate)
self.add_dynamic(f)
return f
def extended_fade_in(self, segment, duration):
"""Add a fade-in to a segment that extends the beginning of the
segment.
:param segment: Segment to fade in
:type segment: :py:class:`radiotool.composer.Segment`
:param duration: Duration of fade-in (in seconds)
:returns: The fade that has been added to the composition
:rtype: :py:class:`Fade`
"""
dur = int(duration * segment.track.samplerate)
if segment.start - dur >= 0:
segment.start -= dur
else:
raise Exception(
"Cannot create fade-in that extends "
"past the track's beginning")
if segment.comp_location - dur >= 0:
segment.comp_location -= dur
else:
raise Exception(
"Cannot create fade-in the extends past the score's beginning")
segment.duration += dur
f = Fade(segment.track, segment.comp_location_in_seconds,
duration, 0.0, 1.0)
self.add_dynamic(f)
return f
def extended_fade_out(self, segment, duration):
"""Add a fade-out to a segment that extends the beginning of the
segment.
:param segment: Segment to fade out
:type segment: :py:class:`radiotool.composer.Segment`
:param duration: Duration of fade-out (in seconds)
:returns: The fade that has been added to the composition
:rtype: :py:class:`Fade`
"""
dur = int(duration * segment.track.samplerate)
if segment.start + segment.duration + dur <\
segment.track.duration:
segment.duration += dur
else:
raise Exception(
"Cannot create fade-out that extends past the track's end")
score_loc_in_seconds = segment.comp_location_in_seconds +\
segment.duration_in_seconds - duration
f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0)
self.add_dynamic(f)
return f
def cross_fade(self, seg1, seg2, duration):
"""Add a linear crossfade to the composition between two
segments.
:param seg1: First segment (fading out)
:type seg1: :py:class:`radiotool.composer.Segment`
:param seg2: Second segment (fading in)
:type seg2: :py:class:`radiotool.composer.Segment`
:param duration: Duration of crossfade (in seconds)
"""
if seg1.comp_location + seg1.duration - seg2.comp_location < 2:
dur = int(duration * seg1.track.samplerate)
if dur % 2 == 1:
dur -= 1
if dur / 2 > seg1.duration:
dur = seg1.duration * 2
if dur / 2 > seg2.duration:
dur = seg2.duration * 2
# we're going to compute the crossfade and then create a RawTrack
# for the resulting frames
if seg2.start - (dur / 2) < 0:
diff = seg2.start
seg2.start = 0
seg2.duration -= diff
seg2.comp_location -= diff
dur = 2 * diff
else:
seg2.start -= (dur / 2)
seg2.duration += (dur / 2)
seg2.comp_location -= (dur / 2)
seg1.duration += (dur / 2)
out_frames = seg1.get_frames(channels=self.channels)[-dur:]
seg1.duration -= dur
in_frames = seg2.get_frames(channels=self.channels)[:dur]
seg2.start += dur
seg2.duration -= dur
seg2.comp_location += dur
# compute the crossfade
in_frames = in_frames[:min(map(len, [in_frames, out_frames]))]
out_frames = out_frames[:min(map(len, [in_frames, out_frames]))]
cf_frames = radiotool.utils.linear(out_frames, in_frames)
# cf_frames = equal_power(out_frames, in_frames)
raw_track = RawTrack(cf_frames, name="crossfade",
samplerate=seg1.track.samplerate)
rs_comp_location = (seg1.comp_location + seg1.duration) /\
float(seg1.track.samplerate)
rs_duration = raw_track.duration / float(raw_track.samplerate)
raw_seg = Segment(raw_track, rs_comp_location, 0.0, rs_duration)
# will this fix a bug?
raw_seg.duration = raw_track.duration
raw_seg.comp_location = seg1.comp_location + seg1.duration
self.add_track(raw_track)
self.add_segment(raw_seg)
return raw_seg
else:
print seg1.comp_location + seg1.duration, seg2.comp_location
raise Exception("Segments must be adjacent"
"to add a crossfade ({}, {})".format(
seg1.comp_location + seg1.duration,
seg2.comp_location))
def cross_fade_linear(self, seg1, seg2, duration):
if seg1.comp_location + seg1.duration - seg2.comp_location < 2:
self.extended_fade_out(seg1, duration)
self.fade_in(seg2, duration)
# self.extended_fade_in(seg2, duration)
else:
print seg1.comp_location + seg1.duration, seg2.comp_location
raise Exception("Segments must be adjacent to add a crossfade (%d, %d)"
% (seg1.comp_location + seg1.duration, seg2.comp_location))
def empty_over_span(self, time, duration):
"""Helper method that tests whether composition contains any segments
at a given time for a given duration.
:param time: Time (in seconds) to start span
:param duration: Duration (in seconds) of span
:returns: `True` if there are no segments in the composition that overlap the span starting at `time` and lasting for `duration` seconds. `False` otherwise.
"""
for seg in self.segments:
# starts in range
if seg.comp_location_in_seconds >= time and\
seg.comp_location_in_seconds < time + duration:
return False
# or, ends in range
elif seg.comp_location_in_seconds + seg.duration_in_seconds >= time and\
seg.comp_location_in_seconds + seg.duration_in_seconds < time + duration:
return False
# or, spans entire range
elif seg.comp_location_in_seconds < time and\
seg.comp_location_in_seconds + seg.duration_in_seconds >= time + duration:
return False
return True
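        # For example, with a single segment occupying [10.0, 20.0) seconds,
        # empty_over_span(0.0, 5.0) is True (no overlap), while
        # empty_over_span(15.0, 1.0) and empty_over_span(5.0, 30.0) are False.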
def contract(self, time, duration, min_contraction=0.0):
"""Remove empty gaps from the composition starting at a given
time for a given duration.
"""
# remove audio from the composition starting at time
# for duration
contract_dur = 0.0
contract_start = time
if self.empty_over_span(time, duration):
contract_dur = duration
contract_start = time
else:
starts = [s.comp_location_in_seconds for s in self.segments]
ends = [s.comp_location_in_seconds + s.duration_in_seconds
for s in self.segments]
key_starts = []
key_ends = []
for start in starts:
if start >= time and start < time + duration:
# does a segment cover the location right before this start?
is_key_start = True
for seg in self.segments:
if seg.comp_location_in_seconds < start and\
seg.comp_location_in_seconds + seg.duration_in_seconds >= start:
is_key_start = False
break
if is_key_start:
key_starts.append(start)
for end in ends:
if end >= time and end < time + duration:
# does a segment cover the location right before this start?
is_key_end = True
for seg in self.segments:
if seg.comp_location_in_seconds <= end and\
seg.comp_location_in_seconds + seg.duration_in_seconds > end:
is_key_end = False
break
if is_key_end:
key_ends.append(end)
if len(key_starts) + len(key_ends) == 0: return 0, 0
# combine key starts and key ends
key_both = [s for s in key_starts]
key_both.extend([s for s in key_ends])
key_both = sorted(key_both)
first_key = key_both[0]
if first_key in key_starts:
contract_start = time
contract_dur = first_key - time
else:
contract_start = first_key
if len(key_both) >= 2:
contract_dur = key_both[1] - first_key
else:
contract_dur = time + duration - first_key
if contract_dur > min_contraction:
for seg in self.segments:
if seg.comp_location_in_seconds > contract_start:
dur_samples = int(seg.samplerate * contract_dur)
seg.comp_location -= dur_samples
for dyn in self.dynamics:
if dyn.comp_location_in_seconds > contract_start:
dur_samples = int(seg.samplerate * contract_dur)
dyn.comp_location -= dur_samples
return contract_start, contract_dur
else:
return 0.0, 0.0
def add_music_cue(self, track, comp_cue, song_cue, duration=6.0,
padding_before=12.0, padding_after=12.0):
"""Add a music cue to the composition. This doesn't do any audio
analysis, it just aligns a specified point in the track
(presumably music) with a location in the composition. See
UnderScore_ for a visualization of what this is doing to the
music track.
.. _UnderScore: http://vis.berkeley.edu/papers/underscore/
:param track: Track to align in the composition
:type track: :py:class:`radiotool.composer.Track`
:param float comp_cue: Location in composition to align music cue (in seconds)
:param float song_cue: Location in the music track to align with the composition cue (in seconds)
:param float duration: Duration of music after the song cue before the music starts to fade out (in seconds)
:param float padding_before: Duration of music playing softly before the music cue/composition cue (in seconds)
:param float padding_after: Duration of music playing softly after the music cue/composition cue (in seconds)
"""
self.tracks.add(track)
pre_fade = 3
post_fade = 3
if padding_before + pre_fade > song_cue:
padding_before = song_cue - pre_fade
        if padding_before + pre_fade > comp_cue:
            padding_before = comp_cue - pre_fade
        s = Segment(track, comp_cue - padding_before - pre_fade,
song_cue - padding_before - pre_fade,
pre_fade + padding_before + duration +
padding_after + post_fade)
self.add_segment(s)
d = []
dyn_adj = 1
track.current_frame = 0
        d.append(Fade(track, comp_cue - padding_before - pre_fade, pre_fade,
                      0, .1*dyn_adj, fade_type="linear"))
        d.append(Fade(track, comp_cue - padding_before, padding_before,
                      .1*dyn_adj, .4*dyn_adj, fade_type="exponential"))
        d.append(Volume(track, comp_cue, duration, .4*dyn_adj))
        d.append(Fade(track, comp_cue + duration, padding_after,
                      .4*dyn_adj, .1*dyn_adj, fade_type="exponential"))
        d.append(Fade(track, comp_cue + duration + padding_after, post_fade,
                      .1*dyn_adj, 0, fade_type="linear"))
self.add_dynamics(d)
def _remove_end_silence(self, frames):
subwindow_n_frames = int(1/16.0 * min(s.samplerate for s in self.tracks))
segments = segment_array(frames, subwindow_n_frames, overlap=.5)
volumes = np.apply_along_axis(RMS_energy, 1, segments)
min_subwindow_vol = min(np.sum(np.abs(segments), 1) /\
subwindow_n_frames)
min_subwindow_vol = min(volumes)
# some threshold? what if there are no zeros?
min_subwindow_vol_index = np.where(volumes <= 2.0 *
min_subwindow_vol)
# find longest span of "silence" and set to the beginning
# adapted from
# http://stackoverflow.com/questions/3109052/
# find-longest-span-of-consecutive-array-keys
last_key = -1
cur_list = []
long_list = []
for idx in min_subwindow_vol_index[0]:
if idx != last_key + 1:
cur_list = []
cur_list.append(idx)
if(len(cur_list) > len(long_list)):
long_list = cur_list
last_key = idx
new_cut_point = (long_list[0] + 1) * \
int(subwindow_n_frames / 2.0)
if long_list[-1] + 16 > len(volumes):
return frames[:new_cut_point]
return frames
def duration_in_seconds(self):
return max([x.comp_location_in_seconds + x.duration_in_seconds
for x in self.segments])
def build(self, track_list=None, adjust_dynamics=False,
min_length=None, channels=None):
"""
Create a numpy array from the composition.
:param track_list: List of tracks to include in composition generation (``None`` means all tracks will be used)
:type track_list: list of :py:class:`radiotool.composer.Track`
:param int min_length: Minimum length of output array (in frames). Will zero pad extra length.
:param bool. adjust_dynamics: Automatically adjust dynamics. Will document later.
"""
if track_list is None:
track_list = self.tracks
if channels is None:
channels = self.channels
parts = {}
starts = {}
# for universal volume adjustment
all_frames = np.array([])
song_frames = np.array([])
speech_frames = np.array([])
longest_part = max([x.comp_location + x.duration
for x in self.segments])
if len(self.dynamics) > 0:
longest_part = max((longest_part,
max([x.comp_location + x.duration
for x in self.dynamics])))
for track_idx, track in enumerate(track_list):
segments = sorted([v for v in self.segments if v.track == track],
key=lambda k: k.comp_location + k.duration)
dyns = sorted([d for d in self.dynamics if d.track == track],
key=lambda k: k.comp_location)
if len(segments) > 0:
start_loc = min([x.comp_location for x in segments])
end_loc = max([x.comp_location + x.duration for x in segments])
if len(dyns) > 0:
start_loc = min((start_loc,
min([d.comp_location for d in dyns])))
end_loc = max((end_loc,
max([d.comp_location + d.duration for d in dyns])))
starts[track] = start_loc
parts[track] = np.zeros((end_loc - start_loc, channels))
for s in segments:
frames = s.get_frames(channels=channels).\
reshape(-1, channels)
# for universal volume adjustment
if adjust_dynamics:
all_frames = np.append(all_frames,
self._remove_end_silence(frames.flatten()))
if isinstance(track, Song):
song_frames = np.append(song_frames,
self._remove_end_silence(frames.flatten()))
elif isinstance(track, Speech):
speech_frames = np.append(speech_frames,
self._remove_end_silence(frames.flatten()))
parts[track][s.comp_location - start_loc:
s.comp_location - start_loc + s.duration,
:] = frames
for d in dyns:
vol_frames = d.to_array(channels)
parts[track][d.comp_location - start_loc :
d.comp_location - start_loc + d.duration,
:] *= vol_frames
if adjust_dynamics:
total_energy = RMS_energy(all_frames)
song_energy = RMS_energy(song_frames)
speech_energy = RMS_energy(speech_frames)
# dyn_adj = 0.10 / total_energy
# dyn_adj = speech_energy / sqrt(song_energy) * 5
if adjust_dynamics:
if not np.isnan(speech_energy) and not np.isnan(song_energy):
dyn_adj = sqrt(speech_energy / song_energy) * 1.15
else:
dyn_adj = 1
else:
dyn_adj = 1
if longest_part < min_length:
longest_part = min_length
out = np.zeros((longest_part, channels))
for track, part in parts.iteritems():
out[starts[track]:starts[track] + len(part)] += part
return out
def export(self, **kwargs):
"""
Generate audio file from composition.
:param str. filename: Output filename (no extension)
:param str. filetype: Output file type (only .wav supported for now)
:param integer samplerate: Sample rate of output audio
:param integer channels: Channels in output audio, if different than originally specified
:param bool. separate_tracks: Also generate audio file for each track in composition
:param int min_length: Minimum length of output array (in frames). Will zero pad extra length.
:param bool. adjust_dynamics: Automatically adjust dynamics (will document later)
"""
# get optional args
filename = kwargs.pop('filename', 'out')
filetype = kwargs.pop('filetype', 'wav')
adjust_dynamics = kwargs.pop('adjust_dynamics', False)
samplerate = kwargs.pop('samplerate', None)
channels = kwargs.pop('channels', self.channels)
separate_tracks = kwargs.pop('separate_tracks', False)
min_length = kwargs.pop('min_length', None)
if samplerate is None:
samplerate = np.min([track.samplerate for track in self.tracks])
encoding = 'pcm16'
to_mp3 = False
if filetype == 'ogg':
encoding = 'vorbis'
elif filetype == 'mp3':
filetype = 'wav'
to_mp3 = True
if separate_tracks:
# build the separate parts of the composition if desired
for track in self.tracks:
out = self.build(track_list=[track],
adjust_dynamics=adjust_dynamics,
min_length=min_length,
channels=channels)
out_file = Sndfile("%s-%s.%s" %
(filename, track.name, filetype),
'w',
Format(filetype, encoding=encoding),
channels, samplerate)
out_file.write_frames(out)
out_file.close()
# always build the complete composition
out = self.build(adjust_dynamics=adjust_dynamics,
min_length=min_length,
channels=channels)
out_filename = "%s.%s" % (filename, filetype)
out_file = Sndfile(out_filename, 'w',
Format(filetype, encoding=encoding),
channels, samplerate)
out_file.write_frames(out)
out_file.close()
if LIBXMP and filetype == "wav":
xmp = libxmp.XMPMeta()
ns = libxmp.consts.XMP_NS_DM
p = xmp.get_prefix_for_namespace(ns)
xpath = p + 'Tracks'
xmp.append_array_item(ns, xpath, None,
array_options={"prop_value_is_array": True},
prop_value_is_struct=True)
xpath += '[1]/' + p
xmp.set_property(ns, xpath + "trackName", "CuePoint Markers")
xmp.set_property(ns, xpath + "trackType", "Cue")
xmp.set_property(ns, xpath + "frameRate", "f%d" % samplerate)
for i, lab in enumerate(self.labels):
xmp.append_array_item(ns, xpath + "markers", None,
array_options={"prop_value_is_array": True},
prop_value_is_struct=True)
xmp.set_property(ns,
xpath + "markers[%d]/%sname" % (i + 1, p), lab.name)
xmp.set_property(ns,
xpath + "markers[%d]/%sstartTime" % (i + 1, p),
str(lab.sample(samplerate)))
xmpfile = libxmp.XMPFiles(file_path=out_filename, open_forupdate=True)
if xmpfile.can_put_xmp(xmp):
xmpfile.put_xmp(xmp)
xmpfile.close_file()
if to_mp3:
wav_to_mp3(out_filename, delete_wav=True)
return out
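# Minimal usage sketch (not part of the library; runs only when this module is
# executed directly and scikits.audiolab is installed). Argument order follows
# the calls above: RawTrack(frames, name=..., samplerate=...) and
# Segment(track, comp_location_sec, start_sec, duration_sec).
if __name__ == '__main__':
    _sr = 44100
    _tone = 0.1 * np.sin(2 * np.pi * 440.0 * np.arange(3 * _sr) / float(_sr))
    _frames = np.column_stack((_tone, _tone))  # stereo float frames
    _track = RawTrack(_frames, name="tone", samplerate=_sr)
    _comp = Composition(channels=2)
    _seg1 = Segment(_track, 0.0, 0.0, 1.5)
    _seg2 = Segment(_track, 1.5, 1.0, 1.5)
    _comp.add_segments([_seg1, _seg2])
    _comp.cross_fade(_seg1, _seg2, 0.5)   # splice the adjacent segments
    _comp.fade_out(_seg2, 0.5)
    _comp.export(filename="composition_demo", filetype="wav",
                 samplerate=_sr, channels=2)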
|
|
import os
import platform
import re
import subprocess
import xml.etree.ElementTree as ET
from subprocess import CalledProcessError, PIPE, STDOUT
from six.moves.urllib.parse import quote_plus, unquote, urlparse
from conans.client.tools import check_output
from conans.client.tools.env import environment_append, no_op
from conans.client.tools.files import chdir
from conans.errors import ConanException
from conans.model.version import Version
from conans.util.files import decode_text, to_file_bytes, walk
def _run_muted(cmd, folder=None):
with chdir(folder) if folder else no_op():
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
return process.returncode
def _check_repo(cmd, folder, msg=None):
msg = msg or "Not a valid '{}' repository".format(cmd[0])
try:
ret = _run_muted(cmd, folder=folder)
except Exception:
raise ConanException(msg)
else:
if bool(ret):
raise ConanException(msg)
class SCMBase(object):
cmd_command = None
def __init__(self, folder=None, verify_ssl=True, username=None, password=None,
force_english=True, runner=None, output=None):
self.folder = folder or os.getcwd()
if not os.path.exists(self.folder):
os.makedirs(self.folder)
self._verify_ssl = verify_ssl
self._force_eng = force_english
self._username = username
self._password = password
self._runner = runner
self._output = output
def run(self, command):
command = "%s %s" % (self.cmd_command, command)
with chdir(self.folder) if self.folder else no_op():
with environment_append({"LC_ALL": "en_US.UTF-8"}) if self._force_eng else no_op():
if not self._runner:
return check_output(command).strip()
else:
return self._runner(command)
def get_url_with_credentials(self, url):
if not self._username or not self._password:
return url
if urlparse(url).password:
return url
user_enc = quote_plus(self._username)
pwd_enc = quote_plus(self._password)
url = url.replace("://", "://" + user_enc + ":" + pwd_enc + "@", 1)
return url
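    # For example (illustrative values): with username "user@corp" and
    # password "p@ss", get_url_with_credentials("https://host/repo.git")
    # returns "https://user%40corp:p%40ss@host/repo.git"; quote_plus() keeps
    # the embedded '@' and ':' from corrupting the URL.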
@classmethod
def _remove_credentials_url(cls, url):
parsed = urlparse(url)
netloc = parsed.hostname
if parsed.port:
netloc += ":{}".format(parsed.port)
replaced = parsed._replace(netloc=netloc)
return replaced.geturl()
class Git(SCMBase):
cmd_command = "git"
def _configure_ssl_verify(self):
# TODO: This should be a context manager
return self.run("config http.sslVerify %s" % ("true" if self._verify_ssl else "false"))
def clone(self, url, branch=None, args=""):
url = self.get_url_with_credentials(url)
if os.path.exists(url):
url = url.replace("\\", "/") # Windows local directory
if os.path.exists(self.folder) and os.listdir(self.folder):
if not branch:
raise ConanException("The destination folder '%s' is not empty, "
"specify a branch to checkout (not a tag or commit) "
"or specify a 'subfolder' "
"attribute in the 'scm'" % self.folder)
output = self.run("init")
output += self._configure_ssl_verify()
output += self.run('remote add origin "%s"' % url)
output += self.run("fetch ")
output += self.run("checkout -t origin/%s" % branch)
else:
branch_cmd = "--branch %s" % branch if branch else ""
output = self.run('clone "%s" . %s %s' % (url, branch_cmd, args))
output += self._configure_ssl_verify()
return output
def checkout(self, element, submodule=None):
self.check_repo()
output = self.run('checkout "%s"' % element)
if submodule:
if submodule == "shallow":
output += self.run("submodule sync")
output += self.run("submodule update --init")
elif submodule == "recursive":
output += self.run("submodule sync --recursive")
output += self.run("submodule update --init --recursive")
else:
raise ConanException("Invalid 'submodule' attribute value in the 'scm'. "
"Unknown value '%s'. Allowed values: ['shallow', 'recursive']"
% submodule)
# Element can be a tag, branch or commit
return output
def excluded_files(self):
ret = []
try:
file_paths = [os.path.normpath(
os.path.join(
os.path.relpath(folder, self.folder), el)).replace("\\", "/")
for folder, dirpaths, fs in walk(self.folder)
for el in fs + dirpaths]
if file_paths:
p = subprocess.Popen(['git', 'check-ignore', '--stdin'],
stdout=PIPE, stdin=PIPE, stderr=STDOUT, cwd=self.folder)
paths = to_file_bytes("\n".join(file_paths))
grep_stdout = decode_text(p.communicate(input=paths)[0])
ret = grep_stdout.splitlines()
except (CalledProcessError, IOError, OSError) as e:
if self._output:
self._output.warn("Error checking excluded git files: %s. "
"Ignoring excluded files" % e)
ret = []
return ret
def get_remote_url(self, remote_name=None, remove_credentials=False):
self.check_repo()
remote_name = remote_name or "origin"
remotes = self.run("remote -v")
for remote in remotes.splitlines():
name, url = remote.split(None, 1)
if name == remote_name:
url, _ = url.rsplit(None, 1)
if remove_credentials and not os.path.exists(url): # only if not local
url = self._remove_credentials_url(url)
return url
return None
def is_local_repository(self):
url = self.get_remote_url()
return os.path.exists(url)
def get_commit(self):
self.check_repo()
try:
commit = self.run("rev-parse HEAD")
commit = commit.strip()
return commit
except Exception as e:
raise ConanException("Unable to get git commit from '%s': %s" % (self.folder, str(e)))
get_revision = get_commit
def get_commit_message(self):
self.check_repo()
try:
message = self.run("log -1 --format=%s%n%b")
return message.strip()
except Exception:
return None
def is_pristine(self):
self.check_repo()
status = self.run("status --porcelain").strip()
if not status:
return True
else:
return False
def get_repo_root(self):
self.check_repo()
return self.run("rev-parse --show-toplevel")
def get_branch(self):
self.check_repo()
try:
status = self.run("status -bs --porcelain")
# ## feature/scm_branch...myorigin/feature/scm_branch
branch = status.splitlines()[0].split("...")[0].strip("#").strip()
return branch
except Exception as e:
raise ConanException("Unable to get git branch from %s: %s" % (self.folder, str(e)))
def get_tag(self):
self.check_repo()
try:
status = self.run("describe --exact-match --tags")
tag = status.strip()
return tag
except Exception:
return None
def check_repo(self):
""" Check if it is a valid GIT repo """
_check_repo(["git", "status"], folder=self.folder)
class SVN(SCMBase):
cmd_command = "svn"
file_protocol = 'file:///' if platform.system() == "Windows" else 'file://'
API_CHANGE_VERSION = Version("1.9") # CLI changes in 1.9
def __init__(self, folder=None, runner=None, *args, **kwargs):
def runner_no_strip(command):
return check_output(command)
runner = runner or runner_no_strip
super(SVN, self).__init__(folder=folder, runner=runner, *args, **kwargs)
@staticmethod
def get_version():
try:
out, _ = subprocess.Popen(["svn", "--version"], stdout=subprocess.PIPE).communicate()
version_line = decode_text(out).split('\n', 1)[0]
version_str = version_line.split(' ', 3)[2]
return Version(version_str)
except Exception as e:
raise ConanException("Error retrieving SVN version: '{}'".format(e))
@property
def version(self):
if not hasattr(self, '_version'):
version = SVN.get_version()
setattr(self, '_version', version)
return getattr(self, '_version')
def run(self, command):
# Ensure we always pass some params
extra_options = " --no-auth-cache --non-interactive"
if not self._verify_ssl:
if self.version >= SVN.API_CHANGE_VERSION:
extra_options += " --trust-server-cert-failures=unknown-ca"
else:
extra_options += " --trust-server-cert"
return super(SVN, self).run(command="{} {}".format(command, extra_options))
def _show_item(self, item, target='.'):
self.check_repo()
if self.version >= SVN.API_CHANGE_VERSION:
value = self.run("info --show-item {item} \"{target}\"".format(item=item, target=target))
return value.strip()
else:
output = self.run("info --xml \"{target}\"".format(target=target))
root = ET.fromstring(output)
if item == 'revision':
return root.findall("./entry")[0].get("revision")
elif item == 'url':
return root.findall("./entry/url")[0].text
elif item == 'wc-root':
return root.findall("./entry/wc-info/wcroot-abspath")[0].text
elif item == 'last-changed-revision':
return root.findall("./entry/commit")[0].get("revision")
elif item == 'relative-url':
root_url = root.findall("./entry/repository/root")[0].text
url = self._show_item(item='url', target=target)
if url.startswith(root_url):
return url[len(root_url):]
raise ConanException("Retrieval of item '{}' not implemented for SVN<{}".format(
item, SVN.API_CHANGE_VERSION))
def checkout(self, url, revision="HEAD"):
output = ""
try:
self.check_repo()
except ConanException:
output += self.run('co "{url}" .'.format(url=url))
else:
assert url.lower() == self.get_remote_url().lower(), \
"%s != %s" % (url, self.get_remote_url())
output += self.run("revert . --recursive")
finally:
output += self.update(revision=revision)
return output
def update(self, revision='HEAD'):
self.check_repo()
return self.run("update -r {rev}".format(rev=revision))
def excluded_files(self):
self.check_repo()
excluded_list = []
output = self.run("status --no-ignore")
for it in output.splitlines():
if it.startswith('I'): # Only ignored files
filepath = it[8:].strip()
excluded_list.append(os.path.normpath(filepath))
return excluded_list
def get_remote_url(self, remove_credentials=False):
url = self._show_item('url')
if remove_credentials and not os.path.exists(url): # only if not local
url = self._remove_credentials_url(url)
return url
def get_qualified_remote_url(self, remove_credentials=False):
# Return url with peg revision
url = self.get_remote_url(remove_credentials=remove_credentials)
revision = self.get_revision()
return "{url}@{revision}".format(url=url, revision=revision)
def is_local_repository(self):
url = self.get_remote_url()
return (url.startswith(self.file_protocol) and
os.path.exists(unquote(url[len(self.file_protocol):])))
def is_pristine(self):
# Check if working copy is pristine/consistent
if self.version >= SVN.API_CHANGE_VERSION:
try:
output = self.run("status -u -r {} --xml".format(self.get_revision()))
except subprocess.CalledProcessError:
return False
else:
root = ET.fromstring(output)
pristine_item_list = ['external', 'ignored', 'none', 'normal']
pristine_props_list = ['normal', 'none']
for item in root.findall('.//wc-status'):
if item.get('item', 'none') not in pristine_item_list:
return False
if item.get('props', 'none') not in pristine_props_list:
return False
for item in root.findall('.//repos-status'):
if item.get('item', 'none') not in pristine_item_list:
return False
if item.get('props', 'none') not in pristine_props_list:
return False
return True
else:
if self._output:
self._output.warn("SVN::is_pristine for SVN v{} (less than {}) is not implemented,"
" it is returning not-pristine always because it cannot compare"
" with checked out version.".format(self.version,
SVN.API_CHANGE_VERSION))
return False
def get_revision(self):
return self._show_item('revision')
def get_revision_message(self):
output = self.run("log -r COMMITTED").splitlines()
        return output[3] if len(output) > 3 else None
def get_repo_root(self):
return self._show_item('wc-root')
def get_last_changed_revision(self, use_wc_root=True):
if use_wc_root:
return self._show_item(item='last-changed-revision', target=self.get_repo_root())
else:
return self._show_item(item='last-changed-revision')
def get_branch(self):
item = self._get_item("branches/[^/]+|trunk", "branch")
return item.replace("branches/", "") if item else None
def get_tag(self):
item = self._get_item("tags/[^/]+", "tag")
return item.replace("tags/", "") if item else None
def _get_item(self, pattern, item_name):
try:
url = self._show_item('relative-url')
except Exception as e:
raise ConanException("Unable to get svn %s from %s: %s"
% (item_name, self.folder, str(e)))
item = re.search(pattern, url)
return item.group(0) if item else None
def check_repo(self):
""" Check if it is a valid SVN repo """
_check_repo(["svn", "info"], folder=self.folder)
|
|
from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.base import (clone, ClusterMixin, ClassifierMixin)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if "n_iter" in params:
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
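# The check_* functions below are written to be driven one estimator at a
# time, typically from a nose test generator. A hedged sketch (assumes
# sklearn.utils.testing.all_estimators, which this era of scikit-learn
# provides):
#
#   from sklearn.utils.testing import all_estimators
#   for name, Estimator in all_estimators(type_filter='classifier'):
#       yield check_classifiers_train, name, Estimator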
def check_regressors_classifiers_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if not 'sparse' in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_transformer(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if name == "KernelPCA":
transformer.remove_zero_eig = False
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
def check_transformer_sparse_data(name, Transformer):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
if name in ['Scaler', 'StandardScaler']:
transformer = Transformer(with_mean=False)
else:
transformer = Transformer()
set_fast_parameters(transformer)
# fit
try:
transformer.fit(X, y)
except TypeError as e:
if not 'sparse' in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
if issubclass(Estimator, ClusterMixin):
estimator.fit(X_train)
else:
estimator.fit(X_train, y)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
if issubclass(Estimator, ClusterMixin):
# All estimators except clustering algorithm
# support fitting with (optional) y
estimator.fit(X_train_finite)
else:
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if not 'inf' in repr(e) and not 'NaN' in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if not 'class' in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.85)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict:
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict:
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
classifier.fit(X, y[:, np.newaxis])
assert_equal(len(w), 1)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_class_weight_classifiers(name, Classifier):
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.9)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # low for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
params = estimator.get_params()
estimator.fit(X, y)
new_params = estimator.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_cluster_overwrite_params(name, Clustering):
X, y = make_blobs(random_state=0, n_samples=9)
with warnings.catch_warnings(record=True):
# catch deprecation warnings
clustering = Clustering()
set_fast_parameters(clustering)
params = clustering.get_params()
clustering.fit(X)
new_params = clustering.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_sparsify_multiclass_classifier(name, Classifier):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Classifier()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_sparsify_binary_classifier(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
    # The getattr below unwraps the deprecation decorator, if any,
    # to expose the original __init__.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y to 2-D for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator, multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
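# Illustrative usage (not part of the check suite): each check above takes an
# estimator name and class and can be invoked directly, for example
#
#     from sklearn.linear_model import LogisticRegression
#     check_classifiers_pickle('LogisticRegression', LogisticRegression)
#
# In practice a test runner typically yields these checks once per estimator,
# so that each (check, name, Estimator) pair runs as a separate test case.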
|
|
# hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
#
# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import gc
import os
import time
from ..i18n import _
from .common import (
ErrorResponse,
HTTP_SERVER_ERROR,
cspvalues,
get_contact,
get_mtime,
ismember,
paritygen,
staticfile,
statusmessage,
)
from .. import (
configitems,
encoding,
error,
extensions,
hg,
pathutil,
profiling,
pycompat,
rcutil,
registrar,
scmutil,
templater,
templateutil,
ui as uimod,
util,
)
from . import (
hgweb_mod,
request as requestmod,
webutil,
wsgicgi,
)
from ..utils import dateutil
def cleannames(items):
return [(util.pconvert(name).strip(b'/'), path) for name, path in items]
def findrepos(paths):
repos = []
for prefix, root in cleannames(paths):
roothead, roottail = os.path.split(root)
# "foo = /bar/*" or "foo = /bar/**" lets every repo /bar/N in or below
# /bar/ be served as as foo/N .
# '*' will not search inside dirs with .hg (except .hg/patches),
# '**' will search inside dirs with .hg (and thus also find subrepos).
try:
recurse = {b'*': False, b'**': True}[roottail]
except KeyError:
repos.append((prefix, root))
continue
roothead = os.path.normpath(util.abspath(roothead))
paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
repos.extend(urlrepos(prefix, roothead, paths))
return repos
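# Illustrative [paths] configuration for the globbing rules handled above
# (the section and option form is the real hgweb config syntax, the paths are
# hypothetical):
#
#     [paths]
#     # serve every repository directly under /srv/repos as projects/<name>
#     projects = /srv/repos/*
#     # also search inside repositories, picking up nested subrepositories
#     everything = /srv/repos/**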
def urlrepos(prefix, roothead, paths):
"""yield url paths and filesystem paths from a list of repo paths
>>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq]
>>> conv(urlrepos(b'hg', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
[('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')]
>>> conv(urlrepos(b'', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
[('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')]
"""
for path in paths:
path = os.path.normpath(path)
yield (
prefix + b'/' + util.pconvert(path[len(roothead) :]).lstrip(b'/')
).strip(b'/'), path
def readallowed(ui, req):
"""Check allow_read and deny_read config options of a repo's ui object
to determine user permissions. By default, with neither option set (or
both empty), allow all users to read the repo. There are two ways a
user can be denied read access: (1) deny_read is not empty, and the
user is unauthenticated or deny_read contains user (or *), and (2)
allow_read is not empty and the user is not in allow_read. Return True
if user is allowed to read the repo, else return False."""
user = req.remoteuser
deny_read = ui.configlist(b'web', b'deny_read', untrusted=True)
if deny_read and (not user or ismember(ui, user, deny_read)):
return False
allow_read = ui.configlist(b'web', b'allow_read', untrusted=True)
# by default, allow reading if no allow_read option has been set
if not allow_read or ismember(ui, user, allow_read):
return True
return False
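# Illustrative [web] settings for the policy implemented by readallowed()
# (the option names are real, the user names are hypothetical):
#
#     [web]
#     # deny_read is checked first; '*' denies every user
#     deny_read = mallory
#     # once allow_read is set, only the listed users may read
#     allow_read = alice, bob
#
# With both options empty, every user (including unauthenticated ones) may
# read the repository.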
def rawindexentries(ui, repos, req, subdir=b''):
descend = ui.configbool(b'web', b'descend')
collapse = ui.configbool(b'web', b'collapse')
seenrepos = set()
seendirs = set()
for name, path in repos:
if not name.startswith(subdir):
continue
name = name[len(subdir) :]
directory = False
if b'/' in name:
if not descend:
continue
nameparts = name.split(b'/')
rootname = nameparts[0]
if not collapse:
pass
elif rootname in seendirs:
continue
elif rootname in seenrepos:
pass
else:
directory = True
name = rootname
# redefine the path to refer to the directory
discarded = b'/'.join(nameparts[1:])
# remove name parts plus accompanying slash
path = path[: -len(discarded) - 1]
try:
hg.repository(ui, path)
directory = False
except (IOError, error.RepoError):
pass
parts = [
req.apppath.strip(b'/'),
subdir.strip(b'/'),
name.strip(b'/'),
]
url = b'/' + b'/'.join(p for p in parts if p) + b'/'
# show either a directory entry or a repository
if directory:
# get the directory's time information
try:
d = (get_mtime(path), dateutil.makedate()[1])
except OSError:
continue
# add '/' to the name to make it obvious that
# the entry is a directory, not a regular repository
row = {
b'contact': b"",
b'contact_sort': b"",
b'name': name + b'/',
b'name_sort': name,
b'url': url,
b'description': b"",
b'description_sort': b"",
b'lastchange': d,
b'lastchange_sort': d[1] - d[0],
b'archives': templateutil.mappinglist([]),
b'isdirectory': True,
b'labels': templateutil.hybridlist([], name=b'label'),
}
seendirs.add(name)
yield row
continue
u = ui.copy()
if rcutil.use_repo_hgrc():
try:
u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
except Exception as e:
u.warn(_(b'error reading %s/.hg/hgrc: %s\n') % (path, e))
continue
def get(section, name, default=uimod._unset):
return u.config(section, name, default, untrusted=True)
if u.configbool(b"web", b"hidden", untrusted=True):
continue
if not readallowed(u, req):
continue
# update time with local timezone
try:
r = hg.repository(ui, path)
except IOError:
u.warn(_(b'error accessing repository at %s\n') % path)
continue
except error.RepoError:
u.warn(_(b'error accessing repository at %s\n') % path)
continue
try:
d = (get_mtime(r.spath), dateutil.makedate()[1])
except OSError:
continue
contact = get_contact(get)
description = get(b"web", b"description")
seenrepos.add(name)
name = get(b"web", b"name", name)
labels = u.configlist(b'web', b'labels', untrusted=True)
row = {
b'contact': contact or b"unknown",
b'contact_sort': contact.upper() or b"unknown",
b'name': name,
b'name_sort': name,
b'url': url,
b'description': description or b"unknown",
b'description_sort': description.upper() or b"unknown",
b'lastchange': d,
b'lastchange_sort': d[1] - d[0],
b'archives': webutil.archivelist(u, b"tip", url),
b'isdirectory': None,
b'labels': templateutil.hybridlist(labels, name=b'label'),
}
yield row
def _indexentriesgen(
context, ui, repos, req, stripecount, sortcolumn, descending, subdir
):
rows = rawindexentries(ui, repos, req, subdir=subdir)
sortdefault = None, False
if sortcolumn and sortdefault != (sortcolumn, descending):
sortkey = b'%s_sort' % sortcolumn
rows = sorted(rows, key=lambda x: x[sortkey], reverse=descending)
for row, parity in zip(rows, paritygen(stripecount)):
row[b'parity'] = parity
yield row
def indexentries(
ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b''
):
args = (ui, repos, req, stripecount, sortcolumn, descending, subdir)
return templateutil.mappinggenerator(_indexentriesgen, args=args)
class hgwebdir(object):
"""HTTP server for multiple repositories.
Given a configuration, different repositories will be served depending
on the request path.
Instances are typically used as WSGI applications.
"""
def __init__(self, conf, baseui=None):
self.conf = conf
self.baseui = baseui
self.ui = None
self.lastrefresh = 0
self.motd = None
self.refresh()
if not baseui:
# set up environment for new ui
extensions.loadall(self.ui)
extensions.populateui(self.ui)
def refresh(self):
if self.ui:
refreshinterval = self.ui.configint(b'web', b'refreshinterval')
else:
item = configitems.coreitems[b'web'][b'refreshinterval']
refreshinterval = item.default
# refreshinterval <= 0 means to always refresh.
if (
refreshinterval > 0
and self.lastrefresh + refreshinterval > time.time()
):
return
if self.baseui:
u = self.baseui.copy()
else:
u = uimod.ui.load()
u.setconfig(b'ui', b'report_untrusted', b'off', b'hgwebdir')
u.setconfig(b'ui', b'nontty', b'true', b'hgwebdir')
# displaying bundling progress bar while serving feels wrong and may
# break some wsgi implementations.
u.setconfig(b'progress', b'disable', b'true', b'hgweb')
if not isinstance(self.conf, (dict, list, tuple)):
map = {b'paths': b'hgweb-paths'}
if not os.path.exists(self.conf):
raise error.Abort(_(b'config file %s not found!') % self.conf)
u.readconfig(self.conf, remap=map, trust=True)
paths = []
for name, ignored in u.configitems(b'hgweb-paths'):
for path in u.configlist(b'hgweb-paths', name):
paths.append((name, path))
elif isinstance(self.conf, (list, tuple)):
paths = self.conf
elif isinstance(self.conf, dict):
paths = self.conf.items()
extensions.populateui(u)
repos = findrepos(paths)
for prefix, root in u.configitems(b'collections'):
prefix = util.pconvert(prefix)
for path in scmutil.walkrepos(root, followsym=True):
repo = os.path.normpath(path)
name = util.pconvert(repo)
if name.startswith(prefix):
name = name[len(prefix) :]
repos.append((name.lstrip(b'/'), repo))
self.repos = repos
self.ui = u
encoding.encoding = self.ui.config(b'web', b'encoding')
self.style = self.ui.config(b'web', b'style')
self.templatepath = self.ui.config(
b'web', b'templates', untrusted=False
)
self.stripecount = self.ui.config(b'web', b'stripes')
if self.stripecount:
self.stripecount = int(self.stripecount)
prefix = self.ui.config(b'web', b'prefix')
if prefix.startswith(b'/'):
prefix = prefix[1:]
if prefix.endswith(b'/'):
prefix = prefix[:-1]
self.prefix = prefix
self.lastrefresh = time.time()
def run(self):
if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
b"CGI/1."
):
raise RuntimeError(
b"This function is only intended to be "
b"called while running as a CGI script."
)
wsgicgi.launch(self)
def __call__(self, env, respond):
baseurl = self.ui.config(b'web', b'baseurl')
req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
res = requestmod.wsgiresponse(req, respond)
return self.run_wsgi(req, res)
def run_wsgi(self, req, res):
profile = self.ui.configbool(b'profiling', b'enabled')
with profiling.profile(self.ui, enabled=profile):
try:
for r in self._runwsgi(req, res):
yield r
finally:
# There are known cycles in localrepository that prevent
# those objects (and tons of held references) from being
# collected through normal refcounting. We mitigate those
# leaks by performing an explicit GC on every request.
# TODO remove this once leaks are fixed.
# TODO only run this on requests that create localrepository
# instances instead of every request.
gc.collect()
def _runwsgi(self, req, res):
try:
self.refresh()
csp, nonce = cspvalues(self.ui)
if csp:
res.headers[b'Content-Security-Policy'] = csp
virtual = req.dispatchpath.strip(b'/')
tmpl = self.templater(req, nonce)
ctype = tmpl.render(b'mimetype', {b'encoding': encoding.encoding})
# Global defaults. These can be overridden by any handler.
res.status = b'200 Script output follows'
res.headers[b'Content-Type'] = ctype
# a static file
if virtual.startswith(b'static/') or b'static' in req.qsparams:
if virtual.startswith(b'static/'):
fname = virtual[7:]
else:
fname = req.qsparams[b'static']
static = self.ui.config(b"web", b"static", untrusted=False)
staticfile(self.templatepath, static, fname, res)
return res.sendresponse()
# top-level index
repos = dict(self.repos)
if (not virtual or virtual == b'index') and virtual not in repos:
return self.makeindex(req, res, tmpl)
# nested indexes and hgwebs
if virtual.endswith(b'/index') and virtual not in repos:
subdir = virtual[: -len(b'index')]
if any(r.startswith(subdir) for r in repos):
return self.makeindex(req, res, tmpl, subdir)
def _virtualdirs():
# Check the full virtual path, and each parent
yield virtual
for p in pathutil.finddirs(virtual):
yield p
for virtualrepo in _virtualdirs():
real = repos.get(virtualrepo)
if real:
# Re-parse the WSGI environment to take into account our
# repository path component.
uenv = req.rawenv
if pycompat.ispy3:
uenv = {
k.decode('latin1'): v
for k, v in pycompat.iteritems(uenv)
}
req = requestmod.parserequestfromenv(
uenv,
reponame=virtualrepo,
altbaseurl=self.ui.config(b'web', b'baseurl'),
# Reuse wrapped body file object otherwise state
# tracking can get confused.
bodyfh=req.bodyfh,
)
try:
# ensure caller gets private copy of ui
repo = hg.repository(self.ui.copy(), real)
return hgweb_mod.hgweb(repo).run_wsgi(req, res)
except IOError as inst:
msg = encoding.strtolocal(inst.strerror)
raise ErrorResponse(HTTP_SERVER_ERROR, msg)
except error.RepoError as inst:
raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst))
# browse subdirectories
subdir = virtual + b'/'
if [r for r in repos if r.startswith(subdir)]:
return self.makeindex(req, res, tmpl, subdir)
# prefixes not found
res.status = b'404 Not Found'
res.setbodygen(tmpl.generate(b'notfound', {b'repo': virtual}))
return res.sendresponse()
except ErrorResponse as e:
res.status = statusmessage(e.code, pycompat.bytestr(e))
res.setbodygen(
tmpl.generate(b'error', {b'error': e.message or b''})
)
return res.sendresponse()
finally:
del tmpl
def makeindex(self, req, res, tmpl, subdir=b""):
self.refresh()
sortable = [b"name", b"description", b"contact", b"lastchange"]
sortcolumn, descending = None, False
if b'sort' in req.qsparams:
sortcolumn = req.qsparams[b'sort']
descending = sortcolumn.startswith(b'-')
if descending:
sortcolumn = sortcolumn[1:]
if sortcolumn not in sortable:
sortcolumn = b""
sort = [
(
b"sort_%s" % column,
b"%s%s"
% (
(not descending and column == sortcolumn) and b"-" or b"",
column,
),
)
for column in sortable
]
self.refresh()
entries = indexentries(
self.ui,
self.repos,
req,
self.stripecount,
sortcolumn=sortcolumn,
descending=descending,
subdir=subdir,
)
mapping = {
b'entries': entries,
b'subdir': subdir,
b'pathdef': hgweb_mod.makebreadcrumb(b'/' + subdir, self.prefix),
b'sortcolumn': sortcolumn,
b'descending': descending,
}
mapping.update(sort)
res.setbodygen(tmpl.generate(b'index', mapping))
return res.sendresponse()
def templater(self, req, nonce):
def config(*args, **kwargs):
kwargs.setdefault('untrusted', True)
return self.ui.config(*args, **kwargs)
vars = {}
styles, (style, mapfile, fp) = hgweb_mod.getstyle(
req, config, self.templatepath
)
if style == styles[0]:
vars[b'style'] = style
sessionvars = webutil.sessionvars(vars, b'?')
logourl = config(b'web', b'logourl')
logoimg = config(b'web', b'logoimg')
staticurl = (
config(b'web', b'staticurl')
or req.apppath.rstrip(b'/') + b'/static/'
)
if not staticurl.endswith(b'/'):
staticurl += b'/'
defaults = {
b"encoding": encoding.encoding,
b"url": req.apppath + b'/',
b"logourl": logourl,
b"logoimg": logoimg,
b"staticurl": staticurl,
b"sessionvars": sessionvars,
b"style": style,
b"nonce": nonce,
}
templatekeyword = registrar.templatekeyword(defaults)
@templatekeyword(b'motd', requires=())
def motd(context, mapping):
if self.motd is not None:
yield self.motd
else:
yield config(b'web', b'motd')
return templater.templater.frommapfile(
mapfile, fp=fp, defaults=defaults
)
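# Illustrative entry point (paths are hypothetical): hgwebdir instances are
# WSGI applications and are normally created through the public
# mercurial.hgweb wrapper rather than this module directly, e.g.
#
#     from mercurial.hgweb import hgwebdir, wsgicgi
#     application = hgwebdir(b'/etc/mercurial/hgweb.config')
#     wsgicgi.launch(application)   # only when running as a CGI script
#
# Under a plain WSGI server, expose `application` directly instead of calling
# wsgicgi.launch().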
|
|
# -*- coding: utf-8 -*-
"""Copyright 2015 Roger R Labbe Jr.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division
import numpy as np
from numpy import dot, zeros, eye
import scipy.linalg as linalg
class HInfinityFilter(object):
def __init__(self, dim_x, dim_z, dim_u, gamma):
""" Create an H-Infinity filter. You are responsible for setting the
various state variables to reasonable values; the defaults below will
not give you a functional filter.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
            Number of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
        dim_u : int
            Number of control inputs for the Gu part of the prediction step.
        gamma : float
            Performance bound for the H-infinity criterion; it scales the
            Q*P term in the measurement update (see `update`).
        """
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.gamma = gamma
self.x = zeros((dim_x,1)) # state
        self.G = 0 # control transition matrix
self.F = 0 # state transition matrix
self.H = 0 # Measurement function
self.P = eye(dim_x) # uncertainty covariance
        self._V_inv = zeros((dim_z, dim_z))
        self._V = zeros((dim_z, dim_z))  # measurement noise; set via the V property
self.W = zeros((dim_x, dim_x))
self.Q = eye(dim_x) # process uncertainty
# gain and residual are computed during the innovation step. We
# save them so that in case you want to inspect them for various
# purposes
self.K = 0 # kalman gain
self.residual = zeros((dim_z, 1))
# identity matrix. Do not alter this.
self._I = np.eye(dim_x)
def update(self, Z):
"""
        Add a new measurement (Z) to the H-infinity filter. If Z is None,
        nothing is changed.
Parameters
----------
Z : np.array
measurement for this update.
"""
if Z is None:
return
# rename for readability and a tiny extra bit of speed
I = self._I
gamma = self.gamma
Q = self.Q
H = self.H
P = self.P
x = self.x
V_inv = self._V_inv
F = self.F
W = self.W
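        # H-infinity measurement update computed below:
        #   L = (I - gamma*Q*P + H^T V^-1 H P)^-1
        #   K = F P L H^T V^-1
        #   x <- x + K (Z - H x)
        #   P <- F P L F^T + W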
# common subexpression H.T * V^-1
HTVI = dot(H.T, V_inv)
L = linalg.inv(I - gamma * dot(Q, P) + dot(HTVI, H).dot(P))
#common subexpression P*L
PL = dot(P, L)
K = dot(F, PL).dot(HTVI)
self.residual = Z - dot(H, x)
# x = x + Ky
# predict new x with residual scaled by the kalman gain
self.x = self.x + dot(K, self.residual)
self.P = dot(F, PL).dot(F.T) + W
# force P to be symmetric
self.P = (self.P + self.P.T) / 2
    '''def update_safe(self, Z):
        """ Same as update(), except we check that the eigenvalues of P
        remain below 1 in magnitude; a ValueError is raised if not. """
        self.update(Z)
        evalues = linalg.eigvals(self.P)
        if np.any(np.abs(evalues) >= 1):
            raise ValueError('eigenvalues of P are not all < 1')
    '''
def predict(self, u=0):
""" Predict next position.
Parameters
----------
u : np.array
Optional control vector. If non-zero, it is multiplied by G
to create the control input into the system.
"""
# x = Fx + Gu
self.x = dot(self.F, self.x) + dot(self.G, u)
def batch_filter(self, Zs, Rs=None, update_first=False):
""" Batch processes a sequences of measurements.
Parameters
----------
Zs : list-like
list of measurements at each time step `self.dt` Missing
measurements must be represented by 'None'.
Rs : list-like, optional
            optional list of values to use for the measurement noise (`V`);
            a value of None in any position will cause the filter to keep
            its current `self.V` for that time step.
update_first : bool, optional,
controls whether the order of operations is update followed by
predict, or predict followed by update. Default is predict->update.
Returns
-------
means: np.array((n,dim_x,1))
array of the state for each time step. Each entry is an np.array.
In other words `means[k,:]` is the state at step `k`.
covariance: np.array((n,dim_x,dim_x))
array of the covariances for each time step. In other words
`covariance[k,:,:]` is the covariance at step `k`.
"""
n = np.size(Zs,0)
if Rs is None:
Rs = [None]*n
# mean estimates from Kalman Filter
means = zeros((n,self.dim_x,1))
# state covariances from Kalman Filter
covariances = zeros((n,self.dim_x,self.dim_x))
        if update_first:
            for i, (z, r) in enumerate(zip(Zs, Rs)):
                if r is not None:
                    self.V = r
                self.update(z)
                means[i, :] = self.x
                covariances[i, :, :] = self.P
                self.predict()
        else:
            for i, (z, r) in enumerate(zip(Zs, Rs)):
                self.predict()
                if r is not None:
                    self.V = r
                self.update(z)
                means[i, :] = self.x
                covariances[i, :, :] = self.P
return (means, covariances)
def get_prediction(self, u=0):
""" Predicts the next state of the filter and returns it. Does not
alter the state of the filter.
Parameters
----------
u : np.array
optional control input
Returns
-------
x : numpy.ndarray
            State vector of the prediction.
"""
x = dot(self.F, self.x) + dot(self.G, u)
return x
def residual_of(self, z):
""" returns the residual for the given measurement (z). Does not alter
the state of the filter.
"""
return z - dot(self.H, self.x)
def measurement_of_state(self, x):
""" Helper function that converts a state into a measurement.
Parameters
----------
x : np.array
kalman state vector
Returns
-------
z : np.array
measurement corresponding to the given state
"""
return dot(self.H, x)
@property
def V(self):
return self._V
@V.setter
def V(self, value):
if np.isscalar(value):
self._V = np.array([[value]], dtype=float)
else:
self._V = value
self._V_inv = linalg.inv(self._V)
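if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the library API):
    # track 1-D position with a constant-velocity model. All numeric values
    # below are assumptions, not tuned defaults.
    dt = 1.0
    hinf = HInfinityFilter(dim_x=2, dim_z=1, dim_u=1, gamma=0.01)
    hinf.F = np.array([[1., dt],
                       [0., 1.]])    # constant-velocity state transition
    hinf.H = np.array([[1., 0.]])    # only position is measured
    hinf.G = np.array([[0.],
                       [0.]])        # no control input in this sketch
    hinf.Q = np.eye(2) * 1e-4
    hinf.W = np.eye(2) * 1e-4
    hinf.V = 0.01                    # scalar measurement noise -> 1x1 matrix
    for z in [1.0, 2.1, 2.9, 4.2, 5.0]:
        hinf.predict(u=np.array([[0.]]))
        hinf.update(np.array([[z]]))
        print(hinf.x.T)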
|
|
"""
This script builds the design matrix for our linear regression.
We explore the influence of linear and quadratic drifts on the model
performance.
Script for the filtered data.
Run with:
python noise-pca_filtered_script.py
from this directory
"""
from __future__ import print_function, division
import sys, os, pdb
from scipy import ndimage
from scipy.ndimage import gaussian_filter
from matplotlib import colors
from os.path import splitext
from scipy.stats import t as t_dist
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
import nibabel as nib
import scipy
import pprint as pp
import json
# Specify the path for the helper functions
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
sys.path.append(os.path.join(os.path.dirname(__file__), "./"))
from smoothing import *
from diagnostics import *
from glm_func import *
from plot_mosaic import *
from mask_filtered_data import *
# Locate the paths
project_path = '../../../'
data_path = project_path+'data/ds005/'
path_dict = {'data_filtered':{
'type' : 'filtered',
'feat' : '.feat',
'bold_img_name' : 'filtered_func_data_mni.nii.gz',
'run_path' : 'model/model001/'
},
'data_original':{
'type' : '',
'feat': '',
'bold_img_name' : 'bold.nii.gz',
'run_path' : 'BOLD/'
}}
# TODO: uncomment for final version
#subject_list = [str(i) for i in range(1,17)]
#run_list = [str(i) for i in range(1,4)]
# Run only for subjects 1 and 5 - run 1
run_list = [str(i) for i in range(1,2)]
subject_list = ['1', '5']
d_path = path_dict['data_filtered']  # 'data_original' or 'data_filtered'
images_paths = [('ds005' + '_sub' + s.zfill(3) + '_t1r' + r, \
data_path + 'sub%s/'%(s.zfill(3)) + d_path['run_path'] \
+ 'task001_run%s%s/%s' %(r.zfill(3),d_path['feat'],\
d_path['bold_img_name'])) \
for r in run_list \
for s in subject_list]
# set gray colormap and nearest neighbor interpolation by default
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['image.interpolation'] = 'nearest'
# Mask
# To be used with the normal data
thres = 375 #From analysis of the histograms
# To be used with the filtered data
mask_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_mask_2mm.nii'
template_path = project_path+'data/mni_icbm152_t1_tal_nlin_asym_09c_2mm.nii'
sm = ''
#sm='not_smooth/'
project_path = project_path + sm
# Create the needed directories if they do not exist
dirs = [project_path+'fig/',\
project_path+'fig/BOLD',\
project_path+'fig/drifts',\
project_path+'fig/pca',\
project_path+'fig/pca/projections/',\
project_path+'fig/linear_model/mosaic',\
project_path+'fig/linear_model/mosaic/middle_slice',\
project_path+'txt_output/',\
project_path+'txt_output/MRSS/',\
project_path+'txt_output/pca/',\
project_path+'txt_output/drifts/']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
# Progress messages
print("\nStarting noise-pca for filtered data analysis\n")
for image_path in images_paths:
name = image_path[0]
if d_path['type']=='filtered':
#in_brain_img = nib.load('../../../'+
# 'data/ds005/sub001/model/model001/task001_run001.feat/'\
# + 'masked_filtered_func_data_mni.nii.gz')
# Image shape (91, 109, 91, 240)
md = os.path.join(\
str('/'.join(image_path[1].split('/')[:-1])), \
'masked_' + str(d_path['bold_img_name']))
if not os.path.exists(md):
print("Filtering brain image for: ")
print(str(name))
in_brain_img = make_mask_filtered_data(image_path[1],mask_path)
print("brain image filtered\n")
else:
print("Loading filtered brain image for: ")
print(str(name))
in_brain_img = nib.load(md)
print("brain image loaded\n")
data_int = in_brain_img.get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
template = nib.load(template_path)
template_data_int = template.get_data()
template_data = template_data_int.astype(float)
Transpose = False
in_brain_mask = (mean_data - 0.0) < 0.01
plt.imshow(plot_mosaic(template_data, transpose=Transpose),\
cmap='gray', alpha=1)
else:
img = nib.load(image_path[1])
data_int = img.get_data()
data = data_int.astype(float)
mean_data = np.mean(data, axis=-1)
in_brain_mask = mean_data > thres
Transpose = True
plt.contour(plot_mosaic(in_brain_mask, transpose=Transpose), \
cmap='gray' , alpha=1)
# Smoothing with Gaussian filter
smooth_data = smoothing(data,1,range(data.shape[-1]))
# Selecting the voxels in the brain
in_brain_tcs = smooth_data[in_brain_mask, :]
#in_brain_tcs = data[in_brain_mask, :]
vol_shape = data.shape[:-1]
# Plotting the voxels in the brain
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.colorbar()
plt.title('In brain voxel mean values' + '\n' + (d_path['type'] + str(name)))
plt.savefig(project_path+'fig/BOLD/%s_mean_voxels.png'\
%(d_path['type'] + str(name)))
#plt.show()
#plt.clf()
# Convolution with 1 to 4 conditions
convolved = np.zeros((240,5))
for i in range(1,5):
#convolved = np.loadtxt(\
# '../../../txt_output/conv_normal/%s_conv_00%s_canonical.txt'\
# %(str(name),str(i)))
convolved[:,i] = np.loadtxt(\
'../../../txt_output/conv_high_res/%s_conv_00%s_high_res.txt'\
%(str(name),str(i)))
reg_str = ['Intercept','Task', 'Gain', 'Loss', 'Distance', 'Linear Drift',\
'Quadratic drift', 'PC#1', 'PC#2', 'PC#3', 'PC#4']
# Create design matrix X - Including drifts
    P = 7  # number of regressors in X, including the intercept
n_trs = data.shape[-1]
X = np.ones((n_trs, P))
for i in range(1,5):
X[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X[:,6] = quadratic_drift
# Save the design matrix
np.savetxt(project_path+\
'txt_output/drifts/%s_design_matrix_with_drift.txt'\
%(d_path['type'] + str(name)), X)
# Linear Model - Including drifts
Y = in_brain_tcs.T
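    # Ordinary least squares fit via the Moore-Penrose pseudoinverse: betas
    # has shape (n_regressors, n_voxels), one coefficient per regressor and voxel.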
betas = npl.pinv(X).dot(Y)
# Save the betas for the linear model including drifts
np.savetxt(project_path+\
'txt_output/drifts/%s_betas_with_drift.txt'%(d_path['type'] + str(name)), betas)
betas_vols = np.zeros(vol_shape + (P,))
betas_vols[in_brain_mask] = betas.T
# Plot
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
betas_vols[~in_brain_mask] = np.nan
nice_cmap_values = np.loadtxt('actc.txt')
nice_cmap = colors.ListedColormap(nice_cmap_values, 'actc')
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P):
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
#plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(betas_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
plt.colorbar()
plt.title('Beta (with drift) values for brain voxel related to ' \
+ str(reg_str[k]) + '\n' + d_path['type'] + str(name))
plt.savefig(project_path+'fig/linear_model/mosaic/%s_withdrift_%s'\
%(d_path['type'] + str(name), str(reg_str[k]))+'.png')
plt.close()
#plt.show()
#plt.clf()
#Show the middle slice only
plt.imshow(betas_vols[:, :, 18, k], cmap='gray', alpha=0.5)
plt.colorbar()
plt.title('In brain voxel - Slice 18 \n' \
'Projection on %s - %s'\
%(str(reg_str[k]), d_path['type'] + str(name)))
plt.savefig(\
project_path+'fig/linear_model/mosaic/middle_slice/%s_withdrift_middleslice_%s'\
%(d_path['type'] + str(name), str(k))+'.png')
#plt.show()
#plt.clf()
plt.close()
# PCA Analysis
Y_demeaned = Y - np.mean(Y, axis=1).reshape([-1, 1])
unscaled_cov = Y_demeaned.dot(Y_demeaned.T)
U, S, V = npl.svd(unscaled_cov)
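    # unscaled_cov is (n_trs, n_trs), a covariance over time points, so the
    # columns of U are temporal principal components; projecting the demeaned
    # data onto them gives one spatial map per component.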
projections = U.T.dot(Y_demeaned)
projection_vols = np.zeros(data.shape)
projection_vols[in_brain_mask, :] = projections.T
# Plot the projection of the data on the 5 first principal component
# from SVD
for i in range(1,5):
plt.plot(U[:, i])
plt.title('U' + str(i) + ' vector from SVD \n' + str(name))
plt.imshow(projection_vols[:, :, 18, i])
plt.colorbar()
plt.title('PCA - 18th slice projection on PC#' + str(i) + ' from SVD \n ' +\
d_path['type'] + str(name))
plt.savefig(project_path+'fig/pca/projections/%s_PC#%s.png' \
%((d_path['type'] + str(name),str(i))))
#plt.show()
#plt.clf()
plt.close()
# Variance Explained analysis
s = []
    # The trace of the covariance equals sum(S), so s_i / sum(S) is the
    # fraction of variance explained by component i
for i in S:
s.append(i/np.sum(S))
np.savetxt(project_path+\
'txt_output/pca/%s_variance_explained' % (d_path['type'] + str(name)) +\
'.txt', np.array(s[:40]))
ind = np.arange(len(s[1:40]))
plt.bar(ind, s[1:40], width=0.5)
plt.xlabel('Principal Components indices')
plt.ylabel('Explained variance in percent')
plt.title('Variance explained graph \n' + (d_path['type'] + str(name)))
plt.savefig(project_path+\
'fig/pca/%s_variance_explained.png' %(d_path['type'] + str(name)))
#plt.show()
plt.close()
# Linear Model - including PCs from PCA analysis
PC = 3 # Number of PCs to include in the design matrix
P_pca = P + PC
X_pca = np.ones((n_trs, P_pca))
for i in range(1,5):
X_pca[:,i] = convolved[:,i]
linear_drift = np.linspace(-1, 1, n_trs)
X_pca[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X_pca[:,6] = quadratic_drift
for i in range(3):
X_pca[:,7+i] = U[:, i]
# Save the design matrix - with PCs
np.savetxt(project_path+'txt_output/pca/%s_design_matrix_pca.txt'\
%(d_path['type'] + str(name)), X_pca)
#plt.imshow(X_pca, aspect=0.25)
B_pca = npl.pinv(X_pca).dot(Y)
np.savetxt(project_path+'txt_output/pca/%s_betas_pca.txt'\
%(d_path['type'] + str(name)), B_pca)
b_pca_vols = np.zeros(vol_shape + (P_pca,))
b_pca_vols[in_brain_mask, :] = B_pca.T
# Save betas as nii files
# Plot - with PCs
# Set regions outside mask as missing with np.nan
mean_data[~in_brain_mask] = np.nan
b_pca_vols[~in_brain_mask] = np.nan
# Plot each slice on the 3rd dimension of the image in a mosaic
for k in range(1,P_pca):
fig = plt.figure(figsize = (8, 5))
#plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap='gray', alpha=0.5)
plt.imshow(plot_mosaic(mean_data, transpose=Transpose), cmap='gray', alpha=1)
plt.imshow(plot_mosaic(b_pca_vols[...,k], transpose=Transpose), cmap=nice_cmap, alpha=1)
plt.colorbar()
plt.title('Beta (with PCA) values for brain voxel related to ' \
+ str(reg_str[k]) + '\n' + d_path['type'] + str(name))
plt.savefig(project_path+'fig/linear_model/mosaic/%s_withPCA_%s'\
%(d_path['type'] + str(name), str(reg_str[k]))+'.png')
#plt.show()
plt.close()
#Show the middle slice only
plt.imshow(b_pca_vols[:, :, 18, k], cmap='gray', alpha=0.5)
plt.colorbar()
plt.title('In brain voxel model - Slice 18 \n' \
'Projection on X%s \n %s'\
%(str(reg_str[k]),d_path['type'] + str(name)))
plt.savefig(\
project_path+\
'fig/linear_model/mosaic/middle_slice/%s_withPCA_middle_slice_%s'\
%(d_path['type'] + str(name), str(k))+'.png')
#plt.show()
#plt.clf()
plt.close()
# Residuals
MRSS_dict = {}
MRSS_dict['ds005' + d_path['type']] = {}
MRSS_dict['ds005' + d_path['type']]['drifts'] = {}
MRSS_dict['ds005' + d_path['type']]['pca'] = {}
for z in MRSS_dict['ds005' + d_path['type']]:
MRSS_dict['ds005' + d_path['type']][z]['MRSS'] = []
residuals = Y - X.dot(betas)
df = X.shape[0] - npl.matrix_rank(X)
MRSS = np.sum(residuals ** 2 , axis=0) / df
residuals_pca = Y - X_pca.dot(B_pca)
df_pca = X_pca.shape[0] - npl.matrix_rank(X_pca)
MRSS_pca = np.sum(residuals_pca ** 2 , axis=0) / df_pca
MRSS_dict['ds005' + d_path['type']]['drifts']['mean_MRSS'] = np.mean(MRSS)
MRSS_dict['ds005' + d_path['type']]['pca']['mean_MRSS'] = np.mean(MRSS_pca)
# Save the mean MRSS values to compare the performance
# of the design matrices
for design_matrix, beta, mrss, name in \
[(X, betas, MRSS, 'drifts'), (X_pca, B_pca, MRSS_pca, 'pca')]:
MRSS_dict['ds005' + d_path['type']][name]['p-values'] = []
MRSS_dict['ds005' + d_path['type']][name]['t-test'] = []
with open(project_path+'txt_output/MRSS/ds005%s_MRSS.json'\
%(d_path['type']), 'w') as file_out:
json.dump(MRSS_dict, file_out)
# SE = np.zeros(beta.shape)
# for i in range(design_matrix.shape[-1]):
# c = np.zeros(design_matrix.shape[-1])
# c[i]=1
# c = np.atleast_2d(c).T
# SE[i,:]= np.sqrt(\
# mrss* c.T.dot(npl.pinv(design_matrix.T.dot(design_matrix)).dot(c)))
# zeros = np.where(SE==0)
# SE[zeros] = 1
# t = beta / SE
# t[:,zeros] = 0
        # # Get p value for t value using CDF of t distribution
# ltp = t_dist.cdf(abs(t), df)
# p = 1 - ltp # upper tail
# t_brain = t[in_brain_mask]
# p_brain = p[in_brain_mask]
#
# # Save 3D data in .nii files
# for k in range(1,4):
# t_nib = nib.Nifti1Image(t_brain[..., k], affine)
        # nib.save(t_nib, project_path+'txt_output/%s/%s_t-test_%s.nii.gz'\
        # %(name, d_path['type'] + str(name),str(reg_str[k])))
        # p_nib = nib.Nifti1Image(p_brain[..., k], affine)
        # nib.save(p_nib, project_path+'txt_output/%s/%s_p-values_%s.nii.gz'\
# %(name, d_path['type'] + str(name),str(reg_str[k])))
# pdb.set_trace()
# pdb.set_trace()
plt.close()
print("=")
print("======================================")
print("\n Noise and PCA analysis for filtered data done")
print("Design Matrix including drift terms stored in project_epsilon/txt_output/drifts/ \n\n")
print("Design Matrix including PCs terms stored in project_epsilon/txt_output/pca/\n\n")
print("Mean MRSS models results in project_epsilon/txt_output/MRSS/ds005filtered_MRSS.json\n\n")
|
|
# -*- coding: utf-8 -*-
from mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
from zerver.lib.webhooks.git import COMMITS_LIMIT
class GitlabHookTests(WebhookTestCase):
STREAM_NAME = 'gitlab'
URL_TEMPLATE = "/api/v1/external/gitlab?&api_key={api_key}&stream={stream}"
FIXTURE_DIR_NAME = 'gitlab'
def test_push_event_specified_topic(self) -> None:
self.url = self.build_webhook_url("topic=Specific%20topic")
expected_topic = u"Specific topic"
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 2 commits to branch tomek.\n\n* b ([66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7))\n* c ([eb6ae1e](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9))"
self.send_and_test_stream_message('push', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_event_message(self) -> None:
expected_topic = u"my-awesome-project / tomek"
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 2 commits to branch tomek.\n\n* b ([66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7))\n* c ([eb6ae1e](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9))"
self.send_and_test_stream_message('push', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_local_branch_without_commits(self) -> None:
expected_topic = u"my-awesome-project / changes"
expected_message = u"Eeshan Garg [pushed](https://gitlab.com/eeshangarg/my-awesome-project/compare/0000000000000000000000000000000000000000...68d7a5528cf423dfaac37dd62a56ac9cc8a884e3) the branch changes."
self.send_and_test_stream_message('push_local_branch_without_commits', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_event_message_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,tomek')
expected_topic = u"my-awesome-project / tomek"
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 2 commits to branch tomek.\n\n* b ([66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7))\n* c ([eb6ae1e](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9))"
self.send_and_test_stream_message('push', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_multiple_committers(self) -> None:
expected_topic = u"my-awesome-project / tomek"
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 2 commits to branch tomek. Commits by Ben (1) and Tomasz Kolek (1).\n\n* b ([66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7))\n* c ([eb6ae1e](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9))"
self.send_and_test_stream_message('push_multiple_committers', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_multiple_committers_with_others(self) -> None:
expected_topic = u"my-awesome-project / tomek"
commit_info = u"* b ([eb6ae1e](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9))\n"
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 7 commits to branch tomek. Commits by Ben (3), baxterthehacker (2), James (1) and others (1).\n\n{}* b ([eb6ae1e](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9))".format(commit_info * 6)
self.send_and_test_stream_message('push_multiple_committers_with_others', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_commits_more_than_limit_event_message(self) -> None:
expected_topic = u"my-awesome-project / tomek"
commits_info = u'* b ([66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7))\n'
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 50 commits to branch tomek.\n\n{}[and {} more commit(s)]".format(
commits_info * COMMITS_LIMIT,
50 - COMMITS_LIMIT,
)
self.send_and_test_stream_message('push_commits_more_than_limit', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_push_commits_more_than_limit_message_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,tomek')
expected_topic = u"my-awesome-project / tomek"
commits_info = u'* b ([66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7))\n'
expected_message = u"Tomasz Kolek [pushed](https://gitlab.com/tomaszkolek0/my-awesome-project/compare/5fcdd5551fc3085df79bece2c32b1400802ac407...eb6ae1e591e0819dc5bf187c6bfe18ec065a80e9) 50 commits to branch tomek.\n\n{}[and {} more commit(s)]".format(
commits_info * COMMITS_LIMIT,
50 - COMMITS_LIMIT,
)
self.send_and_test_stream_message('push_commits_more_than_limit', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_remove_branch_event_message(self) -> None:
expected_topic = u"my-awesome-project / tomek"
expected_message = u"Tomasz Kolek deleted branch tomek."
self.send_and_test_stream_message('remove_branch', expected_topic, expected_message, HTTP_X_GITLAB_EVENT="Push Hook")
def test_add_tag_event_message(self) -> None:
expected_topic = u"my-awesome-project"
expected_message = u"Tomasz Kolek pushed tag xyz."
self.send_and_test_stream_message(
'add_tag',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Tag Push Hook",
)
def test_remove_tag_event_message(self) -> None:
expected_topic = u"my-awesome-project"
expected_message = u"Tomasz Kolek removed tag xyz."
self.send_and_test_stream_message(
'remove_tag',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Tag Push Hook"
)
def test_create_issue_without_assignee_event_message(self) -> None:
expected_topic = u"my-awesome-project / Issue #1 Issue title"
expected_message = u"Tomasz Kolek created [Issue #1](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1):\n\n~~~ quote\nIssue description\n~~~"
self.send_and_test_stream_message(
'issue_created_without_assignee',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_confidential_issue_without_assignee_event_message(self) -> None:
expected_subject = u"testing / Issue #1 Testing"
expected_message = u"Joe Bloggs created [Issue #1](https://gitlab.example.co.uk/joe.bloggs/testing/issues/1):\n\n~~~ quote\nTesting\n~~~"
self.send_and_test_stream_message(
'confidential_issue_created_without_assignee',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_issue_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek created [Issue #1 Issue title](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1):\n\n~~~ quote\nIssue description\n~~~"
self.send_and_test_stream_message(
'issue_created_without_assignee',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_issue_with_assignee_event_message(self) -> None:
expected_topic = u"my-awesome-project / Issue #1 Issue title"
expected_message = u"Tomasz Kolek created [Issue #1](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1) (assigned to Tomasz Kolek):\n\n~~~ quote\nIssue description\n~~~"
self.send_and_test_stream_message(
'issue_created_with_assignee',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_issue_with_two_assignees_event_message(self) -> None:
expected_subject = u"Zulip GitLab Test / Issue #2 Zulip Test Issue 2"
expected_message = u"Adam Birds created [Issue #2](https://gitlab.com/adambirds/zulip-gitlab-test/issues/2) (assigned to adambirds and eeshangarg):\n\n~~~ quote\nZulip Test Issue 2\n~~~"
self.send_and_test_stream_message(
'issue_created_with_two_assignees',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_issue_with_three_assignees_event_message(self) -> None:
expected_subject = u"Zulip GitLab Test / Issue #2 Zulip Test Issue 2"
expected_message = u"Adam Birds created [Issue #2](https://gitlab.com/adambirds/zulip-gitlab-test/issues/2) (assigned to adambirds, eeshangarg and timabbott):\n\n~~~ quote\nZulip Test Issue 2\n~~~"
self.send_and_test_stream_message(
'issue_created_with_three_assignees',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_confidential_issue_with_assignee_event_message(self) -> None:
expected_subject = u"testing / Issue #2 Testing"
expected_message = u"Joe Bloggs created [Issue #2](https://gitlab.example.co.uk/joe.bloggs/testing/issues/2) (assigned to joe.bloggs):\n\n~~~ quote\nTesting\n~~~"
self.send_and_test_stream_message(
'confidential_issue_created_with_assignee',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_issue_with_hidden_comment_in_description(self) -> None:
expected_topic = u"public-repo / Issue #3 New Issue with hidden comment"
expected_message = u"Eeshan Garg created [Issue #3](https://gitlab.com/eeshangarg/public-repo/issues/3):\n\n~~~ quote\nThis description actually has a hidden comment in it!\n~~~"
self.send_and_test_stream_message(
'issue_created_with_hidden_comment_in_description',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_confidential_issue_with_hidden_comment_in_description(self) -> None:
expected_subject = u"testing / Issue #1 Testing"
expected_message = u"Joe Bloggs created [Issue #1](https://gitlab.example.co.uk/joe.bloggs/testing/issues/1):\n\n~~~ quote\nThis description actually has a hidden comment in it!\n~~~"
self.send_and_test_stream_message(
'confidential_issue_created_with_hidden_comment_in_description',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_create_issue_with_null_description(self) -> None:
expected_topic = u"my-awesome-project / Issue #7 Issue without description"
expected_message = u"Eeshan Garg created [Issue #7](https://gitlab.com/eeshangarg/my-awesome-project/issues/7)."
self.send_and_test_stream_message(
'issue_opened_with_null_description',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_update_issue_event_message(self) -> None:
expected_topic = u"my-awesome-project / Issue #1 Issue title_new"
expected_message = u"Tomasz Kolek updated [Issue #1](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1)."
self.send_and_test_stream_message(
'issue_updated',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_update_confidential_issue_event_message(self) -> None:
expected_subject = u"testing / Issue #1 Testing"
expected_message = u"Joe Bloggs updated [Issue #1](https://gitlab.example.co.uk/joe.bloggs/testing/issues/1)."
self.send_and_test_stream_message(
'confidential_issue_updated',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_update_issue_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek updated [Issue #1 Issue title_new](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1)."
self.send_and_test_stream_message(
'issue_updated',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_close_issue_event_message(self) -> None:
expected_topic = u"my-awesome-project / Issue #1 Issue title_new"
expected_message = u"Tomasz Kolek closed [Issue #1](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1)."
self.send_and_test_stream_message(
'issue_closed',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_close_confidential_issue_event_message(self) -> None:
expected_subject = u"testing / Issue #1 Testing Test"
expected_message = u"Joe Bloggs closed [Issue #1](https://gitlab.example.co.uk/joe.bloggs/testing/issues/1)."
self.send_and_test_stream_message(
'confidential_issue_closed',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_reopen_issue_event_message(self) -> None:
expected_topic = u"my-awesome-project / Issue #1 Issue title_new"
expected_message = u"Tomasz Kolek reopened [Issue #1](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/1)."
self.send_and_test_stream_message(
'issue_reopened',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_reopen_confidential_issue_event_message(self) -> None:
expected_subject = u"testing / Issue #1 Testing Test"
expected_message = u"Joe Bloggs reopened [Issue #1](https://gitlab.example.co.uk/joe.bloggs/testing/issues/1)."
self.send_and_test_stream_message(
'confidential_issue_reopened',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Issue Hook"
)
def test_note_commit_event_message(self) -> None:
expected_topic = u"my-awesome-project"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7#note_14169211) on [66abd2d](https://gitlab.com/tomaszkolek0/my-awesome-project/commit/66abd2da28809ffa128ed0447965cf11d7f863a7):\n~~~ quote\nnice commit\n~~~"
self.send_and_test_stream_message(
'commit_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_note_merge_request_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #1 Tomek"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/1#note_14171860) on [MR #1](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/1):\n\n~~~ quote\nNice merge request!\n~~~"
self.send_and_test_stream_message(
'merge_request_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_note_merge_request_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/1#note_14171860) on [MR #1 Tomek](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/1):\n\n~~~ quote\nNice merge request!\n~~~"
self.send_and_test_stream_message(
'merge_request_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_note_issue_event_message(self) -> None:
expected_topic = u"my-awesome-project / Issue #2 abc"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/2#note_14172057) on [Issue #2](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/2):\n\n~~~ quote\nNice issue\n~~~"
self.send_and_test_stream_message(
'issue_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_note_confidential_issue_event_message(self) -> None:
expected_subject = u"Test / Issue #3 Test"
expected_message = u"Joe Bloggs [commented](https://gitlab.com/joebloggs/test/issues/3#note_101638770) on [Issue #3](https://gitlab.com/joebloggs/test/issues/3):\n\n~~~ quote\nTest\n~~~"
self.send_and_test_stream_message(
'confidential_issue_note',
expected_subject,
expected_message,
HTTP_X_GITLAB_EVENT="Confidential Note Hook"
)
def test_note_issue_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/2#note_14172057) on [Issue #2 abc](https://gitlab.com/tomaszkolek0/my-awesome-project/issues/2):\n\n~~~ quote\nNice issue\n~~~"
self.send_and_test_stream_message(
'issue_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_note_snippet_event_message(self) -> None:
expected_topic = u"my-awesome-project / Snippet #2 test"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/snippets/2#note_14172058) on [Snippet #2](https://gitlab.com/tomaszkolek0/my-awesome-project/snippets/2):\n\n~~~ quote\nNice snippet\n~~~"
self.send_and_test_stream_message(
'snippet_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_note_snippet_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek [commented](https://gitlab.com/tomaszkolek0/my-awesome-project/snippets/2#note_14172058) on [Snippet #2 test](https://gitlab.com/tomaszkolek0/my-awesome-project/snippets/2):\n\n~~~ quote\nNice snippet\n~~~"
self.send_and_test_stream_message(
'snippet_note',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Note Hook"
)
def test_merge_request_created_without_assignee_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #2 NEW MR"
expected_message = u"Tomasz Kolek created [MR #2](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/2) from `tomek` to `master`:\n\n~~~ quote\ndescription of merge request\n~~~"
self.send_and_test_stream_message(
'merge_request_created_without_assignee',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek created [MR #2 NEW MR](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/2) from `tomek` to `master`:\n\n~~~ quote\ndescription of merge request\n~~~"
self.send_and_test_stream_message(
'merge_request_created_without_assignee',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_created_with_assignee_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #3 New Merge Request"
expected_message = u"Tomasz Kolek created [MR #3](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/3) (assigned to Tomasz Kolek) from `tomek` to `master`:\n\n~~~ quote\ndescription of merge request\n~~~"
self.send_and_test_stream_message(
'merge_request_created_with_assignee',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_closed_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #2 NEW MR"
expected_message = u"Tomasz Kolek closed [MR #2](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/2)."
self.send_and_test_stream_message(
'merge_request_closed',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_closed_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic='notifications')
expected_topic = u"notifications"
expected_message = u"Tomasz Kolek closed [MR #2 NEW MR](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/2)."
self.send_and_test_stream_message(
'merge_request_closed',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_reopened_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #1 Update the README with author ..."
expected_message = u"Eeshan Garg reopened [MR #1](https://gitlab.com/eeshangarg/my-awesome-project/merge_requests/1)."
self.send_and_test_stream_message(
'merge_request_reopened',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_approved_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #1 Update the README with author ..."
expected_message = u"Eeshan Garg approved [MR #1](https://gitlab.com/eeshangarg/my-awesome-project/merge_requests/1)."
self.send_and_test_stream_message(
'merge_request_approved',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_updated_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #3 New Merge Request"
expected_message = u"Tomasz Kolek updated [MR #3](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/3) (assigned to Tomasz Kolek) from `tomek` to `master`:\n\n~~~ quote\nupdated desc\n~~~"
self.send_and_test_stream_message(
'merge_request_updated',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_added_commit_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #3 New Merge Request"
expected_message = u"Tomasz Kolek added commit(s) to [MR #3](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/3)."
self.send_and_test_stream_message(
'merge_request_added_commit',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_merge_request_merged_event_message(self) -> None:
expected_topic = u"my-awesome-project / MR #3 New Merge Request"
expected_message = u"Tomasz Kolek merged [MR #3](https://gitlab.com/tomaszkolek0/my-awesome-project/merge_requests/3)."
self.send_and_test_stream_message(
'merge_request_merged',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Merge Request Hook"
)
def test_wiki_page_opened_event_message(self) -> None:
expected_topic = u"my-awesome-project"
expected_message = u"Tomasz Kolek created [Wiki Page \"how to\"](https://gitlab.com/tomaszkolek0/my-awesome-project/wikis/how-to)."
self.send_and_test_stream_message(
'wiki_page_opened',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Wiki Page Hook"
)
def test_wiki_page_edited_event_message(self) -> None:
expected_topic = u"my-awesome-project"
expected_message = u"Tomasz Kolek updated [Wiki Page \"how to\"](https://gitlab.com/tomaszkolek0/my-awesome-project/wikis/how-to)."
self.send_and_test_stream_message(
'wiki_page_edited',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Wiki Page Hook"
)
def test_build_created_event_message(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Build job_name from test stage was created."
self.send_and_test_stream_message(
'build_created',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Job Hook"
)
def test_build_started_event_message(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Build job_name from test stage started."
self.send_and_test_stream_message(
'build_started',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Job Hook"
)
def test_build_succeeded_event_message(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Build job_name from test stage changed status to success."
self.send_and_test_stream_message(
'build_succeeded',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Job Hook"
)
def test_build_created_event_message_legacy_event_name(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Build job_name from test stage was created."
self.send_and_test_stream_message(
'build_created',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Build Hook"
)
def test_build_started_event_message_legacy_event_name(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Build job_name from test stage started."
self.send_and_test_stream_message(
'build_started',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Build Hook"
)
def test_build_succeeded_event_message_legacy_event_name(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Build job_name from test stage changed status to success."
self.send_and_test_stream_message(
'build_succeeded',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Build Hook"
)
def test_pipeline_succeeded_event_message(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Pipeline changed status to success with build(s):\n* job_name2 - success\n* job_name - success."
self.send_and_test_stream_message(
'pipeline_succeeded',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Pipeline Hook"
)
def test_pipeline_started_event_message(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Pipeline started with build(s):\n* job_name - running\n* job_name2 - pending."
self.send_and_test_stream_message(
'pipeline_started',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Pipeline Hook"
)
def test_pipeline_pending_event_message(self) -> None:
expected_topic = u"my-awesome-project / master"
expected_message = u"Pipeline was created with build(s):\n* job_name2 - pending\n* job_name - created."
self.send_and_test_stream_message(
'pipeline_pending',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Pipeline Hook"
)
def test_issue_type_test_payload(self) -> None:
expected_topic = u'public-repo'
expected_message = u"Webhook for **public-repo** has been configured successfully! :tada:"
self.send_and_test_stream_message(
'issue_test_payload',
expected_topic,
expected_message,
HTTP_X_GITLAB_EVENT="Test Hook"
)
@patch('zerver.lib.webhooks.common.check_send_webhook_message')
def test_push_event_message_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='master,development')
payload = self.get_body('push')
result = self.client_post(self.url, payload, HTTP_X_GITLAB_EVENT='Push Hook', content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.lib.webhooks.common.check_send_webhook_message')
def test_push_commits_more_than_limit_message_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='master,development')
payload = self.get_body('push_commits_more_than_limit')
result = self.client_post(self.url, payload, HTTP_X_GITLAB_EVENT='Push Hook', content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
|
|
"""
Linear dynamical system model for the AP text dataset.
Each document is modeled as a draw from an LDS with
categorical observations.
"""
from __future__ import print_function
import os
import re
import gzip
import time
import cPickle
import operator
import collections
import numpy as np
from scipy.misc import logsumexp
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
from hips.plotting.layout import create_axis_at_location, create_figure
import brewer2mpl
from pgmult.lds import MultinomialLDS
from pgmult.particle_lds import LogisticNormalMultinomialLDS, ParticleSBMultinomialLDS
from pgmult.hmm import MultinomialHMM
from pgmult.utils import pi_to_psi
from pylds.models import DefaultLDS, NonstationaryLDS
from pybasicbayes.distributions import GaussianFixed, Multinomial, Regression
from pybasicbayes.util.text import progprint_xrange
from autoregressive.distributions import AutoRegression
colors = brewer2mpl.get_map("Set1", "Qualitative", 9).mpl_colors
goodcolors = np.array([0,1,2,4,6,7,8])
colors = np.array(colors)[goodcolors]
np.seterr(invalid="warn")
np.random.seed(0)
# Model parameters
K = 200 # Number of words
# Data handling
def load(filename=None):
if filename is None:
bigstr = download_ap()
else:
with open(filename,'r') as infile:
bigstr = infile.read()
docs = re.findall(r'(?<=<TEXT> ).*?(?= </TEXT>)',bigstr.translate(None,'\n'))
vectorizer = CountVectorizer(stop_words='english',max_features=K).fit(docs)
docs = [make_onehot_seq(doc, vectorizer) for doc in docs]
# words = vectorizer.get_feature_names()
words = vectorizer.vocabulary_.keys()
# Sort by usage
# usage = np.array([doc.sum(0) for doc in docs]).sum(0)
# perm = np.argsort(usage)[::-1]
# docs = [doc[:,perm] for doc in docs]
# words = np.array(words)[perm]
words = np.array(words)
return docs, words
def download_ap():
from cStringIO import StringIO
from urllib2 import urlopen
import tarfile
print("Downloading AP data...")
response = urlopen('http://www.cs.princeton.edu/~blei/lda-c/ap.tgz')
tar = tarfile.open(fileobj=StringIO(response.read()))
return tar.extractfile('ap/ap.txt').read()
def filter_wordseq(doc, vectorizer):
return [w for w in doc if w in vectorizer.vocabulary_]
def make_onehot_seq(doc, vectorizer):
lst = filter_wordseq(vectorizer.build_analyzer()(doc), vectorizer)
indices = {word:idx for idx, word in enumerate(vectorizer.vocabulary_.keys())}
out = np.zeros((len(lst),len(indices)))
for wordidx, word in enumerate(lst):
out[wordidx, indices[word]] = 1
return out
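# Illustrative sketch (not part of the original pipeline): make_onehot_seq maps a
# document to a one-hot matrix -- one row per retained token, one column per
# vocabulary word -- so each document becomes a categorical time series, e.g.
#
#   vec = CountVectorizer(stop_words='english', max_features=K).fit(["the cat sat on the mat"])
#   X = make_onehot_seq("the cat sat on the mat", vec)
#   # X.shape == (number of in-vocabulary tokens, len(vec.vocabulary_)); each row sums to 1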
# Inference stuff
# model, lls, test_lls, pred_lls, pis, psis, zs, timestamps
Results = collections.namedtuple("Results", ["lls", "test_lls", "pred_lls", "samples", "timestamps"])
def fit_lds_model(Xs, Xtest, D, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
mus = [X.sum(0) + 0.1 for X in Xs]
mus = [mu/mu.sum() for mu in mus]
# mus = [np.ones(K)/float(K) for _ in Xs]
models = [MultinomialLDS(K, D,
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
sigma_C=1., mu_pi=mus[i]) for i in xrange(Nx)]
for X, model in zip(Xs, models):
model.add_data(X)
[model.resample_parameters() for model in models]
def compute_pred_ll():
pred_ll = 0
for Xt, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xt, M=1)[0]
return pred_ll
init_results = (0, models, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
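# Each entry below is a (wall-clock time, sample, ll, test_ll, pred_ll) tuple;
# zip(*...) transposes the list of per-iteration tuples into five parallel arrays.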
times, samples, lls, test_lls, pred_lls = \
map(np.array, zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_hmm(Xs, Xtest, D_hmm, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting HMM with %d states" % D_hmm)
models = [MultinomialHMM(K, D_hmm, alpha_0=10.0) for _ in xrange(Nx)]
for X, model in zip(Xs, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xtr, Xte, model in zip(Xs, Xtest, models):
pred_ll += model.log_likelihood(np.vstack((Xtr, Xte))) - model.log_likelihood(Xtr)
return pred_ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
map(np.array, zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_gaussian_lds_model(Xs, Xtest, D_gauss_lds, N_samples=100):
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting Gaussian (Raw) LDS with %d states" % D_gauss_lds)
from pylds.models import NonstationaryLDS
models = [NonstationaryLDS(
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)))
for _ in xrange(Nx)]
Xs_centered = [X - np.mean(X, axis=0)[None,:] + 1e-3*np.random.randn(*X.shape) for X in Xs]
for X, model in zip(Xs_centered, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xtr, Xte, model in zip(Xs_centered, Xtest, models):
# Monte Carlo sample to get pi density implied by Gaussian LDS
Npred = 10
Tpred = Xte.shape[0]
preds = model.sample_predictions(Xtr, Tpred, Npred=Npred)
# Convert predictions to a distribution by finding the
# largest dimension for each predicted Gaussian.
# Preds is T x K x Npred, inds is TxNpred
inds = np.argmax(preds, axis=1)
pi = np.array([np.bincount(inds[t], minlength=K) for t in xrange(Tpred)]) / float(Npred)
assert np.allclose(pi.sum(axis=1), 1.0)
pi = np.clip(pi, 1e-8, 1.0)
pi /= pi.sum(axis=1)[:,None]
# Compute the log likelihood under pi
pred_ll += np.sum([Multinomial(weights=pi[t], K=K).log_likelihood(Xte[t][None,:])
for t in xrange(Tpred)])
return pred_ll
# TODO: Get initial pred ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
map(np.array, zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_ln_lds_model(Xs, Xtest, D, N_samples=100):
"""
Fit a logistic normal LDS model with pMCMC
"""
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting Logistic Normal LDS with %d states" % D)
mus = [X.sum(0) + 0.1 for X in Xs]
mus = [np.log(mu/mu.sum()) for mu in mus]
models = [LogisticNormalMultinomialLDS(
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=D*np.eye(D),M_0=np.zeros((D,D)),K_0=D*np.eye(D)),
emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)),
sigma_C=1.0, mu=mu) \
for mu in mus]
for model in models:
model.A = 0.5*np.eye(D)
model.sigma_states = np.eye(D)
model.C = 1.0*np.random.randn(K,D)
model.sigma_obs = 0.1*np.eye(K)
for X, model in zip(Xs, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xte, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xte, Npred=1)[0]
return pred_ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
map(np.array, zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def fit_lds_model_with_pmcmc(Xs, Xtest, D, N_samples=100):
"""
Fit a logistic normal LDS model with pMCMC
"""
Nx = len(Xs)
assert len(Xtest) == Nx
print("Fitting SBM-LDS with %d states using pMCMC" % D)
models = [ParticleSBMultinomialLDS(
init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
dynamics_distn=AutoRegression(nu_0=D+1,S_0=D*np.eye(D),M_0=np.zeros((D,D)),K_0=D*np.eye(D)),
emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)),
mu=pi_to_psi(np.ones(K)/K),
sigma_C=1.0)
for _ in xrange(Nx)]
for model in models:
model.A = 0.5*np.eye(D)
model.sigma_states = np.eye(D)
model.C = np.random.randn(K-1,D)
model.sigma_obs = 0.1*np.eye(K)
for X, model in zip(Xs, models):
model.add_data(X)
def compute_pred_ll():
pred_ll = 0
for Xte, model in zip(Xtest, models):
pred_ll += model.predictive_log_likelihood(Xte, Npred=100)[0]
return pred_ll
init_results = (0, None, np.nan, np.nan, compute_pred_ll())
def resample():
tic = time.time()
[model.resample_model() for model in models]
toc = time.time() - tic
return toc, None, np.nan, np.nan, compute_pred_ll()
times, samples, lls, test_lls, pred_lls = \
map(np.array, zip(*([init_results] +
[resample() for _ in progprint_xrange(N_samples, perline=5)])))
timestamps = np.cumsum(times)
return Results(lls, test_lls, pred_lls, samples, timestamps)
def plot_log_likelihood(results, names, results_dir, outname="pred_ll_vs_time.pdf"):
# Plot the log likelihood
plt.figure(figsize=(3,3.2))
for i,(result, name) in enumerate(zip(results, names)):
plt.plot(result.timestamps, result.lls, lw=2, color=colors[i], label=name)
# plt.plot(gauss_lds_lls, lw=2, color=colors[2], label="Gaussian LDS")
plt.legend(loc="lower right")
plt.xlabel('Time (s)')
plt.ylabel("Log Likelihood")
# plt.title("Chr22 DNA Model")
plt.savefig(os.path.join(results_dir, outname))
plt.tight_layout()
def plot_pred_log_likelihood(results, names, results_dir, outname="pred_ll_vs_time.pdf", smooth=True):
# Plot the log likelihood
plt.figure(figsize=(3,3.2))
for i,(result, name) in enumerate(zip(results, names)):
if result.pred_lls.ndim == 2:
pred_ll = result.pred_lls[:,0]
else:
pred_ll = result.pred_lls
# Smooth the log likelihood
if smooth:
win = 10
pad_pred_ll = np.concatenate((pred_ll[0] * np.ones(win), pred_ll))
smooth_pred_ll = np.array([logsumexp(pad_pred_ll[j-win:j+1])-np.log(win)
for j in xrange(win, pad_pred_ll.size)])
plt.plot(np.clip(result.timestamps, 1e-3,np.inf), smooth_pred_ll,
lw=2, color=colors[i], label=name)
else:
plt.plot(np.clip(result.timestamps, 1e-3,np.inf), result.pred_lls,
lw=2, color=colors[i], label=name)
# if result.pred_lls.ndim == 2:
# plt.errorbar(np.clip(result.timestamps, 1e-3,np.inf),
# result.pred_lls[:,0],
# yerr=result.pred_lls[:,1],
# lw=2, color=colors[i], label=name)
# else:
# plt.plot(np.clip(result.timestamps, 1e-3,np.inf), result.pred_lls, lw=2, color=colors[i], label=name)
# plt.plot(gauss_lds_lls, lw=2, color=colors[2], label="Gaussian LDS")
# plt.legend(loc="lower right")
plt.xlabel('Time (s)')
plt.xscale("log")
plt.ylabel("Pred. Log Likelihood")
# plt.ylim(-700, -500)
# plt.title("Chr22 DNA Model")
plt.savefig(os.path.join(results_dir, outname))
plt.tight_layout()
def plot_pred_ll_vs_D(all_results, Ds, Xtrain, Xtest,
results_dir, models=None):
# Create a big matrix of shape (len(Ds) x 5 x T) for the pred lls
N = len(Ds) # Number of dimensions tests
M = len(all_results[0]) # Number of models tested
T = len(all_results[0][0].pred_lls) # Number of MCMC iters
pred_lls = np.zeros((N,M,T))
for n in xrange(N):
for m in xrange(M):
if all_results[n][m].pred_lls.ndim == 2:
pred_lls[n,m] = all_results[n][m].pred_lls[:,0]
else:
pred_lls[n,m] = all_results[n][m].pred_lls
# Compute the mean and standard deviation on burned in samples
burnin = T // 2
pred_ll_mean = logsumexp(pred_lls[:,:,burnin:], axis=-1) - np.log(T-burnin)
# Use bootstrap to compute error bars
pred_ll_std = np.zeros_like(pred_ll_mean)
for n in xrange(N):
for m in xrange(M):
samples = np.random.choice(pred_lls[n,m,burnin:], size=(100, (T-burnin)), replace=True)
pll_samples = logsumexp(samples, axis=1) - np.log(T-burnin)
pred_ll_std[n,m] = pll_samples.std()
# Get the baseline pred ll
baseline = 0
normalizer = 0
for Xtr, Xte in zip(Xtrain, Xtest):
pi_emp = Xtr.sum(0) / float(Xtr.sum())
pi_emp = np.clip(pi_emp, 1e-8, np.inf)
pi_emp /= pi_emp.sum()
baseline += Multinomial(weights=pi_emp, K=Xtr.shape[1]).log_likelihood(Xte).sum()
normalizer += Xte.sum()
# Make a bar chart with errorbars
from hips.plotting.layout import create_figure
fig = create_figure(figsize=(1.25,2.5), transparent=True)
fig.set_tight_layout(True)
ax = fig.add_subplot(111)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
width = np.min(np.diff(Ds)) / (M+1.0) if len(Ds)>1 else 1.
for m in xrange(M):
ax.bar(Ds+m*width,
(pred_ll_mean[:,m] - baseline) / normalizer,
yerr=pred_ll_std[:,m] / normalizer,
width=0.9*width, color=colors[m], ecolor='k')
#
# ax.text(Ds+(m-1)*width, yloc, rankStr, horizontalalignment=align,
# verticalalignment='center', color=clr, weight='bold')
# Plot the zero line
ax.plot([Ds.min()-width, Ds.max()+(M+1)*width], np.zeros(2), '-k')
# Set the tick labels
ax.set_xlim(Ds.min()-width, Ds.max()+(M+1)*width)
# ax.set_xticks(Ds + (M*width)/2.)
# ax.set_xticklabels(Ds)
# ax.set_xticks(Ds + width * np.arange(M) + width/2. )
# ax.set_xticklabels(models, rotation=45)
ax.set_xticks([])
# ax.set_xlabel("D")
ax.set_ylabel("Pred. Log Lkhd. (nats/word)")
ax.set_title("AP News")
plt.savefig(os.path.join(results_dir, "pred_ll_vs_D.pdf"))
def compute_singular_vectors(model, words):
# Compute the left and right singular vectors of the model's
# dynamics matrix, A, then project these through C to get the
# corresponding vector psi, which can be transformed into a
# vector of word probabilities, pi, and sorted.
A, C, mu = model.A, model.C, model.emission_distn.mu
U,S,V = np.linalg.svd(A)
def top_k(k, pi):
# Get the top k words ranked by pi
perm = np.argsort(pi)[::-1]
return words[perm][:k]
for d in xrange(min(5, A.shape[0])):
ud = U[:,d]
vd = V[d,:]
psi_ud = C.dot(ud) + mu
psi_vd = C.dot(vd) + mu
from pgmult.internals.utils import psi_to_pi
baseline = psi_to_pi(mu)
pi_ud = psi_to_pi(psi_ud) - baseline
pi_vd = psi_to_pi(psi_vd) - baseline
# pi_ud = C.dot(ud)
# pi_vd = C.dot(vd)
print("")
print("Singular vector ", d, " Singular value, ", S[d])
print("Right: ")
print(top_k(5, pi_vd))
print("Left: ")
print(top_k(5, pi_ud))
if __name__ == "__main__":
run = 3
results_dir = os.path.join("results", "ap_indiv", "run%03d" % run)
# Make sure the results directory exists
from pgmult.internals.utils import mkdir
if not os.path.exists(results_dir):
print("Making results directory: ", results_dir)
mkdir(results_dir)
# Load the AP news documents
Xs, words = load()
# N_docs = 1
docs = slice(0,20)
T_split = 10
# Filter out documents shorter than 5 * T_split
Xfilt = filter(lambda X: X.shape[0] > 5*T_split, Xs)
Xtrain = [X[:-T_split] for X in Xfilt[docs]]
Xtest = [X[-T_split:] for X in Xfilt[docs]]
# Perform inference for a range of latent state dimensions and models
N_samples = 200
all_results = []
Ds = np.array([10])
models = ["SBM-LDS", "HMM", "Raw LDS" , "LNM-LDS"]
methods = [fit_lds_model, fit_hmm, fit_gaussian_lds_model, fit_ln_lds_model]
# models = ["SBM-LDS", "HMM", "LNM-LDS"]
# methods = [fit_lds_model, fit_hmm, fit_ln_lds_model]
for D in Ds:
D_results = []
for model, method in zip(models, methods):
results_file = os.path.join(results_dir, "results_%s_D%d.pkl.gz" % (model, D))
if os.path.exists(results_file):
print("Loading from: ", results_file)
with gzip.open(results_file, "r") as f:
D_model_results = cPickle.load(f)
else:
print("Fitting ", model, " for D=",D)
D_model_results = method(Xtrain, Xtest, D, N_samples)
with gzip.open(results_file, "w") as f:
print("Saving to: ", results_file)
cPickle.dump(D_model_results, f, protocol=-1)
D_results.append(D_model_results)
all_results.append(D_results)
# Plot log likelihoods for the results using one D
res_index = 0
# plot_log_likelihood(all_results[res_index],
# models,
# results_dir,
# outname="train_ll_vs_time_D%d.pdf" % Ds[res_index])
#
plot_pred_log_likelihood(all_results[res_index],
models,
results_dir,
outname="pred_ll_vs_time_D%d.pdf" % Ds[res_index])
# Make a bar chart of all the results
plot_pred_ll_vs_D(all_results, Ds, Xtrain, Xtest, results_dir, models)
plt.show()
# Compute the singular vectors
print("Doc 0")
print(np.array(words)[np.where(Xfilt[docs][0])[1]])
compute_singular_vectors(all_results[0][0].samples[0][0], np.array(words))
|
|
# -*- coding: utf-8 -*-
# =============================================================================
# Tasks to be callable async
# =============================================================================
tasks = {}
# -----------------------------------------------------------------------------
def maintenance(period="daily"):
"""
Run all maintenance tasks which should be done daily
- these are read from the template
"""
mod = "applications.%s.private.templates.%s.maintenance as maintenance" % \
(appname, settings.get_template())
try:
exec("import %s" % mod)
except ImportError, e:
# No Custom Maintenance available, use the default
exec("import applications.%s.private.templates.default.maintenance as maintenance" % appname)
if period == "daily":
result = maintenance.Daily()()
else:
result = "NotImplementedError"
db.commit()
return result
tasks["maintenance"] = maintenance
# -----------------------------------------------------------------------------
if settings.has_module("doc"):
# -----------------------------------------------------------------------------
def document_create_index(document, user_id=None):
import os
from xlrd import open_workbook
from pyth.plugins.rtf15.reader import Rtf15Reader
from pyth.plugins.plaintext.writer import PlaintextWriter
import sunburnt
document = json.loads(document)
table = s3db.doc_document
id = document["id"]
name = document["name"]
filename = document["filename"]
filename = "%s/%s/uploads/%s" % (os.path.abspath("applications"), \
request.application, filename)
si = sunburnt.SolrInterface(settings.get_base_solr_url())
extension = os.path.splitext(filename)[1][1:]
if extension == "pdf":
data = os.popen("pdf2txt.py " + filename).read()
elif extension == "doc":
data = os.popen("antiword " + filename).read()
elif extension == "xls":
wb = open_workbook(filename)
data=" "
for s in wb.sheets():
for row in range(s.nrows):
values = []
for col in range(s.ncols):
values.append(str(s.cell(row, col).value))
data = data + ",".join(values) + "\n"
elif extension == "rtf":
doct = Rtf15Reader.read(open(filename))
data = PlaintextWriter.write(doct).getvalue()
else:
data = os.popen("strings " + filename).read()
# The text needs to be in unicode or ascii, with no control characters
data = str(unicode(data, errors="ignore"))
data = "".join(c if ord(c) >= 32 else " " for c in data)
# Put the data according to the Multiple Fields
# @ToDo: Also, would change this according to requirement of Eden
document = {"id": str(id), # doc_document.id
"name": data, # the data of the file
"url": filename, # the encoded file name stored in uploads/
"filename": name, # the filename actually uploaded by the user
"filetype": extension # x.pdf -> pdf is the extension of the file
}
# Add and commit Indices
si.add(document)
si.commit()
# After Indexing, set the value for has_been_indexed to True in the database
db(table.id == id).update(has_been_indexed = True)
db.commit()
tasks["document_create_index"] = document_create_index
# -----------------------------------------------------------------------------
def document_delete_index(document, user_id=None):
import sunburnt
document = json.loads(document)
table = s3db.doc_document
id = document["id"]
filename = document["filename"]
si = sunburnt.SolrInterface(settings.get_base_solr_url())
# Delete and commit the indices of the deleted document
si.delete(id)
si.commit()
# After removing the index, set has_been_indexed value to False in the database
db(table.id == id).update(has_been_indexed = False)
db.commit()
tasks["document_delete_index"] = document_delete_index
# -----------------------------------------------------------------------------
def gis_download_kml(record_id, filename, session_id_name, session_id,
user_id=None):
"""
Download a KML file
- will normally be done Asynchronously if there is a worker alive
@param record_id: id of the record in db.gis_layer_kml
@param filename: name to save the file as
@param session_id_name: name of the session
@param session_id: id of the session
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = gis.download_kml(record_id, filename, session_id_name, session_id)
db.commit()
return result
tasks["gis_download_kml"] = gis_download_kml
# -----------------------------------------------------------------------------
def gis_update_location_tree(feature, user_id=None):
"""
Update the Location Tree for a feature
- will normally be done Asynchronously if there is a worker alive
@param feature: the feature (in JSON format)
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
feature = json.loads(feature)
path = gis.update_location_tree(feature)
db.commit()
return path
tasks["gis_update_location_tree"] = gis_update_location_tree
# -----------------------------------------------------------------------------
def org_facility_geojson(user_id=None):
"""
Export GeoJSON[P] Of Facility data
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
s3db.org_facility_geojson()
tasks["org_facility_geojson"] = org_facility_geojson
# -----------------------------------------------------------------------------
if settings.has_module("msg"):
# -------------------------------------------------------------------------
def msg_process_outbox(contact_method, user_id=None):
"""
Process Outbox
- will normally be done Asynchronously if there is a worker alive
@param contact_method: one from s3msg.MSG_CONTACT_OPTS
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = msg.process_outbox(contact_method)
db.commit()
return result
tasks["msg_process_outbox"] = msg_process_outbox
# -------------------------------------------------------------------------
def msg_twitter_search(search_id, user_id=None):
"""
Perform a Search of Twitter
- will normally be done Asynchronously if there is a worker alive
@param search_id: one of s3db.msg_twitter_search.id
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = msg.twitter_search(search_id)
db.commit()
return result
tasks["msg_twitter_search"] = msg_twitter_search
# -------------------------------------------------------------------------
def msg_process_keygraph(search_id, user_id=None):
"""
Process Twitter Search Results with KeyGraph
- will normally be done Asynchronously if there is a worker alive
@param search_id: one of s3db.msg_twitter_search.id
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = msg.process_keygraph(search_id)
db.commit()
return result
tasks["msg_process_keygraph"] = msg_process_keygraph
# -------------------------------------------------------------------------
def msg_poll(tablename, channel_id, user_id=None):
"""
Poll an inbound channel
"""
if user_id:
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = msg.poll(tablename, channel_id)
db.commit()
return result
tasks["msg_poll"] = msg_poll
# -----------------------------------------------------------------------------
def msg_parse(channel_id, function_name, user_id=None):
"""
Parse Messages coming in from a Source Channel
"""
if user_id:
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = msg.parse(channel_id, function_name)
db.commit()
return result
tasks["msg_parse"] = msg_parse
# -------------------------------------------------------------------------
def notify_check_subscriptions(user_id=None):
"""
Scheduled task to check subscriptions for updates,
creates notify_notify tasks where updates exist.
"""
if user_id:
auth.s3_impersonate(user_id)
notify = s3base.S3Notifications()
return notify.check_subscriptions()
tasks["notify_check_subscriptions"] = notify_check_subscriptions
# -------------------------------------------------------------------------
def notify_notify(resource_id, user_id=None):
"""
Asynchronous task to notify a subscriber about resource
updates. This task is created by notify_check_subscriptions.
@param resource_id: id of the subscribed resource to notify about
@param user_id: calling request's auth.user.id or None
"""
if user_id:
auth.s3_impersonate(user_id)
notify = s3base.S3Notifications
return notify.notify(resource_id)
tasks["notify_notify"] = notify_notify
# -----------------------------------------------------------------------------
if settings.has_module("req"):
def req_add_from_template(req_id, user_id=None):
"""
Add a Request from template
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.req_add_from_template(req_id)
db.commit()
return result
tasks["req_add_from_template"] = req_add_from_template
# -----------------------------------------------------------------------------
if settings.has_module("setup"):
def deploy(playbook, private_key, host=["127.0.0.1"], only_tags="all", user_id=None):
pb = s3db.setup_create_playbook(playbook, host, private_key, only_tags)
pb.run()
processed_hosts = sorted(pb.stats.processed.keys())
for h in processed_hosts:
t = pb.stats.summarize(h)
if t["failures"] > 0:
raise Exception("One of the tasks failed")
elif t["unreachable"] > 0:
raise Exception("Host unreachable")
tasks["deploy"] = deploy
def setup_management(_type, instance_id, deployment_id, user_id=None):
import ansible.runner
s3db = current.s3db
db = current.db
# get all servers associated
stable = s3db.setup_server
servers = db(stable.deployment_id == deployment_id).select(stable.role,
stable.host_ip,
orderby=stable.role
)
# get deployment
dtable = s3db.setup_deployment
deployment = db(dtable.id == deployment_id).select(dtable.private_key,
dtable.remote_user,
limitby=(0, 1)).first()
private_key = os.path.join(current.request.folder, "uploads", deployment.private_key)
hosts = [server.host_ip for server in servers]
inventory = ansible.inventory.Inventory(hosts)
tasks = []
runner = ansible.runner.Runner
itable = s3db.setup_instance
instance = db(itable.id == instance_id).select(itable.type,
limitby=(0, 1)).first()
instance_types = ["prod", "test", "demo"]
if _type == "clean":
host_ip = servers[0].host_ip
arguments = [dict(module_name = "service",
module_args={"name": "uwsgi",
"status": "stop",
},
remote_user=deployment.remote_user,
private_key_file=private_key,
pattern=host_ip,
inventory=inventory,
sudo=True
),
dict(module_name = "command",
module_args="clean %s" % instance_types[instance.type - 1],
remote_user=deployment.remote_user,
private_key_file=private_key,
pattern=host_ip,
inventory=inventory,
sudo=True
),
dict(module_name = "command",
module_args="clean_eden %s" % instance_types[instance.type - 1],
remote_user=deployment.remote_user,
private_key_file=private_key,
pattern=servers[0].host_ip,
inventory=inventory,
sudo=True
),
dict(module_name = "service",
module_args={"name": "uwsgi",
"status": "start",
},
remote_user=deployment.remote_user,
private_key_file=private_key,
pattern=host_ip,
inventory=inventory,
sudo=True
),
]
if len(servers) > 1:
host_ip = servers[2].host_ip
arguments[0]["pattern"] = host_ip
arguments[2]["pattern"] = host_ip
arguments[3]["pattern"] = host_ip
for argument in arguments:
tasks.append(runner(**argument))
# run the tasks
for task in tasks:
response = task.run()
if response["dark"]:
raise Exception("Error contacting the server")
elif _type == "eden":
argument = dict(module_name="command",
module_args="pull %s" % [instance_types[instance.type - 1]],
remote_user=deployment.remote_user,
private_key_file=private_key,
pattern=servers[0].host_ip,
inventory=inventory,
sudo=True
)
if len(servers) > 1:
argument["pattern"] = servers[2].host_ip
task = runner(**argument)
response = task.run()
if response["dark"]:
raise Exception("Error contacting the server")
tasks["setup_management"] = setup_management
# -----------------------------------------------------------------------------
if settings.has_module("stats"):
def stats_demographic_update_aggregates(records=None, user_id=None):
"""
Update the stats_demographic_aggregate table for the given
stats_demographic_data record(s)
@param records: JSON of Rows of stats_demographic_data records to
update aggregates for
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.stats_demographic_update_aggregates(records)
db.commit()
return result
tasks["stats_demographic_update_aggregates"] = stats_demographic_update_aggregates
# -------------------------------------------------------------------------
def stats_demographic_update_location_aggregate(location_level,
root_location_id,
parameter_id,
start_date,
end_date,
user_id=None):
"""
Update the stats_demographic_aggregate table for the given location and parameter
- called from within stats_demographic_update_aggregates
@param location_level: gis level at which the data needs to be accumulated
@param root_location_id: id of the location
@param parameter_id: parameter for which the stats are being updated
@param start_date: start date of the period in question
@param end_date: end date of the period in question
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.stats_demographic_update_location_aggregate(location_level,
root_location_id,
parameter_id,
start_date,
end_date,
)
db.commit()
return result
tasks["stats_demographic_update_location_aggregate"] = stats_demographic_update_location_aggregate
# -------------------------------------------------------------------------
if settings.has_module("vulnerability"):
def vulnerability_update_aggregates(records=None, user_id=None):
"""
Update the vulnerability_aggregate table for the given
vulnerability_data record(s)
@param records: JSON of Rows of vulnerability_data records to update aggregates for
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.vulnerability_update_aggregates(records)
db.commit()
return result
tasks["vulnerability_update_aggregates"] = vulnerability_update_aggregates
# ---------------------------------------------------------------------
def vulnerability_update_location_aggregate(#location_level,
root_location_id,
parameter_id,
start_date,
end_date,
user_id=None):
"""
Update the vulnerability_aggregate table for the given location and parameter
- called from within vulnerability_update_aggregates
@param location_level: gis level at which the data needs to be accumulated
@param root_location_id: id of the location
@param parameter_id: parameter for which the stats are being updated
@param start_date: start date of the period in question
@param end_date: end date of the period in question
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.vulnerability_update_location_aggregate(#location_level,
root_location_id,
parameter_id,
start_date,
end_date,
)
db.commit()
return result
tasks["vulnerability_update_location_aggregate"] = vulnerability_update_location_aggregate
# -----------------------------------------------------------------------------
if settings.has_module("disease"):
def disease_stats_update_aggregates(records=None, all=False, user_id=None):
"""
Update the disease_stats_aggregate table for the given
disease_stats_data record(s)
@param records: JSON of Rows of disease_stats_data records to
update aggregates for
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.disease_stats_update_aggregates(records, all)
db.commit()
return result
tasks["disease_stats_update_aggregates"] = disease_stats_update_aggregates
# -------------------------------------------------------------------------
def disease_stats_update_location_aggregates(location_id,
children,
parameter_id,
dates,
user_id=None):
"""
Update the disease_stats_aggregate table for the given location and parameter
- called from within disease_stats_update_aggregates
@param location_id: location to aggregate at
@param children: locations to aggregate from
@param parameter_id: parameter to aggregate
@param dates: dates to aggregate for
@param user_id: calling request's auth.user.id or None
"""
if user_id:
# Authenticate
auth.s3_impersonate(user_id)
# Run the Task & return the result
result = s3db.disease_stats_update_location_aggregates(location_id,
children,
parameter_id,
dates,
)
db.commit()
return result
tasks["disease_stats_update_location_aggregates"] = disease_stats_update_location_aggregates
# -----------------------------------------------------------------------------
if settings.has_module("sync"):
def sync_synchronize(repository_id, user_id=None, manual=False):
"""
Run all tasks for a repository, to be called from scheduler
"""
auth.s3_impersonate(user_id)
rtable = s3db.sync_repository
query = (rtable.deleted != True) & \
(rtable.id == repository_id)
repository = db(query).select(limitby=(0, 1)).first()
if repository:
sync = s3base.S3Sync()
status = sync.get_status()
if status.running:
message = "Synchronization already active - skipping run"
sync.log.write(repository_id=repository.id,
resource_name=None,
transmission=None,
mode=None,
action="check",
remote=False,
result=sync.log.ERROR,
message=message)
db.commit()
return sync.log.ERROR
sync.set_status(running=True, manual=manual)
try:
sync.synchronize(repository)
finally:
sync.set_status(running=False, manual=False)
db.commit()
return s3base.S3SyncLog.SUCCESS
tasks["sync_synchronize"] = sync_synchronize
# -----------------------------------------------------------------------------
# Instantiate Scheduler instance with the list of tasks
s3.tasks = tasks
s3task = s3base.S3Task()
current.s3task = s3task
# -----------------------------------------------------------------------------
# Reusable field for scheduler task links
scheduler_task_id = S3ReusableField("scheduler_task_id",
"reference %s" % s3base.S3Task.TASK_TABLENAME,
ondelete="CASCADE")
s3.scheduler_task_id = scheduler_task_id
# END =========================================================================
|
|
import logging,traceback,os,platform,ctypes
''' Joystick abstraction layer '''
HAT_N = 1
HAT_E = 2
HAT_S = 4
HAT_W = 8
HAT_NE = HAT_N | HAT_E
HAT_SE = HAT_S | HAT_E
HAT_SW = HAT_S | HAT_W
HAT_NW = HAT_N | HAT_W
class Joystick:
MAX_AXIS = 16
def __init__(self, nameOrIndex):
self._axishistory = []
for i in range(Joystick.MAX_AXIS):
self._axishistory.append([])
if isinstance(nameOrIndex, int):
if nameOrIndex < numJoysticks():
index = nameOrIndex
else:
for j in range(0, numJoysticks()) :
if nameOrIndex == _joysticks[j].name:
index = j
try:
self.index = index
except:
raise EnvironmentError("joysticks.get('%s') is not available" % nameOrIndex)
self._handle = ctypes.c_void_p()
# see http://nedbatchelder.com/text/unipain.html on Python 2/3 differences on unicode handling re decode()
self.name = _sdl.SDL_JoystickName(self.index).decode().strip()
def _acquire(self):
if self._handle:
return
self._handle = _sdl.SDL_JoystickOpen(self.index)
if not self._handle:
raise EnvironmentError("joysticks.get('%s') can't be acquired" % self.index)
def numAxis(self):
return _sdl.SDL_JoystickNumAxes(self._handle)
def getHat(self, i):
return _sdl.SDL_JoystickGetHat(self._handle, i)
def getAxis(self, i, deadzone=0.01, smoothing=1):
assert i<Joystick.MAX_AXIS
val = _sdl.SDL_JoystickGetAxis(self._handle, i) / float(32767)
deadzone = abs(deadzone)
if val < -1+deadzone:
val = -1
if val > 1-deadzone:
val = 1
assert not smoothing<1
history = self._axishistory[i]
if len(history)>=smoothing:
del history[0:len(history)-smoothing]
if smoothing==1:
return val
history.append(val)
return sum(history)/len(history)
def setAxis(self, a, value):
raise EnvironmentError("%s is not a virtual voystick" % self.name)
def numButtons(self):
return _sdl.SDL_JoystickNumButtons(self._handle)
def getButton(self, i):
# no idea why but if I don't touch the logging subsystem here then Python 2.7 simply bails without exit hooks
_log.isEnabledFor(0)
return _sdl.SDL_JoystickGetButton(self._handle, i)
def setButton(self, b, value):
raise EnvironmentError("%s is not a virtual voystick" % self.name)
def _sync(self):
pass
def __str__(self):
# button/axis information isn't available before acquired
return "joysticks.get('%s') # index %d" % (self.name, self.index)
class VirtualJoystick:
_DEVICE_NAME = 'vJoy Device'
_AXIS_KEYS = [
(0x30, "wAxisX"),
(0x31, "wAxisY"),
(0x32, "wAxisZ"),
(0x33, "wAxisXRot"),
(0x34, "wAxisYRot"),
(0x35, "wAxisZRot"),
(0x36, "wSlider"),
(0x37, "wDial"),
(0x38, "wWheel")
]
class Position(ctypes.Structure):
_fields_ = [
("index", ctypes.c_byte),
("wThrottle", ctypes.c_long),
("wRudder", ctypes.c_long),
("wAileron", ctypes.c_long),
("wAxisX", ctypes.c_long),
("wAxisY", ctypes.c_long),
("wAxisZ", ctypes.c_long),
("wAxisXRot", ctypes.c_long),
("wAxisYRot", ctypes.c_long),
("wAxisZRot", ctypes.c_long),
("wSlider", ctypes.c_long),
("wDial", ctypes.c_long),
("wWheel", ctypes.c_long),
("wAxisVX", ctypes.c_long),
("wAxisVY", ctypes.c_long),
("wAxisVZ", ctypes.c_long),
("wAxisVBRX", ctypes.c_long),
("wAxisVBRY", ctypes.c_long),
("wAxisVBRZ", ctypes.c_long),
("lButtons", ctypes.c_long), # 32 buttons: 0x00000001 to 0x80000000
("bHats", ctypes.c_long), # Lower 4 bits: HAT switch or 16-bit of continuous HAT switch
("bHatsEx1", ctypes.c_long), # Lower 4 bits: HAT switch or 16-bit of continuous HAT switch
("bHatsEx2", ctypes.c_long), # Lower 4 bits: HAT switch or 16-bit of continuous HAT switch
("bHatsEx3", ctypes.c_long) # Lower 4 bits: HAT switch or 16-bit of continuous HAT switch
]
def __init__(self, joystick, virtualIndex):
self.index = joystick.index
self.name = joystick.name
self._position = VirtualJoystick.Position()
self._position.index = virtualIndex+1
self._acquired = False
self._dirty = False
self._buttons = _vjoy.GetVJDButtonNumber(self._position.index)
self._axis = []
for akey, pkey in VirtualJoystick._AXIS_KEYS:
if _vjoy.GetVJDAxisExist(self._position.index, akey):
amax = ctypes.c_long()
amin = ctypes.c_long()
_vjoy.GetVJDAxisMin(self._position.index, akey, ctypes.byref(amin))
_vjoy.GetVJDAxisMax(self._position.index, akey, ctypes.byref(amax))
self._axis.append((pkey, amin.value,amax.value))
self._position.__setattr__(pkey, int(amin.value + (amax.value-amin.value)/2))
def _acquire(self):
if self._acquired:
return
if not _vjoy.AcquireVJD(self._position.index):
raise EnvironmentError("joysticks.get('%s') is not a free Virtual Joystick" % self.index)
self._acquired = True
def numAxis(self):
return len(self._axis)
def getAxis(self, i):
if i<0 or i>=len(self._axis):
raise EnvironmentError("joysticks.get('%s') doesn't have axis %d" % i)
pkey, amin, amax = self._axis[i]
return (self._position.__getattribute__(pkey) - amin) / (amax-amin) * 2 - 1
def setAxis(self, a, value):
if a<0 or a>=len(self._axis):
raise EnvironmentError("joysticks.get('%s') doesn't have axis %d" % (self.index, a))
if value < -1 or value > 1:
raise EnvironmentError("joysticks.get('%s') value for axis %d not -1.0 < %d < 1.0" % (self.index, a, value))
pkey, amin, amax = self._axis[a]
self._position.__setattr__(pkey, int( (value+1)/2 * (amax-amin) + amin))
self._dirty = True
def numButtons(self):
return self._buttons
def getButton(self, i):
if i<0 or i>=self._buttons:
raise EnvironmentError("joysticks.get('%s') doesn't have button %d" % (self.name, i))
return self._position.lButtons & (1<<i)
def setButton(self, i, value):
if i<0 or i>=self._buttons:
raise EnvironmentError("joysticks.get('%s') doesn't have button %d" % (self.name, i))
if value:
self._position.lButtons |= 1<<i
else:
self._position.lButtons &= ~(1<<i)
self._dirty = True
def _sync(self):
if not self._dirty:
return
if not self._acquired:
return
if not _vjoy.UpdateVJD(self._position.index, ctypes.byref(self._position)):
_log.warning("joysticks.get('%s') couldn't be set" % self.name)
self._acquired = False
self._dirty = False
def __str__(self):
return "joysticks.get('%s') # VirtualJoystick index %d" % (self.name, self.index)
def numJoysticks():
if not _sdl:
return 0
return max(_sdl.SDL_NumJoysticks(), len(_joysticks))
def get(nameOrIndex):
try:
joy = _name2joystick[nameOrIndex if isinstance(nameOrIndex,int) else nameOrIndex.lower()]
except:
raise EnvironmentError("No joystick %s" % nameOrIndex)
joy._acquire()
return joy
def button(nameOrIndexAndButton):
""" test button eg button 1 of Saitek Pro Flight Quadrant via button('Saitek Pro Flight Quadrant.1') """
nameOrIndex, button = nameOrIndexAndButton.split(".")
return get(nameOrIndex).getButton(int(button))
def sync():
if _sdl:
_sdl.SDL_JoystickUpdate()
for joy in _joysticks:
joy._sync()
def _init():
global _sdl, _vjoy, _log, _joysticks, _name2joystick
_sdl = None
_vjoy = None
_log = logging.getLogger(__name__)
_joysticks = []
_name2joystick = dict()
# preload all available joysticks for reporting
if not _sdl:
try:
_sdl = ctypes.CDLL(os.path.join("contrib","sdl",platform.architecture()[0],"SDL.dll"))
_sdl.SDL_Init(0x200)
_sdl.SDL_JoystickName.restype = ctypes.c_char_p
_sdl.SDL_JoystickGetAxis.restype = ctypes.c_int16
for index in range(0, _sdl.SDL_NumJoysticks()) :
joy = Joystick(index)
_joysticks.append(joy)
except Exception as e:
_log.warning("Cannot initialize support for physical Joysticks (%s)" % e)
_log.debug(traceback.format_exc())
# wrap virtual joysticks where applicable
if not _vjoy:
try:
_vjoy = ctypes.CDLL(os.path.join("contrib", "vjoy", platform.architecture()[0], "vJoyInterface.dll"))
if not _vjoy.vJoyEnabled():
_log.info("No Virtual Joystick Driver active")
else:
numVirtuals = 0
for i,joy in enumerate(_joysticks):
if joy.name == VirtualJoystick._DEVICE_NAME:
try:
virtual = VirtualJoystick(joy, numVirtuals)
_joysticks[i] = virtual
except Exception as e:
_log.warning("Cannot initialize support for virtual Joystick %s (%s)" % (joy.name, e))
_log.debug(traceback.format_exc())
numVirtuals += 1
except Exception as e:
_log.warning("Cannot initialize support for virtual Joysticks (%s)" % e)
_log.debug(traceback.format_exc())
# build dictionary
for joy in _joysticks:
_name2joystick[joy.name.lower()] = joy
_name2joystick[joy.index] = joy
_log.info(joy)
_init()
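# Illustrative usage sketch (assumes this file is importable as a module named
# `joysticks`, which is the name its own error messages use):
#
#   import joysticks
#   joysticks.sync()                 # pump SDL events / flush pending virtual-stick updates
#   joy = joysticks.get(0)           # by index, or by name, e.g. get('vJoy Device')
#   print(joy.numAxis(), joy.numButtons())
#   x = joy.getAxis(0)               # physical sticks also accept deadzone= and smoothing=
#   pressed = joy.getButton(0)
#
# Virtual joysticks additionally support setAxis()/setButton(); call sync()
# afterwards so the buffered Position struct is written out via UpdateVJD.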
|
|
"""Philips Hue lights platform tests."""
import asyncio
import logging
import aiohue
from homeassistant import config_entries
from homeassistant.components import hue
from homeassistant.components.hue import light as hue_light
from homeassistant.util import color
from tests.async_mock import Mock
_LOGGER = logging.getLogger(__name__)
HUE_LIGHT_NS = "homeassistant.components.light.hue."
GROUP_RESPONSE = {
"1": {
"name": "Group 1",
"lights": ["1", "2"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 254,
"hue": 10000,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
"2": {
"name": "Group 2",
"lights": ["3", "4", "5"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 153,
"hue": 4345,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
}
LIGHT_1_CAPABILITIES = {
"certified": True,
"control": {
"mindimlevel": 5000,
"maxlumen": 600,
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
"ct": {"min": 153, "max": 500},
},
"streaming": {"renderer": True, "proxy": False},
}
LIGHT_1_ON = {
"state": {
"on": True,
"bri": 144,
"hue": 13088,
"sat": 212,
"xy": [0.5128, 0.4147],
"ct": 467,
"alert": "none",
"effect": "none",
"colormode": "xy",
"reachable": True,
},
"capabilities": LIGHT_1_CAPABILITIES,
"type": "Extended color light",
"name": "Hue Lamp 1",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "456",
}
LIGHT_1_OFF = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "xy",
"reachable": True,
},
"capabilities": LIGHT_1_CAPABILITIES,
"type": "Extended color light",
"name": "Hue Lamp 1",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "456",
}
LIGHT_2_CAPABILITIES = {
"certified": True,
"control": {
"mindimlevel": 5000,
"maxlumen": 600,
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
},
"streaming": {"renderer": True, "proxy": False},
}
LIGHT_2_OFF = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": LIGHT_2_CAPABILITIES,
"type": "Extended color light",
"name": "Hue Lamp 2",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
LIGHT_2_ON = {
"state": {
"on": True,
"bri": 100,
"hue": 13088,
"sat": 210,
"xy": [0.5, 0.4],
"ct": 420,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": LIGHT_2_CAPABILITIES,
"type": "Extended color light",
"name": "Hue Lamp 2 new",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
LIGHT_RESPONSE = {"1": LIGHT_1_ON, "2": LIGHT_2_OFF}
LIGHT_RAW = {
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"swversion": "66009461",
}
LIGHT_GAMUT = color.GamutType(
color.XYPoint(0.704, 0.296),
color.XYPoint(0.2151, 0.7106),
color.XYPoint(0.138, 0.08),
)
LIGHT_GAMUT_TYPE = "A"
async def setup_bridge(hass, mock_bridge):
"""Load the Hue light platform with the provided bridge."""
hass.config.components.add(hue.DOMAIN)
config_entry = config_entries.ConfigEntry(
1,
hue.DOMAIN,
"Mock Title",
{"host": "mock-host"},
"test",
config_entries.CONN_CLASS_LOCAL_POLL,
system_options={},
)
mock_bridge.config_entry = config_entry
hass.data[hue.DOMAIN] = {config_entry.entry_id: mock_bridge}
await hass.config_entries.async_forward_entry_setup(config_entry, "light")
# To flush out the service call to update the group
await hass.async_block_till_done()
async def test_not_load_groups_if_old_bridge(hass, mock_bridge):
"""Test that we don't try to load gorups if bridge runs old software."""
mock_bridge.api.config.apiversion = "1.12.0"
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 0
async def test_no_lights_or_groups(hass, mock_bridge):
"""Test the update_lights function when no lights are found."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append({})
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 0
async def test_lights(hass, mock_bridge):
"""Test the update_lights function with some lights."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
# 2 lights
assert len(hass.states.async_all()) == 2
lamp_1 = hass.states.get("light.hue_lamp_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 145
assert lamp_1.attributes["hs_color"] == (36.067, 69.804)
lamp_2 = hass.states.get("light.hue_lamp_2")
assert lamp_2 is not None
assert lamp_2.state == "off"
async def test_lights_color_mode(hass, mock_bridge):
"""Test that lights only report appropriate color mode."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
lamp_1 = hass.states.get("light.hue_lamp_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 145
assert lamp_1.attributes["hs_color"] == (36.067, 69.804)
assert "color_temp" not in lamp_1.attributes
new_light1_on = LIGHT_1_ON.copy()
new_light1_on["state"] = new_light1_on["state"].copy()
new_light1_on["state"]["colormode"] = "ct"
mock_bridge.mock_light_responses.append({"1": new_light1_on})
mock_bridge.mock_group_responses.append({})
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_2"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
lamp_1 = hass.states.get("light.hue_lamp_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 145
assert lamp_1.attributes["color_temp"] == 467
assert "hs_color" not in lamp_1.attributes
async def test_groups(hass, mock_bridge):
"""Test the update_lights function with some lights."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
# 2 hue group lights
assert len(hass.states.async_all()) == 2
lamp_1 = hass.states.get("light.group_1")
assert lamp_1 is not None
assert lamp_1.state == "on"
assert lamp_1.attributes["brightness"] == 255
assert lamp_1.attributes["color_temp"] == 250
lamp_2 = hass.states.get("light.group_2")
assert lamp_2 is not None
assert lamp_2.state == "on"
async def test_new_group_discovered(hass, mock_bridge):
"""Test if 2nd update has a new group."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 2
new_group_response = dict(GROUP_RESPONSE)
new_group_response["3"] = {
"name": "Group 3",
"lights": ["3", "4", "5"],
"type": "LightGroup",
"action": {
"on": True,
"bri": 153,
"hue": 4345,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
}
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(new_group_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.group_1"}, blocking=True
)
# 2x group update, 1x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 4
assert len(hass.states.async_all()) == 3
new_group = hass.states.get("light.group_3")
assert new_group is not None
assert new_group.state == "on"
assert new_group.attributes["brightness"] == 154
assert new_group.attributes["color_temp"] == 250
async def test_new_light_discovered(hass, mock_bridge):
"""Test if 2nd update has a new light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 2
new_light_response = dict(LIGHT_RESPONSE)
new_light_response["3"] = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": LIGHT_1_CAPABILITIES,
"type": "Extended color light",
"name": "Hue Lamp 3",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "789",
}
mock_bridge.mock_light_responses.append(new_light_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 3
light = hass.states.get("light.hue_lamp_3")
assert light is not None
assert light.state == "off"
async def test_group_removed(hass, mock_bridge):
"""Test if 2nd update has removed group."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 2
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append({"1": GROUP_RESPONSE["1"]})
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.group_1"}, blocking=True
)
# 2x group update, 1x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 4
assert len(hass.states.async_all()) == 1
group = hass.states.get("light.group_1")
assert group is not None
removed_group = hass.states.get("light.group_2")
assert removed_group is None
async def test_light_removed(hass, mock_bridge):
"""Test if 2nd update has removed light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 2
mock_bridge.mock_light_responses.clear()
mock_bridge.mock_light_responses.append({"1": LIGHT_RESPONSE.get("1")})
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 1
light = hass.states.get("light.hue_lamp_1")
assert light is not None
removed_light = hass.states.get("light.hue_lamp_2")
assert removed_light is None
async def test_other_group_update(hass, mock_bridge):
"""Test changing one group that will impact the state of other light."""
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(GROUP_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 2
assert len(hass.states.async_all()) == 2
group_2 = hass.states.get("light.group_2")
assert group_2 is not None
assert group_2.name == "Group 2"
assert group_2.state == "on"
assert group_2.attributes["brightness"] == 154
assert group_2.attributes["color_temp"] == 250
updated_group_response = dict(GROUP_RESPONSE)
updated_group_response["2"] = {
"name": "Group 2 new",
"lights": ["3", "4", "5"],
"type": "LightGroup",
"action": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"effect": "none",
"xy": [0, 0],
"ct": 0,
"alert": "none",
"colormode": "ct",
},
"state": {"any_on": False, "all_on": False},
}
mock_bridge.mock_light_responses.append({})
mock_bridge.mock_group_responses.append(updated_group_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.group_1"}, blocking=True
)
# 2x group update, 1x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 4
assert len(hass.states.async_all()) == 2
group_2 = hass.states.get("light.group_2")
assert group_2 is not None
assert group_2.name == "Group 2 new"
assert group_2.state == "off"
async def test_other_light_update(hass, mock_bridge):
"""Test changing one light that will impact state of other light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 1
assert len(hass.states.async_all()) == 2
lamp_2 = hass.states.get("light.hue_lamp_2")
assert lamp_2 is not None
assert lamp_2.name == "Hue Lamp 2"
assert lamp_2.state == "off"
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response["2"] = {
"state": {
"on": True,
"bri": 100,
"hue": 13088,
"sat": 210,
"xy": [0.5, 0.4],
"ct": 420,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": LIGHT_2_CAPABILITIES,
"type": "Extended color light",
"name": "Hue Lamp 2 new",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
mock_bridge.mock_light_responses.append(updated_light_response)
# Calling a service will trigger the updates to run
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert len(hass.states.async_all()) == 2
lamp_2 = hass.states.get("light.hue_lamp_2")
assert lamp_2 is not None
assert lamp_2.name == "Hue Lamp 2 new"
assert lamp_2.state == "on"
assert lamp_2.attributes["brightness"] == 100
async def test_update_timeout(hass, mock_bridge):
"""Test bridge marked as not available if timeout error during update."""
mock_bridge.api.lights.update = Mock(side_effect=asyncio.TimeoutError)
mock_bridge.api.groups.update = Mock(side_effect=asyncio.TimeoutError)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
async def test_update_unauthorized(hass, mock_bridge):
"""Test bridge marked as not authorized if unauthorized during update."""
mock_bridge.api.lights.update = Mock(side_effect=aiohue.Unauthorized)
mock_bridge.api.groups.update = Mock(side_effect=aiohue.Unauthorized)
await setup_bridge(hass, mock_bridge)
assert len(mock_bridge.mock_requests) == 0
assert len(hass.states.async_all()) == 0
assert len(mock_bridge.handle_unauthorized_error.mock_calls) == 1
async def test_light_turn_on_service(hass, mock_bridge):
"""Test calling the turn on service on a light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
light = hass.states.get("light.hue_lamp_2")
assert light is not None
assert light.state == "off"
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response["2"] = LIGHT_2_ON
mock_bridge.mock_light_responses.append(updated_light_response)
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": "light.hue_lamp_2", "brightness": 100, "color_temp": 300},
blocking=True,
)
# 2x light update, 1 turn on request
assert len(mock_bridge.mock_requests) == 3
assert mock_bridge.mock_requests[1]["json"] == {
"bri": 100,
"on": True,
"ct": 300,
"alert": "none",
}
assert len(hass.states.async_all()) == 2
light = hass.states.get("light.hue_lamp_2")
assert light is not None
assert light.state == "on"
# test hue gamut in turn_on service
await hass.services.async_call(
"light",
"turn_on",
{"entity_id": "light.hue_lamp_2", "rgb_color": [0, 0, 255]},
blocking=True,
)
assert len(mock_bridge.mock_requests) == 5
assert mock_bridge.mock_requests[3]["json"] == {
"on": True,
"xy": (0.138, 0.08),
"alert": "none",
}
async def test_light_turn_off_service(hass, mock_bridge):
"""Test calling the turn on service on a light."""
mock_bridge.mock_light_responses.append(LIGHT_RESPONSE)
await setup_bridge(hass, mock_bridge)
light = hass.states.get("light.hue_lamp_1")
assert light is not None
assert light.state == "on"
updated_light_response = dict(LIGHT_RESPONSE)
updated_light_response["1"] = LIGHT_1_OFF
mock_bridge.mock_light_responses.append(updated_light_response)
await hass.services.async_call(
"light", "turn_off", {"entity_id": "light.hue_lamp_1"}, blocking=True
)
    # 2x light update, 1 turn off request
assert len(mock_bridge.mock_requests) == 3
assert mock_bridge.mock_requests[1]["json"] == {"on": False, "alert": "none"}
assert len(hass.states.async_all()) == 2
light = hass.states.get("light.hue_lamp_1")
assert light is not None
assert light.state == "off"
def test_available():
"""Test available property."""
light = hue_light.HueLight(
light=Mock(
state={"reachable": False},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
coordinator=Mock(last_update_success=True),
bridge=Mock(allow_unreachable=False),
is_group=False,
supported_features=hue_light.SUPPORT_HUE_EXTENDED,
)
assert light.available is False
light = hue_light.HueLight(
light=Mock(
state={"reachable": False},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
coordinator=Mock(last_update_success=True),
bridge=Mock(allow_unreachable=True),
is_group=False,
supported_features=hue_light.SUPPORT_HUE_EXTENDED,
)
assert light.available is True
light = hue_light.HueLight(
light=Mock(
state={"reachable": False},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
coordinator=Mock(last_update_success=True),
bridge=Mock(allow_unreachable=False),
is_group=True,
supported_features=hue_light.SUPPORT_HUE_EXTENDED,
)
assert light.available is True
def test_hs_color():
"""Test hs_color property."""
light = hue_light.HueLight(
light=Mock(
state={"colormode": "ct", "hue": 1234, "sat": 123},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
coordinator=Mock(last_update_success=True),
bridge=Mock(),
is_group=False,
supported_features=hue_light.SUPPORT_HUE_EXTENDED,
)
assert light.hs_color is None
light = hue_light.HueLight(
light=Mock(
state={"colormode": "hs", "hue": 1234, "sat": 123},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
coordinator=Mock(last_update_success=True),
bridge=Mock(),
is_group=False,
supported_features=hue_light.SUPPORT_HUE_EXTENDED,
)
assert light.hs_color is None
light = hue_light.HueLight(
light=Mock(
state={"colormode": "xy", "hue": 1234, "sat": 123, "xy": [0.4, 0.5]},
raw=LIGHT_RAW,
colorgamuttype=LIGHT_GAMUT_TYPE,
colorgamut=LIGHT_GAMUT,
),
coordinator=Mock(last_update_success=True),
bridge=Mock(),
is_group=False,
supported_features=hue_light.SUPPORT_HUE_EXTENDED,
)
assert light.hs_color == color.color_xy_to_hs(0.4, 0.5, LIGHT_GAMUT)
async def test_group_features(hass, mock_bridge):
"""Test group features."""
color_temp_type = "Color temperature light"
extended_color_type = "Extended color light"
group_response = {
"1": {
"name": "Group 1",
"lights": ["1", "2"],
"type": "Room",
"action": {
"on": True,
"bri": 254,
"hue": 10000,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
"2": {
"name": "Group 2",
"lights": ["3", "4"],
"type": "Room",
"action": {
"on": True,
"bri": 153,
"hue": 4345,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
"3": {
"name": "Group 3",
"lights": ["1", "3"],
"type": "Room",
"action": {
"on": True,
"bri": 153,
"hue": 4345,
"sat": 254,
"effect": "none",
"xy": [0.5, 0.5],
"ct": 250,
"alert": "select",
"colormode": "ct",
},
"state": {"any_on": True, "all_on": False},
},
}
light_1 = {
"state": {
"on": True,
"bri": 144,
"ct": 467,
"alert": "none",
"effect": "none",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": color_temp_type,
"name": "Hue Lamp 1",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "456",
}
light_2 = {
"state": {
"on": False,
"bri": 0,
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "xy",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": color_temp_type,
"name": "Hue Lamp 2",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "4567",
}
light_3 = {
"state": {
"on": False,
"bri": 0,
"hue": 0,
"sat": 0,
"xy": [0, 0],
"ct": 0,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": extended_color_type,
"name": "Hue Lamp 3",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "123",
}
light_4 = {
"state": {
"on": True,
"bri": 100,
"hue": 13088,
"sat": 210,
"xy": [0.5, 0.4],
"ct": 420,
"alert": "none",
"effect": "none",
"colormode": "hs",
"reachable": True,
},
"capabilities": {
"control": {
"colorgamuttype": "A",
"colorgamut": [[0.704, 0.296], [0.2151, 0.7106], [0.138, 0.08]],
}
},
"type": extended_color_type,
"name": "Hue Lamp 4",
"modelid": "LCT001",
"swversion": "66009461",
"manufacturername": "Philips",
"uniqueid": "1234",
}
light_response = {
"1": light_1,
"2": light_2,
"3": light_3,
"4": light_4,
}
mock_bridge.allow_groups = True
mock_bridge.mock_light_responses.append(light_response)
mock_bridge.mock_group_responses.append(group_response)
await setup_bridge(hass, mock_bridge)
color_temp_feature = hue_light.SUPPORT_HUE["Color temperature light"]
extended_color_feature = hue_light.SUPPORT_HUE["Extended color light"]
group_1 = hass.states.get("light.group_1")
assert group_1.attributes["supported_features"] == color_temp_feature
group_2 = hass.states.get("light.group_2")
assert group_2.attributes["supported_features"] == extended_color_feature
group_3 = hass.states.get("light.group_3")
assert group_3.attributes["supported_features"] == extended_color_feature
|
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod, abstractproperty
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
    If it is given heterogeneous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
def __bool__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@abstractproperty
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractproperty
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other: Should promote to float when necessary."""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
@abstractmethod
def __floor__(self):
"""Finds the greatest Integral <= self."""
raise NotImplementedError
@abstractmethod
def __ceil__(self):
"""Finds the least Integral >= self."""
raise NotImplementedError
@abstractmethod
def __round__(self, ndigits=None):
"""Rounds self to ndigits decimal places, defaulting to 0.
If ndigits is omitted or None, returns an Integral, otherwise
returns a Real. Rounds half toward even.
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@abstractproperty
def numerator(self):
raise NotImplementedError
@abstractproperty
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to int and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __int__(self):
"""int(self)"""
raise NotImplementedError
def __index__(self):
"""Called whenever an index is needed, such as in slicing"""
return int(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(int(self))"""
return float(int(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
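# A minimal usage sketch (not part of the module itself): the register() calls
# above make the builtin types virtual subclasses, so plain isinstance() checks
# walk the whole numeric tower without any explicit inheritance.
if __name__ == "__main__":
    assert isinstance(3, Integral) and isinstance(3, Real)
    assert isinstance(3.14, Real) and not isinstance(3.14, Rational)
    assert isinstance(1j, Complex) and not isinstance(1j, Real)
    print("numeric tower isinstance checks passed")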
|
|
# -*- coding: utf-8 -*-
import sublime, sublime_plugin
import sys
import os
if sys.version_info < (3, 0):
from chardet.universaldetector import UniversalDetector
NONE_COMMAND = (None, None, 0)
ST3 = False
else:
from .chardet.universaldetector import UniversalDetector
NONE_COMMAND = ('', None, 0)
ST3 = True
import codecs
import threading
import json
import time
import hashlib
import shutil
SKIP_ENCODINGS = ('ASCII', 'UTF-8', 'UTF-16LE', 'UTF-16BE')
SUPERSETS = {
'GB2312': 'GBK',
'GBK': 'GB18030',
'BIG5': 'CP950', # CP950 is common in Taiwan
'CP950': 'BIG5-HKSCS', # HK official Big5 variant
'EUC-KR': 'CP949' # CP949 is a superset of euc-kr!
}
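# Illustration of the intended fallback chain (a sketch, not executed here):
# when a strict decode fails with UnicodeDecodeError, ConvertToUtf8Command
# retries with SUPERSETS[encoding], so e.g. GB2312 falls back to GBK and, on a
# further failure, to GB18030.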
SETTINGS = {}
REVERTING_FILES = []
CONFIRM_IS_AVAILABLE = ('ok_cancel_dialog' in dir(sublime))
ENCODINGS_NAME = []
ENCODINGS_CODE = []
class EncodingCache(object):
def __init__(self):
self.file = os.path.join(sublime.packages_path(), 'User', 'encoding_cache.json')
self.cache = []
self.max_size = -1
self.dirty = False
self.load()
self.save_on_dirty()
def save_on_dirty(self):
if self.dirty:
self.save()
sublime.set_timeout(self.save_on_dirty, 10000)
def shrink(self):
if self.max_size < 0:
return
if len(self.cache) > self.max_size:
self.dirty = True
del self.cache[self.max_size:]
def set_max_size(self, max_size):
self.max_size = max_size
self.shrink()
def load(self):
if not os.path.exists(self.file):
return
fp = open(self.file, 'r')
self.cache = json.load(fp)
fp.close()
if len(self.cache) > 0:
if 'file' in self.cache[0]:
# old style cache
new_cache = []
for item in self.cache:
new_cache.append({
item['file']: item['encoding']
})
self.cache = new_cache
self.dirty = True
def save(self):
self.shrink()
fp = open(self.file, 'w')
json.dump(self.cache, fp)
fp.close()
self.dirty = False
def pop(self, file_name):
for item in self.cache:
if file_name in item:
self.cache.remove(item)
self.dirty = True
return item.get(file_name)
return None
def set(self, file_name, encoding):
if self.max_size < 1:
return
self.pop(file_name)
self.cache.insert(0, {
file_name: encoding
})
self.dirty = True
encoding_cache = None
def get_settings():
global ENCODINGS_NAME, ENCODINGS_CODE
settings = sublime.load_settings('ConvertToUTF8.sublime-settings')
encoding_list = settings.get('encoding_list', [])
ENCODINGS_NAME = [pair[0] for pair in encoding_list]
ENCODINGS_CODE = [pair[1] for pair in encoding_list]
encoding_cache.set_max_size(settings.get('max_cache_size', 100))
SETTINGS['max_detect_lines'] = settings.get('max_detect_lines', 600)
SETTINGS['preview_action'] = settings.get('preview_action', 'no_action')
SETTINGS['default_encoding_on_create'] = settings.get('default_encoding_on_create', '')
SETTINGS['convert_on_load'] = settings.get('convert_on_load', 'always')
SETTINGS['convert_on_save'] = settings.get('convert_on_save', 'always')
SETTINGS['lazy_reload'] = settings.get('lazy_reload', True)
def get_setting(view, key):
# read project specific settings first
    return view.settings().get(key, SETTINGS[key])
TMP_DIR = None
def get_temp_name(name):
if not name:
return None
name = name.encode('UTF-8')
return hashlib.md5(name).hexdigest()
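# The md5 of the UTF-8 encoded path serves as a stable temporary file name
# inside TMP_DIR, so a buffer written out by ConvertFromUtf8Command can later
# be matched back to its original file (see clean_temp_folder below).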
def clean_temp_folder():
tmp_files = os.listdir(TMP_DIR)
for win in sublime.windows():
for view in win.views():
file_name = view.file_name()
tmp_name = get_temp_name(file_name)
if tmp_name in tmp_files:
if not view.is_dirty():
tmp_file = os.path.join(TMP_DIR, tmp_name)
# check mtime
mtime1 = os.path.getmtime(file_name)
mtime2 = os.path.getmtime(tmp_file)
if mtime1 != mtime2:
# file was changed outside
view.settings().erase('prevent_detect')
continue
shutil.move(tmp_file, file_name)
tmp_files.remove(tmp_name)
for tmp_name in tmp_files:
tmp_file = os.path.join(TMP_DIR, tmp_name)
os.unlink(tmp_file)
def init_settings():
global encoding_cache, TMP_DIR
encoding_cache = EncodingCache()
get_settings()
sublime.load_settings('ConvertToUTF8.sublime-settings').add_on_change('get_settings', get_settings)
TMP_DIR = os.path.join(sublime.packages_path(), 'User', 'c2u_tmp')
if not os.path.exists(TMP_DIR):
os.mkdir(TMP_DIR)
def setup_views():
clean_temp_folder()
# check existing views
for win in sublime.windows():
for view in win.views():
if get_setting(view, 'convert_on_load') == 'never':
break
if view.is_dirty() or view.settings().get('origin_encoding'):
show_encoding_status(view)
continue
file_name = view.file_name()
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
def plugin_loaded():
init_settings()
setup_views()
def wait_for_ready():
if sublime.windows():
setup_views()
else:
sublime.set_timeout(wait_for_ready, 100)
if not ST3:
init_settings()
wait_for_ready()
def detect(view, file_name, cnt):
if not file_name or not os.path.exists(file_name):
return
encoding = encoding_cache.pop(file_name)
if encoding:
sublime.set_timeout(lambda: init_encoding_vars(view, encoding, detect_on_fail=True), 0)
return
sublime.set_timeout(lambda: view.set_status('origin_encoding', 'Detecting encoding, please wait...'), 0)
detector = UniversalDetector()
fp = open(file_name, 'rb')
for line in fp:
# cut MS-Windows CR code
line = line.replace(b'\r',b'')
detector.feed(line)
cnt -= 1
if detector.done or cnt == 0:
break
fp.close()
detector.close()
encoding = detector.result['encoding']
if encoding:
encoding = encoding.upper()
confidence = detector.result['confidence']
sublime.set_timeout(lambda: check_encoding(view, encoding, confidence), 0)
def check_encoding(view, encoding, confidence):
view_encoding = view.encoding()
result = 'Detected {0} vs {1} with {2:.0%} confidence'.format(encoding, view_encoding, confidence) if encoding else 'Encoding can not be detected'
view.set_status('origin_encoding', result)
print(result)
not_detected = not encoding or confidence < 0.95 or encoding == view_encoding
# ST can't detect the encoding
if view_encoding in ('Undefined', view.settings().get('fallback_encoding')):
if not_detected:
show_selection(view)
return
else:
if not_detected:
# using encoding detected by ST
encoding = view_encoding
else:
show_selection(view, [
['{0} ({1:.0%})'.format(encoding, confidence), encoding],
['{0}'.format(view_encoding), view_encoding]
])
return
init_encoding_vars(view, encoding)
def show_encoding_status(view):
encoding = view.settings().get('force_encoding')
if not encoding:
encoding = view.settings().get('origin_encoding')
if not encoding:
return
view.set_status('origin_encoding', encoding)
def init_encoding_vars(view, encoding, run_convert=True, stamp=None, detect_on_fail=False):
if not encoding:
return
view.settings().set('origin_encoding', encoding)
show_encoding_status(view)
if encoding in SKIP_ENCODINGS or encoding == view.encoding():
encoding_cache.set(view.file_name(), encoding)
return
view.settings().set('in_converting', True)
if run_convert:
        if stamp is None:
stamp = '{0}'.format(time.time())
translate_tabs_to_spaces = view.settings().get('translate_tabs_to_spaces')
view.settings().set('translate_tabs_to_spaces', False)
view.run_command('convert_to_utf8', {'detect_on_fail': detect_on_fail, 'stamp': stamp})
view.settings().set('translate_tabs_to_spaces', translate_tabs_to_spaces)
def clean_encoding_vars(view):
view.settings().erase('in_converting')
view.settings().erase('origin_encoding')
view.erase_status('origin_encoding')
view.set_scratch(False)
encoding_cache.pop(view.file_name())
def remove_reverting(file_name):
while file_name in REVERTING_FILES:
REVERTING_FILES.remove(file_name)
class EncodingSelection(threading.Thread):
def __init__(self, view, names, codes):
threading.Thread.__init__(self)
self.view = view
self.names = names
self.codes = codes
def run(self):
sublime.set_timeout(self.show_panel, 0)
def show_panel(self):
window = self.view.window()
if window:
window.show_quick_panel(self.names, self.on_done)
def on_done(self, selected):
if selected == -1:
clean_encoding_vars(self.view)
else:
init_encoding_vars(self.view, self.codes[selected])
def show_selection(view, encoding_list = None):
if encoding_list:
names = [pair[0] for pair in encoding_list]
codes = [pair[1] for pair in encoding_list]
else:
names = ENCODINGS_NAME
codes = ENCODINGS_CODE
EncodingSelection(view, names, codes).start()
class ReloadWithEncoding(threading.Thread):
def __init__(self, view, encoding):
threading.Thread.__init__(self)
self.view = view
self.encoding = encoding
def run(self):
sublime.set_timeout(self.reload, 0)
def reload(self):
init_encoding_vars(self.view, self.encoding)
def reload_encoding(view, encoding):
ReloadWithEncoding(view, encoding).start()
stamps = {}
class ShowEncodingSelectionCommand(sublime_plugin.TextCommand):
def run(self, edit):
show_selection(self.view)
class ReloadWithEncodingCommand(sublime_plugin.TextCommand):
def run(self, edit, encoding):
reload_encoding(self.view, encoding)
class PyInstructionCommand(sublime_plugin.TextCommand):
def get_branch(self, platform, arch):
return [{
'linux-x64': 'master',
'linux-x32': 'x32',
}, {
'linux-x64': 'linux-x64',
'linux-x32': 'linux-x32',
'osx-x64': 'osx',
}][ST3].get(platform + '-' + arch)
def run(self, edit, encoding, file_name):
self.view.set_name('ConvertToUTF8 Instructions')
self.view.set_scratch(True)
self.view.settings().set("word_wrap", True)
msg = 'Oops! The file {0} is detected as {1} which is not supported by your Sublime Text.\n\nPlease check whether it is in the list of Python\'s Standard Encodings (http://docs.python.org/library/codecs.html#standard-encodings) or not.\n\nIf yes, '.format(file_name, encoding)
branch = self.get_branch(sublime.platform(), sublime.arch())
if branch:
ver = '33' if ST3 else '26'
msg = msg + 'please install Codecs{0} (https://github.com/seanliang/Codecs{0}/tree/{1}) and restart Sublime Text to make ConvertToUTF8 work properly. If it is still not working, '.format(ver, branch)
import platform
msg = msg + 'please kindly send the following information to sunlxy (at) yahoo.com:\n====== Debug Information ======\nVersion: {0}-{1}\nPlatform: {2}\nPath: {3}\nEncoding: {4}\n'.format(
sublime.version(), sublime.arch(), platform.platform(), sys.path, encoding
)
self.view.insert(edit, 0, msg)
self.view.set_read_only(True)
self.view.window().focus_view(self.view)
class ConvertToUtf8Command(sublime_plugin.TextCommand):
def run(self, edit, encoding=None, stamp=None, detect_on_fail=False):
view = self.view
if encoding:
view.settings().set('force_encoding', encoding)
origin_encoding = view.settings().get('origin_encoding')
# convert only when ST can't load file properly
run_convert = (view.encoding() == view.settings().get('fallback_encoding'))
if origin_encoding:
if origin_encoding == encoding:
return
view.set_scratch(False)
run_convert = False
init_encoding_vars(view, encoding, run_convert, stamp)
return
else:
encoding = view.settings().get('origin_encoding')
if not encoding:
return
file_name = view.file_name()
if not (file_name and os.path.exists(file_name)):
return
# try fast decode
fp = None
try:
fp = codecs.open(file_name, 'rb', encoding, errors='strict')
contents = fp.read()
except LookupError as e:
clean_encoding_vars(view)
view.window().new_file().run_command('py_instruction', {'encoding': encoding, 'file_name': file_name})
return
except UnicodeDecodeError as e:
if detect_on_fail:
detect(view, file_name, get_setting(view, 'max_detect_lines'))
return
superset = SUPERSETS.get(encoding)
if superset:
print('Try encoding {0} instead of {1}.'.format(superset, encoding))
init_encoding_vars(view, superset, True, stamp)
return
if CONFIRM_IS_AVAILABLE:
if sublime.ok_cancel_dialog(u'Errors occurred while converting {0} with {1} encoding.\n\n'
'WARNING: Continue to load this file using {1}, malformed data will be ignored.'
'\n\nPress "Cancel" to choose another encoding manually.'.format
(os.path.basename(file_name), encoding)):
fp.close()
fp = codecs.open(file_name, 'rb', encoding, errors='ignore')
contents = fp.read()
else:
show_selection(view)
return
else:
view.set_status('origin_encoding', u'Errors occurred while converting {0} with {1} encoding'.format
(os.path.basename(file_name), encoding))
show_selection(view)
return
finally:
if fp:
fp.close()
encoding_cache.set(file_name, encoding)
contents = contents.replace('\r\n', '\n').replace('\r', '\n')
regions = sublime.Region(0, view.size())
sel = view.sel()
rs = [x for x in sel]
vp = view.viewport_position()
view.set_viewport_position(tuple([0, 0]))
view.replace(edit, regions, contents)
sel.clear()
for x in rs:
sel.add(sublime.Region(x.a, x.b))
view.set_viewport_position(vp)
stamps[file_name] = stamp
sublime.status_message('{0} -> UTF8'.format(encoding))
def description(self):
encoding = self.view.settings().get('origin_encoding')
if not encoding:
return
return '{0} -> UTF8'.format(encoding)
def is_enabled(self):
return self.view.encoding() != 'Hexadecimal'
class ConvertFromUtf8Command(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
encoding = view.settings().get('force_encoding')
if not encoding:
encoding = view.settings().get('origin_encoding')
file_name = view.file_name()
if not encoding or encoding == 'UTF-8':
encoding_cache.pop(file_name)
return
fp = None
try:
fp = open(file_name, 'rb')
contents = codecs.EncodedFile(fp, encoding, 'UTF-8').read()
except (LookupError, UnicodeEncodeError) as e:
sublime.error_message(u'Can not convert file encoding of {0} to {1}, it was saved as UTF-8 instead:\n\n{2}'.format
(os.path.basename(file_name), encoding, e))
return
finally:
if fp:
fp.close()
# write content to temporary file
tmp_name = os.path.join(TMP_DIR, get_temp_name(file_name))
fp = open(tmp_name, 'wb')
fp.write(contents)
fp.close()
if not get_setting(view, 'lazy_reload'):
# os.rename has "Invalid cross-device link" issue
shutil.move(tmp_name, file_name)
else:
# copy the timestamp from original file
mtime = os.path.getmtime(file_name)
os.utime(tmp_name, (mtime, mtime))
encoding_cache.set(file_name, encoding)
view.settings().set('prevent_detect', True)
sublime.status_message('UTF8 -> {0}'.format(encoding))
def description(self):
encoding = self.view.settings().get('origin_encoding')
if not encoding:
return
return 'UTF8 -> {0}'.format(encoding)
class ConvertToUTF8Listener(sublime_plugin.EventListener):
def check_clones(self, view):
clone_numbers = view.settings().get('clone_numbers', 0)
if clone_numbers:
check_times = view.settings().get('check_times', clone_numbers)
if check_times:
view.settings().set('check_times', check_times - 1)
return True
view.settings().erase('check_times')
return False
def on_new(self, view):
if get_setting(view, 'default_encoding_on_create'):
init_encoding_vars(view, get_setting(view, 'default_encoding_on_create'), False)
def on_clone(self, view):
clone_numbers = view.settings().get('clone_numbers', 0)
view.settings().set('clone_numbers', clone_numbers + 1)
encoding = view.settings().get('origin_encoding')
if encoding:
view.set_status('origin_encoding', encoding)
def on_close(self, view):
clone_numbers = view.settings().get('clone_numbers', 0)
if clone_numbers:
view.settings().set('clone_numbers', clone_numbers - 1)
else:
remove_reverting(view.file_name())
def on_load(self, view):
encoding = view.encoding()
if encoding == 'Hexadecimal' or encoding.endswith(' BOM'):
return
file_name = view.file_name()
if not file_name:
return
if self.check_clones(view):
return
encoding = view.settings().get('origin_encoding')
if encoding and not view.get_status('origin_encoding'):
view.set_status('origin_encoding', encoding)
# file is reloading
if view.settings().get('prevent_detect'):
if view.is_dirty():
# changes have not been saved
sublime.set_timeout(lambda: self.on_deactivated(view), 0)
return
else:
# treat as a new file
sublime.set_timeout(lambda: self.clean_reload(view, file_name), 250)
return
else:
return
if get_setting(view, 'convert_on_load') == 'never':
return
self.perform_action(view, file_name, 5)
def on_activated(self, view):
if view.settings().get('is_preview'):
self.perform_action(view, view.file_name(), 3)
def is_preview(self, view):
window = view.window()
if not window:
return True
view_index = window.get_view_index(view)
return view_index[1] == -1
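    # Note: window.get_view_index() reports an index of -1 for transient
    # (preview) views, which is what is_preview() relies on; a view without a
    # window is conservatively treated as a preview as well.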
def clean_reload(self, view, file_name):
window = view.window()
if not window:
sublime.set_timeout(lambda: self.clean_reload(view, file_name), 100)
return
for v in window.views():
if v.file_name() == file_name:
v.settings().erase('prevent_detect')
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
def perform_action(self, view, file_name, times):
if get_setting(view, 'preview_action') != 'convert_and_open' and self.is_preview(view):
if times > 0:
# give it another chance before everything is ready
sublime.set_timeout(lambda: self.perform_action(view, file_name, times - 1), 100)
return
view.settings().set('is_preview', True)
return
view.settings().erase('is_preview')
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
def on_modified(self, view):
encoding = view.encoding()
if encoding == 'Hexadecimal':
return
file_name = view.file_name()
if not file_name or view.is_loading():
return
if not view.settings().get('in_converting'):
if view.settings().get('is_preview'):
view.settings().erase('is_preview')
detect(view, file_name, get_setting(view, 'max_detect_lines'))
return
if self.check_clones(view):
return
command = view.command_history(0)
command1 = view.command_history(1)
if command == NONE_COMMAND:
if command1[0] == 'convert_to_utf8':
view.run_command('redo')
elif command[0] == 'convert_to_utf8':
if file_name in stamps:
if stamps[file_name] == command[1].get('stamp'):
view.set_scratch(True)
elif command[0] == 'revert':
if command1 == NONE_COMMAND:
# on_modified will be invoked twice for each revert
if file_name not in REVERTING_FILES:
REVERTING_FILES.insert(0, file_name)
return
remove_reverting(file_name)
if view.settings().get('prevent_detect'):
sublime.set_timeout(lambda: self.undo_me(view), 0)
else:
# file was modified outside
cnt = get_setting(view, 'max_detect_lines')
threading.Thread(target=lambda: detect(view, file_name, cnt)).start()
else:
view.set_scratch(False)
def undo_me(self, view):
view.settings().erase('prevent_detect')
view.run_command('undo')
# st3 will reload file immediately
if view.settings().get('revert_to_scratch') or (ST3 and not get_setting(view, 'lazy_reload')):
view.set_scratch(True)
def on_deactivated(self, view):
# st2 will reload file when on_deactivated
if view.settings().get('prevent_detect'):
file_name = view.file_name()
if get_setting(view, 'lazy_reload'):
tmp_name = os.path.join(TMP_DIR, get_temp_name(file_name))
shutil.move(tmp_name, file_name)
remove_reverting(file_name)
view.settings().set('revert_to_scratch', not view.is_dirty())
# make ST stop asking about reloading
view.run_command('revert')
def on_pre_save(self, view):
if view.encoding() == 'Hexadecimal':
return
force_encoding = view.settings().get('force_encoding')
if force_encoding == 'UTF-8':
view.set_encoding(force_encoding)
return
if not view.settings().get('in_converting'):
return
if self.check_clones(view):
return
view.set_encoding('UTF-8')
def on_post_save(self, view):
view_encoding = view.encoding()
if view_encoding == 'Hexadecimal':
return
if not view.settings().get('in_converting'):
return
if self.check_clones(view):
return
file_name = view.file_name()
if file_name in stamps:
del stamps[file_name]
if get_setting(view, 'convert_on_save') == 'never':
return
# file was saved with other encoding
if view_encoding != 'UTF-8':
clean_encoding_vars(view)
return
view.run_command('convert_from_utf8')
|
|
#!/usr/bin/python
import os
import re
import sys
import time
import errno
import types
import atexit
import signal
import socket
import classad
import datetime
import unittest
master_pid = 0
def kill_master():
if master_pid: os.kill(master_pid, signal.SIGTERM)
atexit.register(kill_master)
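# The atexit hook above guarantees that the forked condor_master (started by
# WithDaemons.launch_daemons below) is terminated even if a test run exits
# before tearDown gets a chance to kill it.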
def makedirs_ignore_exist(directory):
try:
os.makedirs(directory)
except:
exctype, oe = sys.exc_info()[:2]
if not issubclass(exctype, OSError): raise
if oe.errno != errno.EEXIST:
raise
def remove_ignore_missing(file):
try:
os.unlink(file)
except:
exctype, oe = sys.exc_info()[:2]
if not issubclass(exctype, OSError): raise
if oe.errno != errno.ENOENT:
raise
# Bootstrap condor
testdir = os.path.join(os.getcwd(), "tests_tmp")
logdir = os.path.join(testdir, "log")
makedirs_ignore_exist(testdir)
makedirs_ignore_exist(logdir)
config_file = os.path.join(testdir, "condor_config")
open(config_file, "w").close()
os.environ["CONDOR_CONFIG"] = config_file
os.environ["_condor_TOOL_LOG"] = os.path.join(logdir, "ToolLog")
os.environ["_condor_TOOL_DEBUG"] = "D_FULLDEBUG, D_NETWORK"
import htcondor
htcondor.enable_log()
class WithDaemons(unittest.TestCase):
def setUp(self):
self.pid = -1
to_delete = [i for i in os.environ if i.lower().startswith("_condor_")]
for key in to_delete: del os.environ[key]
os.environ["_condor_MASTER"] = os.path.join(os.getcwd(), "../condor_master.V6/condor_master")
os.environ["_condor_COLLECTOR"] = os.path.join(os.getcwd(), "../condor_collector.V6/condor_collector")
os.environ["_condor_SCHEDD"] = os.path.join(os.getcwd(), "../condor_schedd.V6/condor_schedd")
os.environ["_condor_PROCD"] = os.path.join(os.getcwd(), "../condor_procd/condor_procd")
os.environ["_condor_STARTD"] = os.path.join(os.getcwd(), "../condor_startd.V6/condor_startd")
os.environ["_condor_STARTER"] = os.path.join(os.getcwd(), "../condor_starter.V6.1/condor_starter")
os.environ["_condor_NEGOTIATOR"] = os.path.join(os.getcwd(), "../condor_negotiator.V6/condor_negotiator")
os.environ["_condor_SHADOW"] = os.path.join(os.getcwd(), "../condor_shadow.V6.1/condor_shadow")
os.environ["_condor_CONDOR_HOST"] = socket.getfqdn()
os.environ["_condor_LOCAL_DIR"] = testdir
os.environ["_condor_LOG"] = '$(LOCAL_DIR)/log'
os.environ["_condor_LOCK"] = '$(LOCAL_DIR)/lock'
os.environ["_condor_RUN"] = '$(LOCAL_DIR)/run'
os.environ["_condor_COLLECTOR_NAME"] = "python_classad_tests"
os.environ["_condor_SCHEDD_NAME"] = "python_classad_tests"
os.environ["_condor_MASTER_ADDRESS_FILE"] = "$(LOG)/.master_address"
os.environ["_condor_COLLECTOR_ADDRESS_FILE"] = "$(LOG)/.collector_address"
os.environ["_condor_SCHEDD_ADDRESS_FILE"] = "$(LOG)/.schedd_address"
os.environ["_condor_STARTD_ADDRESS_FILE"] = "$(LOG)/.startd_address"
os.environ["_condor_STARTD_DEBUG"] = "D_FULLDEBUG"
os.environ["_condor_STARTER_DEBUG"] = "D_FULLDEBUG"
os.environ["_condor_SHADOW_DEBUG"] = "D_FULLDEBUG|D_MACHINE"
os.environ["_condor_NEGOTIATOR_ADDRESS_FILE"] = "$(LOG)/.negotiator_address"
os.environ["_condor_NEGOTIATOR_CYCLE_DELAY"] = "1"
os.environ["_condor_NEGOTIATOR_INTERVAL"] = "1"
os.environ["_condor_SCHEDD_INTERVAL"] = "1"
os.environ["_condor_SCHEDD_MIN_INTERVAL"] = "1"
# Various required attributes for the startd
os.environ["_condor_START"] = "TRUE"
os.environ["_condor_SUSPEND"] = "FALSE"
os.environ["_condor_CONTINUE"] = "TRUE"
os.environ["_condor_PREEMPT"] = "FALSE"
os.environ["_condor_KILL"] = "FALSE"
os.environ["_condor_WANT_SUSPEND"] = "FALSE"
os.environ["_condor_WANT_VACATE"] = "FALSE"
os.environ["_condor_MachineMaxVacateTime"] = "5"
os.environ["_condor_JOB_INHERITS_STARTER_ENVIRONMENT"] = "TRUE"
htcondor.reload_config()
htcondor.SecMan().invalidateAllSessions()
def launch_daemons(self, daemons=["MASTER", "COLLECTOR"], config={}):
makedirs_ignore_exist(htcondor.param["LOG"])
makedirs_ignore_exist(htcondor.param["LOCK"])
makedirs_ignore_exist(htcondor.param["EXECUTE"])
makedirs_ignore_exist(htcondor.param["SPOOL"])
makedirs_ignore_exist(htcondor.param["RUN"])
remove_ignore_missing(htcondor.param["MASTER_ADDRESS_FILE"])
remove_ignore_missing(htcondor.param["COLLECTOR_ADDRESS_FILE"])
remove_ignore_missing(htcondor.param["SCHEDD_ADDRESS_FILE"])
for key, val in config.items():
os.environ["_condor_%s" % key] = val
if "COLLECTOR" in daemons:
os.environ["_condor_PORT"] = "9622"
os.environ["_condor_COLLECTOR_ARGS"] = "-port $(PORT)"
os.environ["_condor_COLLECTOR_HOST"] = "$(CONDOR_HOST):$(PORT)"
if 'MASTER' not in daemons:
daemons.append('MASTER')
os.environ["_condor_DAEMON_LIST"] = ", ".join(daemons)
htcondor.reload_config()
self.pid = os.fork()
if not self.pid:
try:
try:
os.execvp("condor_master", ["condor_master", "-f"])
except:
e = sys.exc_info()[1]
print(str(e))
finally:
os._exit(1)
global master_pid
master_pid = self.pid
for daemon in daemons:
self.waitLocalDaemon(daemon)
def tearDown(self):
if self.pid > 1:
global master_pid
master_pid = 0
os.kill(self.pid, signal.SIGQUIT)
pid, exit_status = os.waitpid(self.pid, 0)
self.assertTrue(os.WIFEXITED(exit_status))
code = os.WEXITSTATUS(exit_status)
self.assertEquals(code, 0)
def waitLocalDaemon(self, daemon, timeout=5):
address_file = htcondor.param[daemon + "_ADDRESS_FILE"]
for i in range(timeout):
if os.path.exists(address_file):
return
time.sleep(1)
if not os.path.exists(address_file):
raise RuntimeError("Waiting for daemon %s timed out." % daemon)
def waitRemoteDaemon(self, dtype, dname, pool=None, timeout=5):
if pool:
coll = htcondor.Collector(pool)
else:
coll = htcondor.Collector()
for i in range(timeout):
try:
return coll.locate(dtype, dname)
except Exception:
pass
time.sleep(1)
return coll.locate(dtype, dname)
class TestPythonBindings(WithDaemons):
def testDaemon(self):
self.launch_daemons(["COLLECTOR"])
def testLocate(self):
self.launch_daemons(["COLLECTOR"])
coll = htcondor.Collector()
coll_ad = coll.locate(htcondor.DaemonTypes.Collector)
self.assertTrue("MyAddress" in coll_ad)
self.assertEquals(coll_ad["Name"].split(":")[-1], os.environ["_condor_PORT"])
def testLocateList(self):
self.launch_daemons(["COLLECTOR"])
coll = htcondor.Collector()
coll_ad = coll.locate(htcondor.DaemonTypes.Collector)
self.assertTrue("MyAddress" in coll_ad)
self.assertEquals(coll_ad["Name"].split(":")[-1], os.environ["_condor_PORT"])
# Make sure we can pass a list of addresses
coll = htcondor.Collector(["collector.example.com", coll_ad['Name']])
coll_ad = coll.locate(htcondor.DaemonTypes.Collector)
def testRemoteLocate(self):
self.launch_daemons(["COLLECTOR"])
coll = htcondor.Collector()
coll_ad = coll.locate(htcondor.DaemonTypes.Collector)
remote_ad = self.waitRemoteDaemon(htcondor.DaemonTypes.Collector, "%s@%s" % (htcondor.param["COLLECTOR_NAME"], htcondor.param["CONDOR_HOST"]))
remote_address = remote_ad["MyAddress"].split(">")[0].split("?")[0].lower()
coll_address = coll_ad["MyAddress"].split(">")[0].split("?")[0].lower()
self.assertEquals(remote_address, coll_address)
def testScheddLocate(self):
self.launch_daemons(["SCHEDD", "COLLECTOR"])
coll = htcondor.Collector()
name = "%s@%s" % (htcondor.param["SCHEDD_NAME"], htcondor.param["CONDOR_HOST"])
schedd_ad = self.waitRemoteDaemon(htcondor.DaemonTypes.Schedd, name, timeout=10)
self.assertEquals(schedd_ad.eval("Name").lower(), name.lower())
def testCollectorAdvertise(self):
self.launch_daemons(["COLLECTOR"])
coll = htcondor.Collector()
now = time.time()
ad = classad.ClassAd('[MyType="GenericAd"; Name="Foo"; Foo=1; Bar=%f; Baz="foo"]' % now)
coll.advertise([ad])
for i in range(5):
ads = coll.query(htcondor.AdTypes.Any, 'Name =?= "Foo"', ["Bar"])
if ads: break
time.sleep(1)
self.assertEquals(len(ads), 1)
self.assertTrue(isinstance(ads[0]["Bar"], types.FloatType))
self.assertEquals(ads[0]["Bar"], now)
self.assertTrue("Foo" not in ads[0])
def testScheddSubmit(self):
self.launch_daemons(["SCHEDD", "COLLECTOR", "STARTD", "NEGOTIATOR"])
output_file = os.path.join(testdir, "test.out")
if os.path.exists(output_file):
os.unlink(output_file)
schedd = htcondor.Schedd()
ad = classad.parse(open("tests/submit.ad"))
ads = []
cluster = schedd.submit(ad, 1, False, ads)
#print ads[0]
for i in range(60):
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus"])
#print ads
if len(ads) == 0:
break
if i % 2 == 0:
schedd.reschedule()
time.sleep(1)
self.assertEquals(open(output_file).read(), "hello world\n");
def testScheddSubmitMany(self):
self.launch_daemons(["SCHEDD", "COLLECTOR", "STARTD", "NEGOTIATOR"])
output_file = os.path.join(testdir, "test.out")
if os.path.exists(output_file):
os.unlink(output_file)
schedd = htcondor.Schedd()
ad = classad.parse(open("tests/submit.ad"))
ads = []
cluster = schedd.submit(ad, 10, False, ads)
#print ads[0]
for i in range(60):
ads = schedd.xquery("ClusterId == %d" % cluster, ["JobStatus"])
ads = list(ads)
#print ads
if len(ads) == 0:
break
if i % 2 == 0:
schedd.reschedule()
time.sleep(1)
self.assertEquals(open(output_file).read(), "hello world\n");
def testScheddNonblockingQuery(self):
self.launch_daemons(["SCHEDD", "COLLECTOR", "STARTD", "NEGOTIATOR"])
output_file = os.path.join(testdir, "test.out")
if os.path.exists(output_file):
os.unlink(output_file)
schedd = htcondor.Schedd()
ad = classad.parse(open("tests/submit.ad"))
ads = []
cluster = schedd.submit(ad, 10, False, ads)
for i in range(60):
ads = schedd.xquery("ClusterId == %d" % cluster, ["JobStatus"])
ads2 = schedd.xquery("ClusterId == %d" % cluster, ["JobStatus"])
ctrs = [0, 0]
iters = [(ads, 0), (ads2, 1)]
while iters:
for it, pos in iters:
try:
it.next()
ctrs[pos] += 1
except StopIteration:
iters.remove((it, pos))
print ctrs
if ctrs[0] == 0:
break
if i % 2 == 0:
schedd.reschedule()
time.sleep(1)
self.assertEquals(open(output_file).read(), "hello world\n");
def testScheddNonblockingQueryRemove(self):
os.environ["_condor_SCHEDD_DEBUG"] = "D_FULLDEBUG|D_NETWORK"
self.launch_daemons(["SCHEDD"])
schedd = htcondor.Schedd()
submit_ad = classad.parse(open("tests/submit.ad"))
ads = []
cluster = schedd.submit(submit_ad, 300, False, ads)
ads = schedd.xquery("ClusterId == %d" % cluster)
print str(datetime.datetime.now())
print str(datetime.datetime.now())
schedd.act(htcondor.JobAction.Remove, "ClusterId == %d" % cluster)
time.sleep(3)
print str(datetime.datetime.now())
print len(list(ads))
print str(datetime.datetime.now())
def testScheddNonblockingQueryCount(self):
os.environ["_condor_SCHEDD_DEBUG"] = "D_FULLDEBUG|D_NETWORK"
self.launch_daemons(["SCHEDD"])
schedd = htcondor.Schedd()
submit_ad = classad.parse(open("tests/submit_large.ad"))
schedd.act(htcondor.JobAction.Remove, "true")
# Wait for the blanket Remove above to actually drain the queue before submitting.
time.sleep(1)
ads = schedd.query("true")
while ads:
    time.sleep(.2)
    ads = schedd.query("true")
#print ads
for i in range(1, 60):
print "Testing querying %d jobs in queue." % i
schedd.submit(submit_ad, i, True, ads)
ads = schedd.query("true", ["ClusterID", "ProcID"])
ads2 = list(schedd.xquery("true", ["ClusterID", "ProcID", "a1", "a2", "a3", "a4"]))
#print ads
#print ads2
self.assertNotEqual(ads2[0].lookup("ProcID"), classad.Value.Undefined)
for ad in ads:
found_ad = False
for ad2 in ads2:
if ad2["ProcID"] == ad["ProcID"] and ad2["ClusterID"] == ad["ClusterID"]:
found_ad = True
break
self.assertTrue(found_ad, msg="Ad %s missing from xquery results: %s" % (ad, ads2))
self.assertEquals(len(ads), i, msg="Old query protocol gives incorrect number of results (expected %d, got %d)" % (i, len(ads)))
self.assertEquals(len(ads2), i, msg="New query protocol gives incorrect number of results (expected %d, got %d)" % (i, len(ads2)))
schedd.act(htcondor.JobAction.Remove, "true")
while ads:
time.sleep(.2)
ads = schedd.query("true")
def testScheddSubmitSpool(self):
self.launch_daemons(["SCHEDD", "COLLECTOR", "STARTD", "NEGOTIATOR"])
output_file = os.path.join(testdir, "test.out")
if os.path.exists(output_file):
os.unlink(output_file)
schedd = htcondor.Schedd()
ad = classad.parse(open("tests/submit.ad"))
result_ads = []
cluster = schedd.submit(ad, 1, True, result_ads)
#print result_ads[0]
schedd.spool(result_ads)
for i in range(60):
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus"])
#print ads
self.assertEquals(len(ads), 1)
if ads[0]["JobStatus"] == 4:
break
if i % 5 == 0:
schedd.reschedule()
time.sleep(1)
schedd.retrieve("ClusterId == %d" % cluster)
#print "Final status:", schedd.query("ClusterId == %d" % cluster)[0];
schedd.act(htcondor.JobAction.Remove, ["%d.0" % cluster])
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus"])
self.assertEquals(len(ads), 0)
self.assertEquals(open(output_file).read(), "hello world\n");
def testPing(self):
self.launch_daemons(["COLLECTOR"])
coll = htcondor.Collector()
coll_ad = coll.locate(htcondor.DaemonTypes.Collector)
self.assertTrue("MyAddress" in coll_ad)
secman = htcondor.SecMan()
authz_ad = secman.ping(coll_ad, "WRITE")
self.assertTrue("AuthCommand" in authz_ad)
self.assertEquals(authz_ad['AuthCommand'], 60021)
self.assertTrue("AuthorizationSucceeded" in authz_ad)
self.assertTrue(authz_ad['AuthorizationSucceeded'])
authz_ad = secman.ping(coll_ad["MyAddress"], "WRITE")
self.assertTrue("AuthCommand" in authz_ad)
self.assertEquals(authz_ad['AuthCommand'], 60021)
self.assertTrue("AuthorizationSucceeded" in authz_ad)
self.assertTrue(authz_ad['AuthorizationSucceeded'])
authz_ad = secman.ping(coll_ad["MyAddress"])
self.assertTrue("AuthCommand" in authz_ad)
self.assertEquals(authz_ad['AuthCommand'], 60011)
self.assertTrue("AuthorizationSucceeded" in authz_ad)
self.assertTrue(authz_ad['AuthorizationSucceeded'])
def testEventLog(self):
events = list(htcondor.read_events(open("tests/test_log.txt")))
self.assertEquals(len(events), 4)
a = dict(events[0])
if 'CurrentTime' in a:
del a['CurrentTime']
b = {"LogNotes": "DAG Node: Job1",
"MyType": "SubmitEvent",
"EventTypeNumber": 0,
"Subproc": 0,
"Cluster": 236467,
"Proc": 0,
"EventTime": "%d-11-15T17:05:55" % datetime.datetime.now().year,
"SubmitHost": "<169.228.38.38:9615?sock=18627_6227_3>",
}
self.assertEquals(set(a.keys()), set(b.keys()))
for key, val in a.items():
self.assertEquals(val, b[key])
def testTransaction(self):
self.launch_daemons(["SCHEDD", "COLLECTOR", "STARTD", "NEGOTIATOR"])
output_file = os.path.join(testdir, "test.out")
log_file = os.path.join(testdir, "test.log")
if os.path.exists(output_file):
os.unlink(output_file)
if os.path.exists(log_file):
os.unlink(log_file)
schedd = htcondor.Schedd()
ad = classad.parse(open("tests/submit_sleep.ad"))
result_ads = []
cluster = schedd.submit(ad, 1, True, result_ads)
with schedd.transaction() as txn:
schedd.edit(["%d.0" % cluster], 'foo', classad.Literal(1))
schedd.edit(["%d.0" % cluster], 'bar', classad.Literal(2))
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus", 'foo', 'bar'])
self.assertEquals(len(ads), 1)
self.assertEquals(ads[0]['foo'], 1)
self.assertEquals(ads[0]['bar'], 2)
with schedd.transaction() as txn:
schedd.edit(["%d.0" % cluster], 'baz', classad.Literal(3))
with schedd.transaction(htcondor.TransactionFlags.NonDurable | htcondor.TransactionFlags.ShouldLog, True) as txn:
schedd.edit(["%d.0" % cluster], 'foo', classad.Literal(4))
schedd.edit(["%d.0" % cluster], 'bar', classad.Literal(5))
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus", 'foo', 'bar', 'baz'])
self.assertEquals(len(ads), 1)
self.assertEquals(ads[0]['foo'], 4)
self.assertEquals(ads[0]['bar'], 5)
self.assertEquals(ads[0]['baz'], 3)
try:
with schedd.transaction() as txn:
schedd.edit(["%d.0" % cluster], 'foo', classad.Literal(6))
schedd.edit(["%d.0" % cluster], 'bar', classad.Literal(7))
raise Exception("force abort")
except:
exctype, e = sys.exc_info()[:2]
if not issubclass(exctype, Exception):
raise
self.assertEquals(str(e), "force abort")
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus", 'foo', 'bar'])
self.assertEquals(len(ads), 1)
self.assertEquals(ads[0]['foo'], 4)
self.assertEquals(ads[0]['bar'], 5)
try:
with schedd.transaction() as txn:
schedd.edit(["%d.0" % cluster], 'baz', classad.Literal(8))
with schedd.transaction(htcondor.TransactionFlags.NonDurable | htcondor.TransactionFlags.ShouldLog, True) as txn:
schedd.edit(["%d.0" % cluster], 'foo', classad.Literal(9))
schedd.edit(["%d.0" % cluster], 'bar', classad.Literal(10))
raise Exception("force abort")
except:
exctype, e = sys.exc_info()[:2]
if not issubclass(exctype, Exception):
raise
self.assertEquals(str(e), "force abort")
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus", 'foo', 'bar', 'baz'])
self.assertEquals(len(ads), 1)
self.assertEquals(ads[0]['foo'], 4)
self.assertEquals(ads[0]['bar'], 5)
self.assertEquals(ads[0]['baz'], 3)
schedd.act(htcondor.JobAction.Remove, ["%d.0" % cluster])
ads = schedd.query("ClusterId == %d" % cluster, ["JobStatus"])
self.assertEquals(len(ads), 0)
if __name__ == '__main__':
unittest.main()
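# A brief usage sketch (not exercised by the tests above): every key/value pair
# in the ``config`` argument of launch_daemons() is exported as a
# ``_condor_<KEY>`` environment variable before condor_master is forked, so any
# HTCondor configuration knob can be overridden per test case. The knob values
# below are illustrative assumptions only:
#
#   self.launch_daemons(["SCHEDD", "COLLECTOR", "STARTD", "NEGOTIATOR"],
#                       config={"NEGOTIATOR_INTERVAL": "2",
#                               "SCHEDD_INTERVAL": "2"})
#
# which is equivalent to setting os.environ["_condor_NEGOTIATOR_INTERVAL"] = "2"
# and os.environ["_condor_SCHEDD_INTERVAL"] = "2" before htcondor.reload_config().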
|
|
"""Implementation of the objects for the ArcGIS Server REST
Administration API"""
import cgi
import itertools
import os.path
import urllib
import urlparse
import urllib2
from arcrest import server, GenerateToken
__all__ = ['Admin', 'Folder', 'Services', 'Service',
'Machine', 'Machines', 'SiteMachines', 'ClusterMachines',
'Directory', 'Directories',
'Clusters', 'Cluster']
class Admin(server.RestURL):
"""Represents the top level URL resource of the ArcGIS Server
Administration API"""
def __init__(self, url, username=None, password=None,
token=None, generate_token=False,
expiration=60):
url_list = list(urlparse.urlsplit(url))
if not url_list[2].endswith('/'):
url_list[2] += "/"
url = urlparse.urlunsplit(url_list)
if username is not None and password is not None:
self._pwdmgr.add_password(None,
url,
username,
password)
if token:
self.__token__ = token
elif generate_token:
self.__generateToken(url, username, password, expiration)
super(Admin, self).__init__(url)
@property
def resources(self):
return self._json_struct['resources']
@property
def currentVersion(self):
return self._json_struct['currentVersion']
@property
def clusters(self):
return self._get_subfolder("./clusters/", Clusters)
@property
def services(self):
return self._get_subfolder("./services/", Services)
@property
def machines(self):
return self._get_subfolder("./machines/", SiteMachines)
@property
def data(self):
return self._get_subfolder("./data/", Data)
@property
def uploads(self):
return self._get_subfolder('./uploads/', Uploads)
@property
def system(self):
return self._get_subfolder("./system/", System)
def createNewSite(self, username, password, configStoreConnection=None,
directories=None, cluster=None):
res = self._get_subfolder("./createNewSite",
server.JsonPostResult,
{'username': username,
'password': password,
'configStoreConnection': configStoreConnection,
'directories': directories,
'cluster': cluster})
self.__generateToken(self.url, username, password, 60)
return res
def joinSite(self, adminURL, username, password):
res = self._get_subfolder("./joinSite",
server.JsonPostResult,
{'username': username,
'password': password,
'adminURL': adminURL})
return res
def deleteSite(self):
res = self._get_subfolder("./deleteSite",
server.JsonPostResult)
self.__token__ = None
return res
def __generateToken(self, url, username, password, expiration):
token_auth = GenerateToken(url,
username,
password,
expiration)
if token_auth._json_struct.get('status', 'ok').lower() == 'error':
raise urllib2.URLError('\n'.join(
token_auth._json_struct.get(
'messages', ['Failed.'])))
self.__token__ = token_auth.token
class Data(server.RestURL):
"""Administration URL's data store -- Geodatabases and file data"""
@property
def geodatabases(self):
return self._get_subfolder("./geodatabases/", GeoDatabases)
@property
def items(self):
return self._get_subfolder("./items/", DataItems)
class GeoDatabases(server.RestURL):
"""Server's geodatabases and GDB connections"""
pass
class HasUploads(object):
def upload(self, file, description=''):
if isinstance(file, basestring):
file = open(file, 'rb')
sub = self._get_subfolder('./upload/', server.JsonResult,
{'description': description},
{'itemFile': file})
return sub._json_struct['item']
class Uploads(server.RestURL, HasUploads):
"""Uploads URL"""
pass
class DataItems(server.RestURL, HasUploads):
"""Server's data files"""
@property
def packages(self):
return self._json_struct['packages']
class Folder(server.RestURL):
@property
def folderName(self):
return self._json_struct['folderName']
@property
def description(self):
return self._json_struct['description']
@property
def serviceNames(self):
return [service['serviceName']
for service in self._json_struct['services']]
@property
def services(self):
return [self._get_subfolder("./%s.%s/" %
(servicename['serviceName'],
servicename['type']),
Service)
for servicename in self._json_struct['services']]
def __getitem__(self, itemname):
if '/' in itemname:
itemname, rest = itemname.split('/', 1)
return self[itemname][rest]
for servicename in self._json_struct['services']:
fstrings = (servicename['serviceName'].lower(),
(servicename['serviceName'] +
"." +
servicename['type']).lower())
if itemname.lower() in fstrings:
return self._get_subfolder("./%s.%s/" %
(servicename['serviceName'],
servicename['type']),
Service)
raise KeyError(itemname)
def __iter__(self):
return iter(self.services)
class Services(Folder):
def createFolder(self, folderName, description):
raise NotImplementedError("Not implemented")
@property
def folders(self):
return [self._get_subfolder("./%s/" % foldername, Folder)
for foldername in self._json_struct['folders']
if foldername != "/"]
@property
def types(self):
return_type = self._get_subfolder("./types/", server.JsonPostResult)
return return_type._json_struct['types']
def __getitem__(self, itemname):
for foldername in self._json_struct['folders']:
if foldername.lower() == itemname.lower():
return self._get_subfolder("./%s/" % foldername, Folder)
return super(Services, self).__getitem__(itemname)
def __iter__(self):
for folder in self.folders:
for service in folder.services:
yield service
for service in super(Services, self).__iter__():
yield service
class Service(server.RestURL):
@property
def name(self):
return self._json_struct['serviceName'] + "." + self._json_struct['type']
@property
def status(self):
return self._get_subfolder("./status/",
server.JsonPostResult)._json_struct
@property
def statistics(self):
return self._get_subfolder("./statistics/",
server.JsonPostResult)._json_struct
def start(self):
return self._get_subfolder("./start/",
server.JsonPostResult)._json_struct
def stop(self):
return self._get_subfolder("./stop/",
server.JsonPostResult)._json_struct
def delete(self):
return self._get_subfolder("./delete/",
server.JsonPostResult)._json_struct
class Machine(server.RestURL):
"""Base class for a single machine on a site"""
@property
def name(self):
return self._json_struct['machineName']
@property
def admin_url(self):
return self._json_struct['adminURL']
@property
def platform(self):
return self._json_struct['platform']
def start(self):
return self._get_subfolder("./start/",
server.JsonPostResult)._json_struct
def stop(self):
return self._get_subfolder("./stop/",
server.JsonPostResult)._json_struct
def unregister(self):
return self._get_subfolder("./unregister/",
server.JsonPostResult)._json_struct
class Machines(server.RestURL):
"""Base class for a list of machines, both on a Cluster and a Site"""
__post__ = True
__machines__ = Ellipsis
@property
def _machines(self):
if self.__machines__ is Ellipsis:
path_and_attribs = [(d['machineName'], d)
for d in self._json_struct['machines']]
self.__machines__ = dict(path_and_attribs)
return self.__machines__
def keys(self):
return self._machines.keys()
def __iter__(self):
return (Admin(item['adminURL'])
for item in self._machines.itervalues())
def register(self, machine_name, admin_url=None):
return self._get_subfolder("./register/",
server.JsonPostResult,
{'machineName': machine_name,
'adminURL': admin_url})._json_struct
class ClusterMachines(Machines):
"""A list of machines participating on a cluster"""
def add(self, machine_names):
if isinstance(machine_names, basestring):
machine_names = [machine_names]
responses = [self._get_subfolder("./add/", server.JsonPostResult,
{"machineNames": m})
for m in machine_names]
return responses
def remove(self, machine_names):
if isinstance(machine_names, basestring):
machine_names = [machine_names]
responses = [self._get_subfolder("./remove/", server.JsonPostResult,
{"machineNames": m})
for m in machine_names]
return responses
class SiteMachines(Machines):
"""A list of machines on a site"""
def register(self, machineName, adminURL=None):
res = self._get_subfolder("./register/", server.JsonPostResult,
{'machineName': machineName,
'adminURL': adminURL})
@property
def machines(self):
return [self._get_subfolder("./%s/" % machinename, Machine) for
machinename in self._machines]
def __getitem__(self, itemname):
assert itemname in self._machines, "Couldn't find %s" % itemname
return self._get_subfolder('./%s/' % itemname, Machine)
class Directory(server.RestURL):
__post__ = True
class Directories(server.RestURL):
__directories__ = Ellipsis
@property
def _directories(self):
path_and_attribs = [(d['physicalPath'], d)
for d in self._json_struct['directories']]
self.__directories__ = dict(path_and_attribs)
return self.__directories__
def __contains__(self, k):
return self._directories.__contains__(k)
def __getitem__(self, k):
return self._directories.__getitem__(k)
def register(self, type, path, vpath=None):
response = self._get_subfolder('./register', server.JsonPostResult,
{'directoryType': type.upper(),
'physicalPath': path,
'virtualPath': vpath})._json_struct
def unregister(self, path):
response = self._get_subfolder('./unregister', server.JsonPostResult,
{'physicalPath': path})._json_struct
class Cluster(server.JsonResult):
__post__ = True
__lazy_fetch__ = False
__cache_request__ = True
def __eq__(self, other):
if not isinstance(other, Cluster):
return False
return self._url == other._url
@property
def machineNames(self):
if "machineNames" in self._json_struct:
return self._json_struct["machineNames"]
@property
def machines(self):
return self._get_subfolder("./machines/", ClusterMachines)
def start(self):
return self._get_subfolder('./start/',
server.JsonPostResult)._json_struct
def stop(self):
return self._get_subfolder('./stop/',
server.JsonPostResult)._json_struct
def delete(self):
return self._get_subfolder('./delete/',
server.JsonPostResult)._json_struct
def editProtocol(self, type="TCP", tcpClusterPort=-1,
multicastAddress=10, multicastPort=-1):
if type not in ("TCP", "UDP"):
raise ValueError("Got %r. Valid choices are: TCP, UDP" % type)
res = self._get_subfolder('./editProtocol', server.JsonPostResult,
{'type': type,
'tcpClusterPort': tcpClusterPort
if type == "TCP"
else None,
'multicastAddress': multicastAddress
if type == "UDP"
else None,
'multicastPort': multicastPort
if type == "UDP"
else None})
class Clusters(server.RestURL):
__post__ = True
__directories__ = Ellipsis
__cluster_cache__ = Ellipsis
@property
def _clusters(self):
if self.__cluster_cache__ is Ellipsis:
path_and_attribs = [(d['clusterName'],
self._get_subfolder('./%s/' %d['clusterName'],
Cluster))
for d in self._json_struct['clusters']]
self.__cluster_cache__ = dict(path_and_attribs)
return self.__cluster_cache__
@property
def clusterNames(self):
return [d['clusterName'] for d in self._json_struct['clusters']]
def __contains__(self, k):
if isinstance(k, int):
return k < len(self)
return self._clusters.__contains__(k)
def __getitem__(self, k):
if isinstance(k, int):
k = self.clusterNames[k]
return self._clusters.__getitem__(k)
def __len__(self):
return len(self.clusterNames)
def create(self, clusterName, type="TCP", tcpClusterPort=-1,
multicastAddress=10, multicastPort=-1):
if type not in ("TCP", "UDP"):
raise ValueError("Got %r. Valid choices are: TCP, UDP" % type)
res = self._get_subfolder('./create', server.JsonPostResult,
{'clusterName': clusterName,
'type': type,
'tcpClusterPort': tcpClusterPort
if type == "TCP"
else None,
'multicastAddress': multicastAddress
if type == "UDP"
else None,
'multicastPort': multicastPort
if type == "UDP"
else None})
return self._get_subfolder('./%s/' % clusterName, Cluster)
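# Example usage (a sketch with assumed URL, credentials, and service name; not
# part of the module): connect to the Admin API with token authentication,
# enumerate services across all folders, and stop one by its "name.type" key.
#
#   admin = Admin("http://gisserver.example.com:6080/arcgis/admin/",
#                 username="siteadmin", password="secret",
#                 generate_token=True)
#   for service in admin.services:
#       print service.name, service.status
#   admin.services["SampleWorldCities.MapServer"].stop()
#   admin.machines.register("GISSERVER2.EXAMPLE.COM",
#                           "http://gisserver2.example.com:6080/arcgis/admin")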
|
|
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import os
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import paths
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
help='The libvirt UUID of the secret for the rbd_user '
     'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
help='Mount options passed to the NFS client. See the nfs '
     'man page for details'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
raise exception.NovaException(_("Invalid target_lun"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils._get_root_helper(),
use_multipath=CONF.libvirt.iscsi_use_multipath,
device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
self.connector.disconnect_volume(connection_info['data'], None)
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISER', utils._get_root_helper(),
use_multipath=CONF.libvirt.iser_use_multipath,
device_scan_attempts=CONF.libvirt.num_iser_scan_tries,
transport=self._get_transport())
def _get_transport(self):
return 'iser'
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
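# Illustrative sketch (assumed values; not part of Nova) of the connection_info
# dict LibvirtNFSVolumeDriver consumes: connect_volume() mounts data['export']
# under nfs_mount_point_base, then records the resulting file path in
# data['device_path'], which get_config() later exposes to libvirt.
#
#   connection_info = {
#       'driver_volume_type': 'nfs',
#       'data': {
#           'export': 'nfsserver.example.com:/exports/volumes',
#           'name': 'volume-0a1b2c3d',
#           'options': '-o vers=4',   # optional extra mount options
#           'format': 'raw',          # optional; defaults to raw in get_config()
#       },
#   }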
|
|
'''
Author : Nitish Reddy Koripalli
Date : 14-11-2015
'''
import numpy as np
from ActivationFunction import ActivationFunction
from LayerStatistics import LayerStatistics
class MLP_BP:
DEBUG = False
WEIGHTS_RANDOM = 1
WEIGHTS_ZEROS = 2
WEIGHTS_ONES = 3
OUTPUT_CLASSIFICATION = 1
OUTPUT_REGRESSION = 2
TRAIN_CLASSIFICATION = 1
TRAIN_REGRESSION = 2
FORWARD_PASS_TRAIN = 1
FORWARD_PASS_TEST = 2
def __init__(self, network_design, initial_weights=WEIGHTS_RANDOM,
bias=False, learning_rate=1,
activation_alpha=0.1,
train_type=TRAIN_REGRESSION,
output_type=OUTPUT_REGRESSION):
self.network_design = network_design
self.initial_weights = initial_weights
self.layer_stats_list = []
self.bias = bias
self.learning_rate = learning_rate
self.activation_alpha = activation_alpha
self.train_type = train_type
self.output_type = output_type
# initialize network
self.__gen_network()
def __gen_network(self):
for i in range(len(self.network_design) - 1):
layer_stats = LayerStatistics()
self.layer_stats_list.append(layer_stats)
# input layer flag
if i == 0: layer_stats.is_input_layer = True
else: layer_stats.is_input_layer = False
# output layer flag
if i == len(self.network_design) - 2: layer_stats.is_output_layer = True
else: layer_stats.is_output_layer = False
# learning rate
layer_stats.LR = self.learning_rate
# input - nothing to be initialized
# weight matrix creation
input_dim = self.network_design[i]
layer_stats.input_dim = input_dim
output_dim = self.network_design[i+1]
layer_stats.output_dim = output_dim
# initialize weight matrix, taking bias into consideration
if self.initial_weights is self.WEIGHTS_RANDOM:
if self.bias: weight_matrix = np.random.rand(input_dim + 1, output_dim)
else: weight_matrix = np.random.rand(input_dim, output_dim)
elif self.initial_weights is self.WEIGHTS_ONES:
if self.bias: weight_matrix = np.ones((input_dim + 1, output_dim))
else: weight_matrix = np.ones((input_dim, output_dim))
else:
if self.bias: weight_matrix = np.zeros((input_dim + 1, output_dim))
else: weight_matrix = np.zeros((input_dim, output_dim))
layer_stats.W = weight_matrix
# local field - nothing to be initialized
# activation function objects
af_objs = np.empty((output_dim,1), dtype='object')
for i in range(output_dim):
af_obj = ActivationFunction(activation_alpha=self.activation_alpha)
af_objs[i,0] = af_obj
layer_stats.AF = af_objs
# outputs/activations - nothing to be initialized
# activation gradient - nothing to be initialized
def print_weights(self):
print "\n Network Weights"
for i in range(len(self.layer_stats_list)):
print "\nLayer :", i+1
print self.layer_stats_list[i].W
def __compute_activations(self, activation_functions, local_fields):
activations = np.empty(local_fields.shape)
for i in range(local_fields.shape[0]):
activations[i,0] = activation_functions[i,0].get_activation(local_fields[i,0])
return activations
def __compute_activation_gradients(self, activation_gradients, local_fields):
gradients = np.empty(local_fields.shape)
for i in range(local_fields.shape[0]):
gradients[i,0] = activation_gradients[i,0].get_gradient(local_fields[i,0])
return gradients
def __apply_output_type(self, layer_stats, fp_type):
# if layer is output layer
if layer_stats.is_output_layer:
# if training and if training is supposed to use classification
if fp_type is self.FORWARD_PASS_TRAIN and self.train_type is self.TRAIN_CLASSIFICATION:
return np.array(layer_stats.A > 0.5, dtype='float')
# if training and if training is supposed to use regression
elif fp_type is self.FORWARD_PASS_TRAIN and self.train_type is self.TRAIN_REGRESSION:
return layer_stats.A
# if testing and output is supposed to use classification
elif fp_type is self.FORWARD_PASS_TEST and self.output_type is self.OUTPUT_CLASSIFICATION:
return np.array(layer_stats.A > 0.5, dtype='float')
# if testing and output is supposed to use regression
else:
return layer_stats.A
# if the layer is not the output layer, just use the continuous activation values
else:
return layer_stats.A
def forward_pass(self, train_inputs, fp_type):
if self.DEBUG:
print "\nForward Pass:"
for i in range(len(self.layer_stats_list)):
if self.DEBUG:
print "\nLayer :", i + 1
layer_stats = self.layer_stats_list[i]
# set train input only to input layer
if layer_stats.is_input_layer:
if self.bias: layer_stats.X = np.vstack((np.ones((1,1)), train_inputs))
else: layer_stats.X = train_inputs
if self.DEBUG:
print "is input layer"
print "layer_stats.X.shape :", layer_stats.X.shape
# calculate local fields
layer_stats.V = np.dot(layer_stats.W.T, layer_stats.X)
# calculate activations
layer_stats.A = self.__compute_activations(layer_stats.AF, layer_stats.V)
# calculate output
layer_stats.Y = self.__apply_output_type(layer_stats, fp_type)
if self.DEBUG:
print "layer_stats.V.shape :", layer_stats.V.shape
print "layer_stats.Y.shape :", layer_stats.Y.shape
# set next layer inputs as current layer outputs
if not layer_stats.is_output_layer:
next_layer_stats = self.layer_stats_list[i+1]
if self.bias: next_layer_stats.X = np.vstack((np.ones((1,1)),layer_stats.Y.copy()))
else: next_layer_stats.X = layer_stats.Y.copy()
def backward_pass(self, target):
if self.DEBUG:
print "\nBackward Pass"
for i in reversed(range(len(self.layer_stats_list))):
layer_stats = self.layer_stats_list[i]
if self.DEBUG:
print "\nLayer :", i
if layer_stats.is_output_layer:
if self.DEBUG:
print "Output Layer"
# calculate the error
layer_stats.E = target - layer_stats.Y
# calculate activation error
layer_stats.AE = target - layer_stats.A
# calculate the activation gradient
layer_stats.G = self.__compute_activation_gradients(layer_stats.AF, layer_stats.V)
# calculate deltas
layer_stats.D = layer_stats.E * layer_stats.G
# update weights
layer_stats.W += layer_stats.LR * np.dot(layer_stats.X, layer_stats.D.T)
# compute DW = W . D, the error propagated back to the previous layer (skip the bias row when bias is used)
if self.bias: layer_stats.DW = np.dot(layer_stats.W[1:,:], layer_stats.D)
else: layer_stats.DW = np.dot(layer_stats.W, layer_stats.D)
if self.DEBUG:
print "layer_stats.E.shape :", layer_stats.E.shape
print "layer_stats.G.shape :", layer_stats.G.shape
print "layer_stats.D.shape :", layer_stats.D.shape
print "layer_stats.W.shape :", layer_stats.W.shape
print "layer_stats.DW.shape :", layer_stats.DW.shape
else:
# calculate activation gradients
layer_stats.G = self.__compute_activation_gradients(layer_stats.AF, layer_stats.V)
# calculate deltas
next_layer_stats = self.layer_stats_list[i+1]
layer_stats.D = layer_stats.G * next_layer_stats.DW
# update weights
layer_stats.W += layer_stats.LR * np.dot(layer_stats.X, layer_stats.D.T) # outer product
# calculate DW
if self.bias: layer_stats.DW = np.dot(layer_stats.W[1:,:], layer_stats.D)
else: layer_stats.DW = np.dot(layer_stats.W, layer_stats.D)
if self.DEBUG:
print "Non Output Layer"
print "layer_stats.G.shape :", layer_stats.G.shape
print "layer_stats.D.shape :", layer_stats.D.shape
print "layer_stats.W.shape :", layer_stats.W.shape
print "layer_stats.DW.shape :", layer_stats.DW.shape
def train(self, inputs, targets, DEBUG=True):
if DEBUG:
print "Pre-Train Weights:\n", self.layer_stats_list[-1].W
for i in range(inputs.shape[0]):
input_ = inputs[i,:][:,np.newaxis]
target_ = targets[i,:][:,np.newaxis]
self.forward_pass(input_, self.FORWARD_PASS_TRAIN)
self.backward_pass(target_)
if DEBUG:
print "\nTrain Iteration :", i+1
#print "Output :", self.layer_stats_list[-1].Y
#print "Weights:\n", self.layer_stats_list[-1].W
print "Error:", self.layer_stats_list[-1].E
#print "Activation Error:", self.layer_stats_list[-1].AE
def test(self, inputs, DEBUG=True):
if DEBUG:
print "Testing"
for i in range(inputs.shape[0]):
input_ = inputs[i,:][:,np.newaxis]
self.forward_pass(input_, self.FORWARD_PASS_TEST)
if DEBUG:
print "\nTest Iteration :", i+1
print "Test Input :", input_.T
print "Activation :", self.layer_stats_list[-1].A
print "Output :", self.layer_stats_list[-1].Y
|
|
#!/usr/bin/env python3
import curses
import shutil
import traceback
from argparse import ArgumentParser
from time import sleep
from threading import Thread, Semaphore
import requests
__version__ = '0.2.4'
tsize = shutil.get_terminal_size()
class JasperCoreController:
def __init__(self, args):
self.__HOST = args.connect
self.__PORT = 5237
self.__NEED_EXIT = False
self.__sem = Semaphore()
self.__poll_delay = 5 # seconds
self.__footbar_message_remaining = 10000
self.modules = dict()
self.focus = -1
self.__refresh_thread = None
curses.wrapper(self.main)
@property
def BASE_URL(self):
return 'http://{host}:{port}'.format(host=self.__HOST, port=self.__PORT)
def refresh_modules(self, stdscr, loop):
while True:
try:
response = requests.get(self.BASE_URL + '/getall')
response.raise_for_status()
self.__sem.acquire()
self.modules = response.json()
self.__sem.release()
if not loop:
return
sleep(self.__poll_delay)
except requests.exceptions.HTTPError:
stdscr.clear()
stdscr.addstr(0, 0, '{} - {}'.format(response.status_code, response.reason), curses.A_STANDOUT)
stdscr.refresh()
sleep(3)
exit(1)
except:
stdscr.clear()
stdscr.addstr(0, 0, str(traceback.format_exc()), curses.A_STANDOUT)
stdscr.refresh()
sleep(3)
exit(1)
def key_handler(self, key, stdscr):
if key == curses.KEY_UP:
self.focus -= 1
if self.focus <= -1:
self.focus = -1
elif key == curses.KEY_DOWN:
self.focus += 1
self.__sem.acquire()
if self.focus >= len(self.modules):
self.focus = len(self.modules) - 1
self.__sem.release()
elif key == ord('q'):
self.__NEED_EXIT = True
elif key == ord('s'):
# start/stop
self.toggle_module(stdscr)
def toggle_module(self, stdscr):
if self.focus > -1:
curses.flash()
try:
self.__sem.acquire()
response = requests.get(self.BASE_URL + '/status/{}'.format(sorted(self.modules)[self.focus]))
response.raise_for_status()
data = response.json()
if data['status'] == 'stopped':
response = requests.get(self.BASE_URL + '/start/{}'.format(sorted(self.modules)[self.focus]))
elif data['status'] == 'running':
response = requests.get(self.BASE_URL + '/stop/{}'.format(sorted(self.modules)[self.focus]))
else:
stdscr.addstr(tsize.lines - 1, 0, 'ERROR in data[\'status\']', curses.A_STANDOUT)
stdscr.refresh()
self.__footbar_message_remaining = 10000
return
response.raise_for_status()
data = response.json()
if data['status'] == 'running':
string = '{} is running with pid {}'.format(sorted(self.modules)[self.focus], data['pid'])
elif data['status'] == 'stopped':
string = '{} stopped'.format(sorted(self.modules)[self.focus])
elif data['status'] == 'error':
string = data['message']
stdscr.addstr(tsize.lines - 1, 0, string, curses.A_STANDOUT)
stdscr.refresh()
self.__footbar_message_remaining = 10000
except requests.exceptions.HTTPError:
stdscr.clear()
stdscr.addstr(0, 0, '{} - {}'.format(response.status_code, response.reason), curses.A_STANDOUT)
stdscr.refresh()
sleep(3)
exit(1)
except:
stdscr.clear()
stdscr.addstr(0, 0, str(traceback.format_exc()), curses.A_STANDOUT)
stdscr.refresh()
sleep(3)
exit(1)
self.__sem.release()
def print_table_content(self, stdscr):
line = 4
self.__sem.acquire()
for module in sorted(self.modules):
module_str = '{}'.format(module)
while len(module_str) < tsize.columns / 4 - 1:
module_str += ' '
if self.modules[module]['pid'] >= 0:
module_str += '{}'.format(self.modules[module]['pid'])
else:
module_str += '-'
while len(module_str) < tsize.columns / 2 - 12:
module_str += ' '
if self.modules[module]['cpu'] >= 0:
module_str += '{}'.format(self.modules[module]['cpu'])
else:
module_str += '-'
while len(module_str) < tsize.columns / 2:
module_str += ' '
if self.modules[module]['mem'] >= 0:
module_str += '{}'.format(self.modules[module]['mem'])
else:
module_str += '-'
while len(module_str) < tsize.columns / 2 + 15:
module_str += ' '
module_str += '{}'.format(self.modules[module]['start_on_boot'])
while len(module_str) < tsize.columns - len('Restart-on-crash'):
module_str += ' '
module_str += '{}'.format(self.modules[module]['restart_on_crash'])
while len(module_str) < tsize.columns:
module_str += ' '
if line - 4 == self.focus:
stdscr.addstr(line, 0, module_str, curses.A_STANDOUT)
else:
stdscr.addstr(line, 0, module_str)
line += 1
#stdscr.addstr(tsize.lines - 1, 0, ' '.join([' ' for i in range(tsize.columns - 2)]))
self.__footbar_message_remaining -= 1
if self.__footbar_message_remaining < 0:
self.__footbar_message_remaining = -1
stdscr.delch(tsize.lines - 1, 0)
self.__sem.release()
stdscr.refresh()
def main(self, stdscr):
curses.curs_set(0)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_CYAN)
stdscr.nodelay(1)
self.refresh_modules(stdscr, False) # Used to check connection
# Start refresh thread
self.__refresh_thread = Thread(target=self.refresh_modules, args=(stdscr, True))
self.__refresh_thread.daemon = True
self.__refresh_thread.start()
# Set up top string
top_string = 'JasperCore Controller v{} $ s Start/Stop | c Restart-on-crash [N/A] | q Quit'.format(__version__)
while len(top_string) < tsize.columns:
top_string = top_string.replace('$', '$ ')
top_string = top_string.replace('$', ' ')
stdscr.clear()
stdscr.addstr(0, 0, top_string, curses.color_pair(1))
stdscr.addstr(2, 0, 'Module', curses.A_BOLD)
stdscr.addstr(2, int(tsize.columns / 4) - 1, 'PID', curses.A_BOLD)
stdscr.addstr(2, int(tsize.columns / 2) - 12, 'CPU', curses.A_BOLD)
stdscr.addstr(2, int(tsize.columns / 2), 'MEMORY', curses.A_BOLD)
stdscr.addstr(2, int(tsize.columns / 2) + 15, 'Start-on-boot', curses.A_BOLD)
stdscr.addstr(2, tsize.columns - len('restart on crash'), 'Restart-on-crash', curses.A_BOLD)
stdscr.addstr(3, 0, ''.join(['-' for i in range(tsize.columns)]))
stdscr.refresh()
# Main interface
while True:
self.print_table_content(stdscr)
c = stdscr.getch()
self.key_handler(c, stdscr)
if self.__NEED_EXIT:
break
def main():
parser = ArgumentParser('jasperctl')
parser.add_argument('-c', '--connect', type=str, default='localhost')
args = parser.parse_args()
JasperCoreController(args)
if __name__ == '__main__':
main()
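# For reference, a sketch of the JSON this controller expects from the
# JasperCore HTTP API (field names inferred from the accesses above; module
# names and values are made up):
#
#   GET /getall ->
#       {"clock":   {"pid": 1234, "cpu": 0.3, "mem": 1.2,
#                    "start_on_boot": true, "restart_on_crash": false},
#        "weather": {"pid": -1, "cpu": -1, "mem": -1,
#                    "start_on_boot": false, "restart_on_crash": false}}
#
#   GET /status/<module> -> {"status": "running", "pid": 1234}
#                        or {"status": "stopped"}
#                        or {"status": "error", "message": "..."}
#
#   GET /start/<module> and /stop/<module> return the same status document.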
|
|
"""A class that performs HTTP-01 challenges for Apache"""
import errno
import logging
from typing import List
from typing import Set
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
from certbot.plugins import common
from certbot_apache._internal.obj import VirtualHost # pylint: disable=unused-import
from certbot_apache._internal.parser import get_aug_path
logger = logging.getLogger(__name__)
class ApacheHttp01(common.ChallengePerformer):
"""Class that performs HTTP-01 challenges within the Apache configurator."""
CONFIG_TEMPLATE22_PRE = """\
RewriteEngine on
RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [L]
"""
CONFIG_TEMPLATE22_POST = """\
<Directory {0}>
Order Allow,Deny
Allow from all
</Directory>
<Location /.well-known/acme-challenge>
Order Allow,Deny
Allow from all
</Location>
"""
CONFIG_TEMPLATE24_PRE = """\
RewriteEngine on
RewriteRule ^/\\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ {0}/$1 [END]
"""
CONFIG_TEMPLATE24_POST = """\
<Directory {0}>
Require all granted
</Directory>
<Location /.well-known/acme-challenge>
Require all granted
</Location>
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.challenge_conf_pre = os.path.join(
self.configurator.conf("challenge-location"),
"le_http_01_challenge_pre.conf")
self.challenge_conf_post = os.path.join(
self.configurator.conf("challenge-location"),
"le_http_01_challenge_post.conf")
self.challenge_dir = os.path.join(
self.configurator.config.work_dir,
"http_challenges")
self.moded_vhosts: Set[VirtualHost] = set()
def perform(self):
"""Perform all HTTP-01 challenges."""
if not self.achalls:
return []
# Save any changes to the configuration as a precaution
# About to make temporary changes to the config
self.configurator.save("Changes before challenge setup", True)
self.configurator.ensure_listen(str(
self.configurator.config.http01_port))
self.prepare_http01_modules()
responses = self._set_up_challenges()
self._mod_config()
# Save reversible changes
self.configurator.save("HTTP Challenge", True)
return responses
def prepare_http01_modules(self):
"""Make sure that we have the needed modules available for http01"""
if self.configurator.conf("handle-modules"):
needed_modules = ["rewrite"]
if self.configurator.version < (2, 4):
needed_modules.append("authz_host")
else:
needed_modules.append("authz_core")
for mod in needed_modules:
if mod + "_module" not in self.configurator.parser.modules:
self.configurator.enable_mod(mod, temp=True)
def _mod_config(self):
selected_vhosts: List[VirtualHost] = []
http_port = str(self.configurator.config.http01_port)
# Search for VirtualHosts matching by name
for chall in self.achalls:
selected_vhosts += self._matching_vhosts(chall.domain)
# Ensure that we have one or more VirtualHosts that we can continue
# with. (one that listens to port configured with --http-01-port)
found = False
for vhost in selected_vhosts:
if any(a.is_wildcard() or a.get_port() == http_port for a in vhost.addrs):
found = True
# If there's at least one eligible VirtualHost, also add all unnamed VirtualHosts
# because they might match at runtime (#8890)
if found:
selected_vhosts += self._unnamed_vhosts()
# Otherwise, add every Virtualhost which listens on the right port
else:
selected_vhosts += self._relevant_vhosts()
# Add the challenge configuration
for vh in selected_vhosts:
self._set_up_include_directives(vh)
self.configurator.reverter.register_file_creation(
True, self.challenge_conf_pre)
self.configurator.reverter.register_file_creation(
True, self.challenge_conf_post)
if self.configurator.version < (2, 4):
config_template_pre = self.CONFIG_TEMPLATE22_PRE
config_template_post = self.CONFIG_TEMPLATE22_POST
else:
config_template_pre = self.CONFIG_TEMPLATE24_PRE
config_template_post = self.CONFIG_TEMPLATE24_POST
config_text_pre = config_template_pre.format(self.challenge_dir)
config_text_post = config_template_post.format(self.challenge_dir)
logger.debug("writing a pre config file with text:\n %s", config_text_pre)
with open(self.challenge_conf_pre, "w") as new_conf:
new_conf.write(config_text_pre)
logger.debug("writing a post config file with text:\n %s", config_text_post)
with open(self.challenge_conf_post, "w") as new_conf:
new_conf.write(config_text_post)
def _matching_vhosts(self, domain):
"""Return all VirtualHost objects that have the requested domain name or
a wildcard name that would match the domain in ServerName or ServerAlias
directive.
"""
matching_vhosts = []
for vhost in self.configurator.vhosts:
if self.configurator.domain_in_names(vhost.get_names(), domain):
# domain_in_names also matches the exact names, so no need
# to check "domain in vhost.get_names()" explicitly here
matching_vhosts.append(vhost)
return matching_vhosts
def _relevant_vhosts(self):
http01_port = str(self.configurator.config.http01_port)
relevant_vhosts = []
for vhost in self.configurator.vhosts:
if any(a.is_wildcard() or a.get_port() == http01_port for a in vhost.addrs):
if not vhost.ssl:
relevant_vhosts.append(vhost)
if not relevant_vhosts:
raise errors.PluginError(
"Unable to find a virtual host listening on port {0} which is"
" currently needed for Certbot to prove to the CA that you"
" control your domain. Please add a virtual host for port"
" {0}.".format(http01_port))
return relevant_vhosts
def _unnamed_vhosts(self) -> List[VirtualHost]:
"""Return all VirtualHost objects with no ServerName"""
return [vh for vh in self.configurator.vhosts if vh.name is None]
def _set_up_challenges(self):
if not os.path.isdir(self.challenge_dir):
old_umask = filesystem.umask(0o022)
try:
filesystem.makedirs(self.challenge_dir, 0o755)
except OSError as exception:
if exception.errno not in (errno.EEXIST, errno.EISDIR):
raise errors.PluginError(
"Couldn't create root for http-01 challenge")
finally:
filesystem.umask(old_umask)
responses = []
for achall in self.achalls:
responses.append(self._set_up_challenge(achall))
return responses
def _set_up_challenge(self, achall):
response, validation = achall.response_and_validation()
name = os.path.join(self.challenge_dir, achall.chall.encode("token"))
self.configurator.reverter.register_file_creation(True, name)
with open(name, 'wb') as f:
f.write(validation.encode())
filesystem.chmod(name, 0o644)
return response
def _set_up_include_directives(self, vhost):
"""Includes override configuration to the beginning and to the end of
VirtualHost. Note that this include isn't added to Augeas search tree"""
if vhost not in self.moded_vhosts:
logger.debug(
"Adding a temporary challenge validation Include for name: %s in: %s",
vhost.name, vhost.filep)
self.configurator.parser.add_dir_beginning(
vhost.path, "Include", self.challenge_conf_pre)
self.configurator.parser.add_dir(
vhost.path, "Include", self.challenge_conf_post)
if not vhost.enabled:
self.configurator.parser.add_dir(
get_aug_path(self.configurator.parser.loc["default"]),
"Include", vhost.filep)
self.moded_vhosts.add(vhost)
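# For reference, with Certbot's default work directory of /var/lib/letsencrypt
# (an assumption; the actual value comes from self.configurator.config.work_dir)
# challenge_dir is /var/lib/letsencrypt/http_challenges and the rendered
# Apache >= 2.4 snippets written by _mod_config() look like:
#
#   le_http_01_challenge_pre.conf:
#       RewriteEngine on
#       RewriteRule ^/\.well-known/acme-challenge/([A-Za-z0-9-_=]+)$ /var/lib/letsencrypt/http_challenges/$1 [END]
#
#   le_http_01_challenge_post.conf:
#       <Directory /var/lib/letsencrypt/http_challenges>
#           Require all granted
#       </Directory>
#       <Location /.well-known/acme-challenge>
#           Require all granted
#       </Location>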
|
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Layers from Torch wrapped in Keras style.
import sys
from ..engine.topology import ZooKerasLayer
if sys.version >= '3':
long = int
unicode = str
class Select(ZooKerasLayer):
"""
Select an index of the input in the given dim and return the subset part.
The batch dimension needs to be unchanged.
The returned tensor has one less dimension: the dimension dim is removed.
As a result, it is not possible to select() on a 1D tensor.
For example, if input is: [[1 2 3], [4 5 6]]
Select(1, 1) will give output [2 5]
Select(1, -1) will give output [3 6]
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
dim: The dimension to select. 0-based index. Cannot select the batch dimension.
-1 means the last dimension of the input.
index: The index to select within the chosen dimension. 0-based index.
-1 means the last index along that dimension.
input_shape: A shape tuple, not including batch.
name: String to set the name of the wrapper.
If not specified, its name will by default be a generated string.
>>> select = Select(0, -1, input_shape=(3, 4), name="select1")
creating: createZooKerasSelect
"""
def __init__(self, dim, index, input_shape=None, **kwargs):
super(Select, self).__init__(None,
dim,
index,
list(input_shape) if input_shape else None,
**kwargs)
class Narrow(ZooKerasLayer):
"""
Narrow the input along a given dimension; the number of dimensions is not reduced.
The batch dimension needs to be unchanged.
For example, if input is: [[1 2 3], [4 5 6]]
Narrow(1, 1, 2) will give output [[2 3], [5 6]]
Narrow(1, 2, -1) will give output [[3], [6]]
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
dim: The dimension to narrow. 0-based index. Cannot narrow the batch dimension.
-1 means the last dimension of the input.
offset: Non-negative integer. The start index on the given dimension. 0-based index.
length: The length to narrow. Default is 1.
Can use a negative length such as -1 in the case where input size is unknown.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> narrow = Narrow(1, 3, input_shape=(5, 6, 7), name="narrow1")
creating: createZooKerasNarrow
"""
def __init__(self, dim, offset, length=1, input_shape=None, **kwargs):
super(Narrow, self).__init__(None,
dim,
offset,
length,
list(input_shape) if input_shape else None,
**kwargs)
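# Illustrative sketch: on an input with at least two dimensions, Narrow(1, offset, length)
# with a non-negative length behaves like the NumPy slice below. This helper is only
# an analogy for documentation purposes, not the backend implementation
# (negative lengths, meaning "to the end", are not handled here).
def _narrow_numpy_analogy_example(x, offset, length):
    """Slice dimension 1 of a NumPy array the way Narrow(1, offset, length) would."""
    import numpy as np  # local import: the analogy is optional
    x = np.asarray(x)
    return x[:, offset:offset + length]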
class Squeeze(ZooKerasLayer):
"""
Delete the singleton dimension(s).
The batch dimension needs to be unchanged.
For example, if input has size (2, 1, 3, 4, 1):
Squeeze(1) will give output size (2, 3, 4, 1)
Squeeze() will give output size (2, 3, 4)
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
dim: The dimension(s) to squeeze. Can be either int or tuple of int.
0-based index. Cannot squeeze the batch dimension.
The selected dimensions must be singleton, i.e. having size 1.
Default is None, and in this case all the non-batch singleton dimensions will be deleted.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> squeeze1 = Squeeze(1, input_shape=(1, 4, 5))
creating: createZooKerasSqueeze
>>> squeeze2 = Squeeze(input_shape=(1, 8, 1, 4))
creating: createZooKerasSqueeze
>>> squeeze3 = Squeeze((1, 2), input_shape=(1, 1, 1, 32))
creating: createZooKerasSqueeze
"""
def __init__(self, dim=None, input_shape=None, **kwargs):
if isinstance(dim, int):
dim = (dim, )
super(Squeeze, self).__init__(None,
dim,
list(input_shape) if input_shape else None,
**kwargs)
class AddConstant(ZooKerasLayer):
"""
Add a (non-learnable) scalar constant to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
constant: The scalar constant to be added.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> addconstant = AddConstant(1, input_shape=(1, 4, 5))
creating: createZooKerasAddConstant
"""
def __init__(self, constant, input_shape=None, **kwargs):
super(AddConstant, self).__init__(None,
float(constant),
list(input_shape) if input_shape else None,
**kwargs)
class MulConstant(ZooKerasLayer):
"""
Multiply the input by a (non-learnable) scalar constant.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
constant: The scalar constant to be multiplied.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> mulconstant = MulConstant(2.2, input_shape=(3, 4))
creating: createZooKerasMulConstant
"""
def __init__(self, constant, input_shape=None, **kwargs):
super(MulConstant, self).__init__(None,
float(constant),
list(input_shape) if input_shape else None,
**kwargs)
class LRN2D(ZooKerasLayer):
"""
Local Response Normalization between different feature maps.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
alpha: Float. The scaling parameter. Default is 0.0001.
k: Float. A constant.
beta: Float. The exponent. Default is 0.75.
n: The number of channels to sum over.
dim_ordering: Format of input data. Either 'th' (Channel First) or 'tf' (Channel Last).
Default is 'th'.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> lrn2d = LRN2D(1e-3, 1.2, 0.4, 4, dim_ordering="tf", input_shape=(4, 5, 6))
creating: createZooKerasLRN2D
"""
def __init__(self, alpha=1e-4, k=1.0, beta=0.75, n=5,
dim_ordering="th", input_shape=None, **kwargs):
super(LRN2D, self).__init__(None,
float(alpha),
float(k),
float(beta),
n,
dim_ordering,
list(input_shape) if input_shape else None,
**kwargs)
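# For reference (the standard across-channel LRN formula, stated here as a hedged
# note rather than the exact backend equation): each input value is divided by
#     (k + alpha / n * sum_j x_j ** 2) ** beta
# where the sum runs over the n feature maps centred on the current one.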
class ShareConvolution2D(ZooKerasLayer):
"""
Applies a 2D convolution over an input image composed of several input planes.
You can also use ShareConv2D as an alias of this layer.
Data format currently supported for this layer is dim_ordering='th' (Channel First).
The input of this layer should be 4D.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
e.g. input_shape=(3, 128, 128) for 128x128 RGB pictures.
# Arguments
nb_filter: Number of convolution filters to use.
nb_row: Number of rows in the convolution kernel.
nb_col: Number of cols in the convolution kernel.
init: String representation of the initialization method for the weights of the layer.
Default is 'glorot_uniform'.
activation: String representation of the activation function to use
(such as 'relu' or 'sigmoid'). Default is None.
subsample: Int tuple of length 2 corresponding to the step of the convolution in the
height and width dimension. Also called strides elsewhere. Default is (1, 1).
pad_h: The additional zeros added to the height dimension. Default is 0.
pad_w: The additional zeros added to the width dimension. Default is 0.
propagate_back: Whether to propagate gradient back. Default is True.
dim_ordering: Format of input data. Only 'th' (Channel First) is supported for now.
W_regularizer: An instance of [[Regularizer]] (e.g. L1 or L2 regularization),
applied to the input weights matrices. Default is None.
b_regularizer: An instance of [[Regularizer]], applied to the bias. Default is None.
bias: Whether to include a bias (i.e. make the layer affine rather than linear).
Default is True.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> shareconv2d = ShareConvolution2D(32, 3, 4, activation="tanh", input_shape=(3, 128, 128))
creating: createZooKerasShareConvolution2D
"""
def __init__(self, nb_filter, nb_row, nb_col, init="glorot_uniform",
activation=None, subsample=(1, 1), pad_h=0, pad_w=0, propagate_back=True,
dim_ordering="th", W_regularizer=None, b_regularizer=None,
bias=True, input_shape=None, **kwargs):
super(ShareConvolution2D, self).__init__(None,
nb_filter,
nb_row,
nb_col,
init,
activation,
subsample,
pad_h,
pad_w,
propagate_back,
dim_ordering,
W_regularizer,
b_regularizer,
bias,
list(input_shape) if input_shape else None,
**kwargs)
ShareConv2D = ShareConvolution2D
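# Hedged helper for documentation: under the usual convolution arithmetic, and
# assuming `pad` zeros are added on each side of the dimension, the spatial
# output size of ShareConvolution2D can be estimated as below. The helper and
# its name are illustrative additions, not part of the backend API.
def _conv_output_length_example(input_length, kernel_length, stride, pad):
    """Output height/width of a convolution, e.g. (128 + 0 - 3) // 1 + 1 == 126."""
    return (input_length + 2 * pad - kernel_length) // stride + 1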
class CAdd(ZooKerasLayer):
"""
This layer has a bias with given size.
The bias will be added element-wise to the input.
If the element number of the bias matches the input, a simple element-wise addition
will be done.
Otherwise, the bias will be expanded to the same size as the input.
Expanding means repeating along the unmatched singleton dimensions (if an
unmatched dimension isn't a singleton dimension, an error will be raised).
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
size: The size of the bias.
b_regularizer: An instance of [[Regularizer]], applied to the bias. Default is None.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> cadd = CAdd((2, 1), input_shape=(3, ))
creating: createZooKerasCAdd
"""
def __init__(self, size, b_regularizer=None, input_shape=None, **kwargs):
super(CAdd, self).__init__(None,
size,
b_regularizer,
list(input_shape) if input_shape else None,
**kwargs)
class CMul(ZooKerasLayer):
"""
This layer has a weight with given size.
The weight will be multiplied element-wise to the input.
If the element number of the weight matches the input,
a simple element-wise multiplication will be done.
Otherwise, the weight will be expanded to the same size as the input.
Expanding means repeating along the unmatched singleton dimensions (if an
unmatched dimension isn't a singleton dimension, an error will be raised).
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
size: The size of the weight.
W_regularizer: An instance of [[Regularizer]] (e.g. L1 or L2 regularization),
applied to the weight matrices. Default is None.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> cmul = CMul((2, 1), input_shape=(3, ))
creating: createZooKerasCMul
"""
def __init__(self, size, W_regularizer=None, input_shape=None, **kwargs):
super(CMul, self).__init__(None,
size,
W_regularizer,
list(input_shape) if input_shape else None,
**kwargs)
class Exp(ZooKerasLayer):
"""
Applies element-wise exp to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> exp = Exp(input_shape=(2, 3, 4))
creating: createZooKerasExp
"""
def __init__(self, input_shape=None, **kwargs):
super(Exp, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class Identity(ZooKerasLayer):
"""
Identity simply passes the input through to the output.
It is useful in parallel containers to obtain the original input.
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> identity = Identity(input_shape=(3, ))
creating: createZooKerasIdentity
"""
def __init__(self, input_shape=None, **kwargs):
super(Identity, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class Log(ZooKerasLayer):
"""
Applies a log transformation to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> log = Log(input_shape=(4, 8, 8))
creating: createZooKerasLog
"""
def __init__(self, input_shape=None, **kwargs):
super(Log, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class Mul(ZooKerasLayer):
"""
Multiply a single scalar factor to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> mul = Mul(input_shape=(3, 4, 5))
creating: createZooKerasMul
"""
def __init__(self, input_shape=None, **kwargs):
super(Mul, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class Power(ZooKerasLayer):
"""
Applies an element-wise power operation with scale and shift to the input.
f(x) = (shift + scale * x) ** power
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
power: The exponent.
scale: The scale parameter. Default is 1.
shift: The shift parameter. Default is 0.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> power = Power(3, input_shape=(3, ))
creating: createZooKerasPower
"""
def __init__(self, power, scale=1, shift=0, input_shape=None, **kwargs):
super(Power, self).__init__(None,
float(power),
float(scale),
float(shift),
list(input_shape) if input_shape else None,
**kwargs)
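# Scalar sketch of what Power applies element-wise (illustrative addition only):
def _power_scalar_example(x, power, scale=1.0, shift=0.0):
    """Return (shift + scale * x) ** power, e.g. Power(2, scale=2, shift=1) maps 3 to 49."""
    return (shift + scale * x) ** power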
class Scale(ZooKerasLayer):
"""
Scale is the combination of CMul and CAdd.
Computes the element-wise product of the input and the weight, with the shape
of the weight expanded to match the shape of the input. Similarly, the bias is
expanded and added element-wise.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
size: Size of the weight and bias.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> scale = Scale((2, 1), input_shape=(3, ))
creating: createZooKerasScale
"""
def __init__(self, size, input_shape=None, **kwargs):
super(Scale, self).__init__(None,
size,
list(input_shape) if input_shape else None,
**kwargs)
class Sqrt(ZooKerasLayer):
"""
Applies an element-wise square root operation to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> sqrt = Sqrt(input_shape=(3, ))
creating: createZooKerasSqrt
"""
def __init__(self, input_shape=None, **kwargs):
super(Sqrt, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class Square(ZooKerasLayer):
"""
Applies an element-wise square operation to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> square = Square(input_shape=(5, ))
creating: createZooKerasSquare
"""
def __init__(self, input_shape=None, **kwargs):
super(Square, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class HardShrink(ZooKerasLayer):
"""
Applies the hard shrinkage function element-wise to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
value: The threshold value. Default is 0.5.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> hardshrink = HardShrink(input_shape=(2, 4, 8))
creating: createZooKerasHardShrink
"""
def __init__(self, value=0.5, input_shape=None, **kwargs):
super(HardShrink, self).__init__(None,
float(value),
list(input_shape) if input_shape else None,
**kwargs)
class HardTanh(ZooKerasLayer):
"""
Applies the hard tanh function element-wise to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
min_value: The minimum threshold value. Default is -1.
max_value: The maximum threshold value. Default is 1.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> hardtanh = HardTanh(input_shape=(3, 4))
creating: createZooKerasHardTanh
"""
def __init__(self, min_value=-1, max_value=1, input_shape=None, **kwargs):
super(HardTanh, self).__init__(None,
float(min_value),
float(max_value),
list(input_shape) if input_shape else None,
**kwargs)
class Negative(ZooKerasLayer):
"""
Computes the negative value of each element of the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> negative = Negative(input_shape=(4, 5, 8))
creating: createZooKerasNegative
"""
def __init__(self, input_shape=None, **kwargs):
super(Negative, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
class PReLU(ZooKerasLayer):
"""
Applies parametric ReLU, where parameter varies the slope of the negative part.
Notice: Please don't use weight decay on this.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
n_output_plane: Input map number. Default is 0,
which means using PReLU in its shared version with only one parameter.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> prelu = PReLU(input_shape=(3, 4, 8, 8))
creating: createZooKerasPReLU
"""
def __init__(self, n_output_plane=0, input_shape=None, **kwargs):
super(PReLU, self).__init__(None,
n_output_plane,
list(input_shape) if input_shape else None,
**kwargs)
class RReLU(ZooKerasLayer):
"""
Applies the randomized leaky rectified linear unit element-wise to the input.
In the training mode, negative inputs are multiplied by a factor drawn
from a uniform random distribution U(l, u).
In the evaluation mode, an RReLU behaves like a LeakyReLU with a constant mean
factor a = (l + u) / 2.
If l == u, an RReLU essentially becomes a LeakyReLU.
Regardless of whether it operates in in-place mode, an RReLU will internally
allocate an input-sized noise tensor to store the random factors for negative inputs.
For reference see [Empirical Evaluation of Rectified Activations in Convolutional
Network](http://arxiv.org/abs/1505.00853).
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
lower: Lower boundary of the uniform random distribution. Default is 1.0/8.
upper: Upper boundary of the uniform random distribution. Default is 1.0/3.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> rrelu = RReLU(input_shape=(3, 4))
creating: createZooKerasRReLU
"""
def __init__(self, lower=1.0/8, upper=1.0/3, input_shape=None, **kwargs):
super(RReLU, self).__init__(None,
float(lower),
float(upper),
list(input_shape) if input_shape else None,
**kwargs)
class SoftShrink(ZooKerasLayer):
"""
Applies the soft shrinkage function element-wise to the input.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
value: The threshold value. Default is 0.5.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> softshrink = SoftShrink(input_shape=(4, 4, 8, 8))
creating: createZooKerasSoftShrink
"""
def __init__(self, value=0.5, input_shape=None, **kwargs):
super(SoftShrink, self).__init__(None,
float(value),
list(input_shape) if input_shape else None,
**kwargs)
class WithinChannelLRN2D(ZooKerasLayer):
"""
The local response normalization layer performs a kind of "lateral inhibition"
by normalizing over local input regions. The local regions extend spatially,
but are in separate channels (i.e., they have shape 1 x size x size).
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
size: The side length of the square region to sum over. Default is 5.
alpha: The scaling parameter. Default is 1.0.
beta: The exponent. Default is 0.75.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> withinchannellrn2d = WithinChannelLRN2D(input_shape=(2, 3, 8, 8))
creating: createZooKerasWithinChannelLRN2D
"""
def __init__(self, size=5, alpha=1.0, beta=0.75, input_shape=None, **kwargs):
super(WithinChannelLRN2D, self).__init__(None,
size,
float(alpha),
float(beta),
list(input_shape) if input_shape else None,
**kwargs)
class BinaryThreshold(ZooKerasLayer):
"""
Threshold the input.
If an input element is smaller than the threshold value,
it will be replaced by 0; otherwise, it will be replaced by 1.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
value: The threshold value to compare with. Default is 1e-6.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> binarythreshold = BinaryThreshold(input_shape=(2, 3, 4, 5))
creating: createZooKerasBinaryThreshold
"""
def __init__(self, value=1e-6, input_shape=None, **kwargs):
super(BinaryThreshold, self).__init__(None,
float(value),
list(input_shape) if input_shape else None,
**kwargs)
class Threshold(ZooKerasLayer):
"""
Threshold input Tensor.
If a value in the Tensor is smaller than or equal to th, it is replaced with v.
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
th: The threshold value to compare with. Default is 1e-6.
v: The value to replace with. Default is 0.0.
input_shape: A shape tuple, not including batch.
name: String to set the name of the layer.
If not specified, its name will default to a generated string.
>>> threshold = Threshold(input_shape=(2, 3, 4, 5))
creating: createZooKerasThreshold
"""
def __init__(self, th=1e-6, v=0.0, input_shape=None, **kwargs):
super(Threshold, self).__init__(None,
float(th),
float(v),
list(input_shape) if input_shape else None,
**kwargs)
class GaussianSampler(ZooKerasLayer):
"""
Takes {mean, log_variance} as input and samples from the Gaussian distribution
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
input_shape: A shape tuple, not including batch.
>>> gaussianSampler = GaussianSampler(input_shape=[(3,),(3,)])
creating: createZooKerasGaussianSampler
"""
def __init__(self, input_shape=None, **kwargs):
super(GaussianSampler, self).__init__(None,
list(input_shape) if input_shape else None,
**kwargs)
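# Scalar sketch of the sampling performed by GaussianSampler (the usual
# reparameterization trick); an illustrative addition, not the backend implementation.
def _gaussian_sampler_scalar_example(mean, log_variance):
    """Return mean + exp(0.5 * log_variance) * eps with eps drawn from N(0, 1)."""
    import math
    import random
    eps = random.gauss(0.0, 1.0)
    return mean + math.exp(0.5 * log_variance) * eps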
class ResizeBilinear(ZooKerasLayer):
"""
Resize the input image with bilinear interpolation. The input image must be a float tensor with
NHWC or NCHW layout
When you use this layer as the first layer of a model, you need to provide the argument
input_shape (a shape tuple, does not include the batch dimension).
# Arguments
output_height: output height
output_width: output width
align_corner: align corner or not
dim_ordering: Format of input data. Either 'th' (Channel First) or 'tf' (Channel Last).
Default is 'th'.
input_shape: A shape tuple, not including batch.
>>> resizeBilinear = ResizeBilinear(10, 20, input_shape=(2, 3, 5, 7))
creating: createZooKerasResizeBilinear
"""
def __init__(self, output_height, output_width, align_corner=False,
dim_ordering="th", input_shape=None, **kwargs):
super(ResizeBilinear, self).__init__(None,
output_height,
output_width,
align_corner,
dim_ordering,
list(input_shape) if input_shape else None,
**kwargs)
class SelectTable(ZooKerasLayer):
"""
Creates a module that takes a list of JTensors as input and outputs the element at index `index`
# Arguments
index: the index to be selected. 0-based index
input_shape: a list of shape tuples, not including batch.
>>> selectTable = SelectTable(0, input_shape=[[2, 3], [5, 7]])
creating: createZooKerasSelectTable
"""
def __init__(self, index, input_shape=None, **kwargs):
super(SelectTable, self).__init__(None,
index,
list(input_shape) if input_shape else None,
**kwargs)
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import test_expectations
from telemetry.page.actions import action_runner as action_runner_module
class TestNotSupportedOnPlatformError(Exception):
"""PageTest Exception raised when a required feature is unavailable.
The feature required to run the test could be part of the platform,
hardware configuration, or browser.
"""
class MultiTabTestAppCrashError(Exception):
"""PageTest Exception raised after browser or tab crash for multi-tab tests.
Used to abort the test rather than try to recover from an unknown state.
"""
class Failure(Exception):
"""PageTest Exception raised when an undesired but designed-for problem."""
class MeasurementFailure(Failure):
"""PageTest Exception raised when an undesired but designed-for problem."""
class PageTest(object):
"""A class styled on unittest.TestCase for creating page-specific tests.
Tests should override ValidateAndMeasurePage to perform test
validation and page measurement as necessary.
class BodyChildElementMeasurement(PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
body_child_count = tab.EvaluateJavaScript(
'document.body.children.length')
results.AddValue(scalar.ScalarValue(
page, 'body_children', 'count', body_child_count))
Args:
action_name_to_run: This is the method name in telemetry.page.Page
subclasses to run.
discard_first_result: Discard the result of the first run of this page. This
is usually used with the page_repeat and pageset_repeat options.
is_action_name_to_run_optional: Determines what to do if
action_name_to_run is not empty but the page doesn't have that
action. The page will run (without any action) if
is_action_name_to_run_optional is True, otherwise the page
will fail.
"""
def __init__(self,
action_name_to_run='',
needs_browser_restart_after_each_page=False,
discard_first_result=False,
clear_cache_before_each_run=False,
is_action_name_to_run_optional=False):
super(PageTest, self).__init__()
self.options = None
if action_name_to_run:
assert action_name_to_run.startswith('Run') \
and '_' not in action_name_to_run, \
('Wrong way of naming action_name_to_run. By new convention, '
'action_name_to_run must start with the Run prefix and be in CamelCase.')
self._action_name_to_run = action_name_to_run
self._needs_browser_restart_after_each_page = (
needs_browser_restart_after_each_page)
self._discard_first_result = discard_first_result
self._clear_cache_before_each_run = clear_cache_before_each_run
self._close_tabs_before_run = True
self._is_action_name_to_run_optional = is_action_name_to_run_optional
@property
def is_multi_tab_test(self):
"""Returns True if the test opens multiple tabs.
If the test overrides TabForPage, it is deemed a multi-tab test.
Multi-tab tests do not retry after tab or browser crashes, whereas
single-tab tests do. That is because the state of multi-tab tests
(e.g., how many tabs are open, etc.) is unknown after crashes.
"""
return self.TabForPage.__func__ is not PageTest.TabForPage.__func__
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
@discard_first_result.setter
def discard_first_result(self, discard):
self._discard_first_result = discard
@property
def clear_cache_before_each_run(self):
"""When set to True, the browser's disk and memory cache will be cleared
before each run."""
return self._clear_cache_before_each_run
@property
def close_tabs_before_run(self):
"""When set to True, all tabs are closed before running the test for the
first time."""
return self._close_tabs_before_run
@close_tabs_before_run.setter
def close_tabs_before_run(self, close_tabs):
self._close_tabs_before_run = close_tabs
def RestartBrowserBeforeEachPage(self):
""" Should the browser be restarted for the page?
This returns true if the test needs to unconditionally restart the
browser for each page. It may be called before the browser is started.
"""
return self._needs_browser_restart_after_each_page
def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613
"""Should the browser be stopped after the page is run?
This is called after a page is run to decide whether the browser needs to
be stopped to clean up its state. If it is stopped, then it will be
restarted to run the next page.
A test that overrides this can look at both the page and the browser to
decide whether it needs to stop the browser.
"""
return False
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
def CustomizeBrowserOptionsForSinglePage(self, page, options):
"""Set options specific to the test and the given page.
This will be called with the current page when the browser is (re)started.
Changing options at this point only makes sense if the browser is being
restarted for each page. Note that if page has a startup_url, the browser
will always be restarted for each run.
"""
if page.startup_url:
options.browser_options.startup_url = page.startup_url
def WillStartBrowser(self, platform):
"""Override to manipulate the browser environment before it launches."""
def DidStartBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
def CanRunForPage(self, page): # pylint: disable=W0613
"""Override to customize if the test can be ran for the given page."""
if self._action_name_to_run and not self._is_action_name_to_run_optional:
return hasattr(page, self._action_name_to_run)
return True
def SetOptions(self, options):
"""Sets the BrowserFinderOptions instance to use."""
self.options = options
def DidRunTest(self, browser, results): # pylint: disable=W0613
"""Override to do operations after all page set(s) are completed.
This will occur before the browser is torn down.
"""
self.options = None
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated, notably Telemetry
will already have performed the following operations on the browser before
calling this function:
* Ensure only one tab is open.
* Call WaitForDocumentReadyStateToComplete on the tab."""
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated and after
all waiting for completion has occurred."""
def WillRunActions(self, page, tab):
"""Override to do operations before running the actions on the page."""
def DidRunActions(self, page, tab):
"""Override to do operations after running the actions on the page."""
def CleanUpAfterPage(self, page, tab):
"""Called after the test run method was run, even if it failed."""
def CreateExpectations(self, page_set): # pylint: disable=W0613
"""Override to make this test generate its own expectations instead of
any that may have been defined in the page set."""
return test_expectations.TestExpectations()
def TabForPage(self, page, browser): # pylint: disable=W0613
"""Override to select a different tab for the page. For instance, to
create a new tab for every page, return browser.tabs.New()."""
return browser.tabs[0]
def ValidatePageSet(self, page_set):
"""Override to examine the page set before the test run. Useful for
example to validate that the pageset can be used with the test."""
def ValidateAndMeasurePage(self, page, tab, results):
"""Override to check test assertions and perform measurement.
When adding measurement results, call results.AddValue(...) for
each result. Raise an exception or add a failure.FailureValue on
failure. page_test.py also provides several base exception classes
to use.
Prefer metric value names that are in accordance with python
variable style. e.g., metric_name. The name 'url' must not be used.
Put together:
def ValidateAndMeasurePage(self, page, tab, results):
res = tab.EvaluateJavaScript('2+2')
if res != 4:
raise Exception('Oh, wow.')
results.AddValue(scalar.ScalarValue(
page, 'two_plus_two', 'count', res))
Args:
page: A telemetry.page.Page instance.
tab: A telemetry.core.Tab instance.
results: A telemetry.results.PageTestResults instance.
"""
raise NotImplementedError
def RunPage(self, page, tab, results):
# Run actions.
interactive = self.options and self.options.interactive
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
self.WillRunActions(page, tab)
if interactive:
action_runner.PauseInteractive()
else:
self._RunMethod(page, self._action_name_to_run, action_runner)
self.DidRunActions(page, tab)
self.ValidateAndMeasurePage(page, tab, results)
def _RunMethod(self, page, method_name, action_runner):
if hasattr(page, method_name):
run_method = getattr(page, method_name)
run_method(action_runner)
def RunNavigateSteps(self, page, tab):
"""Navigates the tab to the page URL attribute.
Runs the 'navigate_steps' page attribute as a compound action.
"""
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
page.RunNavigateSteps(action_runner)
@property
def action_name_to_run(self):
return self._action_name_to_run
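# Minimal usage sketch (illustrative; it assumes telemetry.value.scalar is
# importable in the embedding benchmark, as in the docstring example above,
# and that this module is imported as page_test):
#
#   from telemetry.value import scalar
#
#   class BodyChildElementTest(page_test.PageTest):
#     def ValidateAndMeasurePage(self, page, tab, results):
#       count = tab.EvaluateJavaScript('document.body.children.length')
#       results.AddValue(scalar.ScalarValue(page, 'body_children', 'count', count))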
|
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from framework.exceptions import PermissionsError
from api.base.exceptions import InvalidModelValueError
from api.base.serializers import ValuesListField, RelationshipField, LinksField, HideIfDraftRegistration, IDField
from api.base.utils import absolute_reverse, get_user_auth
from api.nodes.serializers import (
DraftRegistrationLegacySerializer,
DraftRegistrationDetailLegacySerializer,
update_institutions,
get_license_details,
NodeSerializer,
NodeLicenseSerializer,
NodeLicenseRelationshipField,
NodeContributorsSerializer,
NodeContributorsCreateSerializer,
NodeContributorDetailSerializer,
)
from api.taxonomies.serializers import TaxonomizableSerializerMixin
from osf.exceptions import DraftRegistrationStateError
from website import settings
class NodeRelationshipField(RelationshipField):
def to_internal_value(self, node_id):
node = self.context['view'].get_node(node_id=node_id) if node_id else None
return {'branched_from': node}
class DraftRegistrationSerializer(DraftRegistrationLegacySerializer, TaxonomizableSerializerMixin):
"""
New DraftRegistrationSerializer - instead of the node_id being provided in the URL, an optional
node is passed in under `branched_from`.
DraftRegistrations have several fields that can be edited that are persisted to the final registration.
"""
category_choices = list(settings.NODE_CATEGORY_MAP.items())
category_choices_string = ', '.join(["'{}'".format(choice[0]) for choice in category_choices])
title = ser.CharField(required=False, allow_blank=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string)
tags = ValuesListField(attr_name='name', child=ser.CharField(), required=False)
node_license = NodeLicenseSerializer(required=False, source='license')
links = LinksField({
'self': 'get_absolute_url',
})
affiliated_institutions = RelationshipField(
related_view='draft_registrations:draft-registration-institutions',
related_view_kwargs={'draft_id': '<_id>'},
self_view='draft_registrations:draft-registration-relationships-institutions',
self_view_kwargs={'draft_id': '<_id>'},
read_only=False,
many=True,
required=False,
)
branched_from = NodeRelationshipField(
related_view=lambda n: 'draft_nodes:draft-node-detail' if getattr(n, 'type', False) == 'osf.draftnode' else 'nodes:node-detail',
related_view_kwargs={'node_id': '<branched_from._id>'},
read_only=False,
required=False,
)
contributors = RelationshipField(
related_view='draft_registrations:draft-registration-contributors',
related_view_kwargs={'draft_id': '<_id>'},
)
bibliographic_contributors = RelationshipField(
related_view='draft_registrations:draft-registration-bibliographic-contributor-detail',
related_view_kwargs={'draft_id': '<_id>'},
)
current_user_permissions = ser.SerializerMethodField(
help_text='List of strings representing the permissions '
'for the current user on this draft registration.',
)
license = NodeLicenseRelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<license.node_license._id>'},
read_only=False,
)
has_project = ser.SerializerMethodField()
def get_has_project(self, obj):
return obj.has_project
@property
def subjects_related_view(self):
# Overrides TaxonomizableSerializerMixin
return 'draft_registrations:draft-registration-subjects'
@property
def subjects_view_kwargs(self):
# Overrides TaxonomizableSerializerMixin
return {'draft_id': '<_id>'}
@property
def subjects_self_view(self):
# Overrides TaxonomizableSerializerMixin
return 'draft_registrations:draft-registration-relationships-subjects'
def get_self_url(self, obj):
return absolute_reverse(
'draft_registrations:draft-registration-list',
kwargs={
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
def get_absolute_url(self, obj):
return obj.get_absolute_url()
# Overrides DraftRegistrationLegacySerializer
def get_node(self, validated_data):
# Node comes from branched_from relationship rather than from URL
return validated_data.pop('branched_from', None)
def get_current_user_permissions(self, obj):
return NodeSerializer.get_current_user_permissions(self, obj)
def expect_subjects_as_relationships(self, request):
"""Determines whether subjects should be serialized as a relationship.
Older serializers expect subjects as attributes for earlier versions,
but this new serializer does not have to adhere to that same behavior.
:param object request: Request object
:return bool: Subjects should be serialized as relationships
"""
# Overrides TaxonomizableSerializerMixin
return True
class DraftRegistrationDetailSerializer(DraftRegistrationSerializer, DraftRegistrationDetailLegacySerializer):
"""
Overrides DraftRegistrationLegacySerializer to make id required.
registration_supplement and node cannot be changed after the draft has been created.
"""
links = LinksField({
'self': 'get_self_url',
})
def get_self_url(self, obj):
return absolute_reverse(
'draft_registrations:draft-registration-detail',
kwargs={
'version': self.context['request'].parser_context['kwargs']['version'],
'draft_id': self.context['request'].parser_context['kwargs']['draft_id'],
},
)
def update(self, draft, validated_data):
draft = super(DraftRegistrationDetailSerializer, self).update(draft, validated_data)
user = self.context['request'].user
auth = get_user_auth(self.context['request'])
if 'tags' in validated_data:
new_tags = set(validated_data.pop('tags', []))
draft.update_tags(new_tags, auth=auth)
if 'license_type' in validated_data or 'license' in validated_data:
license_details = get_license_details(draft, validated_data)
validated_data['node_license'] = license_details
if 'affiliated_institutions' in validated_data:
institutions_list = validated_data.pop('affiliated_institutions')
new_institutions = [{'_id': institution} for institution in institutions_list]
update_institutions(draft, new_institutions, user)
if 'subjects' in validated_data:
subjects = validated_data.pop('subjects', None)
self.update_subjects(draft, subjects, auth)
try:
draft.update(validated_data, auth=auth)
except PermissionsError:
raise exceptions.PermissionDenied
except DraftRegistrationStateError as e:
raise InvalidModelValueError(detail=str(e))
return draft
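# Illustrative request body handled by this serializer's update() -- a hedged
# sketch of a JSON:API PATCH to the draft registration detail endpoint. The
# attribute names follow the serializer above; the id and the exact JSON:API
# `type` value are placeholders for illustration only:
#
#   {
#     "data": {
#       "id": "<draft_id>",
#       "type": "draft_registrations",
#       "attributes": {
#         "title": "Updated draft title",
#         "tags": ["pilot", "2020"]
#       }
#     }
#   }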
class DraftRegistrationContributorsSerializer(NodeContributorsSerializer):
draft_registration = RelationshipField(
related_view='draft_registrations:draft-registration-detail',
related_view_kwargs={'draft_id': '<draft_registration._id>'},
)
node = HideIfDraftRegistration(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<node._id>'},
))
class Meta:
type_ = 'contributors'
links = LinksField({
'self': 'get_absolute_url',
})
def get_absolute_url(self, obj):
return absolute_reverse(
'draft_registrations:draft-registration-contributor-detail',
kwargs={
'user_id': obj.user._id,
'draft_id': self.context['request'].parser_context['kwargs']['draft_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class DraftRegistrationContributorsCreateSerializer(NodeContributorsCreateSerializer, DraftRegistrationContributorsSerializer):
"""
Overrides DraftRegistrationContributorsSerializer to add email, full_name, send_email, and non-required index and users fields.
id and index are redefined because of the two serializers we inherit.
"""
id = IDField(source='_id', required=False, allow_null=True)
index = ser.IntegerField(required=False, source='_order')
email_preferences = ['draft_registration', 'false']
class DraftRegistrationContributorDetailSerializer(NodeContributorDetailSerializer, DraftRegistrationContributorsSerializer):
"""
Overrides NodeContributorDetailSerializer to set the draft registration instead of the node.
id and index are redefined because of the two serializers we inherit.
"""
id = IDField(required=True, source='_id')
index = ser.IntegerField(required=False, read_only=False, source='_order')
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = list(map(lambda x: ('%02X' % ord(x)), v4addr))  # list() so insert() also works on Python 3
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 4 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return addrtype, to_bytes(dest_addr), dest_port, header_length
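# Wire layout consumed by parse_header (matching the ADDRTYPE_* constants above):
#   ADDRTYPE_IPV4: 0x01 | 4-byte IPv4 address      | 2-byte big-endian port
#   ADDRTYPE_HOST: 0x03 | 1-byte length | hostname | 2-byte big-endian port
#   ADDRTYPE_IPV6: 0x04 | 16-byte IPv6 address     | 2-byte big-endian port
# For example, b'\x01\x08\x08\x08\x08\x00\x35' parses to (1, b'8.8.8.8', 53, 7),
# as exercised by test_parse_header() below.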
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
if addr is "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family == socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family == socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if len(block) == 1:
prefix_size = 0
while (ip & 1) == 0 and ip != 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family == socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family == socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family == socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
|
|
"""A module to receive data from UR CB2 robots."""
# The MIT License (MIT)
#
# Copyright (c) 2016 GTRC.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import struct
import array
import threading
class URReceiver(object):
"""A class to receive and process data from a UR Robot
The receiving and processing can be run in a separate thread by calling
start(). The stop() command must be called before exiting to halt the
additional thread. Alternatively, receive(), decode(), and
print_parsed_data() can be called in sequence in order to receive,
decode, and print data. One should not call receive(), decode(), or any
of the print methods, if a separate thread is being used. You should
never write to any of the data fields externally, however you can read
from them. Python's atomic read/write architecture should prevent you
from getting any half-baked results for basic types; for all lists and
tuples, you must lock using lock (it is recommended that you use the
`with lock:` paradigm).
Attributes:
clean_data: Double array of length 101 for all of the data returned by
the robot
raw_data: String of complete raw data packet
__socket: The socket for communications
clean_packets: The Integer number of packets which have been received
cleanly
stub_packets: The Integer number of packets which have been received
as stubs
received: The total Integer number of complete data sets which have
been received
waiting_data: String to hold incomplete data sets
new_data: Boolean whether new data is available for processing
time: Double of time elapsed since the controller was started
target_joint_positions: 6 member Double list of target joint positions
target_joint_velocities: 6 member Double list of target joint velocities
target_joint_accelerations: 6 member Double list of target joint
accelerations
target_joint_currents: 6 member Double list of target joint currents
target_joint_moments: 6 member Double list of target joint moments as
torques
actual_joint_positions: 6 member Double list of actual joint positions
actual_joint_velocities: 6 member Double list of actual joint velocities
actual_joint_currents: 6 member Double list of actual joint currents
tool_accelerometer: 3 member Double list of tool x, y and z accelerometer
values (software version 1.7)
force_tcp: 6 member Double list of generalised forces in the TCP
position: 6 member Double list of cartesian coordinates of the tool:
(x,y,z,rx,ry,rz), where rx, ry and rz is a rotation vector
representation of the tool orientation
tool_speed: 6 member Double list of speed of the tool given in cartesian
coordinates
digital_inputs: Current state of the digital inputs. NOTE: these are
bits encoded as int64_t, e.g. a value of 5 corresponds to bit 0 and
bit 2 set high
joint_temperature: 6 member Double list of temperature of each joint in
degrees celsius
controller_period: Double of controller real time thread execution time
robot_control_mode: Double of robot control mode (see
PolyScopeProgramServer on the "How to" page
joint_control_modes: 6 member Double list of joint control modes (see
PolyScopeProgramServer on the "How to" page) (only from software
version 1.8 and on)
run: Boolean on whether to run or not
__receiving_thread: Thread object for running the receiving and parsing
loops
verbose: Boolean defining whether or not to print data
lock: A threading lock which is used to protect data from race
conditions
_is_stopped: A boolean specifying whether the robot is stopped
"""
# Format specifier:
# ! : network (big endian)
# I : unsigned int, message size
# 85d : 85 doubles
# q : int64_t for digital inputs
# 15d : 15 doubles
#: Format spec for complete data packet
format = struct.Struct('! I 85d q 15d')
#: The format spec for the packet length field
formatLength = struct.Struct('! I')
#: The width to be given to name items when printing out
name_width = 30
#: The precision for printing data
precision = 7
double_format_string = "{:+0"+str(precision+4)+"."+str(precision)+"f}"
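# Sanity note (an illustrative addition): with the layout above the packet size is
# 4 (uint message size) + 85 * 8 + 8 (int64 digital inputs) + 15 * 8 = 812 bytes,
# i.e. struct.Struct('! I 85d q 15d').size == 812, which is why receive()
# below reads and validates 812-byte packets.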
def __init__(self, open_socket, verbose=False):
"""Construct a UR Robot connection given connection parameters
Args:
open_socket (socket.socket): The socket to use for communications.
verbose (bool): Whether to print received data in main loop
"""
self.clean_data = array.array('d', [0] * 101)
self.raw_data = ''
self.__socket = open_socket
self.clean_packets = 0
self.stub_packets = 0
self.received = 0
self.waiting_data = ''
self.new_data = False
self.time = 0.0
self.target_joint_positions = [0.0]*6
self.target_joint_velocities = [0.0]*6
self.target_joint_accelerations = [0.0]*6
self.target_joint_currents = [0.0]*6
self.target_joint_moments = [0.0]*6
self.actual_joint_positions = [0.0]*6
self.actual_joint_velocities = [0.0]*6
self.actual_joint_currents = [0.0]*6
self.tool_accelerometer = [0.0]*3
self.force_tcp = [0.0]*6
self.position = [0.0]*6
self.tool_speed = [0.0]*6
self.digital_inputs = 0
self.joint_temperature = [0.0]*6
self.controller_period = 0.0
self.robot_control_mode = 0.0
self.joint_control_modes = [0.0]*6
self.run = False
self.__receiving_thread = None
self.verbose = verbose
self.lock = threading.Lock()
self._is_stopped = False
if verbose:
print "\033[2J" # Clear screen
def __del__(self):
"""Shutdown side thread and print aggregated connection stats"""
self.stop()
print "Received: "+str(self.received) + " data sets"
print "Received: "+str(self.clean_packets) + " clean packets"
print "Received: "+str(self.stub_packets) + " stub packets"
def decode(self):
"""Decode the data stored in the class's rawData field.
Only process the data if there is new data available. Unset the
self.newData flag upon completion. Note, this will lock the data set
and block execution in a number of other functions
"""
with self.lock:
if self.new_data:
self.clean_data = self.format.unpack(self.raw_data)
self.time = self.clean_data[1]
self.target_joint_positions = self.clean_data[2:8]
self.target_joint_velocities = self.clean_data[8:14]
self.target_joint_accelerations = self.clean_data[14:20]
self.target_joint_currents = self.clean_data[20:26]
self.target_joint_moments = self.clean_data[26:32]
self.actual_joint_positions = self.clean_data[32:38]
self.actual_joint_velocities = self.clean_data[38:44]
self.actual_joint_currents = self.clean_data[44:50]
self.tool_accelerometer = self.clean_data[50:53]
# unused = self.clean_data[53:68]
self.force_tcp = self.clean_data[68:74]
self.position = self.clean_data[74:80]
self.tool_speed = self.clean_data[80:86]
self.digital_inputs = self.clean_data[86]
self.joint_temperature = self.clean_data[87:93]
self.controller_period = self.clean_data[93]
# test value = self.clean_data[94]
self.robot_control_mode = self.clean_data[95]
self.joint_control_modes = self.clean_data[96:102]
self.new_data = False
self._is_stopped = self.is_stopped()
def receive(self):
"""Receive data from the UR Robot.
If an entire data set is not received, then store the data in a
temporary location (self.waiting_data). Once a complete packet is
received, place the complete packet into self.raw_data and set the
new_data flag. Note, this will lock the data set and block execution in a
number of other functions once a full data set is built.
"""
incoming_data = self.__socket.recv(812) # expect to get 812 bytes
if len(incoming_data) == 812:
self.clean_packets += 1
else:
self.stub_packets += 1
if self.formatLength.unpack(incoming_data[0:4])[0] == 812:
self.waiting_data = incoming_data
else:
self.waiting_data += incoming_data
if len(self.waiting_data) == 812:
with self.lock:
self.raw_data = self.waiting_data
self.received += 1
self.new_data = True
def print_raw_data(self):
"""Print the raw data which is stored in self.raw_data.
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "Received (raw): "+self.raw_data + "\n"
def print_data(self):
"""Print the processed data stored in self.clean_data
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "Received (unpacked):\n "
print self.clean_data
print "\n"
def output_data_item(self, name, values):
"""Output item with name and values.
Formatting is specified by self.name_width and self.precision.
Args:
name (str): The name of the value
values (float, int, tuple of float, list of float): The list of
values
"""
to_print = ("%-"+str(self.name_width)+"s") % name
if isinstance(values, (list, tuple)):
to_print += ": [%s]" % ', '.join(self.double_format_string.format(x)
for x in values)
elif isinstance(values, (int, bool)):
to_print += ": [%s]" % str(values)
elif isinstance(values, float):
to_print += ": [%s]" % self.double_format_string.format(values)
else:
print "I don't know that data type: " + str(type(values))
print to_print
def print_parsed_data(self):
"""Print the parsed data
Note, this will lock the data set and block execution in a number of
other functions
"""
with self.lock:
print "\033[H"
self.output_data_item("Time since controller turn on",
self.time)
self.output_data_item("Target joint positions",
self.target_joint_positions)
self.output_data_item("Target joint velocities",
self.target_joint_velocities)
self.output_data_item("Target joint accelerations",
self.target_joint_accelerations)
self.output_data_item("Target joint currents",
self.target_joint_currents)
self.output_data_item("Target joint moments (torque)",
self.target_joint_moments)
self.output_data_item("Actual joint positions",
self.actual_joint_positions)
self.output_data_item("Actual joint velocities",
self.actual_joint_velocities)
self.output_data_item("Actual joint currents",
self.actual_joint_currents)
self.output_data_item("Tool accelerometer values",
self.tool_accelerometer)
self.output_data_item("Generalised forces in the TCP",
self.force_tcp)
self.output_data_item("Cartesian tool position",
self.position)
self.output_data_item("Cartesian tool speed",
self.tool_speed)
self.output_data_item("Joint temperatures (deg C)",
self.joint_temperature)
self.output_data_item("Controller period",
self.controller_period)
self.output_data_item("Robot control mode",
self.robot_control_mode)
self.output_data_item("Joint control modes",
self.joint_control_modes)
print ((("%-"+str(self.name_width)+"s") % "Digital Input Number") +
": " + '|'.join('{:^2d}'.format(x) for x in range(0, 18)))
print ((("%-"+str(self.name_width)+"s") % "Digital Input Value: ") +
": " + '|'.join('{:^2s}'.format(x) for x in '{:018b}'.format(
self.digital_inputs)[::-1]))
self.output_data_item("Is Stopped:",
self._is_stopped)
def start(self):
"""Spawn a new thread for receiving and run it"""
if (self.__receiving_thread is None or
not self.__receiving_thread.is_alive()):
self.run = True
self.__receiving_thread = threading.Thread(group=None,
target=self.loop,
name='receiving_thread',
args=(),
kwargs={})
self.__receiving_thread.start()
def loop(self):
"""The main loop which receives, decodes, and optionally prints data"""
while self.run:
self.receive()
self.decode()
if self.verbose:
self.print_parsed_data()
def stop(self):
"""Stops execution of the auxiliary receiving thread"""
if self.__receiving_thread is not None:
if self.__receiving_thread.is_alive():
self.verbose_print('attempting to shutdown auxiliary thread',
'*')
self.run = False # Python writes like this are atomic
self.__receiving_thread.join()
self.verbose_print('\033[500D')
self.verbose_print('\033[500C')
self.verbose_print('-', '-', 40)
if self.__receiving_thread.is_alive():
self.verbose_print('failed to shutdown auxiliary thread',
'*')
else:
self.verbose_print('shutdown auxiliary thread', '*')
else:
self.verbose_print('auxiliary thread already shutdown', '*')
else:
self.verbose_print('no auxiliary threads exist', '*')
def verbose_print(self, string_input, emphasis='', count=5):
"""Print input if verbose is set
Args:
string_input (str): The input string to be printed.
emphasis (str): Emphasis character to be placed around input.
count (int): Number of emphasis characters to use.
"""
if self.verbose:
if emphasis == '':
print string_input
else:
print (emphasis*count + " " + string_input + " " +
emphasis * count)
def is_stopped(self, error=0.005):
"""Check whether the robot is stopped.
Check whether the target joint velocities are zero and the actual
velocities are all within error of zero. Note, this
will lock the data set and block execution in a number of other
functions
Args:
error (float): The error range to define "stopped"
Returns: Boolean, whether the robot is stopped.
"""
with self.lock:
to_return = (
all(v == 0 for v in self.target_joint_velocities) and
all(abs(v) < error for v in self.actual_joint_velocities))
return to_return
def at_goal(self, goal, cartesian, error=0.005):
"""Check whether the robot is at a goal point.
Check whether the differences between the joint or cartesian
coordinates are all below some error. This can be used to
determine if a move has been completed. It can also be used to
create blends by beginning the next move prior to the current one
reaching its goal. Note, this will lock the data set and block execution
in a number of other functions.
Args:
goal (6 member tuple or list of floats): The goal to check against
cartesian (bool): Whether the goal is in cartesian coordinates or
not (in which case joint coordinates)
error (float): The error range in which to consider an object at
its goal, in meters for cartesian space and radians for axis
space.
Returns: Boolean, whether the current position is within the error
range of the goal.
"""
with self.lock:
to_return = (
all(abs(g-a) < error for g, a in zip(self.position, goal))
if cartesian else
all(abs(g-a) < error for g, a in
zip(self.actual_joint_positions, goal)))
return to_return
def __enter__(self):
"""Enters the URRobot receiver from a with statement"""
return self
def __exit__(self, *_):
"""Exits at the end of a context manager statement by destructing."""
self.stop()
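# --- Usage sketch (illustrative, not part of the original module) ---
# The class name `Receiver`, the robot address and the port below are
# assumptions; the class statement sits above this excerpt and the real-time
# port depends on the controller. A minimal way to drive the receiver:
#
#     import socket
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     sock.connect(('192.168.1.10', 30003))    # assumed robot address/port
#     with Receiver(sock, verbose=True) as receiver:
#         receiver.start()                      # spawn the receiving thread
#         while not receiver.is_stopped():
#             pass                              # poll until the joints settle
#     sock.close()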
|
|
def index():
# If user has been logged in already, then
# redirect to dashboard.
if auth.user:
redirect('dashboard')
return dict()
def dashboard():
# If user has not been logged in, then
# redirect to index.
if not auth.user:
redirect('/')
return dict()
def get_user():
return {
'result': {
'user': auth.user,
}
}
def login():
result = Auth(db=db).login_bare(request.vars.email, request.vars.password)
return locals()
def signup():
result = Auth(db=db).register_bare(**request.vars)
return locals()
def logout():
result = Auth(db=db).logout(next=None)
return locals()
def query():
rows = db(db.address.email == request.vars.email).select(db.address.ALL, orderby=~db.address.create_time)
addresses = []
for row in rows:
info_rows = db(db.address_info.id == row.address_info_id).select()
for info in info_rows:
if row.available:
address = {
'first_name': get_mask(info.first_name),
'last_name': get_mask(info.last_name),
'company': get_mask(info.company),
'area': get_mask(info.area),
'phone': info.phone[-4:].rjust(len(info.phone), "*"),
'street': get_mask(info.street),
'apt': get_mask(info.apt),
'city': info.city,
'state': info.address_state,
'zip': info.zip,
'id': info.id
}
addresses.append(address)
return {
'result': {
'addresses': addresses,
}
}
def get_mask(s):
l = len(s)
if l == 0:
return ''
return s[0] + '*' * (l - 1)
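# Illustrative behaviour (not in the original source):
#   get_mask('Seattle') -> 'S******'
#   get_mask('')        -> ''
# Only the first character is kept; the remainder is replaced with '*', which
# is how query() masks address fields for lookups by email.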
def get_info():
users = len(db().select(db.auth_user.id)) + 123
addresses = len(db().select(db.address_info.id)) + 321
return locals()
@auth.requires_signature()
def get_api():
get_list_api = URL('default', 'get_addresses', user_signature=True)
create = URL('default', 'create_address', user_signature=True)
edit = URL('default', 'edit_address', user_signature=True)
delete = URL('default', 'delete_address', user_signature=True)
return locals()
@auth.requires_signature()
def get_addresses():
rows = db(db.address.user_id == auth.user.id).select(db.address.ALL, orderby=~db.address.create_time)
addresses = []
for row in rows:
info_rows = db(db.address_info.id == row.address_info_id).select()
for info in info_rows:
address = {
'show': True,
'id': row.id,
'available': row.available,
'first_name': info.first_name,
'last_name': info.last_name,
'company': info.company,
'area': info.area,
'phone': info.phone,
'street': info.street,
'apt': info.apt,
'city': info.city,
'state': info.address_state,
'zip': info.zip
}
addresses.append(address)
return {
'result': {
'addresses': addresses,
}
}
@auth.requires_signature()
def create_address():
address_info = {
'first_name': request.vars.first_name,
'last_name': request.vars.last_name,
'company': request.vars.company,
'area': request.vars.area,
'phone': request.vars.phone,
'street': request.vars.street,
'apt': request.vars.apt,
'city': request.vars.city,
'address_state': request.vars.state,
'zip': request.vars.zip,
}
address_info_id = db.address_info.insert(**address_info)
address = {
'user_id': auth.user.id,
'email': auth.user.email,
'address_info_id': address_info_id,
}
address_id = db.address.insert(**address)
return {
'result': {
'id': address_id,
}
}
@auth.requires_signature()
def edit_address():
db(db.address.id == request.vars.id).update(available=request.vars.available)
rows = db(db.address.id == request.vars.id).select()
for row in rows:
address_info = {
'first_name': request.vars.first_name,
'last_name': request.vars.last_name,
'company': request.vars.company,
'area': request.vars.area,
'phone': request.vars.phone,
'street': request.vars.street,
'apt': request.vars.apt,
'city': request.vars.city,
'address_state': request.vars.state,
'zip': request.vars.zip,
}
db(db.address_info.id == row.address_info_id).update(**address_info)
return {
'result': {
'state': True,
}
}
@auth.requires_signature()
def delete_address():
rows = db(db.address.id == request.vars.id).select()
for row in rows:
db(db.address_info.id == row.address_info_id).delete()
db(db.address.id == request.vars.id).delete()
return {
'result': {
'state': True,
},
}
def merchant_address_api():
result = {}
if request.vars.token is not None and request.vars.token == "ZHANGXUBAIHANTIANYUE":
rows = db(db.address_info.id == request.vars.address_id).select()
if len(rows) > 0:
result['address'] = rows.first()
else:
result['address'] = 'None'
return dict(result=result)
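# Illustrative request shape (not in the original source): a merchant client
# would call roughly
#   GET /<app>/default/merchant_address_api?token=<shared token>&address_id=<id>
# and receive {'result': {'address': <address_info row>}} when the id exists,
# or {'result': {'address': 'None'}} otherwise.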
def user():
"""
exposes:
http://..../[app]/default/user/login
http://..../[app]/default/user/logout
http://..../[app]/default/user/register
http://..../[app]/default/user/profile
http://..../[app]/default/user/retrieve_password
http://..../[app]/default/user/change_password
http://..../[app]/default/user/manage_users (requires membership in
http://..../[app]/default/user/bulk_register
use @auth.requires_login()
@auth.requires_membership('group name')
@auth.requires_permission('read','table name',record_id)
to decorate functions that need access control
"""
return dict(form=auth())
@cache.action()
def download():
"""
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
"""
return response.download(request, db)
def call():
"""
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides wrapper for TensorFlow modules."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import types
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import tf_stack
from tensorflow.tools.compatibility import all_renames_v2
_PER_MODULE_WARNING_LIMIT = 1
def get_rename_v2(name):
if name not in all_renames_v2.symbol_renames:
return None
return all_renames_v2.symbol_renames[name]
def _call_location():
# We want to get stack frame 3 frames up from current frame,
# i.e. above __getattr__, _tfmw_add_deprecation_warning,
# and _call_location calls.
stack = tf_stack.extract_stack(limit=4)
if not stack: # should never happen as we're in a function
return 'UNKNOWN'
frame = stack[0]
return '{}:{}'.format(frame.filename, frame.lineno)
def contains_deprecation_decorator(decorators):
return any(
d.decorator_name == 'deprecated' for d in decorators)
def has_deprecation_decorator(symbol):
"""Checks if given object has a deprecation decorator.
We check if deprecation decorator is in decorators as well as
whether symbol is a class whose __init__ method has a deprecation
decorator.
Args:
symbol: Python object.
Returns:
True if symbol has deprecation decorator.
"""
decorators, symbol = tf_decorator.unwrap(symbol)
if contains_deprecation_decorator(decorators):
return True
if tf_inspect.isfunction(symbol):
return False
if not tf_inspect.isclass(symbol):
return False
if not hasattr(symbol, '__init__'):
return False
init_decorators, _ = tf_decorator.unwrap(symbol.__init__)
return contains_deprecation_decorator(init_decorators)
class TFModuleWrapper(types.ModuleType):
"""Wrapper for TF modules to support deprecation messages and lazyloading."""
def __init__( # pylint: disable=super-on-old-class
self,
wrapped,
module_name,
public_apis=None,
deprecation=True,
has_lite=False): # pylint: enable=super-on-old-class
super(TFModuleWrapper, self).__init__(wrapped.__name__)
# A cache for all members which do not print deprecations (any more).
self._tfmw_attr_map = {}
self.__dict__.update(wrapped.__dict__)
# Prefix all local attributes with _tfmw_ so that we can
# handle them differently in attribute access methods.
self._tfmw_wrapped_module = wrapped
self._tfmw_module_name = module_name
self._tfmw_public_apis = public_apis
self._tfmw_print_deprecation_warnings = deprecation
self._tfmw_has_lite = has_lite
# Set __all__ so that import * works for lazy loaded modules
if self._tfmw_public_apis:
self._tfmw_wrapped_module.__all__ = list(self._tfmw_public_apis.keys())
self.__all__ = list(self._tfmw_public_apis.keys())
else:
if hasattr(self._tfmw_wrapped_module, '__all__'):
self.__all__ = self._tfmw_wrapped_module.__all__
else:
self._tfmw_wrapped_module.__all__ = [
attr for attr in dir(self._tfmw_wrapped_module)
if not attr.startswith('_')
]
self.__all__ = self._tfmw_wrapped_module.__all__
# names we already checked for deprecation
self._tfmw_deprecated_checked = set()
self._tfmw_warning_count = 0
def _tfmw_add_deprecation_warning(self, name, attr):
"""Print deprecation warning for attr with given name if necessary."""
if (self._tfmw_warning_count < _PER_MODULE_WARNING_LIMIT and
name not in self._tfmw_deprecated_checked):
self._tfmw_deprecated_checked.add(name)
if self._tfmw_module_name:
full_name = 'tf.%s.%s' % (self._tfmw_module_name, name)
else:
full_name = 'tf.%s' % name
rename = get_rename_v2(full_name)
if rename and not has_deprecation_decorator(attr):
call_location = _call_location()
# skip locations in Python source
if not call_location.startswith('<'):
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), full_name, rename)
self._tfmw_warning_count += 1
return True
return False
def _tfmw_import_module(self, name):
symbol_loc_info = self._tfmw_public_apis[name]
if symbol_loc_info[0]:
module = importlib.import_module(symbol_loc_info[0])
attr = getattr(module, symbol_loc_info[1])
else:
attr = importlib.import_module(symbol_loc_info[1])
setattr(self._tfmw_wrapped_module, name, attr)
self.__dict__[name] = attr
return attr
def __getattribute__(self, name): # pylint: disable=super-on-old-class
# Handle edge case where we unpickle and the object is not initialized yet
# and does not have _tfmw_attr_map attribute. Otherwise, calling
# __getattribute__ on __setstate__ will result in infinite recursion where
# we keep trying to get _tfmw_wrapped_module in __getattr__.
try:
attr_map = object.__getattribute__(self, '_tfmw_attr_map')
except AttributeError:
self._tfmw_attr_map = attr_map = {}
try:
# Use cached attrs if available
return attr_map[name]
except KeyError:
# Make sure we do not import from tensorflow/lite/__init__.py
if name == 'lite':
if self._tfmw_has_lite:
attr = self._tfmw_import_module(name)
setattr(self._tfmw_wrapped_module, 'lite', attr)
attr_map[name] = attr
return attr
# Placeholder for Google-internal contrib error
attr = super(TFModuleWrapper, self).__getattribute__(name)
# Return and cache dunders and our own members.
if name.startswith('__') or name.startswith('_tfmw_'):
attr_map[name] = attr
return attr
# Print deprecations, only cache functions after deprecation warnings have
# stopped.
if not (self._tfmw_print_deprecation_warnings and
self._tfmw_add_deprecation_warning(name, attr)):
attr_map[name] = attr
return attr
def __getattr__(self, name):
try:
attr = getattr(self._tfmw_wrapped_module, name)
except AttributeError:
# Placeholder for Google-internal contrib error
if not self._tfmw_public_apis:
raise
if name not in self._tfmw_public_apis:
raise
attr = self._tfmw_import_module(name)
if self._tfmw_print_deprecation_warnings:
self._tfmw_add_deprecation_warning(name, attr)
return attr
def __setattr__(self, arg, val): # pylint: disable=super-on-old-class
if not arg.startswith('_tfmw_'):
setattr(self._tfmw_wrapped_module, arg, val)
self.__dict__[arg] = val
if arg not in self.__all__ and arg != '__all__':
self.__all__.append(arg)
if arg in self._tfmw_attr_map:
self._tfmw_attr_map[arg] = val
super(TFModuleWrapper, self).__setattr__(arg, val)
def __dir__(self):
if self._tfmw_public_apis:
return list(
set(self._tfmw_public_apis.keys()).union(
set([
attr for attr in dir(self._tfmw_wrapped_module)
if not attr.startswith('_')
])))
else:
return dir(self._tfmw_wrapped_module)
def __delattr__(self, name): # pylint: disable=super-on-old-class
if name.startswith('_tfmw_'):
super(TFModuleWrapper, self).__delattr__(name)
else:
delattr(self._tfmw_wrapped_module, name)
def __repr__(self):
return self._tfmw_wrapped_module.__repr__()
def __reduce__(self):
return importlib.import_module, (self.__name__,)
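# --- Usage sketch (illustrative, not part of the original module) ---
# How a module might be wrapped; replacing the sys.modules entry is an
# assumption about how callers typically install such a wrapper, not
# something shown in this file.
#
#     import sys
#     import types
#
#     plain = types.ModuleType('demo')
#     plain.add = lambda a, b: a + b
#     sys.modules['demo'] = TFModuleWrapper(plain, module_name='demo',
#                                           deprecation=False)
#     import demo
#     print(demo.add(1, 2))  # attribute access goes through the wrapper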
|
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Recursion into other modules.
"""
from logging import debug, warning
from nuitka import ModuleRegistry, Options
from nuitka.freezer.BytecodeModuleFreezer import isFrozenModule
from nuitka.importing import ImportCache, Importing, StandardLibrary
from nuitka.plugins.PluginBase import Plugins
from nuitka.tree.SourceReading import readSourceCodeFromFilename
from nuitka.utils import Utils
def recurseTo(module_package, module_filename, module_relpath, module_kind,
reason):
from nuitka.tree import Building
if not ImportCache.isImportedModuleByPath(module_relpath):
module, source_ref, source_filename = Building.decideModuleTree(
filename = module_filename,
package = module_package,
is_top = False,
is_main = False,
is_shlib = module_kind == "shlib"
)
# Check if the module name is known. In order to avoid duplicates,
# learn the new filename, and continue the build if it's not.
if not ImportCache.isImportedModuleByName(module.getFullName()):
debug(
"Recurse to import '%s' from %s. (%s)",
module.getFullName(),
module_relpath,
reason
)
if module_kind == "py" and source_filename is not None:
try:
Building.createModuleTree(
module = module,
source_ref = source_ref,
source_code = readSourceCodeFromFilename(
module_name = module.getFullName(),
source_filename = source_filename
),
is_main = False
)
except (SyntaxError, IndentationError) as e:
if module_filename not in Importing.warned_about:
Importing.warned_about.add(module_filename)
warning(
"""\
Cannot recurse to import module '%s' (%s) because of '%s'""",
module_relpath,
module_filename,
e.__class__.__name__
)
return None, False
except Building.CodeTooComplexCode:
warning(
"""\
Cannot recurse to import module '%s' (%s) because code is too complex.""",
module_relpath,
module_filename,
)
ImportCache.addImportedModule(
module_relpath,
module
)
is_added = True
else:
ImportCache.addImportedModule(
module_relpath,
ImportCache.getImportedModuleByName(module.getFullName())
)
module = ImportCache.getImportedModuleByName(
module.getFullName()
)
is_added = False
assert not module_relpath.endswith("/__init__.py"), module
return module, is_added
else:
return ImportCache.getImportedModuleByPath(module_relpath), False
def decideRecursion(module_filename, module_name, module_package,
module_kind):
# Many branches, which make decisions immediately, by returning
# pylint: disable=R0911,R0912
Plugins.onModuleEncounter(
module_filename,
module_name,
module_package,
module_kind
)
if module_kind == "shlib":
if Options.isStandaloneMode():
return True, "Shared library for inclusion."
else:
return False, "Shared library cannot be inspected."
if module_package is None:
full_name = module_name
else:
full_name = module_package + '.' + module_name
if isFrozenModule(full_name, module_filename):
return False, "Module is frozen."
no_case_modules = Options.getShallFollowInNoCase()
for no_case_module in no_case_modules:
if full_name == no_case_module:
return (
False,
"Module listed explicitly to not recurse to."
)
if full_name.startswith(no_case_module + '.'):
return (
False,
"Module in package listed explicitly to not recurse to."
)
any_case_modules = Options.getShallFollowModules()
for any_case_module in any_case_modules:
if full_name == any_case_module:
return (
True,
"Module listed explicitly to recurse to."
)
if full_name.startswith(any_case_module + '.'):
return (
True,
"Module in package listed explicitly to recurse to."
)
if Options.shallFollowNoImports():
return (
False,
"Requested to not recurse at all."
)
if StandardLibrary.isStandardLibraryPath(module_filename):
return (
Options.shallFollowStandardLibrary(),
"Requested to %srecurse to standard library." % (
"" if Options.shallFollowStandardLibrary() else "not "
)
)
if Options.shallFollowAllImports():
return (
True,
"Requested to recurse to all non-standard library modules."
)
# Means we were not given instructions on how to handle things.
return (
None,
"Default behavior, not recursing without request."
)
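# Illustrative contract (not in the original source): decideRecursion returns
# (True, reason) to recurse into the module, (False, reason) to skip it, and
# (None, reason) when no explicit instruction applies and the caller must
# fall back to its default behaviour.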
def considerFilename(module_filename, module_package):
assert module_package is None or \
( type(module_package) is str and module_package != "" )
module_filename = Utils.normpath(module_filename)
if Utils.isDir(module_filename):
module_filename = Utils.abspath(module_filename)
module_name = Utils.basename(module_filename)
module_relpath = Utils.relpath(module_filename)
return module_filename, module_relpath, module_name
elif module_filename.endswith(".py"):
module_name = Utils.basename(module_filename)[:-3]
module_relpath = Utils.relpath(module_filename)
return module_filename, module_relpath, module_name
else:
return None
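# Illustrative return values (not in the original source):
#   considerFilename("pkg/module.py", "pkg") -> (normalised path, relpath, "module")
#   considerFilename("pkg", "pkg")           -> (absolute dir path, relpath, "pkg")
# and None for anything that is neither a directory nor a .py file.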
def isSameModulePath(path1, path2):
if Utils.basename(path1) == "__init__.py":
path1 = Utils.dirname(path1)
if Utils.basename(path2) == "__init__.py":
path2 = Utils.dirname(path2)
return Utils.abspath(path1) == Utils.abspath(path2)
def _checkPluginPath(plugin_filename, module_package):
# Many branches, for the decision is very complex, pylint: disable=R0912
debug(
"Checking detail plug-in path '%s' '%s':",
plugin_filename,
module_package
)
plugin_info = considerFilename(
module_package = module_package,
module_filename = plugin_filename
)
if plugin_info is not None:
module, is_added = recurseTo(
module_filename = plugin_info[0],
module_relpath = plugin_info[1],
module_package = module_package,
module_kind = "py",
reason = "Lives in plug-in directory."
)
if module:
if not is_added:
warning(
"Recursed to %s '%s' at '%s' twice.",
"package" if module.isPythonPackage() else "module",
module.getName(),
plugin_info[0]
)
if not isSameModulePath(module.getFilename(), plugin_info[0]):
warning(
"Duplicate ignored '%s'.",
plugin_info[1]
)
return
debug(
"Recursed to %s %s %s",
module.getName(),
module.getPackage(),
module
)
if module.isPythonPackage():
package_filename = module.getFilename()
if Utils.isDir(package_filename):
# Must be a namespace package.
assert Utils.python_version >= 330
package_dir = package_filename
# Only include it, if it contains actual modules, which will
# recurse to this one and find it again.
useful = False
else:
package_dir = Utils.dirname(package_filename)
# Real packages will always be included.
useful = True
debug(
"Package directory %s",
package_dir
)
for sub_path, sub_filename in Utils.listDir(package_dir):
if sub_filename in ("__init__.py", "__pycache__"):
continue
assert sub_path != plugin_filename
if Importing.isPackageDir(sub_path) or \
sub_path.endswith(".py"):
_checkPluginPath(sub_path, module.getFullName())
else:
# Modules should always be included.
useful = True
if useful:
ModuleRegistry.addRootModule(module)
else:
warning("Failed to include module from '%s'.", plugin_info[0])
def checkPluginPath(plugin_filename, module_package):
debug(
"Checking top level plug-in path %s %s",
plugin_filename,
module_package
)
plugin_info = considerFilename(
module_package = module_package,
module_filename = plugin_filename
)
if plugin_info is not None:
# File or package makes a difference, handle that
if Utils.isFile(plugin_info[0]) or \
Importing.isPackageDir(plugin_info[0]):
_checkPluginPath(plugin_filename, module_package)
elif Utils.isDir(plugin_info[0]):
for sub_path, sub_filename in Utils.listDir(plugin_info[0]):
assert sub_filename != "__init__.py"
if Importing.isPackageDir(sub_path) or \
sub_path.endswith(".py"):
_checkPluginPath(sub_path, None)
else:
warning("Failed to include module from '%s'.", plugin_info[0])
else:
warning("Failed to recurse to directory '%s'.", plugin_filename)
def checkPluginFilenamePattern(pattern):
import sys, glob
debug(
"Checking plug-in pattern '%s':",
pattern,
)
if Utils.isDir(pattern):
sys.exit("Error, pattern cannot be a directory name.")
found = False
for filename in glob.iglob(pattern):
if filename.endswith(".pyc"):
continue
if not Utils.isFile(filename):
continue
found = True
_checkPluginPath(filename, None)
if not found:
warning("Didn't match any files against pattern '%s'." % pattern)
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for Web Development Style Guide checker."""
import os
import re
import sys
import unittest
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([
os.path.normpath(os.path.join(test_dir, '..', '..', '..', 'tools')),
os.path.join(test_dir),
])
import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
from web_dev_style import css_checker, js_checker # pylint: disable=F0401
class JsStyleGuideTest(SuperMoxTestBase):
def setUp(self):
SuperMoxTestBase.setUp(self)
input_api = self.mox.CreateMockAnything()
input_api.re = re
output_api = self.mox.CreateMockAnything()
self.checker = js_checker.JSChecker(input_api, output_api)
def GetHighlight(self, line, error):
"""Returns the substring of |line| that is highlighted in |error|."""
error_lines = error.split('\n')
highlight = error_lines[error_lines.index(line) + 1]
return ''.join(ch1 for (ch1, ch2) in zip(line, highlight) if ch2 == '^')
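# Illustrative example (not in the original test): if |error| contains the
# offending line followed by a caret line, e.g.
#     "const foo = 'bar';"
#     "^^^^^"
# then GetHighlight() returns 'const', i.e. the characters marked with '^'.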
def ShouldFailConstCheck(self, line):
"""Checks that the 'const' checker flags |line| as a style error."""
error = self.checker.ConstCheck(1, line)
self.assertNotEqual('', error,
'Should be flagged as style error: ' + line)
self.assertEqual(self.GetHighlight(line, error), 'const')
def ShouldPassConstCheck(self, line):
"""Checks that the 'const' checker doesn't flag |line| as a style error."""
self.assertEqual('', self.checker.ConstCheck(1, line),
'Should not be flagged as style error: ' + line)
def testConstFails(self):
lines = [
"const foo = 'bar';",
" const bar = 'foo';",
# Trying to use |const| as a variable name
"var const = 0;",
"var x = 5; const y = 6;",
"for (var i=0, const e=10; i<e; i++) {",
"for (const x=0; x<foo; i++) {",
"while (const x = 7) {",
]
for line in lines:
self.ShouldFailConstCheck(line)
def testConstPasses(self):
lines = [
# sanity check
"var foo = 'bar'",
# @const JsDoc tag
"/** @const */ var SEVEN = 7;",
# @const tag in multi-line comment
" * @const",
" * @const",
# @constructor tag in multi-line comment
" * @constructor",
" * @constructor",
# words containing 'const'
"if (foo.constructor) {",
"var deconstruction = 'something';",
"var madeUpWordconst = 10;",
# Strings containing the word |const|
"var str = 'const at the beginning';",
"var str = 'At the end: const';",
# doing this one with regex is probably not practical
#"var str = 'a const in the middle';",
]
for line in lines:
self.ShouldPassConstCheck(line)
def ShouldFailChromeSendCheck(self, line):
"""Checks that the 'chrome.send' checker flags |line| as a style error."""
error = self.checker.ChromeSendCheck(1, line)
self.assertNotEqual('', error,
'Should be flagged as style error: ' + line)
self.assertEqual(self.GetHighlight(line, error), ', []')
def ShouldPassChromeSendCheck(self, line):
"""Checks that the 'chrome.send' checker doesn't flag |line| as a style
error.
"""
self.assertEqual('', self.checker.ChromeSendCheck(1, line),
'Should not be flagged as style error: ' + line)
def testChromeSendFails(self):
lines = [
"chrome.send('message', []);",
" chrome.send('message', []);",
]
for line in lines:
self.ShouldFailChromeSendCheck(line)
def testChromeSendPasses(self):
lines = [
"chrome.send('message', constructArgs('foo', []));",
" chrome.send('message', constructArgs('foo', []));",
"chrome.send('message', constructArgs([]));",
" chrome.send('message', constructArgs([]));",
]
for line in lines:
self.ShouldPassChromeSendCheck(line)
def ShouldFailGetElementByIdCheck(self, line):
"""Checks that the 'getElementById' checker flags |line| as a style
error.
"""
error = self.checker.GetElementByIdCheck(1, line)
self.assertNotEqual('', error,
'Should be flagged as style error: ' + line)
self.assertEqual(self.GetHighlight(line, error), 'document.getElementById')
def ShouldPassGetElementByIdCheck(self, line):
"""Checks that the 'getElementById' checker doesn't flag |line| as a style
error.
"""
self.assertEqual('', self.checker.GetElementByIdCheck(1, line),
'Should not be flagged as style error: ' + line)
def testGetElementByIdFails(self):
lines = [
"document.getElementById('foo');",
" document.getElementById('foo');",
"var x = document.getElementById('foo');",
"if (document.getElementById('foo').hidden) {",
]
for line in lines:
self.ShouldFailGetElementByIdCheck(line)
def testGetElementByIdPasses(self):
lines = [
"elem.ownerDocument.getElementById('foo');",
" elem.ownerDocument.getElementById('foo');",
"var x = elem.ownerDocument.getElementById('foo');",
"if (elem.ownerDocument.getElementById('foo').hidden) {",
"doc.getElementById('foo');",
" doc.getElementById('foo');",
"cr.doc.getElementById('foo');",
" cr.doc.getElementById('foo');",
"var x = doc.getElementById('foo');",
"if (doc.getElementById('foo').hidden) {",
]
for line in lines:
self.ShouldPassGetElementByIdCheck(line)
class CssStyleGuideTest(SuperMoxTestBase):
def setUp(self):
SuperMoxTestBase.setUp(self)
self.fake_file_name = 'fake.css'
self.fake_file = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.fake_file, 'LocalPath')
self.fake_file.LocalPath().AndReturn(self.fake_file_name)
# Actual calls to NewContents() are defined in each test.
self.mox.StubOutWithMock(self.fake_file, 'NewContents')
self.input_api = self.mox.CreateMockAnything()
self.input_api.re = re
self.mox.StubOutWithMock(self.input_api, 'AffectedSourceFiles')
self.input_api.AffectedFiles(
include_deletes=False, file_filter=None).AndReturn([self.fake_file])
# Actual creations of PresubmitPromptWarning are defined in each test.
self.output_api = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.output_api, 'PresubmitPromptWarning',
use_mock_anything=True)
author_msg = ('Was the CSS checker useful? '
'Send feedback or hate mail to dbeam@chromium.org.')
self.output_api = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.output_api, 'PresubmitNotifyResult',
use_mock_anything=True)
self.output_api.PresubmitNotifyResult(author_msg).AndReturn(None)
def VerifyContentsProducesOutput(self, contents, output):
self.fake_file.NewContents().AndReturn(contents.splitlines())
self.output_api.PresubmitPromptWarning(
self.fake_file_name + ':\n' + output.strip()).AndReturn(None)
self.mox.ReplayAll()
css_checker.CSSChecker(self.input_api, self.output_api).RunChecks()
def testCssAlphaWithAtBlock(self):
self.VerifyContentsProducesOutput("""
<include src="../shared/css/cr/ui/overlay.css">
<include src="chrome://resources/totally-cool.css" />
/* A hopefully safely ignored comment and @media statement. /**/
@media print {
div {
display: block;
color: red;
}
}
.rule {
z-index: 5;
<if expr="not is macosx">
background-image: url(chrome://resources/BLAH); /* TODO(dbeam): Fix this. */
background-color: rgb(235, 239, 249);
</if>
<if expr="is_macosx">
background-color: white;
background-image: url(chrome://resources/BLAH2);
</if>
color: black;
}
<if expr="is_macosx">
.language-options-right {
visibility: hidden;
opacity: 1; /* TODO(dbeam): Fix this. */
}
</if>""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
display: block;
color: red;
z-index: 5;
color: black;""")
def testCssAlphaWithNonStandard(self):
self.VerifyContentsProducesOutput("""
div {
/* A hopefully safely ignored comment and @media statement. /**/
color: red;
-webkit-margin-start: 5px;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
color: red;
-webkit-margin-start: 5px;""")
def testCssAlphaWithLongerDashedProps(self):
self.VerifyContentsProducesOutput("""
div {
border-left: 5px; /* A hopefully removed comment. */
border: 5px solid red;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
border-left: 5px;
border: 5px solid red;""")
def testCssBracesHaveSpaceBeforeAndNothingAfter(self):
self.VerifyContentsProducesOutput("""
/* Hello! */div/* Comment here*/{
display: block;
}
blah /* hey! */
{
rule: value;
}
.this.is { /* allowed */
rule: value;
}""", """
- Start braces ({) end a selector, have a space before them and no rules after.
div{
{""")
def testCssClassesUseDashes(self):
self.VerifyContentsProducesOutput("""
.className,
.ClassName,
.class-name /* We should not catch this. */,
.class_name {
display: block;
}""", """
- Classes use .dash-form.
.className,
.ClassName,
.class_name {""")
def testCssCloseBraceOnNewLine(self):
self.VerifyContentsProducesOutput("""
@media { /* TODO(dbeam) Fix this case. */
.rule {
display: block;
}}
@-webkit-keyframe blah {
100% { height: -500px 0; }
}
#rule {
rule: value; }""", """
- Always put a rule closing brace (}) on a new line.
rule: value; }""")
def testCssColonsHaveSpaceAfter(self):
self.VerifyContentsProducesOutput("""
div:not(.class):not([attr=5]), /* We should not catch this. */
div:not(.class):not([attr]) /* Nor this. */ {
background: url(data:image/jpeg,asdfasdfsadf); /* Ignore this. */
background: -webkit-linear-gradient(left, red,
80% blah blee blar);
color: red;
display:block;
}""", """
- Colons (:) should have a space after them.
display:block;
- Don't use data URIs in source files. Use grit instead.
background: url(data:image/jpeg,asdfasdfsadf);""")
def testCssFavorSingleQuotes(self):
self.VerifyContentsProducesOutput("""
html[dir="rtl"] body,
html[dir=ltr] body /* TODO(dbeam): Require '' around rtl in future? */ {
background: url("chrome://resources/BLAH");
font-family: "Open Sans";
<if expr="is_macosx">
blah: blee;
</if>
}""", """
- Use single quotes (') instead of double quotes (") in strings.
html[dir="rtl"] body,
background: url("chrome://resources/BLAH");
font-family: "Open Sans";""")
def testCssHexCouldBeShorter(self):
self.VerifyContentsProducesOutput("""
#abc,
#abc-,
#abc-ghij,
#abcdef-,
#abcdef-ghij,
#aaaaaa,
#bbaacc {
background-color: #336699; /* Ignore short hex rule if not gray. */
color: #999999;
color: #666;
}""", """
- Use abbreviated hex (#rgb) when in form #rrggbb.
color: #999999; (replace with #999)
- Use rgb() over #hex when not a shade of gray (like #333).
background-color: #336699; (replace with rgb(51, 102, 153))""")
def testCssUseMillisecondsForSmallTimes(self):
self.VerifyContentsProducesOutput("""
.transition-0s /* This is gross but may happen. */ {
transform: one 0.2s;
transform: two .1s;
transform: tree 1s;
transform: four 300ms;
}""", """
- Use milliseconds for time measurements under 1 second.
transform: one 0.2s; (replace with 200ms)
transform: two .1s; (replace with 100ms)""")
def testCssNoDataUrisInSourceFiles(self):
self.VerifyContentsProducesOutput("""
img {
background: url( data:image/jpeg,4\/\/350|\/|3|2 );
background: url('data:image/jpeg,4\/\/350|\/|3|2');
}""", """
- Don't use data URIs in source files. Use grit instead.
background: url( data:image/jpeg,4\/\/350|\/|3|2 );
background: url('data:image/jpeg,4\/\/350|\/|3|2');""")
def testCssOneRulePerLine(self):
self.VerifyContentsProducesOutput("""
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type,
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type ~
input[type='checkbox']:not([hidden]),
div {
background: url(chrome://resources/BLAH);
rule: value; /* rule: value; */
rule: value; rule: value;
}""", """
- One rule per line (what not to do: color: red; margin: 0;).
rule: value; rule: value;""")
def testCssOneSelectorPerLine(self):
self.VerifyContentsProducesOutput("""
a,
div,a,
div,/* Hello! */ span,
#id.class([dir=rtl):not(.class):any(a, b, d) {
rule: value;
}
a,
div,a {
some-other: rule here;
}""", """
- One selector per line (what not to do: a, b {}).
div,a,
div, span,
div,a {""")
def testCssRgbIfNotGray(self):
self.VerifyContentsProducesOutput("""
#abc,
#aaa,
#aabbcc {
background: -webkit-linear-gradient(left, from(#abc), to(#def));
color: #bad;
color: #bada55;
}""", """
- Use rgb() over #hex when not a shade of gray (like #333).
background: -webkit-linear-gradient(left, from(#abc), to(#def)); """
"""(replace with rgb(170, 187, 204), rgb(221, 238, 255))
color: #bad; (replace with rgb(187, 170, 221))
color: #bada55; (replace with rgb(186, 218, 85))""")
def testCssZeroLengthTerms(self):
self.VerifyContentsProducesOutput("""
@-webkit-keyframe anim {
0% { /* Ignore key frames */
width: 0px;
}
10% {
width: 10px;
}
100% {
width: 100px;
}
}
.media-button.play > .state0.active,
.media-button[state='0'] > .state0.normal /* blah */, /* blee */
.media-button[state='0']:not(.disabled):hover > .state0.hover {
-webkit-animation: anim 0s;
-webkit-animation-duration: anim 0ms;
-webkit-transform: scale(0%),
translateX(0deg),
translateY(0rad),
translateZ(0grad);
background-position-x: 0em;
background-position-y: 0ex;
border-width: 0em;
color: hsl(0, 0%, 85%); /* Shouldn't trigger error. */
opacity: .0;
opacity: 0.0;
opacity: 0.;
}
@page {
border-width: 0mm;
height: 0cm;
width: 0in;
}""", """
- Make all zero length terms (i.e. 0px) 0 unless inside of hsl() or part of"""
""" @keyframe.
width: 0px;
-webkit-animation: anim 0s;
-webkit-animation-duration: anim 0ms;
-webkit-transform: scale(0%),
translateX(0deg),
translateY(0rad),
translateZ(0grad);
background-position-x: 0em;
background-position-y: 0ex;
border-width: 0em;
opacity: .0;
opacity: 0.0;
opacity: 0.;
border-width: 0mm;
height: 0cm;
width: 0in;
""")
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2001 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that generates the build.ninja for ninja itself.
Projects that use ninja themselves should either write a similar script
or use a meta-build system that supports Ninja output."""
from __future__ import print_function
from optparse import OptionParser
import os
import pipes
import string
import subprocess
import sys
sourcedir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(sourcedir, 'misc'))
import ninja_syntax
class Platform(object):
"""Represents a host/target platform and its specific build attributes."""
def __init__(self, platform):
self._platform = platform
if self._platform is not None:
return
self._platform = sys.platform
if self._platform.startswith('linux'):
self._platform = 'linux'
elif self._platform.startswith('freebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('gnukfreebsd'):
self._platform = 'freebsd'
elif self._platform.startswith('openbsd'):
self._platform = 'openbsd'
elif self._platform.startswith('solaris') or self._platform == 'sunos5':
self._platform = 'solaris'
elif self._platform.startswith('mingw'):
self._platform = 'mingw'
elif self._platform.startswith('win'):
self._platform = 'msvc'
elif self._platform.startswith('bitrig'):
self._platform = 'bitrig'
elif self._platform.startswith('netbsd'):
self._platform = 'netbsd'
elif self._platform.startswith('aix'):
self._platform = 'aix'
elif self._platform.startswith('dragonfly'):
self._platform = 'dragonfly'
@staticmethod
def known_platforms():
return ['linux', 'darwin', 'freebsd', 'openbsd', 'solaris', 'sunos5',
'mingw', 'msvc', 'gnukfreebsd', 'bitrig', 'netbsd', 'aix',
'dragonfly']
def platform(self):
return self._platform
def is_linux(self):
return self._platform == 'linux'
def is_mingw(self):
return self._platform == 'mingw'
def is_msvc(self):
return self._platform == 'msvc'
def msvc_needs_fs(self):
popen = subprocess.Popen(['cl', '/nologo', '/?'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
return b'/FS' in out
def is_windows(self):
return self.is_mingw() or self.is_msvc()
def is_solaris(self):
return self._platform == 'solaris'
def is_aix(self):
return self._platform == 'aix'
def uses_usr_local(self):
return self._platform in ('freebsd', 'openbsd', 'bitrig', 'dragonfly', 'netbsd')
def supports_ppoll(self):
return self._platform in ('freebsd', 'linux', 'openbsd', 'bitrig',
'dragonfly')
def supports_ninja_browse(self):
return (not self.is_windows()
and not self.is_solaris()
and not self.is_aix())
def can_rebuild_in_place(self):
return not (self.is_windows() or self.is_aix())
class Bootstrap:
"""API shim for ninja_syntax.Writer that instead runs the commands.
Used to bootstrap Ninja from scratch. In --bootstrap mode this
class is used to execute all the commands to build an executable.
It also proxies all calls to an underlying ninja_syntax.Writer, to
behave like non-bootstrap mode.
"""
def __init__(self, writer, verbose=False):
self.writer = writer
self.verbose = verbose
# Map of variable name => expanded variable value.
self.vars = {}
# Map of rule name => dict of rule attributes.
self.rules = {
'phony': {}
}
def comment(self, text):
return self.writer.comment(text)
def newline(self):
return self.writer.newline()
def variable(self, key, val):
# In bootstrap mode, we have no ninja process to catch /showIncludes
# output.
self.vars[key] = self._expand(val).replace('/showIncludes', '')
return self.writer.variable(key, val)
def rule(self, name, **kwargs):
self.rules[name] = kwargs
return self.writer.rule(name, **kwargs)
def build(self, outputs, rule, inputs=None, **kwargs):
ruleattr = self.rules[rule]
cmd = ruleattr.get('command')
if cmd is None: # A phony rule, for example.
return
# Implement just enough of Ninja variable expansion etc. to
# make the bootstrap build work.
local_vars = {
'in': self._expand_paths(inputs),
'out': self._expand_paths(outputs)
}
for key, val in kwargs.get('variables', []):
local_vars[key] = ' '.join(ninja_syntax.as_list(val))
self._run_command(self._expand(cmd, local_vars))
return self.writer.build(outputs, rule, inputs, **kwargs)
def default(self, paths):
return self.writer.default(paths)
def _expand_paths(self, paths):
"""Expand $vars in an array of paths, e.g. from a 'build' block."""
paths = ninja_syntax.as_list(paths)
return ' '.join(map(self._shell_escape, (map(self._expand, paths))))
def _expand(self, str, local_vars={}):
"""Expand $vars in a string."""
return ninja_syntax.expand(str, self.vars, local_vars)
def _shell_escape(self, path):
"""Quote paths containing spaces."""
return '"%s"' % path if ' ' in path else path
def _run_command(self, cmdline):
"""Run a subcommand, quietly. Prints the full command on error."""
try:
if self.verbose:
print(cmdline)
subprocess.check_call(cmdline, shell=True)
except subprocess.CalledProcessError:
print('when running: ', cmdline)
raise
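# --- Illustration (not part of the original script) ---
# Bootstrap mirrors the ninja_syntax.Writer API, so the same configure calls
# are either written to build.ninja or executed immediately. A minimal sketch
# with made-up rule and file names:
#
#     w = Bootstrap(ninja_syntax.Writer(open('build.ninja', 'w')), verbose=True)
#     w.variable('cxx', 'g++')
#     w.rule('cxx', command='$cxx -c $in -o $out')
#     w.build('hello.o', 'cxx', 'hello.cc')   # in bootstrap mode this runs g++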
parser = OptionParser()
profilers = ['gmon', 'pprof']
parser.add_option('--bootstrap', action='store_true',
help='bootstrap a ninja binary from nothing')
parser.add_option('--verbose', action='store_true',
help='enable verbose build')
parser.add_option('--platform',
help='target platform (' +
'/'.join(Platform.known_platforms()) + ')',
choices=Platform.known_platforms())
parser.add_option('--host',
help='host platform (' +
'/'.join(Platform.known_platforms()) + ')',
choices=Platform.known_platforms())
parser.add_option('--debug', action='store_true',
help='enable debugging extras',)
parser.add_option('--profile', metavar='TYPE',
choices=profilers,
help='enable profiling (' + '/'.join(profilers) + ')',)
parser.add_option('--with-gtest', metavar='PATH', help='ignored')
parser.add_option('--with-python', metavar='EXE',
help='use EXE as the Python interpreter',
default=os.path.basename(sys.executable))
parser.add_option('--force-pselect', action='store_true',
help='ppoll() is used by default where available, '
'but some platforms may need to use pselect instead',)
(options, args) = parser.parse_args()
if args:
print('ERROR: extra unparsed command-line arguments:', args)
sys.exit(1)
platform = Platform(options.platform)
if options.host:
host = Platform(options.host)
else:
host = platform
BUILD_FILENAME = 'build.ninja'
ninja_writer = ninja_syntax.Writer(open(BUILD_FILENAME, 'w'))
n = ninja_writer
if options.bootstrap:
# Make the build directory.
try:
os.mkdir('build')
except OSError:
pass
# Wrap ninja_writer with the Bootstrapper, which also executes the
# commands.
print('bootstrapping ninja...')
n = Bootstrap(n, verbose=options.verbose)
n.comment('This file is used to build ninja itself.')
n.comment('It is generated by ' + os.path.basename(__file__) + '.')
n.newline()
n.variable('ninja_required_version', '1.3')
n.newline()
n.comment('The arguments passed to configure.py, for rerunning it.')
configure_args = sys.argv[1:]
if '--bootstrap' in configure_args:
configure_args.remove('--bootstrap')
n.variable('configure_args', ' '.join(configure_args))
env_keys = set(['CXX', 'AR', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS'])
configure_env = dict((k, os.environ[k]) for k in os.environ if k in env_keys)
if configure_env:
config_str = ' '.join([k + '=' + pipes.quote(configure_env[k])
for k in configure_env])
n.variable('configure_env', config_str + '$ ')
n.newline()
CXX = configure_env.get('CXX', 'g++')
objext = '.o'
if platform.is_msvc():
CXX = 'cl'
objext = '.obj'
def src(filename):
return os.path.join('$root', 'src', filename)
def built(filename):
return os.path.join('$builddir', filename)
def doc(filename):
return os.path.join('$root', 'doc', filename)
def cc(name, **kwargs):
return n.build(built(name + objext), 'cxx', src(name + '.c'), **kwargs)
def cxx(name, **kwargs):
return n.build(built(name + objext), 'cxx', src(name + '.cc'), **kwargs)
def binary(name):
if platform.is_windows():
exe = name + '.exe'
n.build(name, 'phony', exe)
return exe
return name
root = sourcedir
if root == os.getcwd():
# In the common case where we're building directly in the source
# tree, simplify all the paths to just be cwd-relative.
root = '.'
n.variable('root', root)
n.variable('builddir', 'build')
n.variable('cxx', CXX)
if platform.is_msvc():
n.variable('ar', 'link')
else:
n.variable('ar', configure_env.get('AR', 'ar'))
if platform.is_msvc():
cflags = ['/showIncludes',
'/nologo', # Don't print startup banner.
'/Zi', # Create pdb with debug info.
'/W4', # Highest warning level.
'/WX', # Warnings as errors.
'/wd4530', '/wd4100', '/wd4706', '/wd4244',
'/wd4512', '/wd4800', '/wd4702', '/wd4819',
# Disable warnings about constant conditional expressions.
'/wd4127',
# Disable warnings about passing "this" during initialization.
'/wd4355',
# Disable warnings about ignored typedef in DbgHelp.h
'/wd4091',
'/GR-', # Disable RTTI.
# Disable size_t -> int truncation warning.
# We never have strings or arrays larger than 2**31.
'/wd4267',
'/DNOMINMAX', '/D_CRT_SECURE_NO_WARNINGS',
'/D_HAS_EXCEPTIONS=0',
'/DNINJA_PYTHON="%s"' % options.with_python]
if platform.msvc_needs_fs():
cflags.append('/FS')
ldflags = ['/DEBUG', '/libpath:$builddir']
if not options.debug:
cflags += ['/Ox', '/DNDEBUG', '/GL']
ldflags += ['/LTCG', '/OPT:REF', '/OPT:ICF']
else:
cflags = ['-g', '-Wall', '-Wextra',
'-Wno-deprecated',
'-Wno-missing-field-initializers',
'-Wno-unused-parameter',
'-fno-rtti',
'-fno-exceptions',
'-fvisibility=hidden', '-pipe',
'-DNINJA_PYTHON="%s"' % options.with_python]
if options.debug:
cflags += ['-D_GLIBCXX_DEBUG', '-D_GLIBCXX_DEBUG_PEDANTIC']
cflags.remove('-fno-rtti') # Needed for above pedanticness.
else:
cflags += ['-O2', '-DNDEBUG']
try:
proc = subprocess.Popen(
[CXX, '-fdiagnostics-color', '-c', '-x', 'c++', '/dev/null',
'-o', '/dev/null'],
stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)
if proc.wait() == 0:
cflags += ['-fdiagnostics-color']
except:
pass
if platform.is_mingw():
cflags += ['-D_WIN32_WINNT=0x0501']
ldflags = ['-L$builddir']
if platform.uses_usr_local():
cflags.append('-I/usr/local/include')
ldflags.append('-L/usr/local/lib')
if platform.is_aix():
# printf formats for int64_t, uint64_t; large file support
cflags.append('-D__STDC_FORMAT_MACROS')
cflags.append('-D_LARGE_FILES')
libs = []
if platform.is_mingw():
cflags.remove('-fvisibility=hidden');
ldflags.append('-static')
elif platform.is_solaris():
cflags.remove('-fvisibility=hidden')
elif platform.is_aix():
cflags.remove('-fvisibility=hidden')
elif platform.is_msvc():
pass
else:
if options.profile == 'gmon':
cflags.append('-pg')
ldflags.append('-pg')
elif options.profile == 'pprof':
cflags.append('-fno-omit-frame-pointer')
libs.extend(['-Wl,--no-as-needed', '-lprofiler'])
if platform.supports_ppoll() and not options.force_pselect:
cflags.append('-DUSE_PPOLL')
if platform.supports_ninja_browse():
cflags.append('-DNINJA_HAVE_BROWSE')
# Search for generated headers relative to build dir.
cflags.append('-I.')
def shell_escape(str):
"""Escape str such that it's interpreted as a single argument by
the shell."""
# This isn't complete, but it's just enough to make NINJA_PYTHON work.
if platform.is_windows():
return str
if '"' in str:
return "'%s'" % str.replace("'", "\\'")
return str
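# Example (illustrative): on POSIX, shell_escape('-DNINJA_PYTHON="py3"') wraps
# the flag in single quotes because it contains a double quote; on Windows the
# string is returned unchanged.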
if 'CFLAGS' in configure_env:
cflags.append(configure_env['CFLAGS'])
ldflags.append(configure_env['CFLAGS'])
if 'CXXFLAGS' in configure_env:
cflags.append(configure_env['CXXFLAGS'])
ldflags.append(configure_env['CXXFLAGS'])
n.variable('cflags', ' '.join(shell_escape(flag) for flag in cflags))
if 'LDFLAGS' in configure_env:
ldflags.append(configure_env['LDFLAGS'])
n.variable('ldflags', ' '.join(shell_escape(flag) for flag in ldflags))
n.newline()
if platform.is_msvc():
n.rule('cxx',
command='$cxx $cflags -c $in /Fo$out /Fd' + built('$pdb'),
description='CXX $out',
deps='msvc' # /showIncludes is included in $cflags.
)
else:
n.rule('cxx',
command='$cxx -MMD -MT $out -MF $out.d $cflags -c $in -o $out',
depfile='$out.d',
deps='gcc',
description='CXX $out')
n.newline()
if host.is_msvc():
n.rule('ar',
command='lib /nologo /ltcg /out:$out $in',
description='LIB $out')
elif host.is_mingw():
n.rule('ar',
command='cmd /c $ar cqs $out.tmp $in && move /Y $out.tmp $out',
description='AR $out')
else:
n.rule('ar',
command='rm -f $out && $ar crs $out $in',
description='AR $out')
n.newline()
if platform.is_msvc():
n.rule('link',
command='$cxx $in $libs /nologo /link $ldflags /out:$out',
description='LINK $out')
else:
n.rule('link',
command='$cxx $ldflags -o $out $in $libs',
description='LINK $out')
n.newline()
objs = []
if platform.supports_ninja_browse():
n.comment('browse_py.h is used to inline browse.py.')
n.rule('inline',
command='"%s"' % src('inline.sh') + ' $varname < $in > $out',
description='INLINE $out')
n.build(built('browse_py.h'), 'inline', src('browse.py'),
implicit=src('inline.sh'),
variables=[('varname', 'kBrowsePy')])
n.newline()
objs += cxx('browse', order_only=built('browse_py.h'))
n.newline()
n.comment('the depfile parser and ninja lexers are generated using re2c.')
def has_re2c():
try:
proc = subprocess.Popen(['re2c', '-V'], stdout=subprocess.PIPE)
return int(proc.communicate()[0], 10) >= 1103
except OSError:
return False
if has_re2c():
n.rule('re2c',
command='re2c -b -i --no-generation-date -o $out $in',
description='RE2C $out')
# Generate the .cc files in the source directory so we can check them in.
n.build(src('depfile_parser.cc'), 're2c', src('depfile_parser.in.cc'))
n.build(src('lexer.cc'), 're2c', src('lexer.in.cc'))
else:
print("warning: A compatible version of re2c (>= 0.11.3) was not found; "
"changes to src/*.in.cc will not affect your build.")
n.newline()
n.comment('Core source files all build into ninja library.')
cxxvariables = []
if platform.is_msvc():
cxxvariables = [('pdb', 'ninja.pdb')]
for name in ['build',
'build_log',
'clean',
'clparser',
'debug_flags',
'depfile_parser',
'deps_log',
'disk_interface',
'dyndep',
'dyndep_parser',
'edit_distance',
'eval_env',
'graph',
'graphviz',
'lexer',
'line_printer',
'manifest_parser',
'metrics',
'parser',
'state',
'string_piece_util',
'util',
'version']:
objs += cxx(name, variables=cxxvariables)
if platform.is_windows():
for name in ['subprocess-win32',
'includes_normalize-win32',
'msvc_helper-win32',
'msvc_helper_main-win32']:
objs += cxx(name, variables=cxxvariables)
if platform.is_msvc():
objs += cxx('minidump-win32', variables=cxxvariables)
objs += cc('getopt')
else:
objs += cxx('subprocess-posix')
if platform.is_aix():
objs += cc('getopt')
if platform.is_msvc():
ninja_lib = n.build(built('ninja.lib'), 'ar', objs)
else:
ninja_lib = n.build(built('libninja.a'), 'ar', objs)
n.newline()
if platform.is_msvc():
libs.append('ninja.lib')
else:
libs.append('-lninja')
if platform.is_aix():
libs.append('-lperfstat')
all_targets = []
n.comment('Main executable is library plus main() function.')
objs = cxx('ninja', variables=cxxvariables)
ninja = n.build(binary('ninja'), 'link', objs, implicit=ninja_lib,
variables=[('libs', libs)])
n.newline()
all_targets += ninja
if options.bootstrap:
# We've built the ninja binary. Don't run any more commands
# through the bootstrap executor, but continue writing the
# build.ninja file.
n = ninja_writer
n.comment('Tests all build into ninja_test executable.')
objs = []
if platform.is_msvc():
cxxvariables = [('pdb', 'ninja_test.pdb')]
for name in ['build_log_test',
'build_test',
'clean_test',
'clparser_test',
'depfile_parser_test',
'deps_log_test',
'dyndep_parser_test',
'disk_interface_test',
'edit_distance_test',
'graph_test',
'lexer_test',
'manifest_parser_test',
'ninja_test',
'state_test',
'string_piece_util_test',
'subprocess_test',
'test',
'util_test']:
objs += cxx(name, variables=cxxvariables)
if platform.is_windows():
for name in ['includes_normalize_test', 'msvc_helper_test']:
objs += cxx(name, variables=cxxvariables)
ninja_test = n.build(binary('ninja_test'), 'link', objs, implicit=ninja_lib,
variables=[('libs', libs)])
n.newline()
all_targets += ninja_test
n.comment('Ancillary executables.')
for name in ['build_log_perftest',
'canon_perftest',
'depfile_parser_perftest',
'hash_collision_bench',
'manifest_parser_perftest',
'clparser_perftest']:
if platform.is_msvc():
cxxvariables = [('pdb', name + '.pdb')]
objs = cxx(name, variables=cxxvariables)
all_targets += n.build(binary(name), 'link', objs,
implicit=ninja_lib, variables=[('libs', libs)])
n.newline()
n.comment('Generate a graph using the "graph" tool.')
n.rule('gendot',
command='./ninja -t graph all > $out')
n.rule('gengraph',
command='dot -Tpng $in > $out')
dot = n.build(built('graph.dot'), 'gendot', ['ninja', 'build.ninja'])
n.build('graph.png', 'gengraph', dot)
n.newline()
n.comment('Generate the manual using asciidoc.')
n.rule('asciidoc',
command='asciidoc -b docbook -d book -o $out $in',
description='ASCIIDOC $out')
n.rule('xsltproc',
command='xsltproc --nonet doc/docbook.xsl $in > $out',
description='XSLTPROC $out')
docbookxml = n.build(built('manual.xml'), 'asciidoc', doc('manual.asciidoc'))
manual = n.build(doc('manual.html'), 'xsltproc', docbookxml,
implicit=[doc('style.css'), doc('docbook.xsl')])
n.build('manual', 'phony',
order_only=manual)
n.newline()
n.rule('dblatex',
command='dblatex -q -o $out -p doc/dblatex.xsl $in',
description='DBLATEX $out')
n.build(doc('manual.pdf'), 'dblatex', docbookxml,
implicit=[doc('dblatex.xsl')])
n.comment('Generate Doxygen.')
n.rule('doxygen',
command='doxygen $in',
description='DOXYGEN $in')
n.variable('doxygen_mainpage_generator',
src('gen_doxygen_mainpage.sh'))
n.rule('doxygen_mainpage',
command='$doxygen_mainpage_generator $in > $out',
description='DOXYGEN_MAINPAGE $out')
mainpage = n.build(built('doxygen_mainpage'), 'doxygen_mainpage',
['README', 'COPYING'],
implicit=['$doxygen_mainpage_generator'])
n.build('doxygen', 'doxygen', doc('doxygen.config'),
implicit=mainpage)
n.newline()
if not host.is_mingw():
n.comment('Regenerate build files if build script changes.')
n.rule('configure',
command='${configure_env}%s $root/configure.py $configure_args' %
options.with_python,
generator=True)
n.build('build.ninja', 'configure',
implicit=['$root/configure.py',
os.path.normpath('$root/misc/ninja_syntax.py')])
n.newline()
n.default(ninja)
n.newline()
if host.is_linux():
n.comment('Packaging')
n.rule('rpmbuild',
command="misc/packaging/rpmbuild.sh",
description='Building rpms..')
n.build('rpm', 'rpmbuild')
n.newline()
n.build('all', 'phony', all_targets)
n.close()
print('wrote %s.' % BUILD_FILENAME)
if options.bootstrap:
print('bootstrap complete. rebuilding...')
rebuild_args = []
if platform.can_rebuild_in_place():
rebuild_args.append('./ninja')
else:
if platform.is_windows():
bootstrap_exe = 'ninja.bootstrap.exe'
final_exe = 'ninja.exe'
else:
bootstrap_exe = './ninja.bootstrap'
final_exe = './ninja'
if os.path.exists(bootstrap_exe):
os.unlink(bootstrap_exe)
os.rename(final_exe, bootstrap_exe)
rebuild_args.append(bootstrap_exe)
if options.verbose:
rebuild_args.append('-v')
subprocess.check_call(rebuild_args)
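# Hedged usage note (added for exposition, not part of the original script):
# the bootstrap path above is what a plain source checkout exercises when it
# runs
#     ./configure.py --bootstrap
# configure.py first builds ninja by executing the generated commands itself,
# then (on platforms that cannot rebuild in place) renames the result out of
# the way and re-runs the freshly built binary against build.ninja.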
|
|
import json
import unittest
from hamcrest import assert_that, is_
from mock import patch
from tests.support.performanceplatform_client import fake_data_set_exists
from tests.support.test_helpers import is_bad_request, is_ok, \
is_error_response, has_status, is_not_found
from tests.support.test_helpers import is_unauthorized
from backdrop.write import api
class PostDataTestCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
@fake_data_set_exists("foo")
def test_needs_an_authorization_header_even_if_no_token_is_configured(self):
response = self.app.post(
'/foo',
data='[]',
)
        assert_that(response, is_unauthorized())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
def test_needs_an_authorization_header(self):
response = self.app.post(
'/foo',
data='[]',
)
        assert_that(response, is_unauthorized())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
def test_authorization_header_must_be_correct_format(self):
response = self.app.post(
'/foo',
data='[]',
headers=[('Authorization', 'Bearer')],
)
        assert_that(response, is_unauthorized())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token="")
def test_authorization_header_must_not_be_empty(self):
response = self.app.post(
'/foo',
data='[]',
headers=[('Authorization', 'Bearer')],
)
        assert_that(response, is_unauthorized())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token=None)
def test_authorization_header_must_not_be_none(self):
response = self.app.post(
'/foo',
data='[]',
headers=[('Authorization', 'Bearer')],
)
        assert_that(response, is_unauthorized())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
def test_authorization_header_must_match_server_side_value(self):
response = self.app.post(
'/foo',
data='[]',
headers=[('Authorization', 'Bearer not-foo-bearer-token')],
)
        assert_that(response, is_unauthorized())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
def test_request_must_be_json(self):
response = self.app.post(
'/foo',
data='foobar',
headers=[('Authorization', 'Bearer foo-bearer-token')],
)
        assert_that(response, is_bad_request())
        assert_that(response, is_error_response(json.dumps(["ValidationError('Expected header: Content-type: application/json',)"])))
@fake_data_set_exists("foo_data_set", bearer_token="foo_data_set-bearer-token")
@patch("backdrop.core.data_set.DataSet.store")
def test_empty_list_gets_accepted(self, store):
self.app.post(
'/foo_data_set',
data='[]',
content_type="application/json",
headers=[('Authorization', 'Bearer foo_data_set-bearer-token')],
)
store.assert_called_with(
[]
)
@fake_data_set_exists("foo_data_set", bearer_token="foo_data_set-bearer-token")
@patch("backdrop.core.data_set.DataSet.store")
def test_data_gets_stored(self, store):
self.app.post(
'/foo_data_set',
data='{"foo": "bar"}',
content_type="application/json",
headers=[('Authorization', 'Bearer foo_data_set-bearer-token')],
)
store.assert_called_with(
[{"foo": "bar"}]
)
@fake_data_set_exists("foo_data_set", bearer_token="foo_data_set-bearer-token")
@patch("backdrop.core.data_set.DataSet.create_if_not_exists")
@patch("backdrop.core.data_set.DataSet.store")
def test_data_set_is_created_on_write(self, store, create_if_not_exists):
self.app.post(
'/foo_data_set',
data='{"foo": "bar"}',
content_type="application/json",
headers=[("Authorization", "Bearer foo_data_set-bearer-token")],
)
create_if_not_exists.assert_called_once_with()
@fake_data_set_exists("foo_data_set", bearer_token="foo_data_set-bearer-token")
def test_data_with_empty_keys_400s(self):
response = self.app.post(
'/foo_data_set',
data = '{"": ""}',
content_type = "application/json",
headers=[('Authorization', 'Bearer foo_data_set-bearer-token')],
)
        assert_that(response, is_bad_request())
        assert_that(response, is_error_response())
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
@patch("backdrop.core.data_set.DataSet.store")
def test__id_gets_stored(self, mock_store):
mock_store.return_value = []
response = self.app.post(
'/foo',
data = '{"_id": "foo"}',
content_type = "application/json",
headers=[('Authorization', 'Bearer foo-bearer-token')],
)
assert_that(response, is_ok())
mock_store.assert_called_with(
[{"_id": "foo"}]
)
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
def test_invalid__id_returns_400(self):
response = self.app.post(
'/foo',
data = '{"_id": "f o o"}',
content_type = "application/json",
headers=[('Authorization', 'Bearer foo-bearer-token')],
)
        assert_that(response, is_bad_request())
        assert_that(response, is_error_response())
@patch("backdrop.write.api.statsd")
@patch("backdrop.core.data_set.DataSet.store")
@fake_data_set_exists("foo", bearer_token="foo-bearer-token")
def test_exception_handling(self, store, statsd):
store.side_effect = RuntimeError("BOOM")
response = self.app.post(
"/foo",
data="{}",
content_type='application/json',
headers=[('Authorization', 'Bearer foo-bearer-token')]
)
assert_that(response, has_status(500))
assert_that(response, is_error_response())
statsd.incr.assert_called_with("write.error", data_set="foo")
class ApiHealthCheckTestCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
self.stored_data_set = None
self.stored_data = None
def test_api_exposes_a_healthcheck(self):
response = self.app.get("/_status")
assert_that(response, is_ok())
assert_that(response.headers["Content-Type"], is_("application/json"))
entity = json.loads(response.data)
assert_that(entity["status"], is_("ok"))
class UploadPageTestCase(unittest.TestCase):
def setUp(self):
self.app = api.app.test_client()
def test_invalid_data_set_name_returns_400(self):
response = self.app.get("/$invalid_data_set/upload")
assert_that(response, is_not_found())
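# Hedged illustration (added for exposition, not part of the original test
# suite): a client posting to the write API exercised above needs a JSON body
# and an Authorization header whose bearer token matches the data set's
# configured token. The host, port and data-set name below are assumptions.
def _example_write_request():  # pragma: no cover
    import requests
    return requests.post(
        'http://localhost:3039/foo_data_set',
        json=[{'foo': 'bar'}],
        headers={'Authorization': 'Bearer foo_data_set-bearer-token'},
    )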
|
|
################################################################################
# Copyright (C) 2011-2012,2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Module for the categorical distribution node.
"""
import numpy as np
from .node import ensureparents
from .expfamily import (ExponentialFamily,
useconstructor)
from .multinomial import (MultinomialMoments,
MultinomialDistribution,
Multinomial)
from .dirichlet import DirichletMoments
from bayespy.utils import random
from bayespy.utils import misc
class CategoricalMoments(MultinomialMoments):
"""
Class for the moments of categorical variables.
"""
def __init__(self, categories):
"""
Create moments object for categorical variables
"""
self.D = categories
super().__init__()
def compute_fixed_moments(self, x):
"""
Compute the moments for a fixed value
"""
# Check that x is valid
x = np.asanyarray(x)
if not misc.isinteger(x):
raise ValueError("Values must be integers")
if np.any(x < 0) or np.any(x >= self.D):
raise ValueError("Invalid category index")
u0 = np.zeros((np.size(x), self.D))
        u0[np.arange(np.size(x)), np.ravel(x)] = 1
u0 = np.reshape(u0, np.shape(x) + (self.D,))
return [u0]
def compute_dims_from_values(self, x):
"""
Return the shape of the moments for a fixed value.
The observations are scalar.
"""
return ( (self.D,), )
class CategoricalDistribution(MultinomialDistribution):
"""
Class for the VMP formulas of categorical variables.
"""
def __init__(self, categories):
"""
Create VMP formula node for a categorical variable
`categories` is the total number of categories.
"""
if not isinstance(categories, int):
raise ValueError("Number of categories must be integer")
if categories < 0:
raise ValueError("Number of categoriess must be non-negative")
self.D = categories
super().__init__(1)
def compute_message_to_parent(self, parent, index, u, u_p):
"""
Compute the message to a parent node.
"""
return super().compute_message_to_parent(parent, index, u, u_p)
def compute_phi_from_parents(self, u_p, mask=True):
"""
Compute the natural parameter vector given parent moments.
"""
return super().compute_phi_from_parents(u_p, mask=mask)
def compute_moments_and_cgf(self, phi, mask=True):
"""
Compute the moments and :math:`g(\phi)`.
"""
return super().compute_moments_and_cgf(phi, mask=mask)
def compute_cgf_from_parents(self, u_p):
"""
Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
"""
return super().compute_cgf_from_parents(u_p)
def compute_fixed_moments_and_f(self, x, mask=True):
"""
Compute the moments and :math:`f(x)` for a fixed value.
"""
# Check the validity of x
x = np.asanyarray(x)
if not misc.isinteger(x):
raise ValueError("Values must be integers")
if np.any(x < 0) or np.any(x >= self.D):
raise ValueError("Invalid category index")
# Form a binary matrix with only one non-zero (1) in the last axis
u0 = np.zeros((np.size(x), self.D))
        u0[np.arange(np.size(x)), np.ravel(x)] = 1
u0 = np.reshape(u0, np.shape(x) + (self.D,))
u = [u0]
# f(x) is zero
f = 0
return (u, f)
def random(self, *phi, plates=None):
"""
Draw a random sample from the distribution.
"""
        # Subtract into a new array so the caller's phi[0] is not mutated in place.
        logp = phi[0] - np.amax(phi[0], axis=-1, keepdims=True)
p = np.exp(logp)
return random.categorical(p, size=plates)
class Categorical(ExponentialFamily):
r"""
Node for categorical random variables.
The node models a categorical random variable :math:`x \in \{0,\ldots,K-1\}`
with prior probabilities :math:`\{p_0, \ldots, p_{K-1}\}` for each category:
.. math::
p(x=k) = p_k \quad \text{for } k\in \{0,\ldots,K-1\}.
Parameters
----------
p : Dirichlet-like node or (...,K)-array
Probabilities for each category
See also
--------
Bernoulli, Multinomial, Dirichlet
"""
_parent_moments = [DirichletMoments()]
def __init__(self, p, **kwargs):
"""
Create Categorical node.
"""
super().__init__(p, **kwargs)
@classmethod
@ensureparents
def _constructor(cls, p, **kwargs):
"""
Constructs distribution and moments objects.
This method is called if useconstructor decorator is used for __init__.
        Because the distribution and moments objects depend on the number of
categories, that is, they depend on the parent node, this method can be
used to construct those objects.
"""
# Get the number of categories
D = p.dims[0][0]
parents = [p]
moments = CategoricalMoments(D)
distribution = CategoricalDistribution(D)
return (parents,
kwargs,
( (D,), ),
cls._total_plates(kwargs.get('plates'),
distribution.plates_from_parent(0, p.plates)),
distribution,
moments,
cls._parent_moments)
def __str__(self):
"""
Print the distribution using standard parameterization.
"""
p = self.u[0]
return ("%s ~ Categorical(p)\n"
" p = \n"
"%s\n"
% (self.name, p))
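# Hedged usage sketch (added for illustration, not part of the original
# module). It assumes the public bayespy.nodes API of released versions, where
# Dirichlet and Categorical are exposed, and the VB engine drives the updates.
if __name__ == '__main__':
    from bayespy.nodes import Dirichlet, Categorical as _Categorical
    from bayespy.inference import VB
    _p = Dirichlet([1.0, 1.0, 1.0])        # symmetric prior over 3 categories
    _x = _Categorical(_p, plates=(10,))    # 10 i.i.d. categorical variables
    _x.observe([0, 2, 1, 0, 0, 1, 2, 2, 1, 0])
    VB(_x, _p).update()                    # one VMP sweep over the nodes
    print(_p)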
|
|
# MIT License
#
# Copyright (c) 2017 Matt Boyer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pdb
import re
import stat
import struct
from . import constants
from . import _LOGGER
from .record import Record
from .pages import (
Page, OverflowPage, FreelistLeafPage, FreelistTrunkPage, BTreePage,
PtrmapPage
)
from .table import Table
from .tuples import (
SQLite_header, SQLite_ptrmap_info, SQLite_master_record, type_specs
)
signatures = {}
class SQLite_DB(object):
def __init__(self, path, heuristics_registry):
self._path = path
self._page_types = {}
self._header = self.parse_header()
self._registry = heuristics_registry
self._page_cache = None
# Actual page objects go here
self._pages = {}
self.build_page_cache()
self._ptrmap = {}
# TODO Do we need all of these?
self._table_roots = {}
self._page_tables = {}
self._tables = {}
self._table_columns = {}
self._freelist_leaves = []
self._freelist_btree_pages = []
@property
def ptrmap(self):
return self._ptrmap
@property
def header(self):
return self._header
@property
def pages(self):
return self._pages
@property
def tables(self):
return self._tables
@property
def freelist_leaves(self):
return self._freelist_leaves
@property
def table_columns(self):
return self._table_columns
def page_bytes(self, page_idx):
try:
return self._page_cache[page_idx]
except KeyError:
raise ValueError("No cache for page %d", page_idx)
def map_table_page(self, page_idx, table):
assert isinstance(page_idx, int)
assert isinstance(table, Table)
self._page_tables[page_idx] = table
def get_page_table(self, page_idx):
assert isinstance(page_idx, int)
try:
return self._page_tables[page_idx]
except KeyError:
return None
def __repr__(self):
return '<SQLite DB, page count: {} | page size: {}>'.format(
self.header.size_in_pages,
self.header.page_size
)
def parse_header(self):
header_bytes = None
file_size = None
with open(self._path, 'br') as sqlite:
header_bytes = sqlite.read(100)
file_size = os.fstat(sqlite.fileno())[stat.ST_SIZE]
if not header_bytes:
raise ValueError("Couldn't read SQLite header")
assert isinstance(header_bytes, bytes)
# This DB header is always big-endian
fields = SQLite_header(*struct.unpack(
r'>16sHBBBBBBIIIIIIIIIIII20xII',
header_bytes[:100]
))
assert fields.page_size in constants.VALID_PAGE_SIZES
db_size = fields.page_size * fields.size_in_pages
assert db_size <= file_size
assert (fields.page_size > 0) and \
(fields.file_change_counter == fields.version_valid)
if file_size < 1073741824:
_LOGGER.debug("No lock-byte page in this file!")
if fields.first_freelist_trunk > 0:
self._page_types[fields.first_freelist_trunk] = \
constants.FREELIST_TRUNK_PAGE
_LOGGER.debug(fields)
return fields
def build_page_cache(self):
# The SQLite docs use a numbering convention for pages where the
# first page (the one that has the header) is page 1, with the next
# ptrmap page being page 2, etc.
page_cache = [None, ]
with open(self._path, 'br') as sqlite:
for page_idx in range(self._header.size_in_pages):
page_offset = page_idx * self._header.page_size
sqlite.seek(page_offset, os.SEEK_SET)
page_cache.append(sqlite.read(self._header.page_size))
self._page_cache = page_cache
for page_idx in range(1, len(self._page_cache)):
# We want these to be temporary objects, to be replaced with
# more specialised objects as parsing progresses
self._pages[page_idx] = Page(page_idx, self)
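    # Hedged note (added for exposition): with the 1-based convention above,
    # self._page_cache[0] stays a None placeholder, self._page_cache[1] holds
    # the page containing the 100-byte header, and page N starts at file
    # offset (N - 1) * page_size.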
def populate_freelist_pages(self):
if 0 == self._header.first_freelist_trunk:
_LOGGER.debug("This database has no freelist trunk page")
return
_LOGGER.info("Parsing freelist pages")
parsed_trunks = 0
parsed_leaves = 0
freelist_trunk_idx = self._header.first_freelist_trunk
while freelist_trunk_idx != 0:
_LOGGER.debug(
"Parsing freelist trunk page %d",
freelist_trunk_idx
)
trunk_bytes = bytes(self.pages[freelist_trunk_idx])
next_freelist_trunk_page_idx, num_leaf_pages = struct.unpack(
r'>II',
trunk_bytes[:8]
)
# Now that we know how long the array of freelist page pointers is,
# let's read it again
trunk_array = struct.unpack(
r'>{count}I'.format(count=2+num_leaf_pages),
trunk_bytes[:(4*(2+num_leaf_pages))]
)
            # We're skipping the first entries as they are really the next trunk
# index and the leaf count
# TODO Fix that
leaves_in_trunk = []
for page_idx in trunk_array[2:]:
# Let's prepare a specialised object for this freelist leaf
# page
leaf_page = FreelistLeafPage(
page_idx, self, freelist_trunk_idx
)
leaves_in_trunk.append(leaf_page)
self._freelist_leaves.append(page_idx)
self._pages[page_idx] = leaf_page
self._page_types[page_idx] = constants.FREELIST_LEAF_PAGE
trunk_page = FreelistTrunkPage(
freelist_trunk_idx,
self,
leaves_in_trunk
)
self._pages[freelist_trunk_idx] = trunk_page
# We've parsed this trunk page
parsed_trunks += 1
# ...And every leaf in it
parsed_leaves += num_leaf_pages
freelist_trunk_idx = next_freelist_trunk_page_idx
assert (parsed_trunks + parsed_leaves) == self._header.freelist_pages
_LOGGER.info(
"Freelist summary: %d trunk pages, %d leaf pages",
parsed_trunks,
parsed_leaves
)
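    # Hedged note (added for exposition): a freelist trunk page starts with
    # two big-endian 32-bit integers (next trunk page index, number of leaf
    # pointers) followed by that many 32-bit leaf page indices, which is why
    # trunk_array above is unpacked with count=2 + num_leaf_pages and the leaf
    # indices are taken from trunk_array[2:].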
def populate_overflow_pages(self):
# Knowledge of the overflow pages can come from the pointer map (easy),
# or the parsing of individual cells in table leaf pages (hard)
#
# For now, assume we already have a page type dict populated from the
# ptrmap
_LOGGER.info("Parsing overflow pages")
overflow_count = 0
for page_idx in sorted(self._page_types):
page_type = self._page_types[page_idx]
if page_type not in constants.OVERFLOW_PAGE_TYPES:
continue
overflow_page = OverflowPage(page_idx, self)
self.pages[page_idx] = overflow_page
overflow_count += 1
_LOGGER.info("Overflow summary: %d pages", overflow_count)
def populate_ptrmap_pages(self):
if self._header.largest_btree_page == 0:
# We don't have ptrmap pages in this DB. That sucks.
_LOGGER.warning("%r does not have ptrmap pages!", self)
for page_idx in range(1, self._header.size_in_pages):
self._page_types[page_idx] = constants.UNKNOWN_PAGE
return
_LOGGER.info("Parsing ptrmap pages")
ptrmap_page_idx = 2
usable_size = self._header.page_size - self._header.reserved_length
num_ptrmap_entries_in_page = usable_size // 5
ptrmap_page_indices = []
ptrmap_page_idx = 2
while ptrmap_page_idx <= self._header.size_in_pages:
page_bytes = self._page_cache[ptrmap_page_idx]
ptrmap_page_indices.append(ptrmap_page_idx)
self._page_types[ptrmap_page_idx] = constants.PTRMAP_PAGE
page_ptrmap_entries = {}
ptrmap_bytes = page_bytes[:5 * num_ptrmap_entries_in_page]
for entry_idx in range(num_ptrmap_entries_in_page):
ptr_page_idx = ptrmap_page_idx + entry_idx + 1
page_type, page_ptr = struct.unpack(
r'>BI',
ptrmap_bytes[5*entry_idx:5*(entry_idx+1)]
)
if page_type == 0:
break
ptrmap_entry = SQLite_ptrmap_info(
ptr_page_idx, page_type, page_ptr
)
assert ptrmap_entry.page_type in constants.PTRMAP_PAGE_TYPES
if page_type == constants.BTREE_ROOT_PAGE:
assert page_ptr == 0
self._page_types[ptr_page_idx] = page_type
elif page_type == constants.FREELIST_PAGE:
# Freelist pages are assumed to be known already
assert self._page_types[ptr_page_idx] in \
constants.FREELIST_PAGE_TYPES
assert page_ptr == 0
elif page_type == constants.FIRST_OFLOW_PAGE:
assert page_ptr != 0
self._page_types[ptr_page_idx] = page_type
elif page_type == constants.NON_FIRST_OFLOW_PAGE:
assert page_ptr != 0
self._page_types[ptr_page_idx] = page_type
elif page_type == constants.BTREE_NONROOT_PAGE:
assert page_ptr != 0
self._page_types[ptr_page_idx] = page_type
# _LOGGER.debug("%r", ptrmap_entry)
self._ptrmap[ptr_page_idx] = ptrmap_entry
page_ptrmap_entries[ptr_page_idx] = ptrmap_entry
page = PtrmapPage(ptrmap_page_idx, self, page_ptrmap_entries)
self._pages[ptrmap_page_idx] = page
_LOGGER.debug("%r", page)
ptrmap_page_idx += num_ptrmap_entries_in_page + 1
_LOGGER.info(
"Ptrmap summary: %d pages, %r",
len(ptrmap_page_indices), ptrmap_page_indices
)
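    # Hedged worked example (added for exposition): each ptrmap entry is 5
    # bytes (1 type byte + a 4-byte big-endian parent pointer), so with a
    # 4096-byte page and no reserved bytes a ptrmap page holds 4096 // 5 = 819
    # entries and describes the 819 pages that follow it; ptrmap pages would
    # then sit at indices 2, 822, 1642, ... as computed by the loop above.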
def populate_btree_pages(self):
# TODO Should this use table information instead of scanning all pages?
page_idx = 1
while page_idx <= self._header.size_in_pages:
try:
if self._page_types[page_idx] in \
constants.NON_BTREE_PAGE_TYPES:
page_idx += 1
continue
except KeyError:
pass
try:
# We need to pass in the singleton registry instance
page_obj = BTreePage(page_idx, self, self._registry)
except ValueError:
# This page isn't a valid btree page. This can happen if we
# don't have a ptrmap to guide us
_LOGGER.warning(
"Page %d (%s) is not a btree page",
page_idx,
self._page_types[page_idx]
)
page_idx += 1
continue
page_obj.parse_cells()
self._page_types[page_idx] = page_obj.page_type
self._pages[page_idx] = page_obj
page_idx += 1
def _parse_master_leaf_page(self, page):
for cell_idx in page.cells:
_, master_record = page.cells[cell_idx]
assert isinstance(master_record, Record)
fields = [
master_record.fields[idx].value for idx in master_record.fields
]
master_record = SQLite_master_record(*fields)
if 'table' != master_record.type:
continue
self._table_roots[master_record.name] = \
self.pages[master_record.rootpage]
# This record describes a table in the schema, which means it
# includes a SQL statement that defines the table's columns
# We need to parse the field names out of that statement
assert master_record.sql.startswith('CREATE TABLE')
columns_re = re.compile(r'^CREATE TABLE (\S+) \((.*)\)$')
match = columns_re.match(master_record.sql)
if match:
assert match.group(1) == master_record.name
column_list = match.group(2)
csl_between_parens_re = re.compile(r'\([^)]+\)')
expunged = csl_between_parens_re.sub('', column_list)
cols = [
statement.strip() for statement in expunged.split(',')
]
cols = [
statement for statement in cols if not (
statement.startswith('PRIMARY') or
statement.startswith('UNIQUE')
)
]
columns = [col.split()[0] for col in cols]
signature = []
# Some column definitions lack a type
for col_def in cols:
def_tokens = col_def.split()
try:
col_type = def_tokens[1]
except IndexError:
signature.append(object)
continue
_LOGGER.debug(
"Column \"%s\" is defined as \"%s\"",
def_tokens[0], col_type
)
try:
signature.append(type_specs[col_type])
except KeyError:
_LOGGER.warning("No native type for \"%s\"", col_def)
signature.append(object)
_LOGGER.info(
"Signature for table \"%s\": %r",
master_record.name, signature
)
signatures[master_record.name] = signature
_LOGGER.info(
"Columns for table \"%s\": %r",
master_record.name, columns
)
self._table_columns[master_record.name] = columns
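    # Hedged worked example (added for exposition; the table name is
    # hypothetical): for a master record whose SQL is
    #   CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT, notes)
    # the regex above captures "id INTEGER PRIMARY KEY, name TEXT, notes",
    # the column names become ['id', 'name', 'notes'], and the signature maps
    # each declared type through type_specs (assuming it knows the usual
    # SQLite type names), falling back to object for the untyped "notes".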
def map_tables(self):
first_page = self.pages[1]
assert isinstance(first_page, BTreePage)
master_table = Table('sqlite_master', self, first_page, signatures)
self._table_columns.update(constants.SQLITE_TABLE_COLUMNS)
for master_leaf in master_table.leaves:
self._parse_master_leaf_page(master_leaf)
assert all(
isinstance(root, BTreePage) for root in self._table_roots.values()
)
assert all(
root.parent is None for root in self._table_roots.values()
)
self.map_table_page(1, master_table)
self._table_roots['sqlite_master'] = self.pages[1]
for table_name, rootpage in self._table_roots.items():
try:
table_obj = Table(table_name, self, rootpage, signatures)
except Exception as ex: # pylint:disable=W0703
pdb.set_trace()
_LOGGER.warning(
"Caught %r while instantiating table object for \"%s\"",
ex, table_name
)
else:
self._tables[table_name] = table_obj
def reparent_orphaned_table_leaf_pages(self):
reparented_pages = []
for page in self.pages.values():
if not isinstance(page, BTreePage):
continue
if page.page_type != "Table Leaf":
continue
table = page.table
if not table:
parent = page
root_table = None
while parent:
root_table = parent.table
parent = parent.parent
if root_table is None:
self._freelist_btree_pages.append(page)
if root_table is None:
if not page.cells:
continue
first_record = page.cells[0][1]
matches = []
for table_name in signatures:
# All records within a given page are for the same
# table
if self.tables[table_name].check_signature(
first_record):
matches.append(self.tables[table_name])
if not matches:
_LOGGER.error(
"Couldn't find a matching table for %r",
page
)
continue
if len(matches) > 1:
_LOGGER.error(
"Multiple matching tables for %r: %r",
page, matches
)
continue
elif len(matches) == 1:
root_table = matches[0]
_LOGGER.debug(
"Reparenting %r to table \"%s\"",
page, root_table.name
)
root_table.add_leaf(page)
self.map_table_page(page.idx, root_table)
reparented_pages.append(page)
if reparented_pages:
_LOGGER.info(
"Reparented %d pages: %r",
len(reparented_pages), [p.idx for p in reparented_pages]
)
def grep(self, needle):
match_found = False
page_idx = 1
needle_re = re.compile(needle.encode('utf-8'))
while (page_idx <= self.header.size_in_pages):
page = self.pages[page_idx]
page_offsets = []
for match in needle_re.finditer(bytes(page)):
needle_offset = match.start()
page_offsets.append(needle_offset)
            if page_offsets:
                match_found = True
                _LOGGER.info(
                    "Found search term in page %r @ offset(s) %s",
                    page, ', '.join(str(offset) for offset in page_offsets)
                )
page_idx += 1
if not match_found:
_LOGGER.warning(
"Search term not found",
)
|
|
"""\
Helpers for build scripts under `scripts` directory (these are not build
rule templates for helping you write build rules).
"""
__all__ = [
'Builder',
'RuleIndex',
'find_default_path',
'find_input_path',
'get_build_image_rules',
'get_build_volume_rules',
'get_specify_app_rule',
'get_specify_image_rule',
'get_specify_pod_rule',
'with_builder_argument',
'with_foreman_argument',
]
from collections import namedtuple
from pathlib import Path
import json
from foreman import Label
from garage import apps
from garage import scripts
from garage.assertions import ASSERT
ROOT = Path(__file__).absolute().parent.parent.parent
scripts.ensure_directory(ROOT / '.git') # Sanity check
DEFAULT_FOREMAN = ROOT / 'shipyard' / 'scripts' / 'foreman.sh'
DEFAULT_BUILDER = ROOT / 'shipyard' / 'scripts' / 'builder'
with_foreman_argument = apps.with_decorators(
apps.with_argument(
'--foreman', metavar='PATH', type=Path, default=DEFAULT_FOREMAN,
help='provide path to the foreman script (default %(default)s)',
),
apps.with_argument(
'--foreman-arg', metavar='ARG', action='append',
help='add command-line argument to foreman script',
),
)
with_builder_argument = apps.with_decorators(
apps.with_argument(
'--builder', metavar='PATH', type=Path, default=DEFAULT_BUILDER,
help='provide path to the builder script (default %(default)s)',
),
apps.with_argument(
'--builder-arg', metavar='ARG', action='append',
help='add command-line argument to builder script',
),
)
with_argument_input = apps.with_argument(
'--input-root', metavar='PATH', type=Path, action='append',
help='add input root path',
)
class Builder:
def __init__(self, args):
self.builder = scripts.ensure_file(args.builder)
self.builder_args = args.builder_arg or ()
def build(self, label, extra_args=()):
cmd = [self.builder, 'build', str(label)]
cmd.extend(self.builder_args)
cmd.extend(extra_args)
scripts.execute(cmd)
class RuleIndex:
def __init__(self, args):
self.foreman = scripts.ensure_file(args.foreman)
self.foreman_args = args.foreman_arg or ()
self._build_data = None
def load_from_labels(self, labels):
"""Load build data from labels."""
cmd = [self.foreman, 'list']
cmd.extend(map(str, labels))
cmd.extend(self.foreman_args)
stdout = scripts.execute(cmd, capture_stdout=True).stdout
self._build_data = json.loads(stdout.decode('utf8'))
def get_parameter(self, label, *, implicit_path=None):
data = self._get_thing('parameters', label, implicit_path)
return Parameter(
label=Label.parse(data['label']),
default=data.get('default'),
)
def get_rule(self, label, *, implicit_path=None):
data = self._get_thing('rules', label, implicit_path)
return Rule(
label=Label.parse(data['label']),
annotations=data['annotations'],
all_dependencies=[
Dependency(
label=Label.parse(dep['label']),
)
for dep in data['all_dependencies']
],
)
def _get_thing(self, kind, label, implicit_path):
ASSERT.not_none(self._build_data)
if isinstance(label, str):
label = Label.parse(label, implicit_path=implicit_path)
label_str = str(label)
for thing in self._build_data['//%s' % label.path][kind]:
if thing['label'] == label_str:
return thing
raise KeyError(label)
def get_pod_name(self, rule_obj):
pod_names = set()
pod_parameter_label = rule_obj.annotations.get('pod-parameter')
if pod_parameter_label is not None:
pod_parameter = self.get_parameter(
pod_parameter_label,
implicit_path=rule_obj.label.path,
)
pod_names.add(pod_parameter.default['name'])
pod_name = rule_obj.annotations.get('pod-name')
if pod_name is not None:
pod_names.add(pod_name)
if len(pod_names) != 1:
raise AssertionError(
'expect exactly one pod name from annotation: %s' %
sorted(pod_names)
)
return Label.parse(pod_names.pop())
def get_volume_name(self, rule_obj):
volume_parameter_label = rule_obj.annotations.get('volume-parameter')
if volume_parameter_label is None:
raise AssertionError(
'expect volume name from annotation: {}'.format(rule_obj))
volume_parameter = self.get_parameter(
volume_parameter_label,
implicit_path=rule_obj.label.path,
)
return Label.parse_name(
rule_obj.label.path,
volume_parameter.default['name'],
)
def find_default_path(input_roots, kind, label):
for input_root in input_roots:
path = (
input_root / 'defaults' / kind / label.path /
('%s.yaml' % label.name)
)
if path.exists():
return path
return None
def find_input_path(input_roots, kind, label):
ASSERT.in_(kind, ('image-data', 'volume-data'))
for input_root in input_roots:
input_path = Path(kind) / label.path / label.name
if (input_root / input_path).exists():
return input_root, input_path
return None, None
Dependency = namedtuple('Dependency', [
'label',
])
Parameter = namedtuple('Parameter', [
'label',
'default',
])
Rule = namedtuple('Rule', [
'label',
'annotations',
'all_dependencies',
])
def get_build_image_rules(rules, build_pod_rule):
return _get_build_rules_for_kind(rules, build_pod_rule, 'image')
def get_build_volume_rules(rules, build_pod_rule):
return _get_build_rules_for_kind(rules, build_pod_rule, 'volume')
def _get_build_rules_for_kind(rules, build_pod_rule, kind):
_ensure_rule_type(build_pod_rule, 'build_pod')
specify_pod_rule = get_specify_pod_rule(rules, build_pod_rule)
annotation_name = 'build-%s-rule' % kind
return [
rules.get_rule(
dep_rule.annotations[annotation_name],
implicit_path=dep_rule.label.path,
)
for dep_rule in _iter_specify_rules(rules, specify_pod_rule, kind)
]
def get_specify_app_rule(rules, build_rule):
return _get_specify_rule(rules, build_rule, 'app')
def get_specify_image_rule(rules, build_rule):
return _get_specify_rule(rules, build_rule, 'image')
def get_specify_pod_rule(rules, build_rule):
return _get_specify_rule(rules, build_rule, 'pod')
def _get_specify_rule(rules, build_rule, kind):
for dep in build_rule.all_dependencies:
dep_rule = rules.get_rule(dep.label)
if dep_rule.annotations.get('rule-type') == 'specify_' + kind:
return dep_rule
raise ValueError('no specify_%s rule for %s' % (kind, build_rule))
def _iter_specify_rules(rules, build_rule, kind):
target_rule_type = 'specify_' + kind
for dep in build_rule.all_dependencies:
dep_rule = rules.get_rule(dep.label)
if dep_rule.annotations.get('rule-type') == target_rule_type:
yield dep_rule
def _ensure_rule_type(rule, rule_type):
if rule.annotations.get('rule-type') != rule_type:
raise ValueError('not a %s rule: %s' % (rule_type, rule))
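# Hedged usage sketch (added for illustration, not part of the original
# module); the label strings are hypothetical:
#   rules = RuleIndex(args)
#   rules.load_from_labels(['//py/foo:build_pod'])
#   build_pod_rule = rules.get_rule('//py/foo:build_pod')
#   image_rules = get_build_image_rules(rules, build_pod_rule)
#   volume_rules = get_build_volume_rules(rules, build_pod_rule)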
|
|
import re
from dnload.common import is_verbose
from dnload.glsl_type import interpret_type
from dnload.glsl_type import interpret_pseudo_type
########################################
# GlslName #############################
########################################
class GlslName:
"""GLSL name identifier."""
def __init__(self, source):
"""Constructor."""
self.__name = source
self.__typeid = None
self.__rename = None
self.__access = None
# Reserved words are considered locked in all cases.
if self.__name in get_list_locked():
self.__rename = self.__name
# Some locked variables have implicit types, set them right away.
if self.__name in g_vec2:
self.setType(interpret_type("vec2"))
elif self.__name in g_vec4:
self.setType(interpret_type("vec4"))
elif self.__name in g_mat:
self.setType(interpret_pseudo_type("mat"))
elif self.__name in g_vec:
self.setType(interpret_pseudo_type("vec"))
def format(self, force):
"""Return formatted output."""
if not self.__rename:
if force:
if is_verbose():
print("WARNING: %s not locked" % (self))
return self.__name
return ""
return self.__rename
def getAccess(self):
"""Accessor."""
return self.__access
def getName(self):
"""Gets the original, non-renamed name."""
return self.__name
def getType(self):
"""Accessor."""
return self.__typeid
def isLocked(self):
"""Tell if this is using a locked string."""
if self.__rename:
return True
return False
def lock(self, op):
"""Lock rename into given name."""
if self.__rename:
raise RuntimeError("attempting to lock already locked rename '%s' -> '%s'" % (self.__name, self.__rename))
if not isinstance(op, str):
raise RuntimeError("rename must be string, '%s' given" % (str(op)))
self.__rename = op
def resolveName(self):
"""Get resolved name, this is the locked name or original name if not locked."""
if self.__rename:
return self.__rename
return self.__name
def setAccess(self, op):
"""Set given element as accessing this."""
if self.__access:
raise RuntimeError("'%s' already has access '%s'" % (str(self), str(self.__access)))
self.__access = op
def setType(self, op):
"""Set type information of this."""
if self.__typeid and (self.__typeid != op):
raise RuntimeError("conflicting types '%s' and '%s' for '%s'" % (str(self.__typeid), str(op), self.__name))
self.__typeid = op
def __eq__(self, other):
"""Equals operator."""
if is_glsl_name(other):
return (other.resolveName() == self.resolveName())
return (self.resolveName() == other)
def __ne__(self, other):
"""Not equals operator."""
return not (self == other)
def __hash__(self):
"""Hashing operator."""
return hash(self.__name)
def __str__(self):
"""String representation."""
if self.__rename:
return "GlslName('%s' => '%s')" % (self.__name, self.__rename)
return "GlslName('%s')" % (self.__name)
########################################
# Globals ##############################
########################################
g_locked = (
"abs",
"acos",
"asin",
"atan",
"binding",
"break",
"ceil",
"clamp",
"continue",
"cos",
"cross",
"discard",
"distance",
"dot",
"EmitVertex",
"EndPrimitive",
"exp",
"false",
"floor",
"fract",
"gl_FragDepth",
"gl_InstanceID",
"gl_PerVertex",
"layout",
"length",
"location",
"log",
"main",
"max",
"max_vertices",
"min",
"mix",
"mod",
"pow",
"precision",
"return",
"sign",
"sin",
"smoothstep",
"sqrt",
"step",
"tan",
"tanh",
"true",
"uniform",
)
g_primitives = (
"lines",
"lines_adjacency",
"points",
"triangles",
"triangle_strip",
)
g_mat = (
"transpose",
)
g_vec2 = (
"gl_FragCoord",
)
g_vec4 = (
"gl_FragColor",
"gl_Position",
"texture",
"texture2D",
"texture3D",
"textureCube",
"textureGrad",
)
g_vec = (
"normalize",
"reflect",
)
########################################
# Functions ############################
########################################
def get_list_locked():
"""Get list of all locked words."""
return g_locked + g_primitives + g_mat + g_vec2 + g_vec4 + g_vec
def get_list_primitives():
"""Get list of primitive words."""
return g_primitives
def interpret_name(source):
"""Try to interpret name identifier."""
# All reserved strings other than names here should have been interpreted before.
# Names are interpreted last.
if re.match(r'^([A-Za-z][A-Za-z0-9_]*)$', source, re.I):
return GlslName(source)
return None
def is_glsl_name(op):
"""Tell if token is type identifier."""
return isinstance(op, GlslName)
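# Hedged usage sketch (added for illustration, not part of the original
# module):
#   name = interpret_name("position")
#   name.isLocked()                    # False: "position" is not reserved
#   name.lock("a")                     # minify "position" to "a"
#   name.format(True)                  # returns "a"
#   interpret_name("gl_FragCoord").isLocked()   # True, and typed as vec2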
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RoutesOperations:
"""RoutesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
**kwargs: Any
) -> "_models.Route":
"""Gets the specified route from a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Route, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.Route
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs: Any
) -> "_models.Route":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(route_parameters, 'Route')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Route', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
route_name: str,
route_parameters: "_models.Route",
**kwargs: Any
) -> AsyncLROPoller["_models.Route"]:
"""Creates or updates a route in the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param route_name: The name of the route.
:type route_name: str
:param route_parameters: Parameters supplied to the create or update route operation.
:type route_parameters: ~azure.mgmt.network.v2020_03_01.models.Route
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Route or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.Route]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Route"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
route_name=route_name,
route_parameters=route_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Route', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'routeName': self._serialize.url("route_name", route_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}'} # type: ignore
def list(
self,
resource_group_name: str,
route_table_name: str,
**kwargs: Any
) -> AsyncIterable["_models.RouteListResult"]:
"""Gets all routes in a route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.RouteListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RouteListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes'} # type: ignore
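# Hedged usage sketch (added, not part of the generated client): the pager
# returned by ``list`` is consumed with ``async for`` on the aio client, e.g.
#
#     async for route in network_client.routes.list(resource_group_name,
#                                                    route_table_name):
#         print(route.name)
#
# where ``network_client`` is assumed to be an authenticated
# ``azure.mgmt.network.aio.NetworkManagementClient``.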
|
|
# -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This is a web-server which integrates with the twisted.internet
infrastructure.
"""
from __future__ import division, absolute_import
import copy
import os
try:
from urllib import quote
except ImportError:
from urllib.parse import quote as _quote
def quote(string, *args, **kwargs):
return _quote(
string.decode('charmap'), *args, **kwargs).encode('charmap')
import zlib
from zope.interface import implementer
from twisted.python.compat import _PY3, networkString, nativeString, intToBytes
if _PY3:
class Copyable:
"""
Fake mixin, until twisted.spread is ported.
"""
else:
from twisted.spread.pb import Copyable, ViewPoint
from twisted.internet import address
from twisted.web import iweb, http, util
from twisted.web.http import unquote
from twisted.python import log, reflect, failure, components
from twisted import copyright
from twisted.web import resource
from twisted.web.error import UnsupportedMethod
from twisted.python.versions import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.python.compat import escape
NOT_DONE_YET = 1
__all__ = [
'supportedMethods',
'Request',
'Session',
'Site',
'version',
'NOT_DONE_YET',
'GzipEncoderFactory'
]
# backwards compatibility
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.datetimeToString instead",
"twisted.web.server",
"date_time_string")
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.stringToDatetime instead",
"twisted.web.server",
"string_date_time")
date_time_string = http.datetimeToString
string_date_time = http.stringToDatetime
# Support for other methods may be implemented on a per-resource basis.
supportedMethods = (b'GET', b'HEAD', b'POST')
def _addressToTuple(addr):
if isinstance(addr, address.IPv4Address):
return ('INET', addr.host, addr.port)
elif isinstance(addr, address.UNIXAddress):
return ('UNIX', addr.name)
else:
return tuple(addr)
@implementer(iweb.IRequest)
class Request(Copyable, http.Request, components.Componentized):
"""
An HTTP request.
@ivar defaultContentType: A C{bytes} giving the default I{Content-Type}
value to send in responses if no other value is set. C{None} disables
the default.
"""
defaultContentType = b"text/html"
site = None
appRootURL = None
__pychecker__ = 'unusednames=issuer'
_inFakeHead = False
_encoder = None
def __init__(self, *args, **kw):
http.Request.__init__(self, *args, **kw)
components.Componentized.__init__(self)
def getStateToCopyFor(self, issuer):
x = self.__dict__.copy()
del x['transport']
# XXX refactor this attribute out; it's from protocol
# del x['server']
del x['channel']
del x['content']
del x['site']
self.content.seek(0, 0)
x['content_data'] = self.content.read()
x['remote'] = ViewPoint(issuer, self)
# Address objects aren't jellyable
x['host'] = _addressToTuple(x['host'])
x['client'] = _addressToTuple(x['client'])
# Header objects also aren't jellyable.
x['requestHeaders'] = list(x['requestHeaders'].getAllRawHeaders())
return x
# HTML generation helpers
def sibLink(self, name):
"""
Return the text that links to a sibling of the requested resource.
"""
if self.postpath:
return (len(self.postpath)*b"../") + name
else:
return name
def childLink(self, name):
"""
Return the text that links to a child of the requested resource.
"""
lpp = len(self.postpath)
if lpp > 1:
return ((lpp-1)*b"../") + name
elif lpp == 1:
return name
else: # lpp == 0
if len(self.prepath) and self.prepath[-1]:
return self.prepath[-1] + b'/' + name
else:
return name
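    # Hedged illustration (added): for a request with prepath [b'foo'] and
    # postpath [b'bar', b'baz'], sibLink(b'other') returns b'../../other'
    # and childLink(b'other') returns b'../other'.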
def process(self):
"""
Process a request.
"""
# get site from channel
self.site = self.channel.site
# set various default headers
self.setHeader(b'server', version)
self.setHeader(b'date', http.datetimeToString())
# Resource Identification
self.prepath = []
self.postpath = list(map(unquote, self.path[1:].split(b'/')))
try:
resrc = self.site.getResourceFor(self)
if resource._IEncodingResource.providedBy(resrc):
encoder = resrc.getEncoder(self)
if encoder is not None:
self._encoder = encoder
self.render(resrc)
except:
self.processingFailed(failure.Failure())
def write(self, data):
"""
Write data to the transport (if not responding to a HEAD request).
@param data: A string to write to the response.
"""
if not self.startedWriting:
# Before doing the first write, check to see if a default
# Content-Type header should be supplied.
modified = self.code != http.NOT_MODIFIED
contentType = self.responseHeaders.getRawHeaders(b'content-type')
if (modified and contentType is None and
self.defaultContentType is not None
):
self.responseHeaders.setRawHeaders(
b'content-type', [self.defaultContentType])
# Only let the write happen if we're not generating a HEAD response by
# faking out the request method. Note, if we are doing that,
# startedWriting will never be true, and the above logic may run
# multiple times. It will only actually change the responseHeaders
# once though, so it's still okay.
if not self._inFakeHead:
if self._encoder:
data = self._encoder.encode(data)
http.Request.write(self, data)
def finish(self):
"""
Override C{http.Request.finish} for possible encoding.
"""
if self._encoder:
data = self._encoder.finish()
if data:
http.Request.write(self, data)
return http.Request.finish(self)
def render(self, resrc):
"""
Ask a resource to render itself.
@param resrc: a L{twisted.web.resource.IResource}.
"""
try:
body = resrc.render(self)
except UnsupportedMethod as e:
allowedMethods = e.allowedMethods
if (self.method == b"HEAD") and (b"GET" in allowedMethods):
# We must support HEAD (RFC 2616, 5.1.1). If the
# resource doesn't, fake it by giving the resource
# a 'GET' request and then return only the headers,
# not the body.
log.msg("Using GET to fake a HEAD request for %s" %
(resrc,))
self.method = b"GET"
self._inFakeHead = True
body = resrc.render(self)
if body is NOT_DONE_YET:
log.msg("Tried to fake a HEAD request for %s, but "
"it got away from me." % resrc)
# Oh well, I guess we won't include the content length.
else:
self.setHeader(b'content-length', intToBytes(len(body)))
self._inFakeHead = False
self.method = b"HEAD"
self.write(b'')
self.finish()
return
if self.method in (supportedMethods):
# We MUST include an Allow header
# (RFC 2616, 10.4.6 and 14.7)
self.setHeader(b'Allow', b', '.join(allowedMethods))
s = ('''Your browser approached me (at %(URI)s) with'''
''' the method "%(method)s". I only allow'''
''' the method%(plural)s %(allowed)s here.''' % {
'URI': escape(nativeString(self.uri)),
'method': nativeString(self.method),
'plural': ((len(allowedMethods) > 1) and 's') or '',
'allowed': ', '.join(
[nativeString(x) for x in allowedMethods])
})
epage = resource.ErrorPage(http.NOT_ALLOWED,
"Method Not Allowed", s)
body = epage.render(self)
else:
epage = resource.ErrorPage(
http.NOT_IMPLEMENTED, "Huh?",
"I don't know how to treat a %s request." %
(escape(self.method.decode("charmap")),))
body = epage.render(self)
# end except UnsupportedMethod
if body == NOT_DONE_YET:
return
if not isinstance(body, bytes):
body = resource.ErrorPage(
http.INTERNAL_SERVER_ERROR,
"Request did not return bytes",
"Request: " + util._PRE(reflect.safe_repr(self)) + "<br />" +
"Resource: " + util._PRE(reflect.safe_repr(resrc)) + "<br />" +
"Value: " + util._PRE(reflect.safe_repr(body))).render(self)
if self.method == b"HEAD":
if len(body) > 0:
# This is a Bad Thing (RFC 2616, 9.4)
log.msg("Warning: HEAD request %s for resource %s is"
" returning a message body."
" I think I'll eat it."
% (self, resrc))
self.setHeader(b'content-length',
intToBytes(len(body)))
self.write(b'')
else:
self.setHeader(b'content-length',
intToBytes(len(body)))
self.write(body)
self.finish()
def processingFailed(self, reason):
log.err(reason)
if self.site.displayTracebacks:
body = ("<html><head><title>web.Server Traceback"
" (most recent call last)</title></head>"
"<body><b>web.Server Traceback"
" (most recent call last):</b>\n\n"
"%s\n\n</body></html>\n"
% util.formatFailure(reason))
else:
body = (b"<html><head><title>Processing Failed"
b"</title></head><body>"
b"<b>Processing Failed</b></body></html>")
self.setResponseCode(http.INTERNAL_SERVER_ERROR)
self.setHeader(b'content-type', b"text/html")
self.setHeader(b'content-length', intToBytes(len(body)))
self.write(body)
self.finish()
return reason
def view_write(self, issuer, data):
"""Remote version of write; same interface.
"""
self.write(data)
def view_finish(self, issuer):
"""Remote version of finish; same interface.
"""
self.finish()
def view_addCookie(self, issuer, k, v, **kwargs):
"""Remote version of addCookie; same interface.
"""
self.addCookie(k, v, **kwargs)
def view_setHeader(self, issuer, k, v):
"""Remote version of setHeader; same interface.
"""
self.setHeader(k, v)
def view_setLastModified(self, issuer, when):
"""Remote version of setLastModified; same interface.
"""
self.setLastModified(when)
def view_setETag(self, issuer, tag):
"""Remote version of setETag; same interface.
"""
self.setETag(tag)
def view_setResponseCode(self, issuer, code, message=None):
"""
Remote version of setResponseCode; same interface.
"""
self.setResponseCode(code, message)
def view_registerProducer(self, issuer, producer, streaming):
"""Remote version of registerProducer; same interface.
(requires a remote producer.)
"""
self.registerProducer(_RemoteProducerWrapper(producer), streaming)
def view_unregisterProducer(self, issuer):
self.unregisterProducer()
### these calls remain local
session = None
def getSession(self, sessionInterface=None):
# Session management
if not self.session:
cookiename = b"_".join([b'TWISTED_SESSION'] + self.sitepath)
sessionCookie = self.getCookie(cookiename)
if sessionCookie:
try:
self.session = self.site.getSession(sessionCookie)
except KeyError:
pass
# if it still hasn't been set, fix it up.
if not self.session:
self.session = self.site.makeSession()
self.addCookie(cookiename, self.session.uid, path=b'/')
self.session.touch()
if sessionInterface:
return self.session.getComponent(sessionInterface)
return self.session
def _prePathURL(self, prepath):
port = self.getHost().port
if self.isSecure():
default = 443
else:
default = 80
if port == default:
hostport = ''
else:
hostport = ':%d' % port
prefix = networkString('http%s://%s%s/' % (
self.isSecure() and 's' or '',
nativeString(self.getRequestHostname()),
hostport))
path = b'/'.join([quote(segment, safe=b'') for segment in prepath])
return prefix + path
def prePathURL(self):
return self._prePathURL(self.prepath)
def URLPath(self):
from twisted.python import urlpath
return urlpath.URLPath.fromRequest(self)
def rememberRootURL(self):
"""
Remember the currently-processed part of the URL for later
recalling.
"""
url = self._prePathURL(self.prepath[:-1])
self.appRootURL = url
def getRootURL(self):
"""
Get a previously-remembered URL.
"""
return self.appRootURL
@implementer(iweb._IRequestEncoderFactory)
class GzipEncoderFactory(object):
"""
@cvar compressLevel: The compression level used by the compressor, default
to 9 (highest).
@since: 12.3
"""
compressLevel = 9
def encoderForRequest(self, request):
"""
        Check the request headers to see if the client accepts gzip encoding,
        and encode the response if so.
"""
acceptHeaders = request.requestHeaders.getRawHeaders(
'accept-encoding', [])
supported = ','.join(acceptHeaders).split(',')
if 'gzip' in supported:
encoding = request.responseHeaders.getRawHeaders(
'content-encoding')
if encoding:
encoding = '%s,gzip' % ','.join(encoding)
else:
encoding = 'gzip'
request.responseHeaders.setRawHeaders('content-encoding',
[encoding])
return _GzipEncoder(self.compressLevel, request)
@implementer(iweb._IRequestEncoder)
class _GzipEncoder(object):
"""
An encoder which supports gzip.
@ivar _zlibCompressor: The zlib compressor instance used to compress the
stream.
@ivar _request: A reference to the originating request.
@since: 12.3
"""
_zlibCompressor = None
def __init__(self, compressLevel, request):
self._zlibCompressor = zlib.compressobj(
compressLevel, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
self._request = request
def encode(self, data):
"""
Write to the request, automatically compressing data on the fly.
"""
if not self._request.startedWriting:
# Remove the content-length header, we can't honor it
# because we compress on the fly.
self._request.responseHeaders.removeHeader(b'content-length')
return self._zlibCompressor.compress(data)
def finish(self):
"""
        Finish handling the request, flushing any data from the zlib
buffer.
"""
remain = self._zlibCompressor.flush()
self._zlibCompressor = None
return remain
class _RemoteProducerWrapper:
def __init__(self, remote):
self.resumeProducing = remote.remoteMethod("resumeProducing")
self.pauseProducing = remote.remoteMethod("pauseProducing")
self.stopProducing = remote.remoteMethod("stopProducing")
class Session(components.Componentized):
"""
A user's session with a system.
This utility class contains no functionality, but is used to
represent a session.
@ivar uid: A unique identifier for the session, C{bytes}.
@ivar _reactor: An object providing L{IReactorTime} to use for scheduling
expiration.
@ivar sessionTimeout: timeout of a session, in seconds.
"""
sessionTimeout = 900
_expireCall = None
def __init__(self, site, uid, reactor=None):
"""
Initialize a session with a unique ID for that session.
"""
components.Componentized.__init__(self)
if reactor is None:
from twisted.internet import reactor
self._reactor = reactor
self.site = site
self.uid = uid
self.expireCallbacks = []
self.touch()
self.sessionNamespaces = {}
def startCheckingExpiration(self):
"""
Start expiration tracking.
@return: C{None}
"""
self._expireCall = self._reactor.callLater(
self.sessionTimeout, self.expire)
def notifyOnExpire(self, callback):
"""
Call this callback when the session expires or logs out.
"""
self.expireCallbacks.append(callback)
def expire(self):
"""
Expire/logout of the session.
"""
del self.site.sessions[self.uid]
for c in self.expireCallbacks:
c()
self.expireCallbacks = []
if self._expireCall and self._expireCall.active():
self._expireCall.cancel()
# Break reference cycle.
self._expireCall = None
def touch(self):
"""
Notify session modification.
"""
self.lastModified = self._reactor.seconds()
if self._expireCall is not None:
self._expireCall.reset(self.sessionTimeout)
version = networkString("TwistedWeb/%s" % (copyright.version,))
class Site(http.HTTPFactory):
"""
    A web site: manages logging, sessions, and resources.
    @ivar counter: increment value used for generating unique session IDs.
    @ivar requestFactory: A factory which is called with (channel, queued)
        and creates L{Request} instances. Defaults to L{Request}.
    @ivar displayTracebacks: if set, Twisted internal errors are displayed on
        rendered pages. Defaults to C{True}.
    @ivar sessionFactory: factory for session objects. Defaults to L{Session}.
    @ivar sessionCheckTime: Deprecated. See L{Session.sessionTimeout} instead.
"""
counter = 0
requestFactory = Request
displayTracebacks = True
sessionFactory = Session
sessionCheckTime = 1800
def __init__(self, resource, requestFactory=None, *args, **kwargs):
"""
@param resource: The root of the resource hierarchy. All request
traversal for requests received by this factory will begin at this
resource.
@type resource: L{IResource} provider
        @param requestFactory: Override for the default requestFactory.
@type requestFactory: C{callable} or C{class}.
@see: L{twisted.web.http.HTTPFactory.__init__}
"""
http.HTTPFactory.__init__(self, *args, **kwargs)
self.sessions = {}
self.resource = resource
if requestFactory is not None:
self.requestFactory = requestFactory
def _openLogFile(self, path):
from twisted.python import logfile
return logfile.LogFile(os.path.basename(path), os.path.dirname(path))
def __getstate__(self):
d = self.__dict__.copy()
d['sessions'] = {}
return d
def _mkuid(self):
"""
(internal) Generate an opaque, unique ID for a user's session.
"""
from hashlib import md5
import random
self.counter = self.counter + 1
return md5(networkString(
"%s_%s" % (str(random.random()), str(self.counter)))
).hexdigest()
def makeSession(self):
"""
Generate a new Session instance, and store it for future reference.
"""
uid = self._mkuid()
session = self.sessions[uid] = self.sessionFactory(self, uid)
session.startCheckingExpiration()
return session
def getSession(self, uid):
"""
Get a previously generated session, by its unique ID.
This raises a KeyError if the session is not found.
"""
return self.sessions[uid]
def buildProtocol(self, addr):
"""
Generate a channel attached to this site.
"""
channel = http.HTTPFactory.buildProtocol(self, addr)
channel.requestFactory = self.requestFactory
channel.site = self
return channel
isLeaf = 0
def render(self, request):
"""
Redirect because a Site is always a directory.
"""
request.redirect(request.prePathURL() + b'/')
request.finish()
def getChildWithDefault(self, pathEl, request):
"""
Emulate a resource's getChild method.
"""
request.site = self
return self.resource.getChildWithDefault(pathEl, request)
def getResourceFor(self, request):
"""
Get a resource for a request.
        This iterates through the resource hierarchy, calling
getChildWithDefault on each resource it finds for a path element,
stopping when it hits an element where isLeaf is true.
"""
request.site = self
# Sitepath is used to determine cookie names between distributed
# servers and disconnected sites.
request.sitepath = copy.copy(request.prepath)
return resource.getChildForRequest(self.resource, request)
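# Hedged usage sketch (added, not part of the original module): a Site wraps a
# root resource and is handed to the reactor as a protocol factory, e.g.
#
#     from twisted.internet import reactor
#     from twisted.web.static import File
#
#     factory = Site(File("/srv/www"))
#     reactor.listenTCP(8080, factory)
#     reactor.run()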
|
|
from __future__ import division
import numpy as np
import pandas as pd
import netCDF4 as nc
from datetime import datetime, timedelta
import cPickle as pickle
import sys
import os
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv
import scipy.io as sio
from stationClass import station
from adcpClass import ADCP
from fvcomClass import FVCOM
from tidegaugeClass import Tidegauge
def mjd2num(x):
y = x + 678942
return y
def closest_point(points, lon, lat):
point_list = np.array([lon,lat]).T
print point_list
print points
closest_dist = ((point_list[:, 0] - points[:, 0, None])**2 +
(point_list[:, 1] - points[:, 1, None])**2)
print closest_dist
closest_point_indexes = np.argmin(closest_dist, axis=1)
return closest_point_indexes
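# Hedged usage sketch (added, not part of the original workflow): the helper
# below shows how closest_point maps observation locations onto model nodes;
# the coordinates are made up for illustration.
def _example_closest_point():
    obs_lonlat = np.array([[-66.34, 44.28]])        # (n_obs, 2) lon/lat pairs
    node_lon = np.array([-66.50, -66.34, -66.20])   # candidate node longitudes
    node_lat = np.array([44.10, 44.28, 44.40])      # candidate node latitudes
    # Returns the index of the nearest node for each observation -> array([1])
    return closest_point(obs_lonlat, node_lon, node_lat)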
def datetime2matlabdn(dt):
# ordinal = dt.toordinal()
mdn = dt + timedelta(days=366)
frac = (dt-datetime(dt.year, dt.month, dt.day, 0, 0, 0)).seconds / \
(24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
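# Worked example (added as a sanity check):
# datetime2matlabdn(datetime(2013, 6, 20, 12, 0, 0)) returns 735405.5,
# matching MATLAB's datenum(2013, 6, 20, 12, 0, 0).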
def main(fvFiles, adcpFiles, tideFiles, isStation=True, ax=[], debug=False):
#fvdebugData = FVCOM(fvdebug)
saveName = 'validationStruct.p'
#Name = 'june_2013_3D_station'
#Struct = {}
struct = np.array([])
for adcpFile in adcpFiles:
print adcpFile
adcpData = ADCP(adcpFile)
lonlat = np.array([adcpData.lon[0], adcpData.lat[0]]).T
print adcpData.mtime.shape
print adcpData.ua.shape
print adcpData.va.shape
print adcpData.surf.shape
adcpVelCoef = ut_solv(adcpData.mtime, adcpData.ua,
adcpData.va, adcpData.lat[0],
cnstit='auto', rmin=0.95, notrend=True,
                              method='ols', nodiagn=True, linci=True, conf_int=True)
adcpElevCoef = ut_solv(adcpData.mtime, adcpData.surf,
[], adcpData.lat[0],
cnstit='auto', rmin=0.95, notrend=True,
                               method='ols', nodiagn=True, linci=True, conf_int=True)
#adcpName = adcpFile.split('/')[-1].split('.')[0]
adcp_obs = {'ua':adcpData.ua,
'va':adcpData.va,
'elev':adcpData.surf,
'u':adcpData.east_vel,
'v':adcpData.north_vel,
'bins':adcpData.bins}
# adcp_obs = pd.DataFrame({'ua':adcpData.ua,
# 'va':adcpData.va,
# 'elev':adcpData.surf,
# 'u':adcpData.east_vel,
# 'v':adcpData.north_vel})
for fvFile in fvFiles:
print fvFile
saveName = fvFile + 'validationStruct.p'
if isStation:
fvData = station(fvFile)
ind = closest_point(lonlat, fvData.lon, fvData.lat)
else:
#ax = np.array([adcpData.lon[0], adcpData.lat[0]]).T
ax = [[adcpData.lon[0][0]], [adcpData.lat[0][0]]]
#ax = [adcpData.lon[0][0], adcpData.lat[0][0]]
fvData = FVCOM(fvFile, ax)
#print ax
# lonlat = np.array([[adcpData.lon[0][0],
# adcpData.lat[0][0]]])
# ind = closest_point(lonlat, fvData.lon, fvData.lat)
# print ind
# ind = fvData.closest_point([adcpData.lon[0][0]],
# [adcpData.lat[0][0]])
# right one
#ind = closest_point(lonlat, fvData.lon, fvData.lat)
#lonlat = np.array([adcpData.x[0], adcpData.y[0]]).T
#newind = closest_point(lonlat, fvdebugData.lonc, fvdebugData.latc)
#ind = closest_point(lonlat, fvData.x, fvData.y)
#new = np.array([fvdebugData.xc[newind], fvdebugData.yc[newind]])
#ind = closest_point(new.T, fvData.x, fvData.y)
print fvData.time.shape
print fvData.ua.shape
print fvData.ua
#print fvData.ua[:, ind].shape
#print fvData.va[:, ind].shape
#print fvData.lat[ind].shape
if isStation:
fvVelCoef = ut_solv(fvData.time, fvData.ua[:, ind].flatten(),
fvData.va[:, ind].flatten(),
adcpData.lat[0], cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True, conf_int=True)
print fvData.elev[:, ind].shape
fvElevCoef = ut_solv(fvData.time, fvData.elev[:, ind].flatten(), [],
adcpData.lat[0], cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True, conf_int=True)
mod = {'ua':fvData.ua[:, ind].flatten(),
'va':fvData.va[:, ind].flatten(),
'elev':fvData.elev[:, ind].flatten(),
'u':fvData.u,
'v':fvData.v}
else:
fvVelCoef = ut_solv(fvData.time, fvData.ua.flatten(),
fvData.va.flatten(),
adcpData.lat[0], cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True, conf_int=True)
#print fvData.elev[:, ind].shape
fvElevCoef = ut_solv(fvData.time, fvData.elev.flatten(), [],
adcpData.lat[0], cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True, conf_int=True)
if fvData.D3:
mod = {'ua':fvData.ua.flatten(),
'va':fvData.va.flatten(),
'elev':fvData.elev.flatten(),
'u':fvData.u,
'v':fvData.v}
else:
mod = {'ua':fvData.ua.flatten(),
'va':fvData.va.flatten(),
'elev':fvData.elev.flatten()}
obs_loc = {'name': adcpFile,
'type':'ADCP',
'lat':adcpData.lat[0],
'lon':adcpData.lon[0],
'obs_timeseries':adcp_obs,
'mod_timeseries':mod,
'obs_time':adcpData.mtime,
'mod_time':fvData.time,
'vel_obs_harmonics':adcpVelCoef,
'elev_obs_harmonics':adcpElevCoef,
'vel_mod_harmonics':fvVelCoef,
'elev_mod_harmonics':fvElevCoef}
#'adcp_bins':adcpData.bins}
# obs_loc = {'name': adcpName, 'type':'ADCP', 'lat':fvdebugData.lat[newind],
# 'lon':fvdebugData.lon[newind], 'obs_timeseries':adcp_obs,
# 'mod_timeseries':mod, 'obs_time':adcpData.mtime,
# 'mod_time':fvData.time, 'vel_obs_harmonics':adcpVelCoef,
# 'elev_obs_harmonics':adcpElevCoef,
# 'vel_mod_harmonics':fvVelCoef, 'elev_mod_harmonics':fvElevCoef}
struct = np.hstack((struct, obs_loc))
for tideFile in tideFiles:
print tideFile
tideData = Tidegauge(tideFile)
ut_constits = ['M2','S2','N2','K2','K1','O1','P1','Q1']
tideData.harmonics(cnstit=ut_constits, notrend=True,
rmin=0.95, method='ols', nodiagn=True, linci=True,
ordercnstit='frq')
tide_obs = {'data':tideData.data, 'elev':tideData.elev}
for fvFile in fvFiles:
print fvFile
if isStation:
fvData = station(fvFile)
ind = np.argmin(np.sqrt((fvData.lon-tideData.lon)**2+(fvData.lat-tideData.lat)**2))
#ind = closest_point(lonlat, fvData.lon, fvData.lat)
else:
#ax = np.array([adcpData.lon[0], adcpData.lat[0]]).T
ax = [[tideData.lon], [tideData.lat]]
fvData = FVCOM(fvFile, ax)
if isStation:
print fvData.elev[:, ind].shape
fvElevCoef = ut_solv(fvData.time, fvData.elev[:, ind].flatten(), [],
                    tideData.lat, cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True, conf_int=True)
mod = {'ua':fvData.ua[:, ind].flatten(),
'va':fvData.va[:, ind].flatten(),
'elev':fvData.elev[:, ind].flatten(),
'u':fvData.u,
'v':fvData.v}
else:
#print fvData.elev[:, ind].shape
fvElevCoef = ut_solv(fvData.time, fvData.elev.flatten(), [],
                    tideData.lat, cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True, conf_int=True)
if fvData.D3:
mod = {'ua':fvData.ua.flatten(),
'va':fvData.va.flatten(),
'elev':fvData.elev.flatten(),
'u':fvData.u,
'v':fvData.v}
else:
mod = {'ua':fvData.ua.flatten(),
'va':fvData.va.flatten(),
'elev':fvData.elev.flatten()}
obs_loc = {'name':tideFile, 'type':'TideGauge',
'mod_time':fvData.time,
'obs_time':tideData.time,
'lon':tideData.lon,
'lat':tideData.lat,
'elev_obs_harmonics':tideData.coef,
'elev_mod_harmonics': fvElevCoef,
'obs_timeseries':tide_obs,
'mod_timeseries':mod}
struct = np.hstack((struct, obs_loc))
saveName = 'validationStruct.p'
pickle.dump(struct, open(saveName, "wb"))
return struct
if __name__ == '__main__':
#fvFiles = ['/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/june_2013_3D/output/']
fvFiles = ['/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/calibration/bottom_roughness/2D/0.0015/output/dngrid_0001.nc',
'/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/calibration/bottom_roughness/2D/0.0020/output/dngrid_0001.nc',
'/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/calibration/bottom_roughness/2D/0.0025/output/dngrid_0001.nc',
'/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/calibration/bottom_roughness/2D/0.002848/output/dngrid_0001.nc',
'/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/calibration/bottom_roughness/2D/0.0030/output/dngrid_0001.nc']
adcpFiles = ['/EcoII/EcoEII_server_data_tree/data/observed/GP/ADCP/Flow_GP-130620-BPa_avg5.mat',
'/EcoII/EcoEII_server_data_tree/data/observed/GP/ADCP/Flow_GP-130620-BPb_avg5.mat']
fvdebug = '/EcoII/EcoEII_server_data_tree/workspace/simulated/FVCOM/dngrid/june_2013_3D/output/dngrid_0001_week2.nc'
tideFiles = \
['/EcoII/EcoEII_server_data_tree/data/observed/GP/TideGauge/Westport_015892_20140325_1212_Z.mat',
'/EcoII/EcoEII_server_data_tree/data/observed/DG/TideGauge/DigbyWharf_015893_20140115_2221_Z.mat']
#ind = [-66.3419, -66.3324, 44.2755, 44.2815]
ind = [-66.3419, -66.3324, 44.2755, 44.2815]
struct = main(fvFiles, adcpFiles, tideFiles, isStation=False, ax=ind)
|
|
import io
import os
import re
import rules._is_c_like as _is_c_like
import rules._strip_comments as _strip_comments
#Any unnecessarily qualified object instance is marked. See
# http://stackoverflow.com/questions/29293136/compiler-warning-for-unnecessary-namespaces
# This is based on an extremely simple lexical analysis/preprocessing. The following
# limitations apply:
# Makes no attempt to parse `#include`s or `#define`s. As such, this does not catch
# any issues related to macroed namespaces (and indeed, it may report false positives
# for such).
# Searches in all preprocessor branches (in a sane way, so e.g. this code, which has
# mismatched braces lexically, won't screw it up):
# #if 1
# namespace Foo {
# #else
# namespace FooBar {
# #endif
# /*...*/
# }
# Does not search in comments. TODO: maybe should?
# Completely ignores using namespace declarations
# If a type has the same name as its enclosing namespace, then it will be reported as
# a false positive. E.g.:
# namespace Foo {
# class Foo { void f(void); };
# void Foo::f(void) {} //false positive!
# }
# If there are two identifiers with the same name but one in an enclosed namespace, then
# this rule is not smart enough to tell that the qualification is necessary when
#   invoking the identifier in the enclosing namespace from within the enclosed namespace.
#TODO: rewrite to use parser as in "final_virtual.py".
class RuleUnnecessaryQualification(object):
NAME = "Unnecessary Qualification"
@staticmethod
def get_description(line_numbers):
        result = "Possible unnecessary qualification on line"
if len(line_numbers)>1: result+="s"
return result
@staticmethod
def rule(path,lines):
if not _is_c_like.main(path): return [] #Can only operate on C/C++ files
#Remove comments
temp = _strip_comments.main(lines)
lines2 = [line.real for line in temp]
#Create a recursive stack of nested preprocessor conditionals and extract a recursive stack of
# C/C++ tokens. For the regular expression sorcery used to do this, see this:
# #https://deplinenoise.wordpress.com/2012/01/04/python-tip-regex-based-tokenizer/
regex = re.compile(
"(::)|"+ #scope
"(\\s+)|"+ #whitespace
"(\".+\")|"+ #string
"(\\{)|"+ #open brace
"(\\})|"+ #close brace
"([a-zA-Z_]+\\w*)" #identifier
)
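        # Illustration (added): on the line 'void Foo::f(void) {}' this regex
        # yields the tokens 'void', 'Foo', '::', 'f', 'void', '{' and '}';
        # whitespace and string literals match but are discarded below.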
class Stack(object):
def __init__(self, lines,offset, level):
self.elements_sections = [[]]
self.level = level
self.start = offset
i = offset + 1
while i < len(lines):
line = lines[i]
if "#if" in line:
element = Stack(lines,i,level+1)
self.elements_sections[-1].append(element)
i = element.end
elif "#elif" in line or "#else" in line:
self.elements_sections.append([])
elif "#endif" in line:
break
else:
self.elements_sections[-1].append( (i+1,line) )
i += 1
self.end = i
def get_tokens(self):
tokens = []
for section in self.elements_sections:
section_tokens = []
for element in section:
if type(element) == type(()):
for match in re.finditer(regex,element[1]):
scope, whitespace, string, openbrace, closebrace, iden = match.groups()
## raw_input((scope, whitespace, iden))
if scope!=None: section_tokens.append((element[0], scope))
elif openbrace!=None: section_tokens.append((element[0], openbrace))
elif closebrace!=None: section_tokens.append((element[0],closebrace))
elif iden!=None: section_tokens.append((element[0], iden))
else:
section_tokens.append(element.get_tokens())
tokens.append(section_tokens)
return tokens
def __repr__(self):
def get_indent(level):
return " "*level
result = get_indent(2*self.level)+"begin stack\n"
for section in self.elements_sections:
result += get_indent(2*self.level+1)+"begin section\n"
for element in section:
if type(element) == type(()):
result += get_indent(2*self.level+2)+"Line %d: \"%s\"\n" % (element[0],element[1][:-1])
else:
result += str(element) #recurse
result += get_indent(2*self.level+1)+"end section\n"
result += get_indent(2*self.level)+"end stack\n"
return result
stack = Stack(lines2,0, 0)
## print(stack); raw_input()
#For each line in this recursive stack, get the associated list of tokens. This produces
# another recursive stack, with each element a token instead of a line.
tokens = stack.get_tokens()
## def print_tokens(tokens,depth):
## for token in tokens:
## if type(token) == type([]):
## print(" "*depth + "begin")
## print_tokens(token,depth+1)
## print(" "*depth + "end")
## else:
## print(" "*depth + "%d: \"%s\""%token)
## print_tokens(tokens,0); raw_input()
#Flatten this stack into a list of tokens broken up by namespace declarations, open braces, and closing braces.
# Then find any overqualified tokens
result = []
def flatten(stack_base, tokens, depth):
## def print_stacks(with_unfinished=True):
## if with_unfinished: print(" "*depth + "Stack is now: "+str(namespace_stack)+" + "+str(unfinished_stacks))
## else: print(" "*depth + "Stack is now: "+str(namespace_stack))
i = 0
namespace_stack = list(stack_base)
unfinished_stacks = []
while i < len(tokens):
element = tokens[i]
## print_stacks()
## print(" "*depth + "ELEMENT: "+str(element))
if type(element) == type([]):
## print(" "*depth + "begin")
ret = flatten(namespace_stack, element, depth+1)
if len(ret) > 0:
## print(" "*depth + "Got unfinished stack: "+str(ret))
for us in unfinished_stacks: assert len(us)==len(ret) #Different preprocessor sides have different depths. Not implemented!
unfinished_stacks.append(ret)
## print_stacks()
## print(" "*depth + "end")
i += 1
else:
line_number,token = element
if token == "namespace": #[namespace] [name] [{]
if i>=1 and type(tokens[i-1])==type(()) and tokens[i-1][1]=="using": #Not supported
i += 1
else:
assert i+2 < len(tokens) and type(tokens[i+1])==type(()) and type(tokens[i+2])==type(())
namespace = tokens[i+1][1]
#Skip brace
namespace_stack.append(namespace+"{")
## print_stacks()
i += 3
elif token == "{": #block
namespace_stack.append(token)
## print_stacks()
i += 1
elif token == "}": #end block or namespace
namespace_stack = namespace_stack[:-1]
## print_stacks()
i += 1
else: #an identifier or keyword
preceding = None
if i>=2 and type(tokens[i-2])==type(()): preceding=tokens[i-2][1]
T = [token]
while i+1<len(tokens) and type(tokens[i+1])==type(()) and tokens[i+1][1]=="::":
T.append(tokens[i+2][1])
i += 2
def check(stack,T):
## print("Checking: "+str(stack))
if len(stack)==0: return
elem0 = stack[0]
if type(elem0) == type(""):
if T[0] == elem0[:-1]:
result.append(line_number)
return
check(stack[1:],T)
else:
for substack in elem0:
check(substack,T)
if len(T) > 1: #Must have at least one qualification
if preceding==None or preceding!="friend": #Don't check "friend class [iden]", since this needs to be qualified if we're inside a deeper namespace
if len(unfinished_stacks)>0: check(namespace_stack+[unfinished_stacks],T)
else: check(namespace_stack,T)
i += 1
if len(unfinished_stacks)>0: namespace_stack.append(unfinished_stacks)
## print_stacks(False)
if len(namespace_stack) > len(stack_base):
return list(namespace_stack[len(stack_base):])
return []
flatten([], tokens, 0)
return result
|
|
# Author: Giacomo Vianello (giacomov@stanford.edu)
# Copyright 2014 EXTraS.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import numpy as np
import numexpr
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("bayesian_blocks")
__all__ = ['bayesian_blocks', 'bayesian_blocks_not_unique']
def bayesian_blocks_not_unique(tt, ttstart, ttstop, p0):
# Verify that the input array is one-dimensional
tt = np.asarray(tt, dtype=float)
assert tt.ndim == 1
# Now create the array of unique times
unique_t = np.unique(tt)
t = tt
tstart = ttstart
tstop = ttstop
# Create initial cell edges (Voronoi tessellation) using the unique time stamps
edges = np.concatenate([[tstart],
0.5 * (unique_t[1:] + unique_t[:-1]),
[tstop]])
# The last block length is 0 by definition
block_length = tstop - edges
if np.sum((block_length <= 0)) > 1:
        raise RuntimeError("Events appear to be out of order! Check the time ordering, or look for duplicated events.")
N = unique_t.shape[0]
# arrays to store the best configuration
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
# Pre-computed priors (for speed)
# eq. 21 from Scargle 2012
priors = 4 - np.log(73.53 * p0 * np.power(np.arange(1, N + 1), -0.478))
# Count how many events are in each Voronoi cell
x, _ = np.histogram(t, edges)
# Speed tricks: resolve once for all the functions which will be used
# in the loop
cumsum = np.cumsum
log = np.log
argmax = np.argmax
numexpr_evaluate = numexpr.evaluate
arange = np.arange
# Decide the step for reporting progress
incr = max(int(float(N) / 100.0 * 10), 1)
logger.debug("Finding blocks...")
# This is where the computation happens. Following Scargle et al. 2012.
# This loop has been optimized for speed:
# * the expression for the fitness function has been rewritten to
# avoid multiple log computations, and to avoid power computations
# * the use of scipy.weave and numexpr has been evaluated. The latter
# gives a big gain (~40%) if used for the fitness function. No other
# gain is obtained by using it anywhere else
# Set numexpr precision to low (more than enough for us), which is
# faster than high
oldaccuracy = numexpr.set_vml_accuracy_mode('low')
numexpr.set_num_threads(1)
numexpr.set_vml_num_threads(1)
for R in range(N):
br = block_length[R + 1]
T_k = block_length[:R + 1] - br
# N_k: number of elements in each block
# This expression has been simplified for the case of
# unbinned events (i.e., one element in each block)
# It was:
N_k = cumsum(x[:R + 1][::-1])[::-1]
# Now it is:
#N_k = arange(R + 1, 0, -1)
# Evaluate fitness function
# This is the slowest part, which I'm speeding up by using
# numexpr. It provides a ~40% gain in execution speed.
fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
optimization='aggressive',
local_dict={'N_k': N_k, 'T_k': T_k})
p = priors[R]
A_R = fit_vec - p
A_R[1:] += best[:R]
i_max = argmax(A_R)
last[R] = i_max
best[R] = A_R[i_max]
pass
numexpr.set_vml_accuracy_mode(oldaccuracy)
logger.debug("Done\n")
# Now find blocks
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
finalEdges = edges[change_points]
return np.asarray(finalEdges)
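# Hedged usage sketch (added): the arrival times must be sorted and bracketed
# by the start/stop of the observation, e.g.
#     edges = bayesian_blocks_not_unique(tt, ttstart, ttstop, p0=1e-3)
# returns the block edges as a numpy array.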
def bayesian_blocks(tt, ttstart, ttstop, p0, bkgIntegralDistr=None, myLikelihood=None):
"""Divide a series of events characterized by their arrival time in blocks
of perceptibly constant count rate. If the background integral distribution
    is given, divide the series into blocks where the difference with respect to
the background is perceptibly constant.
Args:
tt (iterable): An iterable (list, numpy.array...) containing the arrival
time of the events.
NOTE: the input array MUST be time-ordered, and without
duplicated entries. To ensure this, you may execute the
following code:
tt_array = numpy.asarray(tt)
tt_array = numpy.unique(tt_array)
tt_array.sort()
before running the algorithm.
        p0 (float): The probability of finding a variation (i.e., creating a new
                    block) when there is none. In other words, the probability of
                    a Type I error, i.e., rejecting the null hypothesis when it is
                    true. All found variations will have a post-trial significance
                    larger than p0.
bkgIntegralDistr (function, optional): the integral distribution for the
background counts. It must be a function of the form f(x),
which must return the integral number of counts expected from
the background component between time 0 and x.
Returns:
numpy.array: the edges of the blocks found
"""
# Verify that the input array is one-dimensional
tt = np.asarray(tt, dtype=float)
assert tt.ndim == 1
if (bkgIntegralDistr is not None):
        # Transforming the inhomogeneous Poisson process into a homogeneous one
        # with rate 1, by changing the time axis according to the background rate
logger.debug("Transforming the inhomogeneous Poisson process to a homogeneous one with rate 1...")
t = np.array(bkgIntegralDistr(tt))
logger.debug("done")
# Now compute the start and stop time in the new system
tstart = bkgIntegralDistr(ttstart)
tstop = bkgIntegralDistr(ttstop)
else:
t = tt
tstart = ttstart
tstop = ttstop
pass
# Create initial cell edges (Voronoi tessellation)
edges = np.concatenate([[tstart],
0.5 * (t[1:] + t[:-1]),
[tstop]])
# Create the edges also in the original time system
edges_ = np.concatenate([[ttstart],
0.5 * (tt[1:] + tt[:-1]),
[ttstop]])
# Create a lookup table to be able to transform back from the transformed system
# to the original one
lookupTable = {key: value for (key, value) in zip(edges, edges_)}
# The last block length is 0 by definition
block_length = tstop - edges
if np.sum((block_length <= 0)) > 1:
        raise RuntimeError("Events appear to be out of order! Check the time ordering, or look for duplicated events.")
N = t.shape[0]
# arrays to store the best configuration
best = np.zeros(N, dtype=float)
last = np.zeros(N, dtype=int)
best_new = np.zeros(N, dtype=float)
last_new = np.zeros(N, dtype=int)
# Pre-computed priors (for speed)
if (myLikelihood):
priors = myLikelihood.getPriors(N, p0)
else:
# eq. 21 from Scargle 2012
#priors = 4 - np.log(73.53 * p0 * np.power(np.arange(1, N + 1), -0.478))
priors = [4 - np.log(73.53 * p0 * N**(-0.478))] * N
pass
x = np.ones(N)
# Speed tricks: resolve once for all the functions which will be used
# in the loop
cumsum = np.cumsum
log = np.log
argmax = np.argmax
numexpr_evaluate = numexpr.evaluate
arange = np.arange
# Decide the step for reporting progress
incr = max(int(float(N) / 100.0 * 10), 1)
logger.debug("Finding blocks...")
# This is where the computation happens. Following Scargle et al. 2012.
# This loop has been optimized for speed:
# * the expression for the fitness function has been rewritten to
# avoid multiple log computations, and to avoid power computations
# * the use of scipy.weave and numexpr has been evaluated. The latter
# gives a big gain (~40%) if used for the fitness function. No other
# gain is obtained by using it anywhere else
times = []
TSs = []
# Set numexpr precision to low (more than enough for us), which is
# faster than high
oldaccuracy = numexpr.set_vml_accuracy_mode('low')
numexpr.set_num_threads(1)
numexpr.set_vml_num_threads(1)
for R in range(N):
br = block_length[R + 1]
T_k = block_length[:R + 1] - br
# N_k: number of elements in each block
# This expression has been simplified for the case of
# unbinned events (i.e., one element in each block)
# It was:
# N_k = cumsum(x[:R + 1][::-1])[::-1]
# Now it is:
N_k = arange(R + 1, 0, -1)
# Evaluate fitness function
# This is the slowest part, which I'm speeding up by using
# numexpr. It provides a ~40% gain in execution speed.
fit_vec = numexpr_evaluate('''N_k * log(N_k/ T_k) ''',
optimization='aggressive')
p = priors[R]
A_R = fit_vec - p
A_R[1:] += best[:R]
i_max = argmax(A_R)
last[R] = i_max
best[R] = A_R[i_max]
# if(myLikelihood):
# logger.debug("Maximum old: %i, Maximum new: %i" %(i_max,i_max_new))
# logger.debug("Best old: %s, Best new: %s" %(best[R],best_new[R]))
pass
numexpr.set_vml_accuracy_mode(oldaccuracy)
# if(myLikelihood):
# from operator import itemgetter
# index, element = max(enumerate(TSs), key=itemgetter(1))
# t1,t2 = times[index]
# print("Maximum TS is %s in time interval %s-%s" %(element,t1,t2))
#
# best = best_new
# last = last_new
# map(oneLoop,range(N))
logger.debug("Done\n")
# Now find blocks
change_points = np.zeros(N, dtype=int)
i_cp = N
ind = N
while True:
i_cp -= 1
change_points[i_cp] = ind
if ind == 0:
break
ind = last[ind - 1]
change_points = change_points[i_cp:]
edg = edges[change_points]
# Transform the found edges back into the original time system
if (bkgIntegralDistr is not None):
finalEdges = map(lambda x: lookupTable[x], edg)
else:
finalEdges = edg
pass
return np.asarray(finalEdges)
# To be run with a profiler
if __name__ == "__main__":
tt = np.random.uniform(0, 1000, int(sys.argv[1]))
tt.sort()
with open("sim.txt", "w+") as f:
for t in tt:
f.write("%s\n" % (t))
res = bayesian_blocks(tt, 0, 1000, 1e-3, None)
print res
|
|
from __future__ import division, print_function
import copy
import numbers
import numpy
import pandas
from scipy.special import expit, logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble._gradient_boosting import _random_sample_mask
from sklearn.ensemble.gradient_boosting import LossFunction
from sklearn.tree.tree import DecisionTreeRegressor, DTYPE
from sklearn.utils.random import check_random_state
from sklearn.utils.validation import check_arrays, column_or_1d, array2d
from hep_ml.commonutils import check_sample_weight, generate_sample, map_on_cluster, indices_of_values
from hep_ml.losses import AbstractLossFunction
from transformations import enhance_data, Shuffler
real_s = 691.988607712
real_b = 410999.847322
#region Functions for measurements
def get_higgs_data(train_file = '/Users/axelr/ipython/datasets/higgs/training.csv'):
data = pandas.read_csv(train_file, index_col='EventId')
answers_bs = numpy.ravel(data.Label)
weights = numpy.ravel(data.Weight)
data = data.drop(['Label', 'Weight'], axis=1)
answers = numpy.zeros(len(answers_bs), dtype=numpy.int)
answers[answers_bs == 's'] = 1
return data, answers, weights
def AMS(answers, predictions, sample_weight):
""" Predictions are classes """
assert len(answers) == len(predictions) == len(sample_weight)
predictions = column_or_1d(predictions)
total_s = numpy.sum(sample_weight[answers > 0.5])
total_b = numpy.sum(sample_weight[answers < 0.5])
s = numpy.sum(sample_weight[answers * predictions > 0.5])
b = numpy.sum(sample_weight[(1 - answers) * predictions > 0.5])
s *= real_s / total_s
b *= real_b / total_b
br = 10.
radicand = 2 * ( (s+b+br) * numpy.log(1.0 + s/(b+br)) - s)
if radicand < 0:
raise ValueError('Radicand is negative')
else:
return numpy.sqrt(radicand)
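# Worked example (added): with rescaled counts s = 100, b = 1000 and the
# regularisation term br = 10, the radicand is
# 2 * (1110 * ln(1 + 100/1010) - 100) ~= 9.59, so AMS ~= 3.10.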
def compute_ams_on_cuts(answers, predictions, sample_weight):
""" Prediction is probabilities"""
assert len(answers) == len(predictions) == len(sample_weight)
answers = column_or_1d(answers)
predictions = column_or_1d(predictions)
sample_weight = column_or_1d(sample_weight)
order = numpy.argsort(predictions)[::-1]
reordered_answers = answers[order]
reordered_weights = sample_weight[order]
s_cumulative = numpy.cumsum(reordered_answers * reordered_weights)
b_cumulative = numpy.cumsum((1 - reordered_answers) * reordered_weights)
b_cumulative *= real_b / b_cumulative[-1]
s_cumulative *= real_s / s_cumulative[-1]
br = 10.
s = s_cumulative
b = b_cumulative
radicands = 2 * ((s + b + br) * numpy.log(1.0 + s/(b + br)) - s)
return predictions[order], radicands
def optimal_AMS(answers, predictions, sample_weight):
""" Prediction is probabilities """
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
return numpy.sqrt(numpy.max(radicands))
def plot_ams_report(answers, predictions, sample_weight=None):
import pylab
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
pylab.figure(figsize=(18, 9))
pylab.subplot(131)
pylab.title('On cuts')
pylab.plot(cuts, numpy.sqrt(numpy.clip(radicands, 0, 100)))
pylab.subplot(132)
pylab.title('On signal order')
order = numpy.argsort(predictions)[::-1]
pylab.plot( numpy.sqrt(numpy.clip(radicands[answers[order] > 0.5], 0, 100)) )
pylab.subplot(133)
pylab.title('On common order')
pylab.plot( numpy.sqrt(radicands) )
def plot_AMS_on_cuts(answers, predictions, sample_weight):
""" Prediction is probabilities """
import pylab
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
pylab.plot(cuts, numpy.sqrt(numpy.clip(radicands, 0, 100)))
def plot_AMS_on_signal_order(answers, predictions, sample_weight):
""" Prediction is probabilities """
import pylab
cuts, radicands = compute_ams_on_cuts(answers, predictions, sample_weight)
order = numpy.argsort(predictions)[::-1]
pylab.plot( numpy.sqrt(numpy.clip(radicands[answers[order] > 0.5], 0, 100)) )
#endregion
#region Losses
class MyLossFunction(BaseEstimator):
def fit(self, X, y, sample_weight=None):
pass
def negative_gradient(self, y, y_pred, sample_weight=None):
raise NotImplementedError()
def update_terminal_regions(self, tree, X, y, residual, pred, sample_mask, sample_weight):
assert y.ndim == 1 and residual.ndim == 1 and \
pred.ndim == 1 and sample_mask.ndim == 1 and sample_weight.ndim == 1
# residual is negative gradient
# compute leaf for each sample in ``X``.
terminal_regions = tree.apply(X)
# mask all which are not in sample mask.
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
for leaf, leaf_indices in indices_of_values(masked_terminal_regions):
if leaf == -1:
continue
self._update_terminal_region(tree, terminal_regions=masked_terminal_regions,
leaf=leaf, X=X, y=y, residual=residual, pred=pred,
sample_weight=sample_weight, leaf_indices=leaf_indices)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight, leaf_indices):
        """This function should select better values for the leaves"""
pass
class LogitLossFunction(MyLossFunction):
def __init__(self, shift=0.):
MyLossFunction.__init__(self)
self.shift = shift
def __call__(self, y, y_pred, sample_weight=None):
y_signed = 2. * y - 1
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return numpy.sum(sample_weight * numpy.log(1 + numpy.exp(- y_signed * y_pred - self.shift)))
def negative_gradient(self, y, y_pred, sample_weight=None):
y_signed = 2. * y - 1
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return sample_weight * y_signed * expit(-y_signed * y_pred - self.shift)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight, leaf_indices):
"""Making one Newton step"""
# terminal_region = numpy.where(terminal_regions == leaf)[0]
terminal_region = leaf_indices
y = y.take(terminal_region, axis=0)
y_signed = 2. * y - 1
pred = pred.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region)
argument = -y_signed * pred - self.shift
n_gradient = numpy.sum(sample_weight * y_signed * expit(argument))
laplacian = numpy.sum(sample_weight / numpy.logaddexp(0., argument) / numpy.logaddexp(0., -argument))
tree.value[leaf, 0, 0] = n_gradient / laplacian
class AdaLossFunction(MyLossFunction):
def __init__(self, signal_curvature=1.):
self.signal_curvature = signal_curvature
# we need only one variable
MyLossFunction.__init__(self)
def fit(self, X, y, sample_weight=None):
pass
def _signed_multiplier(self, y):
result = numpy.ones(len(y), dtype=float)
result[y > 0.5] = - self.signal_curvature
return result
def _weight_multiplier(self, y):
result = numpy.ones(len(y), dtype=float)
result[y > 0.5] = 1 / self.signal_curvature
return result
def __call__(self, y, y_pred, sample_weight=None):
signed_multiplier = self._signed_multiplier(y)
weight_multiplier = self._weight_multiplier(y)
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return numpy.sum(sample_weight * weight_multiplier * numpy.exp(y_pred * signed_multiplier))
def negative_gradient(self, y, y_pred, sample_weight=None, **kargs):
multiplier = self._signed_multiplier(y)
y_signed = 2. * y - 1
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
return sample_weight * y_signed * numpy.exp(y_pred * multiplier)
def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
residual, pred, sample_weight, leaf_indices):
terminal_region = leaf_indices
curv = self.signal_curvature
y = y.take(terminal_region, axis=0)
pred = pred.take(terminal_region, axis=0)
sample_weight = sample_weight.take(terminal_region)
w_sig = numpy.sum(sample_weight[y > 0.5] * numpy.exp(- curv * pred[y > 0.5]))
w_bck = numpy.sum(sample_weight[y < 0.5] * numpy.exp(pred[y < 0.5]))
# minimizing w_sig * exp(-curv * x) / curv + w_bck * exp(x)
w_sum = w_sig + w_bck
w_sig += 1e-4 * w_sum
w_bck += 1e-4 * w_sum
tree.value[leaf, 0, 0] = 1 / (1. + curv) * numpy.log(w_sig / w_bck)
#endregion
#region Interpolation
def interpolate(vals, step, steps, use_log=False):
if isinstance(vals, numbers.Number):
return vals
t = numpy.clip(step / float(steps), 0, 1)
assert len(vals) == 2, 'Not two values'
if use_log:
return numpy.exp(numpy.interp(t, [0., 1.], numpy.log(vals)))
else:
return numpy.interp(t, [0., 1.], vals)
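# Hedged usage examples (added): a scalar is returned unchanged, while a pair
# of values is interpolated over the boosting iterations, e.g.
#     interpolate([1.0, 0.1], step=50, steps=100)                # -> 0.55
#     interpolate([1.0, 0.1], step=50, steps=100, use_log=True)  # -> ~0.316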
#endregion
#region GradientBoosting
class GradientBoosting(BaseEstimator, ClassifierMixin):
def __init__(self, loss,
n_estimators=10,
learning_rate=1.,
max_depth=15,
min_samples_leaf=5,
min_samples_split=2,
max_features='auto',
subsample=1.,
criterion='mse',
splitter='best',
weights_in_loss=True,
update_tree=True,
update_on='all',
smearing=0.0,
recount_step=1000,
random_state=None):
"""
Supports only two classes
:type loss: LossFunction
:type n_estimators: int,
:type learning_rate: float,
:type max_depth: int | NoneType,
:type min_samples_leaf: int,
:type min_samples_split: int,
:type max_features: int | 'auto',
:type subsample: float,
:type splitter: str,
:type weights_in_loss: bool,
:type update_on: str, 'all', 'same', 'other', 'random'
:type smearing: float
:type init_smearing: float
:rtype:
"""
self.loss = loss
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.subsample = subsample
self.splitter = splitter
self.criterion = criterion
self.weights_in_loss = weights_in_loss
self.random_state = random_state
self.update_tree = update_tree
self.update_on = update_on
self.smearing = smearing
self.recount_step = recount_step
def fit(self, X, y, sample_weight=None):
shuffler = Shuffler(X, random_state=self.random_state)
X, y = check_arrays(X, y, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
y = column_or_1d(y, warn=True)
n_samples = len(X)
n_inbag = int(self.subsample * n_samples)
sample_weight = check_sample_weight(y, sample_weight=sample_weight).copy()
self.random_state = check_random_state(self.random_state)
# skipping all checks
assert self.update_on in ['all', 'same', 'other', 'random']
y_pred = numpy.zeros(len(y), dtype=float)
self.classifiers = []
self.learning_rates = []
self.loss_values = []
self.loss = copy.copy(self.loss)
self.loss.fit(X, y, sample_weight=sample_weight)
iter_X = shuffler.generate(0.)
prev_smearing = 1
for iteration in range(self.n_estimators):
if iteration % self.recount_step == 0:
if prev_smearing > 0:
iter_smearing = interpolate(self.smearing, iteration, self.n_estimators)
prev_smearing = iter_smearing
iter_X = shuffler.generate(iter_smearing)
iter_X, = check_arrays(iter_X, dtype=DTYPE, sparse_format="dense", check_ccontiguous=True)
y_pred = numpy.zeros(len(y))
y_pred += sum(cl.predict(X) * rate for rate, cl in zip(self.learning_rates, self.classifiers))
self.loss_values.append(self.loss(y, y_pred, sample_weight=sample_weight))
tree = DecisionTreeRegressor(
criterion=self.criterion,
splitter=self.splitter,
max_depth=interpolate(self.max_depth, iteration, self.n_estimators),
min_samples_split=self.min_samples_split,
min_samples_leaf=interpolate(self.min_samples_leaf, iteration, self.n_estimators, use_log=True),
max_features=self.max_features,
random_state=self.random_state)
sample_mask = _random_sample_mask(n_samples, n_inbag, self.random_state)
loss_weight = sample_weight if self.weights_in_loss else numpy.ones(len(sample_weight))
tree_weight = sample_weight if not self.weights_in_loss else numpy.ones(len(sample_weight))
residual = self.loss.negative_gradient(y, y_pred, sample_weight=loss_weight)
tree.fit(numpy.array(iter_X)[sample_mask, :],
residual[sample_mask],
sample_weight=tree_weight[sample_mask], check_input=False)
# update tree leaves
if self.update_tree:
if self.update_on == 'all':
update_mask = numpy.ones(len(sample_mask), dtype=bool)
elif self.update_on == 'same':
update_mask = sample_mask
elif self.update_on == 'other':
update_mask = ~sample_mask
else: # random
update_mask = _random_sample_mask(n_samples, n_inbag, self.random_state)
self.loss.update_terminal_regions(tree.tree_, X=iter_X, y=y, residual=residual, pred=y_pred,
sample_mask=update_mask, sample_weight=sample_weight)
iter_learning_rate = interpolate(self.learning_rate, iteration, self.n_estimators, use_log=True)
y_pred += iter_learning_rate * tree.predict(X)
self.classifiers.append(tree)
self.learning_rates.append(iter_learning_rate)
return self
def decision_function(self, X):
X = array2d(X, dtype=DTYPE)
result = numpy.zeros(len(X))
for rate, estimator in zip(self.learning_rates, self.classifiers):
result += rate * estimator.predict(X)
return result
def staged_decision_function(self, X):
X = array2d(X, dtype=DTYPE)
result = numpy.zeros(len(X))
for rate, classifier in zip(self.learning_rates, self.classifiers):
result += rate * classifier.predict(X)
yield result
@staticmethod
def _score_to_proba(score):
result = numpy.zeros([len(score), 2], dtype=float)
result[:, 1] = expit(score / 100.)
result[:, 0] = 1. - result[:, 1]
return result
def _proba_to_score(self, proba):
# for init_estimator
return numpy.clip(logit(proba[:, 1]), -5., 5.)
def predict(self, X):
return numpy.argmax(self.predict_proba(X), axis=1)
def predict_proba(self, X):
return self._score_to_proba(self.decision_function(X))
def staged_predict_proba(self, X):
for score in self.staged_decision_function(X):
yield self._score_to_proba(score)
def test_gradient_boosting(size=100, n_features=10):
trainX, trainY = generate_sample(size, n_features)
testX, testY = generate_sample(size, n_features)
for loss in [AdaLossFunction()]:
for update in ['all', 'same', 'other', 'random']:
gb = GradientBoosting(loss=loss, update_on=update, smearing=[0.1, -0.1])
score = gb.fit(trainX, trainY).score(testX, testY)
print(update, score)
test_gradient_boosting()
#endregion
#region Reweighters
def normalize_weight(y, weights, sig_weight=1., pow_sig=1., pow_bg=1.):
result = numpy.copy(weights)
assert numpy.all((y == 0) | (y == 1)), 'Supports only two classes'
result[y == 1] **= pow_sig
result[y == 0] **= pow_bg
result[y == 1] /= numpy.mean(result[y == 1]) / sig_weight
result[y == 0] /= numpy.mean(result[y == 0])
return result
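# Hedged example (added): with unit powers, the signal class ends up with mean
# weight ``sig_weight`` and the background class with mean weight 1, e.g.
#     normalize_weight(numpy.array([1, 1, 0, 0]), numpy.array([2., 4., 1., 3.]),
#                      sig_weight=0.1)
#     # -> [0.0667, 0.1333, 0.5, 1.5]  (signal mean 0.1, background mean 1.0)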
class ReweightingGB(GradientBoosting):
def __init__(self, loss,
sig_weight=1., pow_sig=1., pow_bg=1.,
n_estimators=10, learning_rate=1., max_depth=None, min_samples_leaf=5, min_samples_split=2,
max_features='auto', criterion='mse',
subsample=1., splitter='best', weights_in_loss=True, update_tree=True,
update_on='all', smearing=0.01,
init_estimator=None, init_smearing=0.05, recount_step=1000, random_state=None):
GradientBoosting.__init__(self, loss=loss, n_estimators=n_estimators, learning_rate=learning_rate,
max_depth=max_depth, min_samples_leaf=min_samples_leaf,
min_samples_split=min_samples_split, max_features=max_features, criterion=criterion,
subsample=subsample, splitter=splitter, weights_in_loss=weights_in_loss,
update_on=update_on, update_tree=update_tree, random_state=random_state,
recount_step=recount_step,
smearing=smearing)
# Everything should be set via set_params
self.sig_weight = sig_weight
self.pow_bg = pow_bg
self.pow_sig = pow_sig
def fit(self, X, y, sample_weight=None):
sample_weight = normalize_weight(y, sample_weight, sig_weight=self.sig_weight, pow_sig=self.pow_sig,
pow_bg=self.pow_bg)
return GradientBoosting.fit(self, X, y, sample_weight=sample_weight)
base_gb = ReweightingGB(loss=AdaLossFunction())
base_gb.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=400,
smearing=0.01, max_features=13, update_tree=True, max_depth=16, subsample=0.5,
sig_weight=0.1, weights_in_loss=False, update_on='all')
base_gb_short = ReweightingGB(loss=AdaLossFunction())
base_gb_short.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=150, n_estimators=500,
smearing=0.0, max_features=16, update_tree=True, max_depth=14, subsample=0.4,
sig_weight=0.1, weights_in_loss=False, update_on='all')
base_gb_no_shuffle = ReweightingGB(loss=AdaLossFunction())
base_gb_no_shuffle.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=250,
smearing=0., max_features=13, update_tree=True, max_depth=16, subsample=0.5,
sig_weight=0.1, weights_in_loss=False, update_on='all')
base_gb_test = ReweightingGB(loss=AdaLossFunction())
base_gb_test.set_params(loss__signal_curvature=0.7, learning_rate=0.03, min_samples_leaf=125, n_estimators=1,
smearing=0.01, max_features=15, update_tree=True, max_depth=16, subsample=0.5,
sig_weight=0.1, weights_in_loss=False, update_on='all')
#endregion
"""
import gradient_boosting as gb
data, y, w = gb.get_higgs_data()
voter = gb.base_gb
voter.set_params(n_estimators=10)
voter.fit(gb.enhance_data(data), y, w)
"""
|
|
import urllib2
import requests
from bs4 import BeautifulSoup
import time
import random
import pandas as pd
class Scrape(object):
SYMBOL_FILES_PATH = '../../symbols/'
QUANDL_INDICES = 'https://s3.amazonaws.com/quandl-static-content/Ticker+CSV%27s/Indicies/'
QUANDL_FUTURES = 'https://s3.amazonaws.com/quandl-static-content/Ticker+CSV%27s/Futures/'
QUANDL_COMMODITIES = 'https://s3.amazonaws.com/quandl-static-content/Ticker+CSV%27s/'
QUANDL_CBOE = 'http://www.cboe.com/publish/ScheduledTask/MktData/datahouse/'
def scrape_page(self, url):
"""
:param url: URL to scrape using beautiful soup
        :return: a BeautifulSoup object parsed from the page content
"""
r = requests.get(url)
soup = BeautifulSoup(r.content)
return soup
def scrape_remote_file(self, file_url, local_file_path, local_file_name):
remote_file = urllib2.urlopen(file_url)
output = open(local_file_path+local_file_name, 'wb')
output.write(remote_file.read())
output.close()
def scrape_remote_file_by_page(self, file_url, local_file_path, local_file_name):
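        # Appends successive pages (file_url + page number) to a single local
        # file; the first failed request ends the loop via the bare except below.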
page_num = 1
output = open(local_file_path+local_file_name, 'wb')
try:
while 1:
remote_file = urllib2.urlopen(file_url+str(page_num))
output.write(remote_file.read())
page_num += 1
except:
pass
output.close()
def scrape_finviz_codes_overview(self, url_end=7141, sym_per_page=20, file_path=SYMBOL_FILES_PATH):
"""
:param url_end: Total number of stocks + 20
:param sym_per_page: Total number of symbols per page on the finviz.com screener tab
:param file_path: Path to store the file of symbols
:return:
"""
data_url = "http://www.finviz.com/screener.ashx?v=111&r="
header_url = data_url + "1"
url_start = 1
#url_end = 7141
#sym_per_page = 20
pages = range(url_start, url_end, sym_per_page)
soup = self.scrape_page(header_url)
#header = soup.find_all("tr",{"align" :"center"})
# This gets the header items
# information columns will store: Ticker, Company, Sector, Industry and Country
info_columns = []
# Data columns will store: Ticker, Market Cap, P/E, Price, Change and Volume
data_columns = []
#find total number of stocks
total_stocks = int(str(soup.find_all("td", {"class" : "count-text"})[0].contents[1]).split(' ')[0])
index = range(0, total_stocks)
#info_columns.append(soup.find_all("tr",{"align" : "center"})[0].find_all("td",{"style" : "cursor:pointer;"})[0].text)
info_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[1].text)
info_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[2].text)
info_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[3].text)
info_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[4].text)
info_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[5].text)
#data_columns.append(soup.find_all("tr",{"align" : "center"})[0].find_all("td",{"style" : "cursor:pointer;"})[1].text)
data_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[6].text)
#data_columns.append(soup.find_all("tr",{"align" : "center"})[0].find_all("td",{"style" : "cursor:pointer;"})[7].text)
data_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[8].text)
data_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[9].text)
data_columns.append(soup.find_all("tr", {"align" : "center"})[0].find_all("td", {"style" : "cursor:pointer;"})[10].text)
#print data_columns
# first row returns the No. This can become a temporary index in a dataframe
#Ignore the No.
#print soup.find_all("td",{"align":"right","class":"body-table-nw"})[0].contents[0]
# create dataframes
df_info = pd.DataFrame(index = index, columns = info_columns)
df_data = pd.DataFrame(index = index, columns = data_columns)
sym_info_count = range(0, 100, 5)
sym_data_count = range(0, 115, 6)
for page in pages:
fetch_url = data_url + str(page)
print fetch_url
soup = self.scrape_page(fetch_url)
snum = 0
for i in sym_info_count:
try:
info_index = int(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[snum].contents[0])-1
df_info[info_columns[0]].ix[info_index] = soup.find_all("td", {"align":"left", "class":"body-table-nw"})[i].contents[0].contents[0]
df_info[info_columns[1]].ix[info_index] = soup.find_all("td", {"align":"left", "class":"body-table-nw"})[i+1].contents[0]
df_info[info_columns[2]].ix[info_index] = soup.find_all("td", {"align":"left", "class":"body-table-nw"})[i+2].contents[0]
df_info[info_columns[3]].ix[info_index] = soup.find_all("td", {"align":"left", "class":"body-table-nw"})[i+3].contents[0]
df_info[info_columns[4]].ix[info_index] = soup.find_all("td", {"align":"left", "class":"body-table-nw"})[i+4].contents[0]
except:
print 'Issue with Info count for loop'
pass
snum +=6
for j in sym_data_count:
try:
data_index = int(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j].contents[0])-1
if str(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j+1].contents[0]).endswith("B"):
df_data[data_columns[0]].ix[data_index] = float(str(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j+1].contents[0]).replace('B', ''))*1000
elif soup.find_all("td",{"align":"right", "class":"body-table-nw"})[j+1].contents[0] == '-':
df_data[data_columns[0]].ix[data_index] = 0
else:
df_data[data_columns[0]].ix[data_index] = str(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j+1].contents[0]).replace('M', '')
df_data[data_columns[1]].ix[data_index] = soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j+3].contents[0].contents[0]
df_data[data_columns[2]].ix[data_index] = float(str(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j+4].contents[0].contents[0]).replace('%', ''))
df_data[data_columns[3]].ix[data_index] = long(str(soup.find_all("td", {"align":"right", "class":"body-table-nw"})[j+5].contents[0]).replace(',', ''))
except:
pass
            # wait for a random amount of time between 5 and 60 seconds; the average wait works out to roughly 32 seconds per page.
wait_seconds = random.randint(5, 60)
time.sleep(wait_seconds)
print 'waiting for:' + str(wait_seconds)
df_info.to_csv(file_path +'df_info.csv')
df_data.to_csv(file_path +'df_data.csv')
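    # Hedged usage sketch (parameter values are illustrative, not from the
    # original script): limiting url_end keeps a test run to the first few
    # screener pages.
    #
    #   Scrape().scrape_finviz_codes_overview(url_end=101, file_path='/tmp/')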
def scrape_quandl_cboe_data(self, file_path=SYMBOL_FILES_PATH):
pcratio = self.QUANDL_CBOE + 'totalpc.csv'
        pcratio_file = 'PCRATIO.csv'
skew = self.QUANDL_CBOE + 'Skewdailyprices.csv'
skew_file = 'SKEW.csv'
file_names = {}
        file_names[1] = [pcratio_file, pcratio]
file_names[2] = [skew_file, skew]
for key in file_names:
self.scrape_remote_file(file_names[key][1], file_path, file_names[key][0])
def scrape_quandl_codes_us(self, file_path=SYMBOL_FILES_PATH):
"""
        The ticker code files downloaded by this function are listed at: https://www.quandl.com/resources/useful-lists
:param file_path:Path to store the scraped files
:return:
"""
sp500 = self.QUANDL_INDICES + 'SP500.csv'
sp500_file = 'SP500.csv'
djia = self.QUANDL_INDICES + 'dowjonesIA.csv'
djia_file = 'DJIA.csv'
nasd = self.QUANDL_INDICES + 'NASDAQComposite.csv'
nasd_file = 'NASDAQ.csv'
nasd100 = self.QUANDL_INDICES + 'nasdaq100.csv'
nasd100_file = 'NASD100.csv'
nyse = self.QUANDL_INDICES + 'NYSEComposite.csv'
nyse_file = 'NYSE.csv'
nyse100 = self.QUANDL_INDICES + 'nyse100.csv'
nyse100_file = 'NYSE100.csv'
futures = self.QUANDL_FUTURES + 'meta.csv'
futures_file = 'FUTURES.csv'
commodities = self.QUANDL_COMMODITIES + 'commodities.csv'
commodities_file = 'COMMODITIES.csv'
file_names = {}
file_names[1] = [sp500_file, sp500]
file_names[2] = [djia_file, djia]
file_names[3] = [nasd_file, nasd]
file_names[4] = [nasd100_file, nasd100]
file_names[5] = [nyse_file, nyse]
file_names[6] = [nyse100_file, nyse100]
file_names[7] = [futures_file, futures]
file_names[8] = [commodities_file, commodities]
for key in file_names:
self.scrape_remote_file(file_names[key][1], file_path, file_names[key][0])
if __name__ == '__main__':
scrape = Scrape()
scrape.scrape_finviz_codes_overview()
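# Hedged usage note (not part of the original script): the same Scrape instance
# can also download the Quandl symbol lists and CBOE data files into
# SYMBOL_FILES_PATH, e.g.
#
#   scrape.scrape_quandl_codes_us()
#   scrape.scrape_quandl_cboe_data()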
|
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import shutil
import unittest
import tempfile
import filecmp
from cloudify_cli import constants
from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap
from cloudify_cli.bootstrap import tasks
from cloudify.exceptions import NonRecoverableError
TEST_DIR = '/tmp/cloudify-cli-unit-tests'
class CliBootstrapUnitTests(unittest.TestCase):
"""Unit tests for functions in bootstrap/bootstrap.py"""
def setUp(self):
os.makedirs(TEST_DIR)
test_workdir = tempfile.mkdtemp(dir=TEST_DIR)
utils.get_cwd = lambda: test_workdir
self.bootstrap_dir = os.path.join(test_workdir, '.cloudify',
'bootstrap')
self.manager_dir = os.path.join(self.bootstrap_dir, 'manager')
os.makedirs(self.bootstrap_dir)
os.chdir(test_workdir)
def tearDown(self):
shutil.rmtree(TEST_DIR)
def test_manager_deployment_dump(self, remove_deployment=True):
manager1_original_dir = os.path.join(
os.path.dirname(__file__),
'resources', 'storage', 'manager1')
if not os.path.exists(self.manager_dir):
shutil.copytree(manager1_original_dir, self.manager_dir)
result = bootstrap.dump_manager_deployment()
if remove_deployment:
shutil.rmtree(self.manager_dir)
self.assertTrue(
bootstrap.read_manager_deployment_dump_if_needed(result))
else:
self.assertFalse(
bootstrap.read_manager_deployment_dump_if_needed(result))
comparison = filecmp.dircmp(manager1_original_dir,
self.manager_dir)
self.assertIn('dir1', comparison.common)
self.assertIn('dir2', comparison.common)
self.assertIn('file1', comparison.common)
self.assertIn('file2', comparison.common)
self.assertEqual(comparison.common_funny, [])
self.assertEqual(comparison.diff_files, [])
self.assertEqual(comparison.funny_files, [])
self.assertEqual(comparison.left_only, [])
self.assertEqual(comparison.right_only, [])
def test_manager_deployment_dump_read_empty(self):
self.assertFalse(
bootstrap.read_manager_deployment_dump_if_needed(''))
self.assertFalse(os.path.exists(self.manager_dir))
def test_manager_deployment_dump_read_already_exists(self):
self.test_manager_deployment_dump(remove_deployment=False)
def test_creation_validation_empty_docker_dict(self):
packages = {
"docker": {}
}
try:
tasks.creation_validation(packages)
except NonRecoverableError as ex:
self.assertIn(
'"docker" must be a non-empty dictionary property under '
'"cloudify_packages"', ex.message)
def test_creation_validation_no_docker(self):
packages = {
}
try:
tasks.creation_validation(packages)
except NonRecoverableError as ex:
self.assertIn(
'"docker" must be a non-empty dictionary property under '
'"cloudify_packages"', ex.message)
def test_ssl_configuration_without_cert_path(self):
configurations = {
constants.SSL_ENABLED_PROPERTY_NAME: True,
constants.SSL_CERTIFICATE_PATH_PROPERTY_NAME: '',
constants.SSL_PRIVATE_KEY_PROPERTY_NAME: ''
}
self.assertRaisesRegexp(
NonRecoverableError,
'SSL is enabled => certificate path must be provided',
tasks._handle_ssl_configuration,
configurations)
def test_ssl_configuration_wrong_cert_path(self):
configurations = {
constants.SSL_ENABLED_PROPERTY_NAME: True,
constants.SSL_CERTIFICATE_PATH_PROPERTY_NAME: 'wrong-path',
constants.SSL_PRIVATE_KEY_PROPERTY_NAME: ''
}
self.assertRaisesRegexp(
NonRecoverableError,
'The certificate path \[wrong-path\] does not exist',
tasks._handle_ssl_configuration,
configurations)
def test_ssl_configuration_without_key_path(self):
this_dir = os.path.dirname(os.path.dirname(__file__))
cert_path = os.path.join(this_dir, 'cert.file')
open(cert_path, 'a+').close()
configurations = {
constants.SSL_ENABLED_PROPERTY_NAME: True,
constants.SSL_CERTIFICATE_PATH_PROPERTY_NAME: cert_path,
constants.SSL_PRIVATE_KEY_PROPERTY_NAME: ''
}
try:
self.assertRaisesRegexp(
NonRecoverableError,
'SSL is enabled => private key path must be provided',
tasks._handle_ssl_configuration,
configurations)
finally:
os.remove(cert_path)
def test_ssl_configuration_wrong_key_path(self):
this_dir = os.path.dirname(os.path.dirname(__file__))
cert_path = os.path.join(this_dir, 'cert.file')
open(cert_path, 'a+').close()
configurations = {
constants.SSL_ENABLED_PROPERTY_NAME: True,
constants.SSL_CERTIFICATE_PATH_PROPERTY_NAME: cert_path,
constants.SSL_PRIVATE_KEY_PROPERTY_NAME: 'wrong-path'
}
try:
self.assertRaisesRegexp(
NonRecoverableError,
'The private key path \[wrong-path\] does not exist',
tasks._handle_ssl_configuration,
configurations)
finally:
os.remove(cert_path)
def test_get_install_agent_pkgs_cmd(self):
agent_packages = {
'agent_tar': 'agent.tar.gz',
'agent_deb': 'agent.deb'
}
agents_pkg_path = '/tmp/work_dir'
agents_dest_dir = '/opt/manager/resources/packages'
command = tasks._get_install_agent_pkgs_cmd(
agent_packages, agents_pkg_path, agents_dest_dir)
self.assertIn('curl -O agent.tar.gz', command)
self.assertIn('curl -O agent.deb', command)
self.assertIn('dpkg -i {1}/*.deb && '
'mkdir -p {0}/agents && '
'mv {1}/agent.tar.gz {0}/agents/agent_tar.tar.gz'.format(
agents_dest_dir, agents_pkg_path), command)
def test_get_install_agent_pkgs_cmd_tars_only(self):
agent_packages = {
'agent_tar1': 'agent1.tar.gz',
'agent_tar2': 'agent2.tar.gz',
}
agents_pkg_path = '/tmp/work_dir'
agents_dest_dir = '/opt/manager/resources/packages'
command = tasks._get_install_agent_pkgs_cmd(
agent_packages, agents_pkg_path, agents_dest_dir)
self.assertIn('curl -O agent1.tar.gz', command)
self.assertIn('curl -O agent2.tar.gz', command)
self.assertIn('mv {1}/agent1.tar.gz {0}/agents/agent_tar1.tar.gz'
.format(agents_dest_dir, agents_pkg_path), command)
self.assertIn('mv {1}/agent2.tar.gz {0}/agents/agent_tar2.tar.gz'
.format(agents_dest_dir, agents_pkg_path), command)
def test_get_install_agent_pkgs_cmd_debs_only(self):
agent_packages = {
'agent_deb1': 'agent1.deb',
'agent_deb2': 'agent2.deb',
}
agents_pkg_path = '/tmp/work_dir'
agents_dest_dir = '/opt/manager/resources/packages'
command = tasks._get_install_agent_pkgs_cmd(
agent_packages, agents_pkg_path, agents_dest_dir)
self.assertIn('curl -O agent1.deb', command)
self.assertIn('curl -O agent2.deb', command)
self.assertIn('dpkg -i {1}/*.deb'.format(
agents_dest_dir, agents_pkg_path), command)
|
|
"""All constants related to the ZHA component."""
from __future__ import annotations
import enum
import logging
import bellows.zigbee.application
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
import zigpy_znp.zigbee.application
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.number import DOMAIN as NUMBER
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from .typing import CALLABLE_T
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_NAMES = "endpoint_names"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NEIGHBORS = "neighbors"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ANALOG_OUTPUT = "analog_output"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
PLATFORMS = (
BINARY_SENSOR,
CLIMATE,
COVER,
DEVICE_TRACKER,
FAN,
LIGHT,
LOCK,
NUMBER,
SENSOR,
SWITCH,
)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DEVICE_PAIRING_STATUS = "pairing_status"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
PRESET_SCHEDULE = "schedule"
PRESET_COMPLEX = "complex"
class RadioType(enum.Enum):
"""Possible options for radio type."""
znp = (
"ZNP = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_znp.zigbee.application.ControllerApplication,
)
ezsp = (
"EZSP = Silicon Labs EmberZNet protocol: Elelabs, HUSBZB-1, Telegesis",
bellows.zigbee.application.ControllerApplication,
)
deconz = (
"deCONZ = dresden elektronik deCONZ protocol: ConBee I/II, RaspBee I/II",
zigpy_deconz.zigbee.application.ControllerApplication,
)
ti_cc = (
"Legacy TI_CC = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_cc.zigbee.application.ControllerApplication,
)
zigate = (
"ZiGate = ZiGate Zigbee radios: PiZiGate, ZiGate USB-TTL, ZiGate WiFi",
zigpy_zigate.zigbee.application.ControllerApplication,
)
xbee = (
"XBee = Digi XBee Zigbee radios: Digi XBee Series 2, 2C, 3",
zigpy_xbee.zigbee.application.ControllerApplication,
)
@classmethod
def list(cls) -> list[str]:
"""Return a list of descriptions."""
return [e.description for e in RadioType]
@classmethod
def get_by_description(cls, description: str) -> str:
"""Get radio by description."""
for radio in cls:
if radio.description == description:
return radio.name
raise ValueError
def __init__(self, description: str, controller_cls: CALLABLE_T):
"""Init instance."""
self._desc = description
self._ctrl_cls = controller_cls
@property
def controller(self) -> CALLABLE_T:
"""Return controller class."""
return self._ctrl_cls
@property
def description(self) -> str:
"""Return radio type description."""
return self._desc
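# Illustrative sketch of how the RadioType helpers above relate (the values in
# the comments are paraphrased, not exact):
#
#   RadioType.list()                                          # human readable descriptions
#   RadioType.get_by_description(RadioType.znp.description)   # -> "znp"
#   RadioType.znp.controller                                  # -> zigpy_znp ControllerApplication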
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
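# The REPORT_CONFIG_* tuples below group these values as
# (minimum report interval, maximum report interval, reportable change),
# which appears to mirror the Zigbee attribute reporting configuration.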
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
|
|
"""Task TQL Filter"""
# standard library
from enum import Enum
# first-party
from tcex.api.tc.v3.api_endpoints import ApiEndpoints
from tcex.api.tc.v3.filter_abc import FilterABC
from tcex.api.tc.v3.tql.tql import Tql
from tcex.api.tc.v3.tql.tql_operator import TqlOperator
from tcex.api.tc.v3.tql.tql_type import TqlType
class TaskFilter(FilterABC):
"""Filter Object for Tasks"""
@property
def _api_endpoint(self) -> str:
"""Return the API endpoint."""
return ApiEndpoints.TASKS.value
def assigned_to_user_or_group(self, operator: Enum, assigned_to_user_or_group: str) -> None:
"""Filter Assigned To User or Group based on **assignedToUserOrGroup** keyword.
Args:
operator: The operator enum for the filter.
assigned_to_user_or_group: A value of User, Group, or None depending on the assignee.
"""
self._tql.add_filter(
'assignedToUserOrGroup', operator, assigned_to_user_or_group, TqlType.STRING
)
def assignee_name(self, operator: Enum, assignee_name: str) -> None:
"""Filter Assignee Name based on **assigneeName** keyword.
Args:
operator: The operator enum for the filter.
assignee_name: The user or group name assigned to the Task.
"""
self._tql.add_filter('assigneeName', operator, assignee_name, TqlType.STRING)
def automated(self, operator: Enum, automated: bool) -> None:
"""Filter Automated based on **automated** keyword.
Args:
operator: The operator enum for the filter.
automated: A flag indicating whether or not the task is automated.
"""
self._tql.add_filter('automated', operator, automated, TqlType.BOOLEAN)
def case_id(self, operator: Enum, case_id: int) -> None:
"""Filter Case ID based on **caseId** keyword.
Args:
operator: The operator enum for the filter.
case_id: The ID of the case this Task is associated with.
"""
self._tql.add_filter('caseId', operator, case_id, TqlType.INTEGER)
def case_id_as_string(self, operator: Enum, case_id_as_string: str) -> None:
"""Filter CaseID As String based on **caseIdAsString** keyword.
Args:
operator: The operator enum for the filter.
case_id_as_string: The ID of the case as a String.
"""
self._tql.add_filter('caseIdAsString', operator, case_id_as_string, TqlType.STRING)
def case_severity(self, operator: Enum, case_severity: str) -> None:
"""Filter Case Severity based on **caseSeverity** keyword.
Args:
operator: The operator enum for the filter.
case_severity: The severity of the case associated with the task.
"""
self._tql.add_filter('caseSeverity', operator, case_severity, TqlType.STRING)
def completed_by(self, operator: Enum, completed_by: str) -> None:
"""Filter Completed By based on **completedBy** keyword.
Args:
operator: The operator enum for the filter.
completed_by: The account login of the user who completed the task.
"""
self._tql.add_filter('completedBy', operator, completed_by, TqlType.STRING)
def completed_date(self, operator: Enum, completed_date: str) -> None:
"""Filter Completed Date based on **completedDate** keyword.
Args:
operator: The operator enum for the filter.
completed_date: The completion date for the task.
"""
completed_date = self.utils.any_to_datetime(completed_date).strftime('%Y-%m-%dT%H:%M:%S')
self._tql.add_filter('completedDate', operator, completed_date, TqlType.STRING)
def description(self, operator: Enum, description: str) -> None:
"""Filter Description based on **description** keyword.
Args:
operator: The operator enum for the filter.
description: The description of the task.
"""
self._tql.add_filter('description', operator, description, TqlType.STRING)
def due_date(self, operator: Enum, due_date: str) -> None:
"""Filter Due Date based on **dueDate** keyword.
Args:
operator: The operator enum for the filter.
due_date: The due date for the task.
"""
due_date = self.utils.any_to_datetime(due_date).strftime('%Y-%m-%dT%H:%M:%S')
self._tql.add_filter('dueDate', operator, due_date, TqlType.STRING)
@property
def has_artifact(self):
"""Return **ArtifactFilter** for further filtering."""
# first-party
from tcex.api.tc.v3.artifacts.artifact_filter import ArtifactFilter
artifacts = ArtifactFilter(Tql())
self._tql.add_filter('hasArtifact', TqlOperator.EQ, artifacts, TqlType.SUB_QUERY)
return artifacts
@property
def has_case(self):
"""Return **CaseFilter** for further filtering."""
# first-party
from tcex.api.tc.v3.cases.case_filter import CaseFilter
cases = CaseFilter(Tql())
self._tql.add_filter('hasCase', TqlOperator.EQ, cases, TqlType.SUB_QUERY)
return cases
@property
def has_note(self):
"""Return **NoteFilter** for further filtering."""
# first-party
from tcex.api.tc.v3.notes.note_filter import NoteFilter
notes = NoteFilter(Tql())
self._tql.add_filter('hasNote', TqlOperator.EQ, notes, TqlType.SUB_QUERY)
return notes
def id(self, operator: Enum, id: int) -> None: # pylint: disable=redefined-builtin
"""Filter ID based on **id** keyword.
Args:
operator: The operator enum for the filter.
id: The ID of the task.
"""
self._tql.add_filter('id', operator, id, TqlType.INTEGER)
def name(self, operator: Enum, name: str) -> None:
"""Filter Name based on **name** keyword.
Args:
operator: The operator enum for the filter.
name: The name of the task.
"""
self._tql.add_filter('name', operator, name, TqlType.STRING)
def owner(self, operator: Enum, owner: int) -> None:
"""Filter Owner ID based on **owner** keyword.
Args:
operator: The operator enum for the filter.
owner: The Owner ID for the case.
"""
self._tql.add_filter('owner', operator, owner, TqlType.INTEGER)
def owner_name(self, operator: Enum, owner_name: str) -> None:
"""Filter Owner Name based on **ownerName** keyword.
Args:
operator: The operator enum for the filter.
owner_name: The owner name for the case.
"""
self._tql.add_filter('ownerName', operator, owner_name, TqlType.STRING)
def required(self, operator: Enum, required: bool) -> None:
"""Filter Required based on **required** keyword.
Args:
operator: The operator enum for the filter.
required: Flag indicating whether or not the task is required.
"""
self._tql.add_filter('required', operator, required, TqlType.BOOLEAN)
def status(self, operator: Enum, status: str) -> None:
"""Filter Status based on **status** keyword.
Args:
operator: The operator enum for the filter.
status: The status of the task.
"""
self._tql.add_filter('status', operator, status, TqlType.STRING)
def target_id(self, operator: Enum, target_id: int) -> None:
"""Filter Assignee based on **targetId** keyword.
Args:
operator: The operator enum for the filter.
target_id: The assigned user or group ID for the task.
"""
self._tql.add_filter('targetId', operator, target_id, TqlType.INTEGER)
def target_type(self, operator: Enum, target_type: str) -> None:
"""Filter Target Type based on **targetType** keyword.
Args:
operator: The operator enum for the filter.
target_type: The target type for this task (either User or Group).
"""
self._tql.add_filter('targetType', operator, target_type, TqlType.STRING)
def workflow_phase(self, operator: Enum, workflow_phase: int) -> None:
"""Filter Workflow Phase based on **workflowPhase** keyword.
Args:
operator: The operator enum for the filter.
workflow_phase: The workflow phase of the task.
"""
self._tql.add_filter('workflowPhase', operator, workflow_phase, TqlType.INTEGER)
def workflow_step(self, operator: Enum, workflow_step: int) -> None:
"""Filter Workflow Step based on **workflowStep** keyword.
Args:
operator: The operator enum for the filter.
workflow_step: The workflow step of the task.
"""
self._tql.add_filter('workflowStep', operator, workflow_step, TqlType.INTEGER)
def xid(self, operator: Enum, xid: str) -> None:
"""Filter XID based on **xid** keyword.
Args:
operator: The operator enum for the filter.
xid: The XID of the task.
"""
self._tql.add_filter('xid', operator, xid, TqlType.STRING)
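# A minimal, hypothetical usage sketch (it mirrors the ArtifactFilter(Tql())
# pattern used in the has_artifact property above; the construction used
# elsewhere in tcex may differ):
#
#   task_filter = TaskFilter(Tql())
#   task_filter.name(TqlOperator.EQ, 'Example Task')
#   task_filter.automated(TqlOperator.EQ, False)
#   # each call adds one clause to the underlying TQL query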
|
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import imath
import os
import IECore
import Gaffer
import GafferUI
##########################################################################
# MetadataValueWidgets. These display metadata values, allowing the user
# to edit them.
##########################################################################
class MetadataWidget( GafferUI.Widget ) :
def __init__( self, topLevelWidget, key, target = None, defaultValue = None, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__key = key
self.__target = None
self.__defaultValue = defaultValue
self.setTarget( target )
def setTarget( self, target ) :
assert( isinstance( target, ( Gaffer.Node, Gaffer.Plug, type( None ) ) ) )
self.__target = target
self.setEnabled( self.__target is not None )
if isinstance( self.__target, Gaffer.Node ) :
self.__metadataChangedConnection = Gaffer.Metadata.nodeValueChangedSignal().connect(
Gaffer.WeakMethod( self.__nodeMetadataChanged )
)
elif isinstance( self.__target, Gaffer.Plug ) :
self.__metadataChangedConnection = Gaffer.Metadata.plugValueChangedSignal().connect(
Gaffer.WeakMethod( self.__plugMetadataChanged )
)
else :
self.__metadataChangedConnection = None
self.__update()
def getTarget( self ) :
return self.__target
def setKey( self, key ) :
if key == self.__key :
return
self.__key = key
self.__update()
	def getKey( self ) :
return self.__key
def defaultValue( self ) :
return self.__defaultValue
## Must be implemented in derived classes to update
# the widget from the value.
def _updateFromValue( self, value ) :
raise NotImplementedError
## Must be called by derived classes to update
# the Metadata value when the widget value changes.
def _updateFromWidget( self, value ) :
if self.__target is None :
return
with Gaffer.UndoScope( self.__target.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.registerValue( self.__target, self.__key, value )
## May be called by derived classes to deregister the
# metadata value.
def _deregisterValue( self ) :
if self.__target is None :
return
with Gaffer.UndoScope( self.__target.ancestor( Gaffer.ScriptNode ) ) :
Gaffer.Metadata.deregisterValue( self.__target, self.__key )
def __update( self ) :
if self.__target is None :
self._updateFromValue( self.defaultValue() )
return
v = Gaffer.Metadata.value( self.__target, self.__key )
if v is None :
k = self.__fallbackKey( self.__key )
if k is not None :
v = Gaffer.Metadata.value( self.__target, k )
self._updateFromValue( v if v is not None else self.defaultValue() )
def __nodeMetadataChanged( self, nodeTypeId, key, node ) :
if self.__key != key :
return
if node is not None and not node.isSame( self.__target ) :
return
if not self.__target.isInstanceOf( nodeTypeId ) :
return
self.__update()
def __plugMetadataChanged( self, nodeTypeId, plugPath, key, plug ) :
if self.__key != key :
return
if Gaffer.MetadataAlgo.affectedByChange( self.__target, nodeTypeId, plugPath, plug ) :
self.__update()
@staticmethod
def __fallbackKey( k ) :
for oldPrefix, newPrefix in [
( "pathPlugValueWidget:", "path:" ),
( "fileSystemPathPlugValueWidget:", "fileSystemPath:" ),
] :
if k.startswith( newPrefix ) :
return k.replace( newPrefix, oldPrefix )
return None
class BoolMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = False, **kw ) :
self.__boolWidget = GafferUI.BoolWidget()
MetadataWidget.__init__( self, self.__boolWidget, key, target, defaultValue = defaultValue, **kw )
self.__boolWidget.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__stateChanged ), scoped = False
)
def _updateFromValue( self, value ) :
self.__boolWidget.setState( value )
def __stateChanged( self, *unused ) :
self._updateFromWidget( self.__boolWidget.getState() )
class StringMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = "", acceptEmptyString = True, **kw ) :
self.__textWidget = GafferUI.TextWidget()
MetadataWidget.__init__( self, self.__textWidget, key, target, defaultValue = defaultValue, **kw )
self.__acceptEmptyString = acceptEmptyString
self.__textWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished ), scoped = False
)
def textWidget( self ) :
return self.__textWidget
def _updateFromValue( self, value ) :
self.__textWidget.setText( str( value ) )
def __editingFinished( self, *unused ) :
text = self.__textWidget.getText()
if text or self.__acceptEmptyString :
self._updateFromWidget( text )
else :
self._deregisterValue()
class MultiLineStringMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = "", role = GafferUI.MultiLineTextWidget.Role.Text, **kw ) :
self.__textWidget = GafferUI.MultiLineTextWidget( role = role )
MetadataWidget.__init__( self, self.__textWidget, key, target, defaultValue = defaultValue, **kw )
self.__textWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished ), scoped = False
)
def textWidget( self ) :
return self.__textWidget
def _updateFromValue( self, value ) :
self.__textWidget.setText( str( value ) )
def __editingFinished( self, *unused ) :
self._updateFromWidget( self.__textWidget.getText() )
class ColorSwatchMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, defaultValue = imath.Color4f( 0, 0, 0, 0 ), **kw ) :
self.__swatch = GafferUI.ColorSwatch( useDisplayTransform = False )
MetadataWidget.__init__( self, self.__swatch, key, target, defaultValue = defaultValue, **kw )
self.__swatch._qtWidget().setFixedHeight( 18 )
self.__swatch._qtWidget().setMaximumWidth( 40 )
self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
def _updateFromValue( self, value ) :
self.__swatch.setColor( value )
def __buttonRelease( self, swatch, event ) :
if event.button != event.Buttons.Left :
return False
dialogue = GafferUI.ColorChooserDialogue( color = self.__swatch.getColor(), useDisplayTransform = False )
color = dialogue.waitForColor( parentWindow = self.ancestor( GafferUI.Window ) )
if color is not None :
self._updateFromWidget( color )
class MenuMetadataWidget( MetadataWidget ) :
def __init__( self, key, labelsAndValues, target = None, defaultValue = None, **kw ) :
self.__menuButton = GafferUI.MenuButton(
menu = GafferUI.Menu( Gaffer.WeakMethod( self.__menuDefinition ) )
)
self.__labelsAndValues = labelsAndValues
self.__currentValue = None
MetadataWidget.__init__( self, self.__menuButton, key, target, defaultValue = defaultValue, **kw )
def _updateFromValue( self, value ) :
self.__currentValue = value
buttonText = str( value )
for label, value in self.__labelsAndValues :
if value == self.__currentValue :
buttonText = label
break
self.__menuButton.setText( buttonText )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
for label, value in self.__labelsAndValues :
result.append(
"/" + label,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__setValue ), value = value ),
"checkBox" : value == self.__currentValue
}
)
return result
def __setValue( self, unused, value ) :
self._updateFromWidget( value )
class FileSystemPathMetadataWidget( MetadataWidget ) :
def __init__( self, key, target = None, acceptEmptyString = True, defaultValue = "", **kw ) :
self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
self.__path = Gaffer.FileSystemPath()
self.__pathWidget = GafferUI.PathWidget( self.__path )
MetadataWidget.__init__( self, self.__row, key, target, defaultValue = defaultValue, **kw )
self.__row.append( self.__pathWidget )
button = GafferUI.Button( image = "pathChooser.png", hasFrame=False )
button.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ), scoped = False )
self.__row.append( button )
self.__acceptEmptyString = acceptEmptyString
self.__pathWidget.editingFinishedSignal().connect(
Gaffer.WeakMethod( self.__editingFinished ), scoped = False
)
def _updateFromValue( self, value ) :
self.__path.setFromString( str( value ) )
def __editingFinished( self, *unused ) :
text = str( self.__path )
if text or self.__acceptEmptyString :
self._updateFromWidget( text )
else :
self._deregisterValue()
def __buttonClicked( self, widget ) :
path = str( self.__path )
path = path if os.path.exists( path ) else os.path.expanduser( "~" )
dialogue = GafferUI.PathChooserDialogue( Gaffer.FileSystemPath( path ) )
chosenPath = dialogue.waitForPath( parentWindow = self.ancestor( GafferUI.Window ) )
if chosenPath is not None :
self.__path.setFromString( str( chosenPath ) )
self.__editingFinished()
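# Illustrative sketch (the node and key are hypothetical, not from this file):
# each widget binds a metadata key to a target node or plug, and edits are
# written back through Gaffer.Metadata.registerValue() in _updateFromWidget(), e.g.
#
#   node = Gaffer.Node()
#   widget = StringMetadataWidget( key = "description", target = node )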
|
|
"""Read records from normal file and compressed file"""
import zlib
import gzip
import re
from hanzo.warctools.archive_detect import is_gzip_file, guess_record_type
def open_record_stream(record_class=None, filename=None, file_handle=None,
mode="rb+", gzip="auto"):
"""Can take a filename or a file_handle. Normally called
indirectly from A record class i.e WarcRecord.open_archive. If the
first parameter is None, will try to guess"""
if file_handle is None:
file_handle = open(filename, mode=mode)
else:
if not filename:
filename = file_handle.name
if record_class == None:
record_class = guess_record_type(file_handle)
if record_class == None:
raise StandardError('Failed to guess compression')
record_parser = record_class.make_parser()
if gzip == 'auto':
if is_gzip_file(file_handle):
gzip = 'record'
#debug('autodetect: record gzip')
else:
# assume uncompressed file
#debug('autodetected: uncompressed file')
gzip = None
if gzip == 'record':
return GzipRecordStream(file_handle, record_parser)
elif gzip == 'file':
return GzipFileStream(file_handle, record_parser)
else:
return RecordStream(file_handle, record_parser)
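# Hedged usage sketch (the record class and filename are illustrative; the
# docstring above suggests going through a record class such as
# WarcRecord.open_archive instead):
#
#   stream = open_record_stream(WarcRecord, "example.warc.gz")
#   for record in stream:
#       ...  # each iteration yields one parsed archive record
#   stream.close()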
class RecordStream(object):
"""A readable/writable stream of Archive Records. Can be iterated over
or read_records can give more control, and potentially offset information.
"""
def __init__(self, file_handle, record_parser):
self.fh = file_handle
self.record_parser = record_parser
self._parser = None
def seek(self, offset, pos=0):
"""Same as a seek on a file"""
self.fh.seek(offset, pos)
def read_records(self, limit=1, offsets=True):
"""Yield a tuple of (offset, record, errors) where
Offset is either a number or None.
Record is an object and errors is an empty list
or record is none and errors is a list"""
nrecords = 0
while nrecords < limit or limit is None:
offset, record, errors = self._read_record(offsets)
nrecords += 1
yield (offset, record, errors)
if not record:
break
def __iter__(self):
while True:
_, record, errors = self._read_record(offsets=False)
if record:
yield record
elif errors:
error_str = ",".join(str(error) for error in errors)
raise StandardError("Errors while decoding %s" % error_str)
else:
break
def _read_record(self, offsets):
"""overridden by sub-classes to read individual records"""
offset = self.fh.tell() if offsets else None
record, errors, offset = self.record_parser.parse(self.fh, offset)
return offset, record, errors
def write(self, record):
"""Writes an archive record to the stream"""
record.write_to(self)
def close(self):
"""Close the underlying file handle."""
self.fh.close()
class GzipRecordStream(RecordStream):
"""A stream to read/write concatted file made up of gzipped
archive records"""
def __init__(self, file_handle, record_parser):
RecordStream.__init__(self, file_handle, record_parser)
self.gz = None
def _read_record(self, offsets):
errors = []
if self.gz is not None:
# we have an open record, so try for a record at the end
# at best will read trailing newlines at end of last record
record, r_errors, _offset = \
self.record_parser.parse(self.gz, offset=None)
if record:
record.error('multiple warc records in gzip record file')
return None, record, errors
self.gz.close()
errors.extend(r_errors)
offset = self.fh.tell() if offsets else None
self.gz = GzipRecordFile(self.fh)
record, r_errors, _offset = \
self.record_parser.parse(self.gz, offset=None)
errors.extend(r_errors)
return offset, record, errors
class GzipFileStream(RecordStream):
"""A stream to read/write gzipped file made up of all archive records"""
def __init__(self, file_handle, record):
RecordStream.__init__(self, gzip.GzipFile(fileobj=file_handle), record)
def _read_record(self, offsets):
        # no real offsets in a gzipped file (no separate records)
return RecordStream._read_record(self, False)
### record-gzip handler, based on zlib
### implements readline() access over a single
### gzip record. Must be re-created to read another record.
CHUNK_SIZE = 1024  # the size to read in; make this bigger and things go faster.
line_rx = re.compile('^(?P<line>^[^\r\n]*(?:\r\n|\r(?!\n)|\n))(?P<tail>.*)$',
re.DOTALL)
class GzipRecordFile(object):
"""A file like class providing 'readline' over catted gzip'd records"""
def __init__(self, fh):
self.fh = fh
self.buffer = ""
self.z = zlib.decompressobj(16+zlib.MAX_WBITS)
self.done = False
def _getline(self):
if self.buffer:
#a,nl,b
match = line_rx.match(self.buffer)
#print match
# print 'split:', split[0],split[1], len(split[2])
if match:
output = match.group('line')
self.buffer = ""+match.group('tail')
return output
elif self.done:
output = self.buffer
self.buffer = ""
return output
def readline(self):
while True:
output = self._getline()
if output:
return output
if self.done:
return ""
#print 'read chunk at', self.fh.tell(), self.done
chunk = self.fh.read(CHUNK_SIZE)
out = self.z.decompress(chunk)
# if we hit a \r on reading a chunk boundary, read a little more
# in case there is a following \n
while out.endswith('\r') and not self.z.unused_data:
chunk = self.fh.read(CHUNK_SIZE)
if not chunk:
break
tail = self.z.decompress(chunk)
if tail:
out+=tail
break
if out:
self.buffer += out
if self.z.unused_data:
#print 'unused', len(self.z.unused_data)
self.fh.seek(-len(self. z.unused_data), 1)
self.done = True
continue
if not chunk:
self.done = True
continue
def close(self):
if self.z:
self.z.flush()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dataproc.v1beta2 ClusterController API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import grpc
from google.cloud.dataproc_v1beta2.gapic import cluster_controller_client_config
from google.cloud.dataproc_v1beta2.gapic import enums
from google.cloud.dataproc_v1beta2.gapic.transports import cluster_controller_grpc_transport
from google.cloud.dataproc_v1beta2.proto import clusters_pb2
from google.cloud.dataproc_v1beta2.proto import clusters_pb2_grpc
from google.cloud.dataproc_v1beta2.proto import operations_pb2 as proto_operations_pb2
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
'google-cloud-dataproc', ).version
class ClusterControllerClient(object):
"""
The ClusterControllerService provides methods to manage clusters
of Compute Engine instances.
"""
SERVICE_ADDRESS = 'dataproc.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.cloud.dataproc.v1beta2.ClusterController'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ClusterControllerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
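    # Hedged usage sketch (the key file path is illustrative):
    #
    #   client = ClusterControllerClient.from_service_account_file(
    #       'service-account.json')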
def __init__(self,
transport=None,
channel=None,
credentials=None,
client_config=cluster_controller_client_config.config,
client_info=None):
"""Constructor.
Args:
transport (Union[~.ClusterControllerGrpcTransport,
Callable[[~.Credentials, type], ~.ClusterControllerGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config:
warnings.warn('The `client_config` argument is deprecated.',
PendingDeprecationWarning)
if channel:
warnings.warn(
'The `channel` argument is deprecated; use '
'`transport` instead.', PendingDeprecationWarning)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=cluster_controller_grpc_transport.
ClusterControllerGrpcTransport,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.')
self.transport = transport
else:
self.transport = cluster_controller_grpc_transport.ClusterControllerGrpcTransport(
address=self.SERVICE_ADDRESS,
channel=channel,
credentials=credentials,
)
if client_info is None:
client_info = (
google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME], )
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def create_cluster(self,
project_id,
region,
cluster,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Creates a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize ``project_id``:
>>> project_id = ''
>>>
>>> # TODO: Initialize ``region``:
>>> region = ''
>>>
>>> # TODO: Initialize ``cluster``:
>>> cluster = {}
>>>
>>> response = client.create_cluster(project_id, region, cluster)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The cluster to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``CreateClusterRequest`` requests with the same
id, then the second request will be ignored and the
first ``google.longrunning.Operation`` created and stored in the backend
is returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`_.
The id must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'create_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_cluster,
default_retry=self._method_configs['CreateCluster'].retry,
default_timeout=self._method_configs['CreateCluster'].
timeout,
client_info=self._client_info,
)
request = clusters_pb2.CreateClusterRequest(
project_id=project_id,
region=region,
cluster=cluster,
request_id=request_id,
)
operation = self._inner_api_calls['create_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def update_cluster(self,
project_id,
region,
cluster_name,
cluster,
update_mask,
graceful_decommission_timeout=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Updates a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize ``project_id``:
>>> project_id = ''
>>>
>>> # TODO: Initialize ``region``:
>>> region = ''
>>>
>>> # TODO: Initialize ``cluster_name``:
>>> cluster_name = ''
>>>
>>> # TODO: Initialize ``cluster``:
>>> cluster = {}
>>>
>>> # TODO: Initialize ``update_mask``:
>>> update_mask = {}
>>>
>>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project the
cluster belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
cluster (Union[dict, ~google.cloud.dataproc_v1beta2.types.Cluster]): Required. The changes to the cluster.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Cluster`
update_mask (Union[dict, ~google.cloud.dataproc_v1beta2.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of
the field to update. For example, to change the number of workers
in a cluster to 5, the ``update_mask`` parameter would be
specified as ``config.worker_config.num_instances``,
and the ``PATCH`` request body would specify the new value, as follows:
::
{
\"config\":{
\"workerConfig\":{
\"numInstances\":\"5\"
}
}
}
Similarly, to change the number of preemptible workers in a cluster to 5, the
``update_mask`` parameter would be ``config.secondary_worker_config.num_instances``,
and the ``PATCH`` request body would be set as follows:
::
{
\"config\":{
\"secondaryWorkerConfig\":{
\"numInstances\":\"5\"
}
}
}
.. note::
Currently, only the following fields can be updated:
* ``labels``: Update labels
* ``config.worker_config.num_instances``: Resize primary
worker group
* ``config.secondary_worker_config.num_instances``: Resize
secondary worker group
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.FieldMask`
graceful_decommission_timeout (Union[dict, ~google.cloud.dataproc_v1beta2.types.Duration]): Optional. Timeout for graceful YARN decommissioning. Graceful
decommissioning allows removing nodes from the cluster without
interrupting jobs in progress. Timeout specifies how long to wait for jobs
in progress to finish before forcefully removing nodes (and potentially
interrupting jobs). Default timeout is 0 (for forceful decommission), and
the maximum allowed timeout is 1 day.
Only supported on Dataproc image versions 1.2 and higher.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dataproc_v1beta2.types.Duration`
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``UpdateClusterRequest`` requests with the same
id, then the second request will be ignored and the
first ``google.longrunning.Operation`` created and stored in the
backend is returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`_.
The id must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'update_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_cluster,
default_retry=self._method_configs['UpdateCluster'].retry,
default_timeout=self._method_configs['UpdateCluster'].
timeout,
client_info=self._client_info,
)
request = clusters_pb2.UpdateClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster=cluster,
update_mask=update_mask,
graceful_decommission_timeout=graceful_decommission_timeout,
request_id=request_id,
)
operation = self._inner_api_calls['update_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
clusters_pb2.Cluster,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def delete_cluster(self,
project_id,
region,
cluster_name,
cluster_uuid=None,
request_id=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Deletes a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize ``project_id``:
>>> project_id = ''
>>>
>>> # TODO: Initialize ``region``:
>>> region = ''
>>>
>>> # TODO: Initialize ``cluster_name``:
>>> cluster_name = ''
>>>
>>> response = client.delete_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
cluster_uuid (str): Optional. Specifying the ``cluster_uuid`` means the RPC should fail
(with error NOT_FOUND) if a cluster with the specified UUID does not exist.
request_id (str): Optional. A unique id used to identify the request. If the server
receives two ``DeleteClusterRequest`` requests with the same
id, then the second request will be ignored and the
first ``google.longrunning.Operation`` created and stored in the
backend is returned.
It is recommended to always set this value to a
`UUID <https://en.wikipedia.org/wiki/Universally_unique_identifier>`_.
The id must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40 characters.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'delete_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_cluster,
default_retry=self._method_configs['DeleteCluster'].retry,
default_timeout=self._method_configs['DeleteCluster'].
timeout,
client_info=self._client_info,
)
request = clusters_pb2.DeleteClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
cluster_uuid=cluster_uuid,
request_id=request_id,
)
operation = self._inner_api_calls['delete_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.ClusterOperationMetadata,
)
def get_cluster(self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets the resource representation for a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize ``project_id``:
>>> project_id = ''
>>>
>>> # TODO: Initialize ``region``:
>>> region = ''
>>>
>>> # TODO: Initialize ``cluster_name``:
>>> cluster_name = ''
>>>
>>> response = client.get_cluster(project_id, region, cluster_name)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'get_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs['GetCluster'].retry,
default_timeout=self._method_configs['GetCluster'].timeout,
client_info=self._client_info,
)
request = clusters_pb2.GetClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
)
return self._inner_api_calls['get_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
def list_clusters(self,
project_id,
region,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Lists all regions/{region}/clusters in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize ``project_id``:
>>> project_id = ''
>>>
>>> # TODO: Initialize ``region``:
>>> region = ''
>>>
>>> # Iterate over all results
>>> for element in client.list_clusters(project_id, region):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_clusters(project_id, region, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
filter_ (str): Optional. A filter constraining the clusters to list. Filters are
case-sensitive and have the following syntax:
field = value [AND [field = value]] ...
where **field** is one of ``status.state``, ``clusterName``, or ``labels.[KEY]``,
and ``[KEY]`` is a label key. **value** can be ``*`` to match all values.
``status.state`` can be one of the following: ``ACTIVE``, ``INACTIVE``,
``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE``
contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` states. ``INACTIVE``
contains the ``DELETING`` and ``ERROR`` states.
``clusterName`` is the name of the cluster provided at creation time.
Only the logical ``AND`` operator is supported; space-separated items are
treated as having an implicit ``AND`` operator.
Example filter:
status.state = ACTIVE AND clusterName = mycluster
AND labels.env = staging AND labels.starred = *
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_clusters' not in self._inner_api_calls:
self._inner_api_calls[
'list_clusters'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_clusters,
default_retry=self._method_configs['ListClusters'].retry,
default_timeout=self._method_configs['ListClusters'].
timeout,
client_info=self._client_info,
)
request = clusters_pb2.ListClustersRequest(
project_id=project_id,
region=region,
filter=filter_,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_clusters'],
retry=retry,
timeout=timeout,
metadata=metadata),
request=request,
items_field='clusters',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def diagnose_cluster(self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Gets cluster diagnostic information.
After the operation completes, the Operation.response field
contains ``DiagnoseClusterOutputLocation``.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize ``project_id``:
>>> project_id = ''
>>>
>>> # TODO: Initialize ``region``:
>>> region = ''
>>>
>>> # TODO: Initialize ``cluster_name``:
>>> cluster_name = ''
>>>
>>> response = client.diagnose_cluster(project_id, region, cluster_name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'diagnose_cluster' not in self._inner_api_calls:
self._inner_api_calls[
'diagnose_cluster'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.diagnose_cluster,
default_retry=self._method_configs['DiagnoseCluster'].
retry,
default_timeout=self._method_configs['DiagnoseCluster'].
timeout,
client_info=self._client_info,
)
request = clusters_pb2.DiagnoseClusterRequest(
project_id=project_id,
region=region,
cluster_name=cluster_name,
)
operation = self._inner_api_calls['diagnose_cluster'](
request, retry=retry, timeout=timeout, metadata=metadata)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=clusters_pb2.DiagnoseClusterResults,
)
|
|
# lint as python3
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
# pylint: disable=superfluous-parens
import functools
from absl import logging
import autograd
import autograd.numpy as np
from neural_structural_optimization import models
from neural_structural_optimization import topo_physics
import scipy.optimize
import tensorflow as tf
import xarray
def optimizer_result_dataset(losses, frames, save_intermediate_designs=False):
# The best design will often but not always be the final one.
best_design = np.nanargmin(losses)
logging.info(f'Final loss: {losses[best_design]}')
if save_intermediate_designs:
ds = xarray.Dataset({
'loss': (('step',), losses),
'design': (('step', 'y', 'x'), frames),
}, coords={'step': np.arange(len(losses))})
else:
ds = xarray.Dataset({
'loss': (('step',), losses),
'design': (('y', 'x'), frames[best_design]),
}, coords={'step': np.arange(len(losses))})
return ds
def train_tf_optimizer(
model, max_iterations, optimizer, save_intermediate_designs=True,
):
loss = 0
model(None) # build model, if not built
tvars = model.trainable_variables
losses = []
frames = []
for i in range(max_iterations + 1):
with tf.GradientTape() as t:
t.watch(tvars)
logits = model(None)
loss = model.loss(logits)
losses.append(loss.numpy().item())
frames.append(logits.numpy())
if i % (max_iterations // 10) == 0:
logging.info(f'step {i}, loss {losses[-1]:.2f}')
if i < max_iterations:
grads = t.gradient(loss, tvars)
optimizer.apply_gradients(zip(grads, tvars))
designs = [model.env.render(x, volume_contraint=True) for x in frames]
return optimizer_result_dataset(np.array(losses), np.array(designs),
save_intermediate_designs)
train_adam = functools.partial(
train_tf_optimizer, optimizer=tf.keras.optimizers.Adam(1e-2))
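# Illustrative usage of the Adam training loop above (a hedged sketch; the
# model object and its construction come from the `models` module):
#   ds = train_adam(model, max_iterations=100)
#   best_design = ds['design']  # xarray Dataset built by optimizer_result_dataset
# The helpers below flatten/unflatten trainable variables so that optimizers
# expecting flat float64 NumPy vectors (scipy's L-BFGS, nlopt's MMA) can
# drive the TensorFlow model.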
def _set_variables(variables, x):
shapes = [v.shape.as_list() for v in variables]
values = tf.split(x, [np.prod(s) for s in shapes])
for var, value in zip(variables, values):
var.assign(tf.reshape(tf.cast(value, var.dtype), var.shape))
def _get_variables(variables):
return np.concatenate([
v.numpy().ravel() if not isinstance(v, np.ndarray) else v.ravel()
for v in variables])
def train_lbfgs(
model, max_iterations, save_intermediate_designs=True, init_model=None,
**kwargs
):
model(None) # build model, if not built
losses = []
frames = []
if init_model is not None:
if not isinstance(model, models.PixelModel):
raise TypeError('can only use init_model for initializing a PixelModel')
model.z.assign(tf.cast(init_model(None), model.z.dtype))
tvars = model.trainable_variables
def value_and_grad(x):
_set_variables(tvars, x)
with tf.GradientTape() as t:
t.watch(tvars)
logits = model(None)
loss = model.loss(logits)
grads = t.gradient(loss, tvars)
frames.append(logits.numpy().copy())
losses.append(loss.numpy().copy())
return float(loss.numpy()), _get_variables(grads).astype(np.float64)
x0 = _get_variables(tvars).astype(np.float64)
# rely upon the step limit instead of error tolerance for finishing.
_, _, info = scipy.optimize.fmin_l_bfgs_b(
value_and_grad, x0, maxfun=max_iterations, factr=1, pgtol=1e-14, **kwargs
)
logging.info(info)
designs = [model.env.render(x, volume_contraint=True) for x in frames]
return optimizer_result_dataset(
np.array(losses), np.array(designs), save_intermediate_designs)
def constrained_logits(init_model):
"""Produce matching initial conditions with volume constraints applied."""
logits = init_model(None).numpy().astype(np.float64).squeeze(axis=0)
return topo_physics.physical_density(
logits, init_model.env.args, volume_contraint=True, cone_filter=False)
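# Method of Moving Asymptotes baseline via nlopt's LD_MMA: autograd supplies
# objective/constraint gradients, designs are box-constrained to [0, 1], and
# env.constraint provides the single inequality constraint.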
def method_of_moving_asymptotes(
model, max_iterations, save_intermediate_designs=True, init_model=None,
):
import nlopt # pylint: disable=g-import-not-at-top
if not isinstance(model, models.PixelModel):
raise ValueError('MMA only defined for pixel models')
env = model.env
if init_model is None:
x0 = _get_variables(model.trainable_variables).astype(np.float64)
else:
x0 = constrained_logits(init_model).ravel()
def objective(x):
return env.objective(x, volume_contraint=False)
def constraint(x):
return env.constraint(x)
def wrap_autograd_func(func, losses=None, frames=None):
def wrapper(x, grad):
if grad.size > 0:
value, grad[:] = autograd.value_and_grad(func)(x)
else:
value = func(x)
if losses is not None:
losses.append(value)
if frames is not None:
frames.append(env.reshape(x).copy())
return value
return wrapper
losses = []
frames = []
opt = nlopt.opt(nlopt.LD_MMA, x0.size)
opt.set_lower_bounds(0.0)
opt.set_upper_bounds(1.0)
opt.set_min_objective(wrap_autograd_func(objective, losses, frames))
opt.add_inequality_constraint(wrap_autograd_func(constraint), 1e-8)
opt.set_maxeval(max_iterations + 1)
opt.optimize(x0)
designs = [env.render(x, volume_contraint=False) for x in frames]
return optimizer_result_dataset(np.array(losses), np.array(designs),
save_intermediate_designs)
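# Classic optimality-criteria baseline: iterates topo_physics'
# optimality_criteria_step directly on the pixel design variables and stops
# early if the loss becomes NaN.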
def optimality_criteria(
model, max_iterations, save_intermediate_designs=True, init_model=None,
):
if not isinstance(model, models.PixelModel):
raise ValueError('optimality criteria only defined for pixel models')
env = model.env
if init_model is None:
x = _get_variables(model.trainable_variables).astype(np.float64)
else:
x = constrained_logits(init_model).ravel()
# start with the first frame but not its loss, since optimality_criteria_step
# returns the current loss and the *next* design.
losses = []
frames = [x]
for _ in range(max_iterations):
c, x = topo_physics.optimality_criteria_step(x, env.ke, env.args)
losses.append(c)
if np.isnan(c):
# no point in continuing to optimize
break
frames.append(x)
losses.append(env.objective(x, volume_contraint=False))
designs = [env.render(x, volume_contraint=False) for x in frames]
return optimizer_result_dataset(np.array(losses), np.array(designs),
save_intermediate_designs)
def train_batch(model_list, flag_values, train_func=train_adam):
batch_hist = []
for batch_ix in range(flag_values.trials):
logging.info(f'Starting trial {batch_ix}')
history = train_func(model_list[batch_ix], flag_values)
batch_hist.append(history)
batch_hist = xarray.concat(batch_hist, dim='batch')
return batch_hist
|
|
# -*- encoding: utf8 -*-
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
from io import BytesIO
from mapproxy.compat.image import Image, ImageDraw
from mapproxy.image import ImageSource, ReadBufWrapper, is_single_color_image
from mapproxy.image import peek_image_format
from mapproxy.image.merge import merge_images
from mapproxy.image import _make_transparent as make_transparent, SubImageSource, img_has_transparency, quantize
from mapproxy.image.opts import ImageOptions
from mapproxy.image.tile import TileMerger, TileSplitter
from mapproxy.image.transform import ImageTransformer
from mapproxy.test.image import is_png, is_jpeg, is_tiff, create_tmp_image_file, check_format, create_debug_img, create_image
from mapproxy.srs import SRS
from nose.tools import eq_
from mapproxy.test.image import assert_img_colors_eq
from nose.plugins.skip import SkipTest
PNG_FORMAT = ImageOptions(format='image/png')
JPEG_FORMAT = ImageOptions(format='image/jpeg')
TIFF_FORMAT = ImageOptions(format='image/tiff')
class TestImageSource(object):
def setup(self):
self.tmp_filename = create_tmp_image_file((100, 100))
def teardown(self):
os.remove(self.tmp_filename)
def test_from_filename(self):
ir = ImageSource(self.tmp_filename, PNG_FORMAT)
assert is_png(ir.as_buffer())
assert ir.as_image().size == (100, 100)
def test_from_file(self):
with open(self.tmp_filename, 'rb') as tmp_file:
ir = ImageSource(tmp_file, 'png')
assert ir.as_buffer() == tmp_file
assert ir.as_image().size == (100, 100)
def test_from_image(self):
img = Image.new('RGBA', (100, 100))
ir = ImageSource(img, (100, 100), PNG_FORMAT)
assert ir.as_image() == img
assert is_png(ir.as_buffer())
def test_from_non_seekable_file(self):
with open(self.tmp_filename, 'rb') as tmp_file:
data = tmp_file.read()
class FileLikeDummy(object):
# "file" without seek, like urlopen response
def read(self):
return data
ir = ImageSource(FileLikeDummy(), 'png')
assert ir.as_buffer(seekable=True).read() == data
assert ir.as_image().size == (100, 100)
assert ir.as_buffer().read() == data
def test_output_formats(self):
img = Image.new('RGB', (100, 100))
for format in ['png', 'gif', 'tiff', 'jpeg', 'GeoTIFF', 'bmp']:
ir = ImageSource(img, (100, 100), image_opts=ImageOptions(format=format))
yield check_format, ir.as_buffer(), format
def test_converted_output(self):
ir = ImageSource(self.tmp_filename, (100, 100), PNG_FORMAT)
assert is_png(ir.as_buffer())
assert is_jpeg(ir.as_buffer(JPEG_FORMAT))
assert is_jpeg(ir.as_buffer())
assert is_tiff(ir.as_buffer(TIFF_FORMAT))
assert is_tiff(ir.as_buffer())
def test_output_formats_png8(self):
img = Image.new('RGBA', (100, 100))
ir = ImageSource(img, image_opts=PNG_FORMAT)
img = Image.open(ir.as_buffer(ImageOptions(colors=256, transparent=True, format='image/png')))
assert img.mode == 'P'
assert img.getpixel((0, 0)) == 255
def test_output_formats_png24(self):
img = Image.new('RGBA', (100, 100))
image_opts = PNG_FORMAT.copy()
image_opts.colors = 0 # TODO image_opts
ir = ImageSource(img, image_opts=image_opts)
img = Image.open(ir.as_buffer())
eq_(img.mode, 'RGBA')
assert img.getpixel((0, 0)) == (0, 0, 0, 0)
class TestSubImageSource(object):
def test_full(self):
sub_img = create_image((100, 100), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_larger(self):
sub_img = create_image((150, 150), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(0, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_negative_offset(self):
sub_img = create_image((150, 150), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(-50, 0), image_opts=ImageOptions()).as_image()
eq_(img.getcolors(), [(100*100, (100, 120, 130, 140))])
def test_overlap_right(self):
sub_img = create_image((50, 50), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(75, 25), image_opts=ImageOptions(transparent=True)).as_image()
eq_(sorted(img.getcolors()), [(25*50, (100, 120, 130, 140)), (100*100-25*50, (255, 255, 255, 0))])
def test_outside(self):
sub_img = create_image((50, 50), color=[100, 120, 130, 140])
img = SubImageSource(sub_img, size=(100, 100), offset=(200, 0), image_opts=ImageOptions(transparent=True)).as_image()
eq_(img.getcolors(), [(100*100, (255, 255, 255, 0))])
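# Minimal read-only, non-seekable file-like object: read() pops the buffered
# data and iteration drains it, which lets the tests below exercise
# ReadBufWrapper's buffering and seek behaviour.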
class ROnly(object):
def __init__(self):
self.data = [b'Hello World!']
def read(self):
if self.data:
return self.data.pop()
return b''
def __iter__(self):
it = iter(self.data)
self.data = []
return it
class TestReadBufWrapper(object):
def setup(self):
rbuf = ROnly()
self.rbuf_wrapper = ReadBufWrapper(rbuf)
def test_read(self):
assert self.rbuf_wrapper.read() == b'Hello World!'
self.rbuf_wrapper.seek(0)
eq_(self.rbuf_wrapper.read(), b'')
def test_seek_read(self):
self.rbuf_wrapper.seek(0)
assert self.rbuf_wrapper.read() == b'Hello World!'
self.rbuf_wrapper.seek(0)
assert self.rbuf_wrapper.read() == b'Hello World!'
def test_iter(self):
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [])
def test_seek_iter(self):
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
self.rbuf_wrapper.seek(0)
data = list(self.rbuf_wrapper)
eq_(data, [b'Hello World!'])
def test_hasattr(self):
assert hasattr(self.rbuf_wrapper, 'seek')
assert hasattr(self.rbuf_wrapper, 'readline')
class TestMergeAll(object):
def setup(self):
self.cleanup_tiles = []
def test_full_merge(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions()
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
def test_one(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100))]
self.tiles = [ImageSource(self.cleanup_tiles[0])]
m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
img_opts = ImageOptions(transparent=True)
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (100, 100))
eq_(img.mode, 'RGBA')
def test_missing_tiles(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100))]
self.tiles = [ImageSource(self.cleanup_tiles[0])]
self.tiles.extend([None]*8)
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions()
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
eq_(img.getcolors(), [(80000, (255, 255, 255)), (10000, (0, 0, 0)), ])
def test_invalid_tile(self):
self.cleanup_tiles = [create_tmp_image_file((100, 100)) for _ in range(9)]
self.tiles = [ImageSource(tile) for tile in self.cleanup_tiles]
invalid_tile = self.tiles[0].source
with open(invalid_tile, 'wb') as tmp:
tmp.write(b'invalid')
m = TileMerger(tile_grid=(3, 3), tile_size=(100, 100))
img_opts = ImageOptions(bgcolor=(200, 0, 50))
result = m.merge(self.tiles, img_opts)
img = result.as_image()
eq_(img.size, (300, 300))
eq_(img.getcolors(), [(10000, (200, 0, 50)), (80000, (0, 0, 0))])
assert not os.path.isfile(invalid_tile)
def test_none_merge(self):
tiles = [None]
m = TileMerger(tile_grid=(1, 1), tile_size=(100, 100))
img_opts = ImageOptions(mode='RGBA', bgcolor=(200, 100, 30, 40))
result = m.merge(tiles, img_opts)
img = result.as_image()
eq_(img.size, (100, 100))
eq_(img.getcolors(), [(100*100, (200, 100, 30, 40))])
def teardown(self):
for tile_fname in self.cleanup_tiles:
if tile_fname and os.path.isfile(tile_fname):
os.remove(tile_fname)
class TestGetCrop(object):
def setup(self):
self.tmp_file = create_tmp_image_file((100, 100), two_colored=True)
self.img = ImageSource(self.tmp_file,
image_opts=ImageOptions(format='image/png'), size=(100, 100))
def teardown(self):
if os.path.exists(self.tmp_file):
os.remove(self.tmp_file)
def test_perfect_match(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (100, 100), bbox, image_opts=None)
assert self.img == result
def test_simple_resize_nearest(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (200, 200), bbox,
image_opts=ImageOptions(resampling='nearest'))
img = result.as_image()
eq_(img.size, (200, 200))
eq_(len(img.getcolors()), 2)
def test_simple_resize_bilinear(self):
bbox = (-10, -5, 30, 35)
transformer = ImageTransformer(SRS(4326), SRS(4326))
result = transformer.transform(self.img, bbox, (200, 200), bbox,
image_opts=ImageOptions(resampling='bilinear'))
img = result.as_image()
eq_(img.size, (200, 200))
# some shades of grey with bilinear
assert len(img.getcolors()) >= 4
class TestLayerMerge(object):
def test_opacity_merge(self):
img1 = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)),
image_opts=ImageOptions(opacity=0.5))
result = merge_images([img1, img2], ImageOptions(transparent=False))
img = result.as_image()
eq_(img.getpixel((0, 0)), (127, 127, 255))
def test_opacity_merge_mixed_modes(self):
img1 = ImageSource(Image.new('RGBA', (10, 10), (255, 0, 255, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)).convert('P'),
image_opts=ImageOptions(opacity=0.5))
result = merge_images([img1, img2], ImageOptions(transparent=True))
img = result.as_image()
assert_img_colors_eq(img, [
(10*10, (127, 127, 255, 255)),
])
def test_paletted_merge(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
# generate RGBA images with a transparent rectangle in the lower right
img1 = ImageSource(Image.new('RGBA', (50, 50), (0, 255, 0, 255))).as_image()
draw = ImageDraw.Draw(img1)
draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
paletted_img = quantize(img1, alpha=True)
assert img_has_transparency(paletted_img)
assert paletted_img.mode == 'P'
rgba_img = Image.new('RGBA', (50, 50), (255, 0, 0, 255))
draw = ImageDraw.Draw(rgba_img)
draw.rectangle((25, 25, 49, 49), fill=(0, 0, 0, 0))
img1 = ImageSource(paletted_img)
img2 = ImageSource(rgba_img)
# generate base image and merge the others above
img3 = ImageSource(Image.new('RGBA', (50, 50), (0, 0, 255, 255)))
result = merge_images([img3, img1, img2], ImageOptions(transparent=True))
img = result.as_image()
assert img.mode == 'RGBA'
eq_(img.getpixel((49, 49)), (0, 0, 255, 255))
eq_(img.getpixel((0, 0)), (255, 0, 0, 255))
def test_solid_merge(self):
img1 = ImageSource(Image.new('RGB', (10, 10), (255, 0, 255)))
img2 = ImageSource(Image.new('RGB', (10, 10), (0, 255, 255)))
result = merge_images([img1, img2], ImageOptions(transparent=False))
img = result.as_image()
eq_(img.getpixel((0, 0)), (0, 255, 255))
class TestLayerCompositeMerge(object):
def test_composite_merge(self):
# http://stackoverflow.com/questions/3374878
if not hasattr(Image, 'alpha_composite'):
raise SkipTest()
img1 = Image.new('RGBA', size=(100, 100), color=(255, 0, 0, 255))
draw = ImageDraw.Draw(img1)
draw.rectangle((33, 0, 66, 100), fill=(255, 0, 0, 128))
draw.rectangle((67, 0, 100, 100), fill=(255, 0, 0, 0))
img1 = ImageSource(img1)
img2 = Image.new('RGBA', size=(100, 100), color=(0, 255, 0, 255))
draw = ImageDraw.Draw(img2)
draw.rectangle((0, 33, 100, 66), fill=(0, 255, 0, 128))
draw.rectangle((0, 67, 100, 100), fill=(0, 255, 0, 0))
img2 = ImageSource(img2)
result = merge_images([img2, img1], ImageOptions(transparent=True))
img = result.as_image()
eq_(img.mode, 'RGBA')
assert_img_colors_eq(img, [
(1089, (0, 255, 0, 255)),
(1089, (255, 255, 255, 0)),
(1122, (0, 255, 0, 128)),
(1122, (128, 126, 0, 255)),
(1122, (255, 0, 0, 128)),
(1156, (170, 84, 0, 191)),
(3300, (255, 0, 0, 255))])
def test_composite_merge_opacity(self):
if not hasattr(Image, 'alpha_composite'):
raise SkipTest()
bg = Image.new('RGBA', size=(100, 100), color=(255, 0, 255, 255))
bg = ImageSource(bg)
fg = Image.new('RGBA', size=(100, 100), color=(0, 0, 0, 0))
draw = ImageDraw.Draw(fg)
draw.rectangle((10, 10, 89, 89), fill=(0, 255, 255, 255))
fg = ImageSource(fg, image_opts=ImageOptions(opacity=0.5))
result = merge_images([bg, fg], ImageOptions(transparent=True))
img = result.as_image()
eq_(img.mode, 'RGBA')
assert_img_colors_eq(img, [
(3600, (255, 0, 255, 255)),
(6400, (128, 127, 255, 255))])
class TestTransform(object):
def setup(self):
self.src_img = ImageSource(create_debug_img((200, 200), transparent=False))
self.src_srs = SRS(31467)
self.dst_size = (100, 150)
self.dst_srs = SRS(4326)
self.dst_bbox = (0.2, 45.1, 8.3, 53.2)
self.src_bbox = self.dst_srs.transform_bbox_to(self.src_srs, self.dst_bbox)
def test_transform(self, mesh_div=4):
transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=mesh_div)
result = transformer.transform(self.src_img, self.src_bbox, self.dst_size, self.dst_bbox,
image_opts=ImageOptions(resampling='nearest'))
assert isinstance(result, ImageSource)
assert result.as_image() != self.src_img
assert result.size == (100, 150)
def _test_compare_mesh_div(self):
"""
Create transformations with different div values.
"""
for div in [1, 2, 4, 6, 8, 12, 16]:
transformer = ImageTransformer(self.src_srs, self.dst_srs, mesh_div=div)
result = transformer.transform(self.src_img, self.src_bbox,
self.dst_size, self.dst_bbox)
result.as_image().save('/tmp/transform-%d.png' % (div,))
class TestSingleColorImage(object):
def test_one_point(self):
img = Image.new('RGB', (100, 100), color='#ff0000')
draw = ImageDraw.Draw(img)
draw.point((99, 99))
del draw
assert not is_single_color_image(img)
def test_solid(self):
img = Image.new('RGB', (100, 100), color='#ff0102')
eq_(is_single_color_image(img), (255, 1, 2))
def test_solid_w_alpha(self):
img = Image.new('RGBA', (100, 100), color='#ff0102')
eq_(is_single_color_image(img), (255, 1, 2, 255))
def test_solid_paletted_image(self):
img = Image.new('P', (100, 100), color=20)
palette = []
for i in range(256):
palette.extend((i, i//2, i%3))
img.putpalette(palette)
eq_(is_single_color_image(img), (20, 10, 2))
class TestMakeTransparent(object):
def _make_test_image(self):
img = Image.new('RGB', (50, 50), (130, 140, 120))
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120))
return img
def _make_transp_test_image(self):
img = Image.new('RGBA', (50, 50), (130, 140, 120, 100))
draw = ImageDraw.Draw(img)
draw.rectangle((10, 10, 39, 39), fill=(130, 150, 120, 120))
return img
def test_result(self):
img = self._make_test_image()
img = make_transparent(img, (130, 150, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))]
def test_with_color_fuzz(self):
img = self._make_test_image()
img = make_transparent(img, (128, 154, 121), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))]
def test_no_match(self):
img = self._make_test_image()
img = make_transparent(img, (130, 160, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
assert colors == [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 255))]
def test_from_paletted(self):
img = self._make_test_image().quantize(256)
img = make_transparent(img, (130, 150, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = img.getcolors()
eq_(colors, [(1600, (130, 140, 120, 255)), (900, (130, 150, 120, 0))])
def test_from_transparent(self):
img = self._make_transp_test_image()
draw = ImageDraw.Draw(img)
draw.rectangle((0, 0, 4, 4), fill=(130, 100, 120, 0))
draw.rectangle((5, 5, 9, 9), fill=(130, 150, 120, 255))
img = make_transparent(img, (130, 150, 120, 120), tolerance=5)
assert img.mode == 'RGBA'
assert img.size == (50, 50)
colors = sorted(img.getcolors(), reverse=True)
eq_(colors, [(1550, (130, 140, 120, 100)), (900, (130, 150, 120, 0)),
(25, (130, 150, 120, 255)), (25, (130, 100, 120, 0))])
class TestTileSplitter(object):
def test_background_larger_crop(self):
img = ImageSource(Image.new('RGB', (356, 266), (130, 140, 120)))
img_opts = ImageOptions('RGB')
splitter = TileSplitter(img, img_opts)
tile = splitter.get_tile((0, 0), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(colors, [(256*256, (130, 140, 120))])
tile = splitter.get_tile((256, 256), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(sorted(colors), [(10*100, (130, 140, 120)), (256*256-10*100, (255, 255, 255))])
def test_background_larger_crop_with_transparent(self):
img = ImageSource(Image.new('RGBA', (356, 266), (130, 140, 120, 255)))
img_opts = ImageOptions('RGBA', transparent=True)
splitter = TileSplitter(img, img_opts)
tile = splitter.get_tile((0, 0), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(colors, [(256*256, (130, 140, 120, 255))])
tile = splitter.get_tile((256, 256), (256, 256))
eq_(tile.size, (256, 256))
colors = tile.as_image().getcolors()
eq_(sorted(colors), [(10*100, (130, 140, 120, 255)), (256*256-10*100, (255, 255, 255, 0))])
class TestHasTransparency(object):
def test_rgb(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
img = Image.new('RGB', (10, 10))
assert not img_has_transparency(img)
img = quantize(img, alpha=False)
assert not img_has_transparency(img)
def test_rbga(self):
if not hasattr(Image, 'FASTOCTREE'):
raise SkipTest()
img = Image.new('RGBA', (10, 10), (100, 200, 50, 255))
img.paste((255, 50, 50, 0), (3, 3, 7, 7))
assert img_has_transparency(img)
img = quantize(img, alpha=True)
assert img_has_transparency(img)
class TestPeekImageFormat(object):
def test_peek(self):
yield self.check, 'png', 'png'
yield self.check, 'tiff', 'tiff'
yield self.check, 'gif', 'gif'
yield self.check, 'jpeg', 'jpeg'
yield self.check, 'bmp', None
def check(self, format, expected_format):
buf = BytesIO()
Image.new('RGB', (100, 100)).save(buf, format)
eq_(peek_image_format(buf), expected_format)
|
|
#!/usr/bin/env python
"""Collects all the information a robot has of its environment.
A map has three different parts: its properties, its elements, and its
layers. Properties define key intrinsics of the map (such as its
boundaries). Elements define the collections of physical objects on
the map (robots, walls, etc.). Layers (meant to be toggled for
visibility) represent perspectives on the map: where the map's
elements are, what probability distributions are associated with a
given element, etc.
"""
from __future__ import division
__author__ = "Sierra Williams and Nick Sweet"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["Nick Sweet", "Nisar Ahmed"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Sierra Williams"
__email__ = "sierra.williams@colorado.edu"
__status__ = "Development"
import logging
import math
import numpy as np
import json
import sys
import os
import matplotlib.pyplot as plt
from shapely.geometry import Point
from cops_and_robots.helpers.config import load_config
from cops_and_robots.map_tools.map_elements import MapObject, MapArea
from cops_and_robots.map_tools.shape_layer import ShapeLayer
from cops_and_robots.map_tools.feasible_layer import FeasibleLayer
from cops_and_robots.map_tools.probability_layer import ProbabilityLayer
from cops_and_robots.map_tools.particle_layer import ParticleLayer
class Map(object):
"""Environment map composed of multiple elements and layers.
.. image:: img/classes_Map.png
Parameters
----------
mapname : str
The name of the map.
bounds : array_like, optional
Map boundaries as [xmin,ymin,xmax,ymax] in [m].
plot_robbers : list or bool
A list of robbers to plot, True for all, False for None.
map_display_type : {'particle', 'probability'}
Defines which layer to use.
combined_only : bool, optional
Whether to show only the combined plot (as opposed to individual plots
for each robber, plus one combined plot). Defaults to `True`.
publish_to_ROS: bool
Whether to publish an image topic of the map to ROS.
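Example (illustrative only; assumes the bundled 'fleming' configuration and
its 'walls.json'/'models.json' files are available)::
    >>> fleming = Map(map_name='fleming')
    >>> fleming.plot()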
"""
# TODO: @Config Change plot_robbers to just be a string
# TODO: @Refactor Separate map and interface
def __init__(self, map_name='fleming', bounds=[-5, -5, 5, 5],
plot_robbers=True, map_display_type='probability',
combined_only=True, publish_to_ROS=False):
# Define map properties
# <>TODO: Clean this up - add a separate map creation function?
self.map_name = map_name
if self.map_name == 'fleming':
self.bounds = [-4.5, -3.75, 1, .75]
else:
self.bounds = bounds # [x_min,y_min,x_max,y_max] in [m]
self.plot_robbers = plot_robbers
self.outer_bounds = [i * 1.1 for i in self.bounds]
self.origin = [0, 0] # in [m]
# <>TODO: Make display type relative to each robber
self.display_type = map_display_type
self.combined_only = combined_only
# Set up ROS elements if using ROS
self.publish_to_ROS = publish_to_ROS
if publish_to_ROS:
from cv_bridge import CvBridge
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
self.probability_target = 'Roy'
self.bridge = CvBridge()
self.image_pub = rospy.Publisher("python_probability_map", Image,
queue_size=10)
rospy.Subscriber("robot_probability_map", String,
self.change_published_ax)
# Define map elements
self.objects = {} # For dynamic/static map objects (not robbers/cops)
self.areas = {}
self.cops = {}
self.robbers = {}
self.dynamic_elements = []
self.static_elements = []
self.information_elements = []
self.element_dict = {'dynamic': self.dynamic_elements,
'static': self.static_elements,
'information': self.information_elements}
# Define layers
self.shape_layer = ShapeLayer(self.element_dict, bounds=self.bounds)
self.feasible_layer = FeasibleLayer(bounds=self.bounds)
self.particle_layers = {} # One per robber, plus one combined
self.probability_layers = {} # One per robber, plus one combined
# Set up map
if self.map_name == 'fleming':
set_up_fleming(self) # <>TODO: make a generic 'setup map' function
else:
pass
def add_obj(self, map_obj):
self.objects[map_obj.name] = map_obj
self.static_elements.append(map_obj)
self.feasible_layer.define_feasible_regions(self.static_elements)
def rem_obj(self, map_obj):
self.static_elements.remove(map_obj)
self.feasible_layer.define_feasible_regions(self.static_elements)
del self.objects[map_obj.name]
def add_robot(self, map_obj):
# <>TODO: Modify so it can have relations
self.dynamic_elements.append(map_obj)
def rem_robot(self, map_obj):
self.dynamic_elements.remove(map_obj)
def add_area(self, area):
self.areas[area.name] = area
self.static_elements.append(area)
def rem_area(self, area):
self.static_elements.remove(area)
del self.areas[area.name]
def add_cop(self, cop_obj):
self.dynamic_elements.append(cop_obj)
self.cops[cop_obj.name] = cop_obj
def rem_cop(self, cop_obj):
self.dynamic_elements.remove(cop_obj)
del self.cops[cop_obj.name]
def add_robber(self, robber):
# <>TODO: Make generic imaginary robbers
if self.plot_robbers is True:
robber.visible = True
elif self.plot_robbers is False:
robber.visible = False
elif robber.name not in self.plot_robbers:
robber.visible = False
self.dynamic_elements.append(robber)
self.robbers[robber.name] = robber
def rem_robber(self, robber):
robber.patch.remove()
self.dynamic_elements.remove(robber)
try:
if self.fusion_engine is not None:
if self.display_type == 'particle':
self.rem_particle_layer(robber.name)
elif self.display_type == 'probability':
self.rem_probability_layer(robber.name)
except:
# <>TODO: actually catch other exceptions here
logging.debug('No layer to remove.')
del self.robbers[robber.name]
def found_robber(self, robber):
robber.visible = True
robber.color = 'darkorange'
try:
if self.display_type == 'particle':
self.rem_particle_layer(robber.name)
elif self.display_type == 'probability':
self.rem_probability_layer(robber.name)
except:
# <>TODO: actually catch other exceptions here
logging.debug('No layer to remove.')
def add_particle_layer(self, name, filter_):
self.particle_layers[name] = ParticleLayer(filter_)
def rem_particle_layer(self, name):
self.particle_layers[name].remove()
del self.particle_layers[name]
def add_probability_layer(self, name, filter_):
self.probability_layers[name] = ProbabilityLayer(filter_,
fig=self.fig,
bounds=self.bounds)
def rem_probability_layer(self, name):
self.probability_layers[name].remove()
del self.probability_layers[name]
def occupancy_grid(self, **kwargs):
self.setup_og(**kwargs)
self.update()
# Save the occupancy grid image to the user's Desktop
path = os.path.expanduser('~')
filename = "%s/Desktop/occupancy_grid" % (path)
plt.axis('off')
plt.title('')
plt.savefig(filename)
def setup_og(self, fig=None, fusion_engine=None):
"""Create the initial plot for the animation.
"""
logging.info('Setting up occupancy grid')
# Use the resolution passed on the command line, if any
if len(sys.argv) > 1:
res = float(sys.argv[1])
else:
res = 100
# <>TODO: Fix hard-coded figure dimensions (inches per unit of resolution)
x = .0725
y = .0557
if fig is None:
if plt.get_fignums():
self.fig = plt.gcf()
else:
self.fig = plt.figure(figsize =(x*res, y*res))
else:
self.fig = fig
self.fusion_engine = fusion_engine
self._setup_axes()
self._setup_layers()
def plot(self, **kwargs):
self.setup_plot(**kwargs)
self.update()
plt.show()
def setup_plot(self, fig=None, fusion_engine=None):
"""Create the initial plot for the animation.
"""
# logging.info('Setting up plot')
if fig is None:
if plt.get_fignums():
self.fig = plt.gcf()
else:
self.fig = plt.figure(figsize=(14, 10))
else:
self.fig = fig
self.fusion_engine = fusion_engine
self._setup_axes()
self._setup_layers()
def _setup_axes(self):
self.axes = {}
if len(self.robbers) == 1:
name = self.robbers.iterkeys().next()
self.axes[name] = self.fig.add_subplot(111)
pos = self.axes[name].get_position()
print pos
pos = [pos.x0, pos.y0 * 1.2, pos.width, pos.height]
print pos
self.axes[name].set_position(pos)
elif self.combined_only:
self.axes['combined'] = self.fig.add_subplot(111)
else:
num_axes = len(self.robbers) + 1
num_rows = int(math.ceil(num_axes / 2))
i = 0
for robber_name in self.robbers:
ax = plt.subplot2grid((num_rows, 4),
(int(math.floor(i / 2)), (i % 2) * 2),
colspan=2
)
self.axes[robber_name] = ax
i += 1
# Add a plot for the combined estimate
if (num_axes % 2) == 0:
ax = plt.subplot2grid((num_rows, 4), (num_rows - 1, 2),
colspan=2)
else:
ax = plt.subplot2grid((num_rows, 4), (num_rows - 1, 1),
colspan=2)
self.axes['combined'] = ax
# Rescale, setup bounds and title
for ax_name, ax in self.axes.iteritems():
ax.axis('scaled')
ax.set_xlim([self.bounds[0], self.bounds[2]])
ax.set_xlabel('x position (m)')
ax.set_ylim([self.bounds[1], self.bounds[3]])
ax.set_ylabel('y position (m)')
if ax_name == 'combined':
t = ax.set_title('Combined perception of all robots')
else:
t = ax.set_title("Map of {}'s perceived location"
.format(ax_name))
try:
if self.fusion_engine.vel_states is not None:
t.set_y(1.2)
except AttributeError:
logging.debug('No vel states available.')
# plt.tight_layout()
def _setup_layers(self):
# Set up basic layers
self.shape_layers = {}
self.feasible_layers = {}
for ax_name, ax in self.axes.iteritems():
self.shape_layers[ax_name] = ShapeLayer(self.element_dict,
bounds=self.bounds,
ax=ax)
# Set up probability/particle layers
if self.fusion_engine is not None:
filter_ = self.fusion_engine.filters[ax_name]
self.probability_layers[ax_name] = \
ProbabilityLayer(filter_, fig=self.fig, ax=ax,
bounds=self.bounds)
def change_published_ax(self, msg):
self.probability_target = msg.data
def update(self, i=0):
# self.shape_layer.update(i=i)
for ax_name, ax in self.axes.iteritems():
try:
self.shape_layers[ax_name].update(i=i)
# Update probability/particle layers
if self.fusion_engine is not None:
if self.display_type == 'particle':
self.particle_layers[ax_name].update(i=i)
elif self.display_type == 'probability':
self.probability_layers[ax_name].update(i=i)
except KeyError:
logging.debug('Robber already removed.')
if self.publish_to_ROS and ax_name == self.probability_target and \
i % 1 == 0:
import cv2
from cv_bridge import CvBridgeError
extent = ax.get_window_extent().transformed(
self.fig.dpi_scale_trans.inverted())
self.fig.savefig(ax_name + '.png',
bbox_inches=extent.expanded(1.1, 1.2))
img = cv2.imread(ax_name + '.png', 1)
try:
self.image_pub.publish(
self.bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError, e:
print e
def set_up_fleming(map_):
pi = 3.14159
# Get wall information
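# Assumed structure of walls.json, inferred from the keys accessed below
# (each top-level key maps a wall name to a list of stringified numbers;
# the real file may contain additional fields):
#   {
#     "position":    {"wall_0": ["1.0", "2.0", "0.0"], ...},
#     "size":        {"wall_0": ["3.0", "0.1"], ...},
#     "model_name":  {"wall_0": ["wall_0"], ...},
#     "orientation": {"wall_0": ["0.0", "0.0", "1.57"], ...}
#   }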
with open('walls.json') as wall_file:
wallParameters = json.load(wall_file)
position_wall = wallParameters["position"]
size_wall = wallParameters["size"]
name_wall = wallParameters["model_name"]
orientation_wall = wallParameters["orientation"]
# Collect wall positions, sizes, names, and orientations
wall_pos_x = []
wall_pos_y = []
wall_pos_z = []
wall_size_x = []
wall_size_y = []
wall_name = []
wall_orientation_P = []
wall_orientation_Y = []
wall_orientation_R = []
i=0
for key, value in position_wall.iteritems():
wall_pos_x.insert(i, float(value[0]))
wall_pos_y.insert(i, float(value[1]))
wall_pos_z.insert(i, float(value[2]))
i += 1
i=0
for key, value in size_wall.iteritems():
wall_size_x.insert(i, float(value[0]))
wall_size_y.insert(i, float(value[1]))
i += 1
i=0
for key, value in name_wall.iteritems():
wall_name.insert(i, value[0])
i += 1
i=0
for key, value in orientation_wall.iteritems():
wall_orientation_R.insert(i, float(value[0])*180/pi)
wall_orientation_P.insert(i, float(value[1])*180/pi)
wall_orientation_Y.insert(i, float(value[2])*180/pi)
i += 1
field_width = max(wall_size_x)
field_length = max(wall_size_y)
field = MapArea('Field', [field_width, field_length], has_relations=False)
poses_w = np.array([]).reshape(0,3)
sizes_w = np.array([]).reshape(0,2)
# NOTE: the stacking below assumes the four dicts above iterate in the same
# key order; if they do not, poses and sizes will not match the right walls.
for i in range(len(wall_pos_x)):
poses_w = np.vstack([poses_w, [wall_pos_x[i], wall_pos_y[i], wall_orientation_Y[i]]])
sizes_w = np.vstack([sizes_w, [wall_size_x[i], wall_size_y[i]]])
walls = []
for i in range(poses_w.shape[0]):
name = wall_name[i]
pose = poses_w[i, :]
sizes = sizes_w[i,:]
wall = MapObject(name, sizes, pose=pose, color_str='black',
has_relations=False, map_bounds=map_.bounds)
walls.append(wall)
# Set up models
with open('models.json') as model_file:
modelParameters = json.load(model_file)
position = modelParameters["position"]
size = modelParameters["size"]
model_name = modelParameters["model_name"]
orientation = modelParameters["orientation"]
# Form Launch String
model_pos_x = []
model_pos_y = []
model_pos_z = []
model_size_x = []
model_size_y = []
model_names = []
model_orientation_R = []
model_orientation_P = []
model_orientation_Y = []
i=0
for key, value in position.iteritems():
model_pos_x.insert(i, float(value[0]))
model_pos_y.insert(i, float(value[1]))
model_pos_z.insert(i, float(value[2]))
i += 1
i=0
for key, value in size.iteritems():
model_size_x.insert(i, float(value[0]))
model_size_y.insert(i, float(value[1]))
i += 1
i=0
for key, value in model_name.iteritems():
model_names.insert(i, value[0])
i += 1
i=0
for key, value in orientation.iteritems():
model_orientation_R.insert(i, float(value[0])*180/pi)
model_orientation_P.insert(i, float(value[1])*180/pi)
model_orientation_Y.insert(i, float(value[2])*180/pi)
i += 1
poses_m = np.array([]).reshape(0,3)
sizes_m = np.array([]).reshape(0,2)
for i in range(len(model_pos_x)):
poses_m = np.vstack([poses_m, [model_pos_x[i], model_pos_y[i], model_orientation_Y[i]]])
sizes_m = np.vstack([sizes_m, [model_size_x[i], model_size_y[i]]])
landmarks = []
for i in range(poses_m.shape[0]):
name = model_names[i]
pose = poses_m[i, :]
sizes = sizes_m[i,:]
landmark = MapObject(name, sizes, pose=pose, color_str='black',
has_relations=False, map_bounds=map_.bounds)
landmarks.append(landmark)
# Add walls to map
for wall in walls:
map_.add_obj(wall)
# Add landmarks to map
for landmark in landmarks:
map_.add_obj(landmark)
# Create areas
# labels = ['Study', 'Library', 'Kitchen', 'Billiard Room', 'Hallway',
# 'Dining Room']
# colors = ['aquamarine', 'lightcoral', 'goldenrod', 'sage',
# 'cornflowerblue', 'orchid']
# # points = np.array([[[-7.0, -3.33], [-7.0, -1], [-2, -1], [-2, -3.33]],
# # [[-2, -3.33], [-2, -1], [4.0, -1], [4.0, -3.33]],
# # [[-9.5, 1.4], [-9.5, 3.68], [0, 3.68], [0, 1.4]],
# # [[0, 1.4], [0, 3.68], [4, 3.68], [4, 1.4]],
# # [[-9.5, -1], [-9.5, 1.4], [4, 1.4], [4, -1]],
# # [[-9.5, -3.33], [-9.5, -1], [-7, -1], [-7, -3.33]],
# # ])
# s = 0.0 # coarse scale factor
# points = np.array([[[-7.0 - s, -3.33], [-7.0 - s, -1 + s], [-2 + s, -1 + s], [-2 + s, -3.33]],
# [[-2 - s, -3.33], [-2 - s, -1 + s], [4.0, -1 + s], [4.0, -3.33]],
# [[-9.5, 1.4 - s], [-9.5, 3.68], [0 + s, 3.68], [0 + s, 1.4 - s]],
# [[0 - s, 1.4 - s], [0 - s, 3.68], [4, 3.68], [4, 1.4 - s]],
# [[-9.5, -1 - s], [-9.5, 1.4 + s], [4, 1.4 + s], [4, -1 - s]],
# [[-9.5, -3.33], [-9.5, -1 + s], [-7 + s, -1 + s], [-7 + s, -3.33]],
# ])
# for i, pts in enumerate(points):
# centroid = [pts[0, 0] + np.abs(pts[2, 0] - pts[0, 0]) / 2,
# pts[0, 1] + np.abs(pts[1, 1] - pts[0, 1]) / 2, 0]
# area = MapArea(name=labels[i], shape_pts=pts, pose=centroid,
# color_str=colors[i], map_bounds=map_.bounds)
# map_.add_area(area)
# # Relate landmarks and areas
# for landmark in landmarks:
# if area.shape.intersects(Point(landmark.pose)):
# area.contained_objects[landmark.name] = landmark
# landmark.container_area = area
# landmark.define_relations(map_.bounds)
# area.define_relations(map_.bounds)
# <>TODO: Include area demarcations
map_.feasible_layer.define_feasible_regions(map_.static_elements)
def find_grid_mask_for_rooms(map_, grid):
"""Define boolean arrays of which grid cells each area contains.
"""
area_masks = {}
pos = grid.pos
for area_name, area in map_.areas.iteritems():
area_mask = np.ones_like(grid.prob.flatten())
for i, pt in enumerate(pos):
if not area.shape.intersects(Point(pt)):
area_mask[i] = 0
area_masks[area_name] = area_mask
return area_masks
# # STUB TO GENERATE AREA MASKS - ADD TO main.py
# from cops_and_robots.map_tools.map import find_grid_mask_for_rooms
# map_ = self.cops['Deckard'].map
# grid = fusion_engine.filters['Roy'].probability
# area_masks = find_grid_mask_for_rooms(map_, grid)
# np.save('coarse_area_masks', area_masks)
# self.cops['Deckard'].map.plot()
# return
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
fleming = Map()
fleming.plot()
fleming.occupancy_grid()
# fleming.feasible_layer.plot()
|
|
# -*- coding: utf-8 -*-
"""
sphinx.application
~~~~~~~~~~~~~~~~~~
Sphinx application object.
Gracefully adapted from the TextPress system by Armin.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
import types
import posixpath
import traceback
from os import path
from collections import deque
from six import iteritems, itervalues, text_type
from six.moves import cStringIO
from docutils import nodes
from docutils.parsers.rst import convert_directive_function, \
directives, roles
import sphinx
from sphinx import package_dir, locale
from sphinx.roles import XRefRole
from sphinx.config import Config
from sphinx.errors import SphinxError, SphinxWarning, ExtensionError, \
VersionRequirementError, ConfigError
from sphinx.domains import ObjType, BUILTIN_DOMAINS
from sphinx.domains.std import GenericObject, Target, StandardDomain
from sphinx.builders import BUILTIN_BUILDERS
from sphinx.environment import BuildEnvironment
from sphinx.io import SphinxStandaloneReader
from sphinx.util import pycompat # noqa: imported for side-effects
from sphinx.util import import_object
from sphinx.util.tags import Tags
from sphinx.util.osutil import ENOENT
from sphinx.util.logging import is_suppressed_warning
from sphinx.util.console import bold, lightgray, darkgray, darkgreen, \
term_width_line
if hasattr(sys, 'intern'):
intern = sys.intern
# List of all known core events. Maps name to arguments description.
events = {
'builder-inited': '',
'env-get-outdated': 'env, added, changed, removed',
'env-purge-doc': 'env, docname',
'env-before-read-docs': 'env, docnames',
'source-read': 'docname, source text',
'doctree-read': 'the doctree before being pickled',
'env-merge-info': 'env, read docnames, other env instance',
'missing-reference': 'env, node, contnode',
'doctree-resolved': 'doctree, docname',
'env-updated': 'env',
'html-collect-pages': 'builder',
'html-page-context': 'pagename, context, doctree or None',
'build-finished': 'exception',
}
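# A minimal, hedged sketch of how an extension consumes one of these events
# through the ``connect`` API defined on the Sphinx class below (the handler
# and the substitution it performs are made-up examples, not part of this
# module):
#
#     def on_source_read(app, docname, source):
#         # ``source`` is a one-element list holding the document text
#         source[0] = source[0].replace('|project|', app.config.project)
#
#     def setup(app):
#         app.connect('source-read', on_source_read)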
CONFIG_FILENAME = 'conf.py'
ENV_PICKLE_FILENAME = 'environment.pickle'
class Sphinx(object):
def __init__(self, srcdir, confdir, outdir, doctreedir, buildername,
confoverrides=None, status=sys.stdout, warning=sys.stderr,
freshenv=False, warningiserror=False, tags=None, verbosity=0,
parallel=0):
self.verbosity = verbosity
self.next_listener_id = 0
self._extensions = {}
self._extension_metadata = {}
self._additional_source_parsers = {}
self._listeners = {}
self._setting_up_extension = ['?']
self.domains = BUILTIN_DOMAINS.copy()
self.buildername = buildername
self.builderclasses = BUILTIN_BUILDERS.copy()
self.builder = None
self.env = None
self.enumerable_nodes = {}
self.srcdir = srcdir
self.confdir = confdir
self.outdir = outdir
self.doctreedir = doctreedir
self.parallel = parallel
if status is None:
self._status = cStringIO()
self.quiet = True
else:
self._status = status
self.quiet = False
if warning is None:
self._warning = cStringIO()
else:
self._warning = warning
self._warncount = 0
self.warningiserror = warningiserror
self._events = events.copy()
self._translators = {}
# keep last few messages for traceback
self.messagelog = deque(maxlen=10)
# say hello to the world
self.info(bold('Running Sphinx v%s' % sphinx.__display_version__))
# status code for command-line application
self.statuscode = 0
if not path.isdir(outdir):
self.info('making output directory...')
os.makedirs(outdir)
# read config
self.tags = Tags(tags)
self.config = Config(confdir, CONFIG_FILENAME,
confoverrides or {}, self.tags)
self.config.check_unicode(self.warn)
# defer checking types until i18n has been initialized
# set confdir to srcdir if -C given (!= no confdir); a few pieces
# of code expect a confdir to be set
if self.confdir is None:
self.confdir = self.srcdir
# extension loading support for alabaster theme
# self.config.html_theme is not yet set from conf.py at this point;
# for now, sphinx always loads the 'alabaster' extension.
if 'alabaster' not in self.config.extensions:
self.config.extensions.append('alabaster')
# load all user-given extension modules
for extension in self.config.extensions:
self.setup_extension(extension)
# the config file itself can be an extension
if self.config.setup:
self._setting_up_extension = ['conf.py']
# py31 doesn't have 'callable' function for below check
if hasattr(self.config.setup, '__call__'):
self.config.setup(self)
else:
raise ConfigError(
"'setup' that is specified in the conf.py has not been " +
"callable. Please provide a callable `setup` function " +
"in order to behave as a sphinx extension conf.py itself."
)
# now that we know all config values, collect them from conf.py
self.config.init_values(self.warn)
# check the Sphinx version if requested
if self.config.needs_sphinx and \
self.config.needs_sphinx > sphinx.__display_version__:
raise VersionRequirementError(
'This project needs at least Sphinx v%s and therefore cannot '
'be built with this version.' % self.config.needs_sphinx)
# check extension versions if requested
if self.config.needs_extensions:
for extname, needs_ver in self.config.needs_extensions.items():
if extname not in self._extensions:
self.warn('needs_extensions config value specifies a '
'version requirement for extension %s, but it is '
'not loaded' % extname)
continue
has_ver = self._extension_metadata[extname]['version']
if has_ver == 'unknown version' or needs_ver > has_ver:
raise VersionRequirementError(
'This project needs the extension %s at least in '
'version %s and therefore cannot be built with the '
'loaded version (%s).' % (extname, needs_ver, has_ver))
# set up translation infrastructure
self._init_i18n()
# check all configuration values for permissible types
self.config.check_types(self.warn)
# set up source_parsers
self._init_source_parsers()
# set up the build environment
self._init_env(freshenv)
# set up the builder
self._init_builder(self.buildername)
# set up the enumerable nodes
self._init_enumerable_nodes()
def _init_i18n(self):
"""Load translated strings from the configured localedirs if enabled in
the configuration.
"""
if self.config.language is not None:
self.info(bold('loading translations [%s]... ' %
self.config.language), nonl=True)
locale_dirs = [None, path.join(package_dir, 'locale')] + \
[path.join(self.srcdir, x) for x in self.config.locale_dirs]
else:
locale_dirs = []
self.translator, has_translation = locale.init(locale_dirs,
self.config.language,
charset=self.config.source_encoding)
if self.config.language is not None:
if has_translation or self.config.language == 'en':
# "en" never needs to be translated
self.info('done')
else:
self.info('not available for built-in messages')
def _init_source_parsers(self):
for suffix, parser in iteritems(self._additional_source_parsers):
if suffix not in self.config.source_suffix:
self.config.source_suffix.append(suffix)
if suffix not in self.config.source_parsers:
self.config.source_parsers[suffix] = parser
def _init_env(self, freshenv):
if freshenv:
self.env = BuildEnvironment(self.srcdir, self.doctreedir,
self.config)
self.env.find_files(self.config)
for domain in self.domains.keys():
self.env.domains[domain] = self.domains[domain](self.env)
else:
try:
self.info(bold('loading pickled environment... '), nonl=True)
self.env = BuildEnvironment.frompickle(
self.srcdir, self.config, path.join(self.doctreedir, ENV_PICKLE_FILENAME))
self.env.domains = {}
for domain in self.domains.keys():
# this can raise if the data version doesn't fit
self.env.domains[domain] = self.domains[domain](self.env)
self.info('done')
except Exception as err:
if isinstance(err, IOError) and err.errno == ENOENT:
self.info('not yet created')
else:
self.info('failed: %s' % err)
return self._init_env(freshenv=True)
self.env.set_warnfunc(self.warn)
def _init_builder(self, buildername):
if buildername is None:
print('No builder selected, using default: html', file=self._status)
buildername = 'html'
if buildername not in self.builderclasses:
raise SphinxError('Builder name %s not registered' % buildername)
builderclass = self.builderclasses[buildername]
if isinstance(builderclass, tuple):
# builtin builder
mod, cls = builderclass
builderclass = getattr(
__import__('sphinx.builders.' + mod, None, None, [cls]), cls)
self.builder = builderclass(self)
self.emit('builder-inited')
def _init_enumerable_nodes(self):
for node, settings in iteritems(self.enumerable_nodes):
self.env.domains['std'].enumerable_nodes[node] = settings
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
try:
if force_all:
self.builder.compile_all_catalogs()
self.builder.build_all()
elif filenames:
self.builder.compile_specific_catalogs(filenames)
self.builder.build_specific(filenames)
else:
self.builder.compile_update_catalogs()
self.builder.build_update()
status = (self.statuscode == 0 and
'succeeded' or 'finished with problems')
if self._warncount:
self.info(bold('build %s, %s warning%s.' %
(status, self._warncount,
self._warncount != 1 and 's' or '')))
else:
self.info(bold('build %s.' % status))
except Exception as err:
# delete the saved env to force a fresh build next time
envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
if path.isfile(envfile):
os.unlink(envfile)
self.emit('build-finished', err)
raise
else:
self.emit('build-finished', None)
self.builder.cleanup()
# ---- logging handling ----------------------------------------------------
def _log(self, message, wfile, nonl=False):
try:
wfile.write(message)
except UnicodeEncodeError:
encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii'
wfile.write(message.encode(encoding, 'replace'))
if not nonl:
wfile.write('\n')
if hasattr(wfile, 'flush'):
wfile.flush()
self.messagelog.append(message)
def warn(self, message, location=None, prefix='WARNING: ', type=None, subtype=None):
"""Emit a warning.
If *location* is given, it should either be a tuple of (docname, lineno)
or a string describing the location of the warning as well as possible.
*prefix* usually should not be changed.
*type* and *subtype* are used to suppress warnings with :confval:`suppress_warnings`.
.. note::
For warnings emitted during parsing, you should use
:meth:`.BuildEnvironment.warn` since that will collect all
warnings during parsing for later output.
"""
if is_suppressed_warning(type, subtype, self.config.suppress_warnings):
return
if isinstance(location, tuple):
docname, lineno = location
if docname:
location = '%s:%s' % (self.env.doc2path(docname), lineno or '')
else:
location = None
warntext = location and '%s: %s%s\n' % (location, prefix, message) or \
'%s%s\n' % (prefix, message)
if self.warningiserror:
raise SphinxWarning(warntext)
self._warncount += 1
self._log(warntext, self._warning, True)
def info(self, message='', nonl=False):
"""Emit an informational message.
If *nonl* is true, don't emit a newline at the end (which implies that
more info output will follow soon.)
"""
self._log(message, self._status, nonl)
def verbose(self, message, *args, **kwargs):
"""Emit a verbose informational message.
The message will only be emitted for verbosity levels >= 1 (i.e. at
least one ``-v`` option was given).
The message can contain %-style interpolation placeholders, which are
formatted with either the ``*args`` or ``**kwargs`` when output.
"""
if self.verbosity < 1:
return
if args or kwargs:
message = message % (args or kwargs)
self._log(message, self._status)
def debug(self, message, *args, **kwargs):
"""Emit a debug-level informational message.
The message will only be emitted for verbosity levels >= 2 (i.e. at
least two ``-v`` options were given).
The message can contain %-style interpolation placeholders, which are
formatted with either the ``*args`` or ``**kwargs`` when output.
"""
if self.verbosity < 2:
return
if args or kwargs:
message = message % (args or kwargs)
self._log(darkgray(message), self._status)
def debug2(self, message, *args, **kwargs):
"""Emit a lowlevel debug-level informational message.
The message will only be emitted for verbosity level 3 (i.e. three
``-v`` options were given).
The message can contain %-style interpolation placeholders, which are
formatted with either the ``*args`` or ``**kwargs`` when output.
"""
if self.verbosity < 3:
return
if args or kwargs:
message = message % (args or kwargs)
self._log(lightgray(message), self._status)
def _display_chunk(chunk):
if isinstance(chunk, (list, tuple)):
if len(chunk) == 1:
return text_type(chunk[0])
return '%s .. %s' % (chunk[0], chunk[-1])
return text_type(chunk)
def old_status_iterator(self, iterable, summary, colorfunc=darkgreen,
stringify_func=_display_chunk):
l = 0
for item in iterable:
if l == 0:
self.info(bold(summary), nonl=1)
l = 1
self.info(colorfunc(stringify_func(item)) + ' ', nonl=1)
yield item
if l == 1:
self.info()
# new version with progress info
def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0,
stringify_func=_display_chunk):
if length == 0:
for item in self.old_status_iterator(iterable, summary, colorfunc,
stringify_func):
yield item
return
l = 0
summary = bold(summary)
for item in iterable:
l += 1
s = '%s[%3d%%] %s' % (summary, 100*l/length,
colorfunc(stringify_func(item)))
if self.verbosity:
s += '\n'
else:
s = term_width_line(s)
self.info(s, nonl=1)
yield item
if l > 0:
self.info()
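# A hedged usage sketch (the iterable, summary text and callback are
# placeholders): builders typically wrap long loops like
#   for docname in app.status_iterator(docnames, 'writing output... ',
#                                      darkgreen, len(docnames)):
#       write_doc(docname)
# so that a percentage-based progress line is printed while iterating.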
# ---- general extensibility interface -------------------------------------
def setup_extension(self, extension):
"""Import and setup a Sphinx extension module. No-op if called twice."""
self.debug('[app] setting up extension: %r', extension)
if extension in self._extensions:
return
self._setting_up_extension.append(extension)
try:
mod = __import__(extension, None, None, ['setup'])
except ImportError as err:
self.verbose('Original exception:\n' + traceback.format_exc())
raise ExtensionError('Could not import extension %s' % extension,
err)
if not hasattr(mod, 'setup'):
self.warn('extension %r has no setup() function; is it really '
'a Sphinx extension module?' % extension)
ext_meta = None
else:
try:
ext_meta = mod.setup(self)
except VersionRequirementError as err:
# add the extension name to the version required
raise VersionRequirementError(
'The %s extension used by this project needs at least '
'Sphinx v%s; it therefore cannot be built with this '
'version.' % (extension, err))
if ext_meta is None:
ext_meta = {}
# special-case for compatibility
if extension == 'rst2pdf.pdfbuilder':
ext_meta = {'parallel_read_safe': True}
try:
if not ext_meta.get('version'):
ext_meta['version'] = 'unknown version'
except Exception:
self.warn('extension %r returned an unsupported object from '
'its setup() function; it should return None or a '
'metadata dictionary' % extension)
ext_meta = {'version': 'unknown version'}
self._extensions[extension] = mod
self._extension_metadata[extension] = ext_meta
self._setting_up_extension.pop()
def require_sphinx(self, version):
# check the Sphinx version if requested
if version > sphinx.__display_version__[:3]:
raise VersionRequirementError(version)
def import_object(self, objname, source=None):
"""Import an object from a 'module.name' string."""
return import_object(objname, source=source)
# event interface
def _validate_event(self, event):
event = intern(event)
if event not in self._events:
raise ExtensionError('Unknown event name: %s' % event)
def connect(self, event, callback):
self._validate_event(event)
listener_id = self.next_listener_id
if event not in self._listeners:
self._listeners[event] = {listener_id: callback}
else:
self._listeners[event][listener_id] = callback
self.next_listener_id += 1
self.debug('[app] connecting event %r: %r [id=%s]',
event, callback, listener_id)
return listener_id
def disconnect(self, listener_id):
self.debug('[app] disconnecting event: [id=%s]', listener_id)
for event in itervalues(self._listeners):
event.pop(listener_id, None)
def emit(self, event, *args):
try:
self.debug2('[app] emitting event: %r%s', event, repr(args)[:100])
except Exception:
# not every object likes to be repr()'d (think
# random stuff coming via autodoc)
pass
results = []
if event in self._listeners:
for _, callback in iteritems(self._listeners[event]):
results.append(callback(self, *args))
return results
def emit_firstresult(self, event, *args):
for result in self.emit(event, *args):
if result is not None:
return result
return None
# registering addon parts
def add_builder(self, builder):
self.debug('[app] adding builder: %r', builder)
if not hasattr(builder, 'name'):
raise ExtensionError('Builder class %s has no "name" attribute'
% builder)
if builder.name in self.builderclasses:
if isinstance(self.builderclasses[builder.name], tuple):
raise ExtensionError('Builder %r is a builtin builder' %
builder.name)
else:
raise ExtensionError(
'Builder %r already exists (in module %s)' % (
builder.name, self.builderclasses[builder.name].__module__))
self.builderclasses[builder.name] = builder
def add_config_value(self, name, default, rebuild, types=()):
self.debug('[app] adding config value: %r',
(name, default, rebuild) + ((types,) if types else ()))
if name in self.config.values:
raise ExtensionError('Config value %r already present' % name)
if rebuild in (False, True):
rebuild = rebuild and 'env' or ''
self.config.values[name] = (default, rebuild, types)
def add_event(self, name):
self.debug('[app] adding event: %r', name)
if name in self._events:
raise ExtensionError('Event %r already present' % name)
self._events[name] = ''
def set_translator(self, name, translator_class):
self.info(bold('A Translator for the %s builder is changed.' % name))
self._translators[name] = translator_class
def add_node(self, node, **kwds):
self.debug('[app] adding node: %r', (node, kwds))
if not kwds.pop('override', False) and \
hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__):
self.warn('while setting up extension %s: node class %r is '
'already registered, its visitors will be overridden' %
(self._setting_up_extension[-1], node.__name__))
nodes._add_node_class_names([node.__name__])
for key, val in iteritems(kwds):
try:
visit, depart = val
except ValueError:
raise ExtensionError('Value for key %r must be a '
'(visit, depart) function tuple' % key)
translator = self._translators.get(key)
if translator is not None:
pass
elif key == 'html':
from sphinx.writers.html import HTMLTranslator as translator
elif key == 'latex':
from sphinx.writers.latex import LaTeXTranslator as translator
elif key == 'text':
from sphinx.writers.text import TextTranslator as translator
elif key == 'man':
from sphinx.writers.manpage import ManualPageTranslator \
as translator
elif key == 'texinfo':
from sphinx.writers.texinfo import TexinfoTranslator \
as translator
else:
# ignore invalid keys for compatibility
continue
setattr(translator, 'visit_'+node.__name__, visit)
if depart:
setattr(translator, 'depart_'+node.__name__, depart)
def add_enumerable_node(self, node, figtype, title_getter=None, **kwds):
self.enumerable_nodes[node] = (figtype, title_getter)
self.add_node(node, **kwds)
def _directive_helper(self, obj, content=None, arguments=None, **options):
if isinstance(obj, (types.FunctionType, types.MethodType)):
obj.content = content
obj.arguments = arguments or (0, 0, False)
obj.options = options
return convert_directive_function(obj)
else:
if content or arguments or options:
raise ExtensionError('when adding directive classes, no '
'additional arguments may be given')
return obj
def add_directive(self, name, obj, content=None, arguments=None, **options):
self.debug('[app] adding directive: %r',
(name, obj, content, arguments, options))
if name in directives._directives:
self.warn('while setting up extension %s: directive %r is '
'already registered, it will be overridden' %
(self._setting_up_extension[-1], name))
directives.register_directive(
name, self._directive_helper(obj, content, arguments, **options))
def add_role(self, name, role):
self.debug('[app] adding role: %r', (name, role))
if name in roles._roles:
self.warn('while setting up extension %s: role %r is '
'already registered, it will be overridden' %
(self._setting_up_extension[-1], name))
roles.register_local_role(name, role)
def add_generic_role(self, name, nodeclass):
# don't use roles.register_generic_role because it uses
# register_canonical_role
self.debug('[app] adding generic role: %r', (name, nodeclass))
if name in roles._roles:
self.warn('while setting up extension %s: role %r is '
'already registered, it will be overridden' %
(self._setting_up_extension[-1], name))
role = roles.GenericRole(name, nodeclass)
roles.register_local_role(name, role)
def add_domain(self, domain):
self.debug('[app] adding domain: %r', domain)
if domain.name in self.domains:
raise ExtensionError('domain %s already registered' % domain.name)
self.domains[domain.name] = domain
def override_domain(self, domain):
self.debug('[app] overriding domain: %r', domain)
if domain.name not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain.name)
if not issubclass(domain, self.domains[domain.name]):
raise ExtensionError('new domain not a subclass of registered %s '
'domain' % domain.name)
self.domains[domain.name] = domain
def add_directive_to_domain(self, domain, name, obj,
content=None, arguments=None, **options):
self.debug('[app] adding directive to domain: %r',
(domain, name, obj, content, arguments, options))
if domain not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain)
self.domains[domain].directives[name] = \
self._directive_helper(obj, content, arguments, **options)
def add_role_to_domain(self, domain, name, role):
self.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain)
self.domains[domain].roles[name] = role
def add_index_to_domain(self, domain, index):
self.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError('domain %s not yet registered' % domain)
self.domains[domain].indices.append(index)
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[]):
self.debug('[app] adding object type: %r',
(directivename, rolename, indextemplate, parse_node,
ref_nodeclass, objname, doc_field_types))
StandardDomain.object_types[directivename] = \
ObjType(objname or directivename, rolename)
# create a subclass of GenericObject as the new directive
new_directive = type(directivename, (GenericObject, object),
{'indextemplate': indextemplate,
'parse_node': staticmethod(parse_node),
'doc_field_types': doc_field_types})
StandardDomain.directives[directivename] = new_directive
# XXX support more options?
StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
# backwards compatible alias
add_description_unit = add_object_type
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname=''):
self.debug('[app] adding crossref type: %r',
(directivename, rolename, indextemplate, ref_nodeclass,
objname))
StandardDomain.object_types[directivename] = \
ObjType(objname or directivename, rolename)
# create a subclass of Target as the new directive
new_directive = type(directivename, (Target, object),
{'indextemplate': indextemplate})
StandardDomain.directives[directivename] = new_directive
# XXX support more options?
StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
def add_transform(self, transform):
self.debug('[app] adding transform: %r', transform)
SphinxStandaloneReader.transforms.append(transform)
def add_javascript(self, filename):
self.debug('[app] adding javascript: %r', filename)
from sphinx.builders.html import StandaloneHTMLBuilder
if '://' in filename:
StandaloneHTMLBuilder.script_files.append(filename)
else:
StandaloneHTMLBuilder.script_files.append(
posixpath.join('_static', filename))
def add_stylesheet(self, filename):
self.debug('[app] adding stylesheet: %r', filename)
from sphinx.builders.html import StandaloneHTMLBuilder
if '://' in filename:
StandaloneHTMLBuilder.css_files.append(filename)
else:
StandaloneHTMLBuilder.css_files.append(
posixpath.join('_static', filename))
def add_latex_package(self, packagename, options=None):
self.debug('[app] adding latex package: %r', packagename)
from sphinx.builders.latex import LaTeXBuilder
LaTeXBuilder.usepackages.append((packagename, options))
def add_lexer(self, alias, lexer):
self.debug('[app] adding lexer: %r', (alias, lexer))
from sphinx.highlighting import lexers
if lexers is None:
return
lexers[alias] = lexer
def add_autodocumenter(self, cls):
self.debug('[app] adding autodocumenter: %r', cls)
from sphinx.ext import autodoc
autodoc.add_documenter(cls)
self.add_directive('auto' + cls.objtype, autodoc.AutoDirective)
def add_autodoc_attrgetter(self, type, getter):
self.debug('[app] adding autodoc attrgetter: %r', (type, getter))
from sphinx.ext import autodoc
autodoc.AutoDirective._special_attrgetters[type] = getter
def add_search_language(self, cls):
self.debug('[app] adding search language: %r', cls)
from sphinx.search import languages, SearchLanguage
assert issubclass(cls, SearchLanguage)
languages[cls.lang] = cls
def add_source_parser(self, suffix, parser):
self.debug('[app] adding source parser: %r, %r', suffix, parser)
self._additional_source_parsers[suffix] = parser
class TemplateBridge(object):
"""
This class defines the interface for a "template bridge", that is, a class
that renders templates given a template name and a context.
"""
def init(self, builder, theme=None, dirs=None):
"""Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
value of ``builder.config.templates_path``.
*theme* is a :class:`sphinx.theming.Theme` object or None; in the latter
case, *dirs* can be a list of fixed directories to look for templates.
"""
raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
"""Called by the builder to determine if output files are outdated
because of template changes. Return the mtime of the newest template
file that was changed. The default implementation returns ``0``.
"""
return 0
def render(self, template, context):
"""Called by the builder to render a template given as a filename with
a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
"""Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
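# A minimal, hedged sketch of a concrete template bridge built on Jinja2
# (assumptions: jinja2 is installed and the project points the
# ``template_bridge`` config value at this class; it is illustrative only,
# not part of Sphinx itself).
class Jinja2Bridge(TemplateBridge):
    def init(self, builder, theme=None, dirs=None):
        import jinja2
        searchpath = dirs or builder.config.templates_path
        self.env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath))
    def newest_template_mtime(self):
        return 0  # keep the default "never outdated" behaviour
    def render(self, template, context):
        return self.env.get_template(template).render(context)
    def render_string(self, template, context):
        return self.env.from_string(template).render(context)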
|
|
from base64 import urlsafe_b64encode
import glob
import json
import logging
import os
import re
import requests
import time
import uuid
import centinel.utils as utils
logging.getLogger("requests").setLevel(logging.WARNING)
class User:
def __init__(self, config):
self.config = config
self.verify = self.config['server']['verify']
# check for login file
if os.path.isfile(config['server']['login_file']):
with open(config['server']['login_file']) as login_fh:
login_details = json.load(login_fh)
self.username = login_details.get('username')
self.password = login_details.get('password')
self.typeable_handle = login_details.get('typeable_handle')
self.auth = (self.username, self.password)
else:
self.create_user()
def request(self, slug):
url = "%s/%s" % (self.config['server']['server_url'], slug)
try:
req = requests.get(url, auth=self.auth,
proxies=self.config['proxy']['proxy'],
verify=self.verify)
req.raise_for_status()
return req.json()
except Exception as exp:
logging.exception("Exception trying to make request - %s for URL: %s" %
(exp, url))
raise exp
@property
def recommended_version(self):
try:
return int(self.request("version")["version"])
except Exception as exp:
logging.exception("Exception trying to get recommended version: %s " %
(exp))
raise exp
@property
def experiments(self):
try:
return self.request("experiments")["experiments"]
except Exception as exp:
logging.exception("Error trying to get experiments: %s " % (exp))
raise exp
@property
def input_files(self):
try:
return self.request("input_files")["inputs"]
except Exception as exp:
logging.exception("Error trying to get input files: %s " % (exp))
raise exp
@property
def results(self):
try:
return self.request("results")
except Exception as exp:
logging.exception("Error trying to get results: %s " % (exp))
raise exp
@property
def clients(self):
try:
return self.request("clients")
except Exception as exp:
logging.exception("Error trying to get clients: %s " % (exp))
raise exp
def submit_result(self, file_name):
logging.info("Uploading result file - %s", file_name)
with open(file_name) as result_file:
files = {'result': result_file}
url = "%s/%s" % (self.config['server']['server_url'], "results")
timeout = self.config['server']['req_timeout']
try:
req = requests.post(url, files=files, auth=self.auth,
proxies=self.config['proxy']['proxy'],
timeout=timeout,
verify=self.verify)
req.raise_for_status()
if ('delete_after_sync' in self.config['results'].keys()
and self.config['results']['delete_after_sync']):
os.remove(file_name)
except Exception as exp:
logging.exception("Error trying to submit result: %s" % exp)
raise exp
def sync_scheduler(self):
"""Download the scheduler.info file and perform a smart comparison
with what we currently have so that we don't overwrite the
last_run timestamp
To do a smart comparison, we go over each entry in the
server's scheduler file. If a scheduler entry is not present
in the server copy, we delete it in the client copy and if the
scheduler entry is present in the server copy, then we
overwrite the frequency count in the client copy
"""
# get the server scheduler.info file
url = "%s/%s/%s" % (self.config['server']['server_url'],
"experiments", "scheduler.info")
try:
req = requests.get(url, proxies=self.config['proxy']['proxy'],
auth=self.auth,
verify=self.verify)
req.raise_for_status()
except Exception as exp:
logging.exception("Error trying to download scheduler.info: %s" % exp)
raise exp
server_sched = json.loads(req.content)
sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
'scheduler.info')
if not os.path.exists(sched_filename):
with open(sched_filename, 'w') as file_p:
json.dump(server_sched, file_p)
return
with open(sched_filename, 'r') as file_p:
client_sched = json.load(file_p)
# delete any scheduled tasks as necessary
#
# Note: this looks ugly, but we can't modify dictionaries
# while we iterate over them
client_exp_keys = client_sched.keys()
for exp in client_exp_keys:
if exp not in server_sched:
del client_sched[exp]
# and update all the other frequencies
for exp in server_sched:
if exp in client_sched:
client_sched[exp]['frequency'] = server_sched[exp]['frequency']
else:
client_sched[exp] = server_sched[exp]
# write out the results
with open(sched_filename, 'w') as file_p:
json.dump(client_sched, file_p)
def download_experiment(self, name):
logging.info("Downloading experiment - %s", name)
url = "%s/%s/%s" % (self.config['server']['server_url'],
"experiments", name)
try:
req = requests.get(url, proxies=self.config['proxy']['proxy'],
auth=self.auth,
verify=self.verify)
req.raise_for_status()
except Exception as exp:
logging.exception("Error trying to download experiments: %s" % exp)
raise exp
name = "%s" % name
with open(os.path.join(self.config['dirs']['experiments_dir'], name),
"w") as exp_fh:
exp_fh.write(req.content)
def download_input_file(self, name):
logging.info("Downloading input data file - %s", name)
url = "%s/%s/%s" % (self.config['server']['server_url'],
"input_files", name)
try:
req = requests.get(url, proxies=self.config['proxy']['proxy'],
auth=self.auth,
verify=self.verify)
req.raise_for_status()
except Exception as exp:
logging.exception("Error trying to download experiments: %s" % exp)
raise exp
name = "%s" % name
with open(os.path.join(self.config['dirs']['data_dir'], name),
"w") as exp_fh:
exp_fh.write(req.content)
def register(self, username, password):
logging.info("Registering new user %s" % (username))
url = "%s/%s" % (self.config['server']['server_url'], "register")
payload = {'username': username, 'password': password,
'is_vpn': self.config['user'].get('is_vpn')}
headers = {'content-type': 'application/json'}
try:
req = requests.post(url, data=json.dumps(payload),
proxies=self.config['proxy']['proxy'],
headers=headers,
verify=self.verify)
req.raise_for_status()
return req.json()
except Exception as exp:
logging.exception("Error trying to submit registration URL: %s " % exp)
raise exp
def set_country(self, country):
url = "%s/%s/%s" % (self.config['server']['server_url'],
"set_country", country)
try:
req = requests.get(url,
auth=self.auth,
proxies=self.config['proxy']['proxy'],
verify=self.verify)
req.raise_for_status()
return req.json()
except Exception as exp:
logging.exception("Error trying to set country: %s " % exp)
raise exp
def set_ip(self, ip):
url = "%s/%s/%s" % (self.config['server']['server_url'],
"set_ip", ip)
try:
req = requests.get(url,
auth=self.auth,
proxies=self.config['proxy']['proxy'],
verify=self.verify)
req.raise_for_status()
return req.json()
except Exception as exp:
logging.exception("Error trying to set ip: %s " % exp)
raise exp
def create_user(self):
self.username = str(uuid.uuid4())
self.password = os.urandom(64).encode('base-64')
self.auth = (self.username, self.password)
self.typeable_handle = None
try:
register_results = self.register(self.username, self.password)
if 'typeable_handle' in register_results.keys():
self.typeable_handle = register_results['typeable_handle']
with open(self.config['server']['login_file'], "w") as login_fh:
login_details = {'username': self.username,
'password': self.password}
if self.typeable_handle is not None:
login_details['typeable_handle'] = self.typeable_handle
json.dump(login_details, login_fh)
except Exception as exp:
logging.exception("Unable to register: %s" % str(exp))
raise exp
def informed_consent(self):
"""Create a URL for the user to give their consent through"""
if self.typeable_handle is None:
consent_url = [self.config['server']['server_url'],
"/get_initial_consent?username="]
consent_url.append(urlsafe_b64encode(self.username))
consent_url.append("&password=")
consent_url.append(urlsafe_b64encode(self.password))
else:
consent_url = [self.config['server']['server_url'],
"/consent/"]
consent_url.append(self.typeable_handle)
consent_url = "".join(consent_url)
print "Please go to %s to give your consent." % (consent_url)
return consent_url
def sync(config):
logging.info("Starting sync with %s", config['server']['server_url'])
start = time.time()
try:
user = User(config)
except Exception as exp:
logging.exception("Unable to create user: %s" % str(exp))
return
# send all results (.bz2)
result_files = glob.glob(os.path.join(config['dirs']['results_dir'],
'[!_]*.bz2'))
# only upload pcaps if it is allowed
if config['results']['upload_pcaps'] is False:
for pcap_file in glob.glob(os.path.join(config['dirs']['results_dir'],
'[!_]*.pcap.bz2')):
if pcap_file in result_files:
result_files.remove(pcap_file)
for path in result_files:
try:
user.submit_result(path)
except Exception as exp:
if re.search("418", str(exp)) is not None:
logging.error("You have not completed the informed consent "
"and will be unable to submit results or get "
"new experiments until you do.")
user.informed_consent()
return
else:
logging.error("Unable to send result file: %s" % str(exp))
raise exp
if time.time() - start > config['server']['total_timeout']:
logging.error("Interaction with server took too long. Preempting")
return
# determine how to sync the experiment files
# Note: we are not checking anything that starts with _
client_exps = utils.hash_folder(config['dirs']['experiments_dir'],
'[!_]*')
try:
server_exps = user.experiments
except Exception as exp:
if re.search("418", str(exp)) is not None:
logging.error("You have not completed the informed consent and "
"will be unable to submit results or get new "
"experiments until you do.")
user.informed_consent()
return
else:
logging.error("Error collecting experiments: %s" % exp)
raise exp
if time.time() - start > config['server']['total_timeout']:
logging.error("Interaction with server took too long. Preempting")
return
dload_exps, del_exps = utils.compute_files_to_download(client_exps,
server_exps)
# delete the files that aren't on the server
for exp_file in del_exps:
filename = os.path.join(config['dirs']['experiments_dir'], exp_file)
os.remove(filename)
# get the files that have changed or we don't have
for exp_file in dload_exps:
try:
if exp_file != "scheduler.info":
user.download_experiment(exp_file)
else:
try:
user.sync_scheduler()
except Exception as e:
logging.exception("Scheduler sync failed: %s", str(e))
except Exception as e:
logging.exception("Unable to download experiment file: %s", str(e))
if time.time() - start > config['server']['total_timeout']:
logging.error("Interaction with server took too long. Preempting")
return
# determine how to sync the input files
client_inputs = utils.hash_folder(config['dirs']['data_dir'], '[!_]*')
try:
server_inputs = user.input_files
except Exception as exp:
logging.exception("Unable to retrive user inputs due to Exception: "
"%s. Preempting" % exp)
return
if time.time() - start > config['server']['total_timeout']:
logging.error("Interaction with server took too long. Preempting")
return
dload_inputs, del_inputs = utils.compute_files_to_download(client_inputs,
server_inputs)
# delete the files that aren't on the server
for input_file in del_inputs:
filename = os.path.join(config['dirs']['data_dir'], input_file)
os.remove(filename)
# get the files that have changed or we don't have
for input_file in dload_inputs:
try:
user.download_input_file(input_file)
except Exception as e:
logging.exception("Unable to download input file %s", str(e))
if time.time() - start > config['server']['total_timeout']:
logging.error("Interaction with server took too long. Preempting")
return
logging.info("Finished sync with %s", config['server']['server_url'])
def set_vpn_info(config, ip=None, country=None):
logging.info("Setting country as "
"%s and IP address as %s" % (country, ip))
try:
user = User(config)
except Exception as exp:
logging.exception("Unable to create user: %s" % str(exp))
return False
if country is not None:
user.set_country(country)
if ip is not None:
user.set_ip(ip)
def get_meta(config, ip=''):
url = "%s/%s/%s" % (config['server']['server_url'],
"meta", ip)
try:
req = requests.get(url,
proxies=config['proxy']['proxy'],
verify=config['server']['verify'],
timeout=10)
req.raise_for_status()
return req.json()
except Exception as exp:
logging.exception("Error trying to get metadata: %s " % exp)
raise exp
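# A hedged sketch of the nested config dict this module expects; the key
# names are taken from the accesses above, but every value is a placeholder:
#
# config = {
#     'server': {'server_url': 'https://example.invalid',
#                'login_file': 'login.json', 'verify': True,
#                'req_timeout': 30, 'total_timeout': 300},
#     'proxy': {'proxy': None},
#     'dirs': {'experiments_dir': 'experiments', 'data_dir': 'data',
#              'results_dir': 'results'},
#     'results': {'upload_pcaps': False, 'delete_after_sync': False},
#     'user': {'is_vpn': False},
# }
# sync(config)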
|
|
#!/usr/bin/env python2
from math import *
import re
import logging
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA, TruncatedSVD
import seaborn as sns
import nltk as nl
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
import os
from scipy.spatial.distance import jaccard
import yaml
#from scrapy.conf import settings
#from ConfigParser import SafeConfigParser
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class AnalyzeArticles:
def __init__(self):
# The values below can be changed to tweak the recommender algorithm
self.n_most_similar = 10
self.n_features_title = 25
self.n_features_content = 50
#commented by shwenag
#self.n_features_tags = 25
self.n_features_total = 30
# Do not change the values below
self.df = None
self.df_article_vectors = None
self.similarity_score_dict = {}
self.X = None
self.X_title = None
self.X_content = None
self.type = 'Cos'
self.is_pca = 'PCA'
self.is_weight = 'TF-IDF'
def run(self):
"""
Load and transform the articles, train a content-based recommender system and make a recommendation for each
article.
:return:
"""
self.load_config()
self.load_articles()
self.vectorize_articles()
self.reduce_dimensionality_articles()
self.visualize_data()  # comment this call out to skip the 2-D visualization
self.find_similar_articles()
self.save_output_to_csv()
# Load data
def load_config(self):
# import config files
#print("Reading configuration")
logging.debug("Reading configuration")
with open("ContentConfig.yml", 'r') as ymlfile:
cfg = yaml.safe_load(ymlfile)
self.ip_file_path = cfg['project_test_conf']['ip_file_path']
self.ip_file_name = cfg['project_test_conf']['ip_file_name']
self.op_file_path = cfg['project_test_conf']['op_file_path']
self.op_file_name = cfg['project_test_conf']['op_file_name']
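# A hedged sketch of the ContentConfig.yml layout load_config() expects; the
# paths and file names are placeholders:
#
# project_test_conf:
#   ip_file_path: /path/to/input
#   ip_file_name: TestSet300_User_Ratings.xlsx
#   op_file_path: /path/to/output
#   op_file_name: ContentOutput.csv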
def load_articles(self):
"""
Loads the DataFrame with all the articles.
Return: DataFrame with the Sno, title and content text of all articles
"""
#parser = SafeConfigParser()
#parser.read('Config.ini')
#file_path = settings['IP_FILE_PATH']
#file_name = settings['IP_FILE_NAME']
#logging.debug("Directory Name : {0} and File name is {1} \n".format(file_path,file_name))
#logging.debug("Directory Name : {0} and File name is {1} \n".format(parser.get('Article_input_dir', 'ip_file_path'),parser.get('Article_input_file', 'ip_file_name'))
#file_path = '/Users/shwetanknagar/Downloads/Personal/Project Eventstreet/Boconni Project'
#file_name = os.path.basename("TestSet300_User_Ratings.xlsx")
path = os.path.join(self.ip_file_path, self.ip_file_name)
#commented by shwenag
#self.df = pd.read_csv('TrainSet700_User_Ratings.xlsx', encoding='utf-8') # Load articles in a DataFrame
self.df = pd.read_excel(path, na_values=['NA'], parse_cols = "A,B,C")
#self.df = self.df[['Sno', 'title', 'content_text']] # Slice to remove redundant columns
#commenting the below by shwenag
print(self.df)
logging.debug("Number of articles: {0} and no of columns are {1} \n".format(len(self.df),self.df.shape))
def vectorize_articles(self):
self.vectorize_title()  # Vectorize article titles
self.vectorize_content()  # Vectorize article content
#Commented by shwenag
#self.vectorize_tags() # Add title as dummies
# Concatenate all article vectors, i.e. title and content
article_metrics = (self.X_title, self.X_content)
self.X = np.concatenate(article_metrics, axis=1)  # stack title and content features column-wise into one matrix
logging.debug("Number of features in total DataFrame: {0}".format(self.X.shape[1]))
def get_vectorizer(self, ngram_range=(1, 3), min_df=2, max_df=1.0):
"""
Define a binary CountVectorizer (Feature Presence) using n-grams and min and max document frequency
:param ngram_range: n-grams are created for all numbers within this range
:param min_df: min document frequency of features
:param max_df: max document frequency of features
:return:
"""
if self.is_weight == 'FP':#Feature Presence
vectorizer = CountVectorizer(ngram_range=ngram_range,
tokenizer=self.tokenize,
min_df=min_df,
max_df=max_df,
binary=True,
stop_words='english')
if self.is_weight == 'TF-IDF':  # TF-IDF weighting
vectorizer = TfidfVectorizer(ngram_range=ngram_range,
tokenizer=self.tokenize,
min_df=min_df,
max_df=max_df,
binary=True,
stop_words='english')
return vectorizer
@staticmethod
def tokenize(text):
"""
Tokenizes sequences of text and stems the tokens.
:param text: String to tokenize
:return: List with stemmed tokens
"""
tokens = nl.WhitespaceTokenizer().tokenize(text)
tokens = list(set(re.sub("[^a-zA-Z\']", "", token) for token in tokens))
tokens = [word for word in tokens if word not in stopwords.words('english')]
tokens = list(set(re.sub("[^a-zA-Z]", "", token) for token in tokens))
stems = []
stemmer = SnowballStemmer("english")
for token in tokens:
token = stemmer.stem(token)
if token != "":
stems.append(token)
return stems
def vectorize_title(self):
"""
Vectorize the titles of all articles.
:return:
"""
# Define vectorizer and apply on content to obtain an M x N array
vectorizer = self.get_vectorizer(ngram_range=(1, 2),
min_df=2)
self.X_title = vectorizer.fit_transform(self.df['title'])
self.X_title = self.X_title.toarray()
self.X_title = np.array(self.X_title, dtype=float)
logging.debug("Number of features in title: {0}".format(len(vectorizer.vocabulary_)))
# Reduce dimensionality of title features
self.X_title = self.reduce_dimensionality(self, self.X_title, self.n_features_title)
def vectorize_content(self):
"""
Vectorize the content of all articles.
:return:
"""
# Define vectorizer and apply on content to obtain an M x N array
vectorizer = self.get_vectorizer(ngram_range=(1, 1),
min_df=4,
max_df=0.3)
self.X_content = vectorizer.fit_transform(self.df['content_text'])
self.X_content = self.X_content.toarray()
self.X_content = np.array(self.X_content, dtype=float)
logging.debug("Number of features in content: {0}".format(len(vectorizer.vocabulary_)))
# Reduce dimensionality of content features
self.X_content = self.reduce_dimensionality(self, self.X_content, n_features=self.n_features_content)
def reduce_dimensionality_articles(self):
"""
Reduce the dimensionality of the vectorized articles.
:return:
"""
# Reduce dimensionality
self.X = self.reduce_dimensionality(self, self.X, n_features=self.n_features_total)
@staticmethod
def reduce_dimensionality(self, X, n_features):
"""
Apply PCA or SVD to reduce dimension to n_features.
:param X:
:param n_features:
:return:
"""
# Initialize reduction method: PCA or SVD
if self.is_pca == 'PCA':
reducer = PCA(n_components=n_features)
elif self.is_pca == 'SVD':
reducer = TruncatedSVD(n_components=n_features)
# Fit and transform data to n_features-dimensional space
reducer.fit(X)
X = reducer.transform(X)
logging.debug("Reduced number of features to {0}".format(n_features))
logging.debug("Percentage explained: %s\n" % reducer.explained_variance_ratio_.sum())
return X
def prepare_dataframe(self, X):
"""
Prepare DataFrame for further use, e.g. finding similar articles or visualizing articles.
:param X:
:return: Dataframe with all articles and its corresponding vectorized coordinates + other article metrics
"""
df_article_vectors = pd.DataFrame(None)
#df_article_vectors['tags_first'] = self.df['tags_first']
#df_article_vectors['author'] = self.df['author']
df_article_vectors['title'] = self.df['title']
df_article_vectors['numbers'] = range(0, len(df_article_vectors))
df_article_vectors['coordinates'] = df_article_vectors['numbers'].apply(lambda index: X[index, :])
del df_article_vectors['numbers']
# Initialize dataframe by appending new columns to store the titles of the n most similar articles
for i in range(0, self.n_most_similar):
df_article_vectors['most_similar_'+str(i+1)] = ""
return df_article_vectors
# Visualize data
def visualize_data(self):
"""
Transform the DataFrame to two dimensions and visualize the data. The first tags are used as labels.
:return:
"""
logging.debug("Preparing visualization of DataFrame")
# Reduce dimensionality to 2 features for visualization purposes
X_visualization = self.reduce_dimensionality(self, self.X, n_features=2)
df = self.prepare_dataframe(X_visualization)
# Set X and Y coordinate for each articles
df['X coordinate'] = df['coordinates'].apply(lambda x: x[0])  # first reduced feature
df['Y coordinate'] = df['coordinates'].apply(lambda x: x[1])  # second reduced feature
'''
# Create a list of markers, each tag has its own marker
n_tags_first = len(self.df['tags_first'].unique())
markers_choice_list = ['o', 's', '^', '.', 'v', '<', '>', 'D']
markers_list = [markers_choice_list[i % 8] for i in range(n_tags_first)]
'''
# Create scatter plot
sns.lmplot("X coordinate",
"Y coordinate",
#hue="tags_first",#commented by shwenag
data=df,
fit_reg=False,
#markers=markers_list,#commented by shwenag
scatter_kws={"s": 150})
# Adjust borders and add title
sns.set(font_scale=2)
sns.plt.title('Visualization of articles in a 2-dimensional space')
sns.plt.subplots_adjust(right=0.80, top=0.90, left=0.12, bottom=0.12)
# Show plot
sns.plt.show()
def find_similar_articles(self):
"""
Find the n most similar articles for each article in the DataFrame
:return:
"""
# Prepare DataFrame by assigning each article in the DataFrame its corresponding coordinates
self.df_article_vectors = self.prepare_dataframe(self.X)
# Calculate similarity for all articles and defines the n most similar articles
self.calculate_similarity_scores_of_all_articles()
# Find the n most similar articles using the similarity score dictionary
self.find_n_most_similar_articles()
# Remove redundant columns
del self.df_article_vectors['coordinates']  # drop the raw feature vectors once similarities are computed
def calculate_similarity_scores_of_all_articles(self):
"""
Calculate the similarity scores of all articles compared to all other articles.
:return:
"""
# Iterate over each article in DataFrame
for index1, row1 in self.df_article_vectors.iterrows():
# Initialize a dict to store the similarity scores to all other articles in
similarity_scores = {}
# Iterate again over all articles to calculate the similarity between article 1 and 2
for index2, row2 in self.df_article_vectors.iterrows():
if index1 != index2:
similarity_scores[index2] = self.calculate_similarity(row1['coordinates'], row2['coordinates'], self.type)  # 'coordinates' holds the article's reduced feature vector
# Save in dictionary
self.similarity_score_dict[index1] = similarity_scores
def find_n_most_similar_articles(self):
"""
Find the n most similar articles with the highest similarity score for each article in the DataFrame.
:return:
"""
# Iterate over each article in DataFrame
for index, row in self.df_article_vectors.iterrows():
# Get the similarity scores of the current article compared to all other articles
similarity_scores = self.similarity_score_dict[index]
# Find the highest similarity scores in the similarity_score_dict until we have found the n most similar.
for i in range(0, self.n_most_similar):
# Find most similar article, i.e. with highest cosine similarity. Note: if Euclidean distance, then min!
most_similar_article_index = max(similarity_scores, key=similarity_scores.get)
most_similar_article_score = similarity_scores[most_similar_article_index]
del similarity_scores[most_similar_article_index]
# Find corresponding title and set it as most similar article i in DataFrame
title = self.df_article_vectors.loc[most_similar_article_index]['title'].encode('utf-8')
title_plus_score = "{} ({:.2f})".format(title, most_similar_article_score)
self.df_article_vectors.set_value(index, 'most_similar_'+str(i+1), title_plus_score)
def calculate_similarity(self, article1, article2, type):
"""
Calculate the similarity between two articles, e.g. the cosine similarity or the Euclidean distance.
:param article1: coordinates (feature values) of article 1
:param article2: coordinates (feature values) of article 2
:return:
"""
if self.type == 'Cos':
similarity = self.cosine_similarity(article1, article2) # Cosine similarity formula
if self.type == 'Euc':
similarity = self.euclidean_distance(article1, article2) # Euclidean distance formula
'''
if self.type == 'Jac':
similarity = self.calculate_jaccard_score(article1, article2) # jaccard distance formula
'''
similarity = "{0:.2f}".format(round(similarity, 2))
return float(similarity)
@staticmethod
def cosine_similarity(x, y):
def square_rooted(v):
return round(sqrt(sum([a * a for a in v])), 3)
numerator = sum(a * b for a, b in zip(x, y))
denominator = square_rooted(x) * square_rooted(y)
return round(numerator/float(denominator), 3)
@staticmethod
def euclidean_distance(x, y):
return np.linalg.norm(x-y)
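# A small worked example (the vectors are made up, not from the data set):
# for x = [1, 0, 1] and y = [1, 1, 0] the dot product is 1 and both norms
# round to 1.414, so cosine_similarity(x, y) ~= 0.5, while
# euclidean_distance(x, y) = sqrt(2) ~= 1.414 (for distances, smaller means
# more similar, hence the min/max note in find_n_most_similar_articles above).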
'''
def calculate_jaccard_score(dataset, vector1, vector2):
jaccard_score = jaccard(dataset[vector1], dataset[vector2])
return jaccard_score
'''
"""
Save output DataFrame to csv file
:return:
"""
def save_output_to_csv(self):
#op_file_path = '/Users/shwetanknagar/Downloads/Personal/Project Eventstreet/Boconni Project'
#op_file_name = os.path.basename("ContentOutput.csv")
path = os.path.join(self.op_file_path, self.op_file_name)
print(path)
#file_name = 'output.csv'
try:
self.df_article_vectors.to_csv(path, encoding='utf-8', sep=',')
except IOError:
logging.warning("Error while trying to save output file to %s!" % path)
if __name__ == "__main__":
AnalyzeArticles().run()
|
|
"""
@package mi.instrument.harvard.massp.ooicore.driver
@file marine-integrations/mi/instrument/harvard/massp/ooicore/driver.py
@author Peter Cable
@brief Driver for the ooicore
Release notes:
Driver for the MASSP in-situ mass spectrometer
"""
import functools
import time
import mi.core.log
from mi.core.driver_scheduler import DriverSchedulerConfigKey, TriggerType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.port_agent_client import PortAgentClient
from mi.core.instrument.protocol_param_dict import ParameterDictType
from mi.core.instrument.instrument_protocol import InstrumentProtocol
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.instrument.instrument_driver import ResourceAgentEvent
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.instrument_driver import ConfigMetadataKey
from mi.core.common import BaseEnum, Units
from mi.core.exceptions import InstrumentParameterException, InstrumentProtocolException
from mi.core.exceptions import InstrumentConnectionException
from mi.core.instrument.instrument_fsm import InstrumentFSM
from mi.instrument.harvard.massp.common import MASSP_STATE_ERROR, MASSP_CLEAR_ERROR
import mi.instrument.harvard.massp.mcu.driver as mcu
import mi.instrument.harvard.massp.rga.driver as rga
import mi.instrument.harvard.massp.turbo.driver as turbo
__author__ = 'Peter Cable'
__license__ = 'Apache 2.0'
log = mi.core.log.get_logger()
META_LOGGER = mi.core.log.get_logging_metaclass()
###
# Driver Constant Definitions
###
NEWLINE = '\r'
MASTER = 'massp'
RGA = 'rga'
TURBO = 'turbo'
MCU = 'mcu'
DA_COMMAND_DELIMITER = ':'
DA_EXIT_MAX_RETRIES = 3
class DataParticleType(mcu.DataParticleType, turbo.DataParticleType, rga.DataParticleType):
"""
Data particle types produced by this driver
"""
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
POLL = DriverProtocolState.POLL
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
CALIBRATE = DriverProtocolState.CALIBRATE
REGEN = 'PROTOCOL_STATE_REGEN'
ERROR = MASSP_STATE_ERROR
MANUAL_OVERRIDE = 'PROTOCOL_STATE_MANUAL_OVERRIDE'
class ProtocolEvent(mcu.ProtocolEvent, turbo.ProtocolEvent, rga.ProtocolEvent):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
GET = DriverEvent.GET
SET = DriverEvent.SET
DISCOVER = DriverEvent.DISCOVER
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
CALIBRATE = DriverEvent.CALIBRATE
STOP = 'PROTOCOL_EVENT_STOP'
START_NAFION = 'PROTOCOL_EVENT_START_NAFION_REGEN'
START_ION = 'PROTOCOL_EVENT_START_ION_REGEN'
ERROR = 'PROTOCOL_EVENT_ERROR'
CLEAR = MASSP_CLEAR_ERROR
POWEROFF = 'PROTOCOL_EVENT_POWEROFF'
STOP_REGEN = 'PROTOCOL_EVENT_STOP_REGEN'
START_MANUAL = 'PROTOCOL_EVENT_START_MANUAL_OVERRIDE'
STOP_MANUAL = 'PROTOCOL_EVENT_STOP_MANUAL_OVERRIDE'
GET_SLAVE_STATES = 'PROTOCOL_EVENT_GET_SLAVE_STATES'
REGEN_COMPLETE = 'PROTOCOL_EVENT_REGEN_COMPLETE'
class Capability(mcu.Capability, turbo.Capability, rga.Capability):
"""
Protocol events that should be exposed to users (subset of above).
"""
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
CALIBRATE = ProtocolEvent.CALIBRATE
START_NAFION = ProtocolEvent.START_NAFION
START_ION = ProtocolEvent.START_ION
CLEAR = ProtocolEvent.CLEAR
POWEROFF = ProtocolEvent.POWEROFF
STOP_REGEN = ProtocolEvent.STOP_REGEN
START_MANUAL = ProtocolEvent.START_MANUAL
STOP_MANUAL = ProtocolEvent.STOP_MANUAL
GET_SLAVE_STATES = ProtocolEvent.GET_SLAVE_STATES
GET = DriverEvent.GET
SET = DriverEvent.SET
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
DISCOVER = ProtocolEvent.DISCOVER
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
SAMPLE_INTERVAL = 'massp_sample_interval'
class SlaveProtocol(BaseEnum):
"""
Names for the slave protocols
"""
TURBO = TURBO
RGA = RGA
MCU = MCU
class ScheduledJob(BaseEnum):
"""
Scheduled jobs in this driver
"""
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
###############################################################################
# Driver
###############################################################################
# noinspection PyProtectedMember,PyMethodMayBeStatic
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
__metaclass__ = META_LOGGER
def __init__(self, evt_callback, refdes=None):
"""
Driver constructor.
@param evt_callback Driver process event callback.
"""
#Construct superclass.
SingleConnectionInstrumentDriver.__init__(self, evt_callback, refdes)
self._slave_protocols = {}
def _massp_got_config(self, name, port_agent_packet):
data = port_agent_packet.get_data()
configuration = {'name': name}
for each in data.split('\n'):
if each == '':
continue
key, value = each.split(None, 1)
try:
value = int(value)
except ValueError:
pass
configuration[key] = value
self._driver_event(DriverAsyncEvent.DRIVER_CONFIG, configuration)
########################################################################
# Disconnected handlers.
########################################################################
def _handler_disconnected_connect(self, *args, **kwargs):
"""
Establish communications with the device via port agent / logger and
construct and initialize a protocol FSM for device interaction.
@return (next_state, result) tuple, (DriverConnectionState.CONNECTED, None) if successful.
@raises InstrumentConnectionException if the attempt to connect failed.
"""
self._build_protocol()
try:
for name, connection in self._connection.items():
connection.init_comms(self._slave_protocols[name].got_data,
self._slave_protocols[name].got_raw,
functools.partial(self._massp_got_config, name),
self._got_exception,
self._lost_connection_callback)
self._slave_protocols[name]._connection = connection
next_state = DriverConnectionState.CONNECTED
except InstrumentConnectionException as e:
log.error("Connection Exception: %s", e)
log.error("Instrument Driver returning to unconfigured state.")
next_state = DriverConnectionState.UNCONFIGURED
return next_state, None
########################################################################
# Connected handlers.
########################################################################
def _handler_connected_disconnect(self, *args, **kwargs):
"""
Disconnect from the device via port agent / logger and destroy the protocol FSM.
@returns: (next_state, result) tuple, (DriverConnectionState.DISCONNECTED, None) if successful.
"""
for connection in self._connection.values():
connection.stop_comms()
scheduler = self._protocol._scheduler
if scheduler:
scheduler._scheduler.shutdown()
scheduler = None
self._protocol = None
self._slave_protocols = {}
return DriverConnectionState.UNCONFIGURED, None
def _handler_connected_connection_lost(self, *args, **kwargs):
"""
The device connection was lost. Stop comms, destroy protocol FSM and revert to disconnected state.
@returns: (next_state, result) tuple, (DriverConnectionState.DISCONNECTED, None).
"""
for connection in self._connection.values():
connection.stop_comms()
scheduler = self._protocol._scheduler
if scheduler:
scheduler._scheduler.shutdown()
scheduler = None
self._protocol = None
self._slave_protocols = {}
# Send async agent state change event.
log.info("_handler_connected_connection_lost: sending LOST_CONNECTION "
"event, moving to DISCONNECTED state.")
self._driver_event(DriverAsyncEvent.AGENT_EVENT,
ResourceAgentEvent.LOST_CONNECTION)
return DriverConnectionState.UNCONFIGURED, None
########################################################################
# Helpers.
########################################################################
def _build_connection(self, *args, **kwargs):
"""
Constructs and returns a Connection object according to the given
configuration. The connection object is a LoggerClient instance in
this base class. Subclasses can override this operation as needed.
The value returned by this operation is assigned to self._connection
and also to self._protocol._connection upon entering the
DriverConnectionState.CONNECTED state.
@param all_configs configuration dict
@returns a dictionary of Connection instances, which will be assigned to self._connection
@throws InstrumentParameterException Invalid configuration.
"""
all_configs = kwargs.get('config', None) # via kwargs
if all_configs is None and len(args) > 0:
all_configs = args[0] # via first argument
if all_configs is None:
all_configs = {MCU: self._get_config_from_consul(self.refdes + '-MCU'),
TURBO: self._get_config_from_consul(self.refdes + '-TURBO'),
RGA: self._get_config_from_consul(self.refdes + '-RGA')}
for key in all_configs:
if all_configs[key] is None:
raise InstrumentParameterException('No %s port agent config supplied and failed to auto-discover' % key)
connections = {}
for name, config in all_configs.items():
if not isinstance(config, dict):
continue
if 'mock_port_agent' in config:
mock_port_agent = config['mock_port_agent']
# check for validity here...
if mock_port_agent is not None:
connections[name] = mock_port_agent
else:
try:
addr = config['addr']
port = config['port']
cmd_port = config.get('cmd_port')
if isinstance(addr, basestring) and isinstance(port, int) and len(addr) > 0:
connections[name] = PortAgentClient(addr, port, cmd_port)
else:
raise InstrumentParameterException('Invalid comms config dict.')
except (TypeError, KeyError):
raise InstrumentParameterException('Invalid comms config dict.')
return connections
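# A hedged example (not from the original source) of the per-slave port agent
# configuration shape consumed by _build_connection above; hosts and port
# numbers are made up for illustration:
#
#   {'mcu':   {'addr': 'localhost', 'port': 5001, 'cmd_port': 5011},
#    'turbo': {'addr': 'localhost', 'port': 5002, 'cmd_port': 5012},
#    'rga':   {'addr': 'localhost', 'port': 5003, 'cmd_port': 5013}}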
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(self._driver_event)
callbacks = {}
for name in SlaveProtocol.list():
callbacks[name] = functools.partial(self._protocol._slave_protocol_event, name=name)
self._slave_protocols[MCU] = mcu.Protocol(mcu.Prompt, NEWLINE, callbacks[MCU])
self._slave_protocols[RGA] = rga.Protocol(rga.Prompt, NEWLINE, callbacks[RGA])
self._slave_protocols[TURBO] = turbo.Protocol(turbo.Prompt, NEWLINE, callbacks[TURBO])
for name in SlaveProtocol.list():
self._protocol.register_slave_protocol(name, self._slave_protocols[name])
# build the dynamic event handlers for manual override
self._protocol._add_manual_override_handlers()
###########################################################################
# Protocol
###########################################################################
# noinspection PyMethodMayBeStatic,PyUnusedLocal,PyProtectedMember
class Protocol(InstrumentProtocol):
"""
Instrument protocol class
Subclasses InstrumentProtocol
"""
__metaclass__ = META_LOGGER
def __init__(self, driver_event):
"""
Protocol constructor.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
InstrumentProtocol.__init__(self, driver_event)
# Build protocol state machine.
self._protocol_fsm = InstrumentFSM(ProtocolState, ProtocolEvent,
ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
handlers = {
ProtocolState.UNKNOWN: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.DISCOVER, self._handler_unknown_discover),
],
ProtocolState.COMMAND: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
(ProtocolEvent.GET, self._handler_command_get),
(ProtocolEvent.SET, self._handler_command_set),
(ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
(ProtocolEvent.ACQUIRE_SAMPLE, self._handler_command_start_poll),
(ProtocolEvent.CALIBRATE, self._handler_command_start_calibrate),
(ProtocolEvent.START_NAFION, self._handler_command_start_nafion_regen),
(ProtocolEvent.START_ION, self._handler_command_start_ion_regen),
(ProtocolEvent.ERROR, self._handler_error),
(ProtocolEvent.POWEROFF, self._handler_command_poweroff),
(ProtocolEvent.START_MANUAL, self._handler_command_start_manual),
],
ProtocolState.AUTOSAMPLE: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.ACQUIRE_SAMPLE, self._handler_autosample_acquire_sample),
(ProtocolEvent.STOP, self._handler_stop_generic),
(ProtocolEvent.STOP_AUTOSAMPLE, self._handler_stop_generic),
(ProtocolEvent.ERROR, self._handler_error),
],
ProtocolState.POLL: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.STOP, self._handler_stop_generic),
(ProtocolEvent.ERROR, self._handler_error),
],
ProtocolState.ERROR: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.CLEAR, self._handler_error_clear),
],
ProtocolState.CALIBRATE: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.STOP, self._handler_stop_generic),
(ProtocolEvent.ERROR, self._handler_error),
],
ProtocolState.REGEN: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.STOP_REGEN, self._handler_stop_regen),
(ProtocolEvent.REGEN_COMPLETE, self._handler_regen_complete),
(ProtocolEvent.ERROR, self._handler_error),
],
ProtocolState.DIRECT_ACCESS: [
(ProtocolEvent.ENTER, self._handler_direct_access_enter),
(ProtocolEvent.EXIT, self._handler_direct_access_exit),
(ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
(ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
],
ProtocolState.MANUAL_OVERRIDE: [
(ProtocolEvent.ENTER, self._handler_generic_enter),
(ProtocolEvent.EXIT, self._handler_generic_exit),
(ProtocolEvent.STOP_MANUAL, self._handler_manual_override_stop),
(ProtocolEvent.GET_SLAVE_STATES, self._handler_manual_get_slave_states),
],
}
for state in handlers:
for event, handler in handlers[state]:
self._protocol_fsm.add_handler(state, event, handler)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
# Start the state machine in the UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
# commands sent to device, to be filtered in responses for telnet DA
self._sent_cmds = []
self._slave_protocols = {}
self.initialize_scheduler()
def _add_manual_override_handlers(self):
for slave in self._slave_protocols:
for event in self._slave_protocols[slave]._cmd_dict._cmd_dict:
self._protocol_fsm.add_handler(ProtocolState.MANUAL_OVERRIDE,
event, self._build_override_handler(slave, event))
def _build_override_handler(self, slave, event):
log.debug('Building event handler for protocol: %s event: %s', slave, event)
def inner():
return None, self._slave_protocols[slave]._protocol_fsm.on_event(event)
return inner
def register_slave_protocol(self, name, protocol):
"""
@param name: slave protocol name
@param protocol: slave protocol instance
@return: None
"""
self._slave_protocols[name] = protocol
def _slave_protocol_event(self, event, *args, **kwargs):
"""
Handle an event from a slave protocol.
@param event: event to be processed
"""
name = kwargs.get('name')
if name is not None and name in self._slave_protocols:
# only react to slave protocol events once we have transitioned out of unknown
if self.get_current_state() != ProtocolState.UNKNOWN or event == DriverAsyncEvent.ERROR:
if event == DriverAsyncEvent.STATE_CHANGE:
self._react()
elif event == DriverAsyncEvent.CONFIG_CHANGE:
# do nothing, we handle this ourselves in set_param
pass
else:
# pass the event up to the instrument agent
log.debug('Passing event up to the Instrument agent: %r %r %r', event, args, kwargs)
self._driver_event(event, *args)
def _build_param_dict(self):
"""
Populate the parameter dictionary with parameters.
For each parameter key, add match string, match lambda function,
and value formatting function for set commands.
"""
self._param_dict.add(Parameter.SAMPLE_INTERVAL, '', None, int,
type=ParameterDictType.INT,
display_name='Autosample Interval',
description='Interval between sample starts during autosample state',
units=Units.SECOND)
def _build_command_dict(self):
"""
Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.CALIBRATE, display_name="Acquire Calibration Samples")
self._cmd_dict.add(Capability.START_ION, display_name="Start Ion Chamber Regeneration")
self._cmd_dict.add(Capability.START_NAFION, display_name="Start Nafion Regeneration")
self._cmd_dict.add(Capability.STOP_REGEN, display_name="Stop Current Regeneration")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.POWEROFF, display_name='Low Power State')
self._cmd_dict.add(Capability.GET_SLAVE_STATES,
display_name='Get Slave States')
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _react(self):
"""
Determine if an action is necessary based on the states of the slave protocols.
(MCU STATE, TURBO STATE, RGA STATE) : (TARGET, EVENT)
The specified event will be sent to the specified target.
"""
state = self.get_current_state()
slave_states = self._get_slave_states()
if MASSP_STATE_ERROR in slave_states:
return self._error()
if state == ProtocolState.REGEN and slave_states[0] == ProtocolState.COMMAND:
self._async_raise_fsm_event(ProtocolEvent.REGEN_COMPLETE)
# these actions are only applicable in POLL, AUTOSAMPLE or CALIBRATE states
if state not in [ProtocolState.POLL, ProtocolState.AUTOSAMPLE, ProtocolState.CALIBRATE]:
return
mps = mcu.ProtocolState
tps = turbo.ProtocolState
rps = rga.ProtocolState
action_map = {
# Waiting Turbo (RGA is off)
(mps.WAITING_TURBO, tps.COMMAND, rps.COMMAND): (TURBO, turbo.Capability.START_TURBO),
(mps.WAITING_TURBO, tps.AT_SPEED, rps.COMMAND): (MCU, mcu.Capability.START2),
# Waiting RGA
(mps.WAITING_RGA, tps.AT_SPEED, rps.SCAN): (MCU, mcu.Capability.SAMPLE),
(mps.WAITING_RGA, tps.AT_SPEED, rps.COMMAND): (RGA, rga.Capability.START_SCAN),
(mps.WAITING_RGA, tps.COMMAND, rps.SCAN): (RGA, rga.Capability.STOP_SCAN), # this should never happen!
(mps.WAITING_RGA, tps.COMMAND, rps.COMMAND): (MCU, mcu.Capability.STANDBY), # this should never happen!
# Stopping
(mps.STOPPING, tps.AT_SPEED, rps.SCAN): (RGA, rga.Capability.STOP_SCAN),
(mps.STOPPING, tps.AT_SPEED, rps.COMMAND): (TURBO, turbo.Capability.STOP_TURBO),
(mps.STOPPING, tps.COMMAND, rps.SCAN): (RGA, rga.Capability.STOP_SCAN), # this should never happen!
(mps.STOPPING, tps.COMMAND, rps.COMMAND): (MCU, mcu.Capability.STANDBY),
}
action = action_map.get(self._get_slave_states())
if action is not None:
if not isinstance(action, list):
action = [action]
# iterate through the action list, sending the events to the targets
# if we are in POLL or CALIBRATE and we see a STANDBY event, return this driver to COMMAND.
for target, command in action:
if command == mcu.Capability.SAMPLE and state == ProtocolState.CALIBRATE:
command = mcu.Capability.CALIBRATE
if command == mcu.Capability.STANDBY and state in [ProtocolState.CALIBRATE, ProtocolState.POLL]:
self._send_event_to_slave(target, command)
self._async_raise_fsm_event(ProtocolEvent.STOP)
else:
self._send_event_to_slave(target, command)
return action
def _error(self):
"""
Handle error state in slave protocol
"""
state = self.get_current_state()
slave_states = self._get_slave_states()
# if we are not currently in the error state, make the transition
if state != ProtocolState.ERROR:
self._async_raise_fsm_event(ProtocolEvent.ERROR)
mcu_state, turbo_state, rga_state = slave_states
# before we do anything else, the RGA must be stopped.
if rga_state not in [rga.ProtocolState.COMMAND, rga.ProtocolState.ERROR]:
self._send_event_to_slave(RGA, rga.ProtocolEvent.STOP_SCAN)
# RGA must be in COMMAND or ERROR, the TURBO must be stopped.
elif turbo_state not in [turbo.ProtocolState.COMMAND, turbo.ProtocolState.SPINNING_DOWN]:
self._send_event_to_slave(TURBO, turbo.ProtocolEvent.STOP_TURBO)
# Turbo and RGA must be in COMMAND or ERROR, stop the MCU
elif mcu_state != mcu.ProtocolState.COMMAND:
self._send_event_to_slave(MCU, mcu.ProtocolEvent.STANDBY)
def _got_chunk(self, chunk):
"""
This driver has no chunker...
"""
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
@param events: Events to be filtered
@return: list of events which are also capabilities
"""
return [x for x in events if Capability.has(x) or mcu.Capability.has(x)
or turbo.Capability.has(x) or rga.Capability.has(x)]
def _get_slave_states(self):
"""
Retrieve the current protocol state from each of the slave protocols and return them as a tuple.
"""
return (
self._slave_protocols[MCU].get_current_state(),
self._slave_protocols[TURBO].get_current_state(),
self._slave_protocols[RGA].get_current_state(),
)
def _send_event_to_all(self, event):
"""
Send the same event to all slave protocols.
@return: List of (name, result) tuples, one per slave protocol
"""
return [(name, slave._protocol_fsm.on_event(event)) for name, slave in self._slave_protocols.items()]
def _send_event_to_slave(self, name, event):
"""
Send an event to a specific protocol
@param name: Name of slave protocol
@param event: Event to be sent
"""
slave_protocol = self._slave_protocols.get(name)
if slave_protocol is None:
raise InstrumentProtocolException('Attempted to send event to non-existent protocol: %s' % name)
slave_protocol._async_raise_fsm_event(event)
def _send_massp_direct_access(self, command):
"""
Handle a direct access command. Driver expects direct access commands to specify the target
using the following format:
target:command
It then routes the command to the appropriate slave protocol.
@param command: Direct access command received
"""
err_string = 'Invalid command. Command must be in the following format: "target:command"' + NEWLINE + \
'Valid targets are: %r' % self._slave_protocols.keys()
try:
target, command = command.split(DA_COMMAND_DELIMITER, 1)
target = target.lower()
except ValueError:
target = None
log.debug('_send_massp_direct_access - target: %s command: %r', target, command)
if target not in self._slave_protocols:
self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, err_string)
else:
self._slave_protocols[target]._protocol_fsm.on_event(ProtocolEvent.EXECUTE_DIRECT, command)
def _set_params(self, *args, **kwargs):
"""
Set one or more parameters. This method will determine where the parameter actually resides and
forward it to the appropriate parameter dictionary based on name.
@param args: arglist which must contain a parameter dictionary
@throws InstrumentParameterException
"""
params = args[0]
if not isinstance(params, dict):
raise InstrumentParameterException('Attempted to set parameters with a non-dictionary argument')
_, old_config = self._handler_command_get([Parameter.ALL])
temp_dict = {}
for key in params:
split_key = key.split('_', 1)
if len(split_key) == 1:
raise InstrumentParameterException('Missing target in MASSP parameter: %s' % key)
target = split_key[0]
if target not in self._slave_protocols:
# this is a master driver parameter, set it here
if key in self._param_dict.get_keys():
log.debug("Setting value for %s to %s", key, params[key])
self._param_dict.set_value(key, params[key])
else:
raise InstrumentParameterException('Invalid key in SET action: %s' % key)
else:
temp_dict.setdefault(target, {})[key] = params[key]
# set parameters for slave protocols
for name in temp_dict:
if name in self._slave_protocols:
self._slave_protocols[name]._set_params(temp_dict[name])
else:
# how did we get here? This should never happen, but raise an exception if it does.
raise InstrumentParameterException('Invalid key(s) in SET action: %r' % temp_dict[name])
_, new_config = self._handler_command_get([Parameter.ALL])
if not new_config == old_config:
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def set_init_params(self, config):
"""
Set initial parameters. Parameters are forwarded to the appropriate parameter dictionary based on name.
@param config: Init param config to be handled
"""
temp_dict = {}
self._startup_config = config
config = config.get(DriverConfigKey.PARAMETERS, {})
for key in config:
target, _ = key.split('_', 1)
if target not in self._slave_protocols:
# master driver parameter
log.debug("Setting init value for %s to %s", key, config[key])
self._param_dict.set_init_value(key, config[key])
else:
temp_dict.setdefault(target, {})[key] = config[key]
for name in temp_dict:
if name in self._slave_protocols:
self._slave_protocols[name].set_init_params({DriverConfigKey.PARAMETERS: temp_dict[name]})
else:
# how did we get here? This should never happen, but raise an exception if it does.
raise InstrumentParameterException('Invalid key(s) in INIT PARAMS action: %r' % temp_dict[name])
def get_config_metadata_dict(self):
"""
See base class for full description. This method is overridden to retrieve the parameter
dictionary from each slave protocol and merge them.
@return: dictionary containing driver metadata
"""
log.debug("Getting metadata dict from protocol...")
return_dict = {ConfigMetadataKey.DRIVER: self._driver_dict.generate_dict(),
ConfigMetadataKey.COMMANDS: self._cmd_dict.generate_dict(),
ConfigMetadataKey.PARAMETERS: self._param_dict.generate_dict()}
for protocol in self._slave_protocols.values():
return_dict[ConfigMetadataKey.PARAMETERS].update(protocol._param_dict.generate_dict())
return_dict[ConfigMetadataKey.COMMANDS].update(protocol._cmd_dict.generate_dict())
return return_dict
def get_resource_capabilities(self, current_state=True):
"""
Overrides base class to include slave protocol parameters
@param current_state: Boolean indicating whether we should return only the current state events
@return: (resource_commands, resource_parameters)
"""
res_cmds = self._protocol_fsm.get_events(current_state)
res_cmds = self._filter_capabilities(res_cmds)
res_params = self._param_dict.get_keys()
for protocol in self._slave_protocols.values():
res_params.extend(protocol._param_dict.get_keys())
return res_cmds, res_params
def _build_scheduler(self):
"""
Build a scheduler for periodic status updates
"""
job_name = ScheduledJob.ACQUIRE_SAMPLE
config = {
DriverConfigKey.SCHEDULER: {
job_name: {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.SECONDS: self._param_dict.get(Parameter.SAMPLE_INTERVAL)
},
}
}
}
self.set_init_params(config)
self._add_scheduler_event(ScheduledJob.ACQUIRE_SAMPLE, ProtocolEvent.ACQUIRE_SAMPLE)
def _delete_scheduler(self):
"""
Remove the autosample schedule.
"""
try:
self._remove_scheduler(ScheduledJob.ACQUIRE_SAMPLE)
except KeyError:
log.info('Failed to remove scheduled job for ACQUIRE_SAMPLE')
########################################################################
# Generic handlers.
########################################################################
def _handler_generic_enter(self, *args, **kwargs):
"""
Generic enter handler, raise STATE CHANGE
"""
if self.get_current_state() != ProtocolState.UNKNOWN:
self._init_params()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_generic_exit(self, *args, **kwargs):
"""
Generic exit handler, do nothing.
"""
def _handler_stop_generic(self, *args, **kwargs):
"""
Generic stop method to return to COMMAND (via POLL if appropriate)
@return next_state, (next_agent_state, None)
"""
self._delete_scheduler()
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
# check if we are in autosample AND currently taking a sample, if so, move to POLL
# otherwise go back to COMMAND.
if self.get_current_state() == ProtocolState.AUTOSAMPLE:
if self._get_slave_states() != (ProtocolState.COMMAND, ProtocolState.COMMAND, ProtocolState.COMMAND):
next_state = ProtocolState.POLL
next_agent_state = ResourceAgentState.BUSY
# notify the agent we have changed states
self._async_agent_state_change(next_agent_state)
return next_state, (next_agent_state, None)
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state
@return (next_state, next_agent_state)
"""
result = self._send_event_to_all(ProtocolEvent.DISCOVER)
log.debug('_handler_unknown_discover -- send DISCOVER to all: %r', result)
target_state = (ProtocolState.COMMAND, ProtocolState.COMMAND, ProtocolState.COMMAND)
success = False
# wait for the slave protocols to discover
for attempt in xrange(5):
slave_states = self._get_slave_states()
if slave_states == target_state:
success = True
break
time.sleep(1)
if not success:
return ProtocolState.ERROR, ResourceAgentState.IDLE
return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Command handlers.
########################################################################
def _handler_command_get(self, *args, **kwargs):
"""
Get parameter. Query this protocol plus all slave protocols.
@param args: arglist which should contain a list of parameters to get
@return None, results
"""
params = args[0]
if not isinstance(params, list):
params = [params]
temp_dict = {}
result_dict = {}
# request is for all parameters, send get(ALL) to each protocol then combine the results.
if Parameter.ALL in params:
params = [Parameter.ALL]
_, result = self._handler_get(params, **kwargs)
result_dict.update(result)
for protocol in self._slave_protocols.values():
_, result = protocol._handler_get(params, **kwargs)
result_dict.update(result)
# request is for specific parameters. Determine which protocol should service each,
# call the appropriate _handler_get and combine the results
else:
for key in params:
log.debug('about to split: %s', key)
target, _ = key.split('_', 1)
temp_dict.setdefault(target, []).append(key)
for key in temp_dict:
if key == MASTER:
_, result = self._handler_get(params, **kwargs)
else:
if key in self._slave_protocols:
_, result = self._slave_protocols[key]._handler_get(params, **kwargs)
else:
raise InstrumentParameterException('Invalid key(s) in GET action: %r' % temp_dict[key])
result_dict.update(result)
return None, result_dict
def _handler_command_set(self, *args, **kwargs):
"""
Set parameter, just pass through to _set_params, which knows how to set the params
in the slave protocols.
"""
self._set_params(*args, **kwargs)
return None, None
def _handler_command_start_direct(self):
"""
Start direct access
@return next_state, (next_agent_state, result)
"""
return ProtocolState.DIRECT_ACCESS, (ResourceAgentState.DIRECT_ACCESS, None)
def _handler_command_start_autosample(self):
"""
Move my FSM to autosample and start the sample sequence by sending START1 to the MCU.
Create the scheduler to automatically start the next sample sequence
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.START1)
self._build_scheduler()
return ProtocolState.AUTOSAMPLE, (ResourceAgentState.STREAMING, None)
def _handler_command_start_poll(self):
"""
Move my FSM to poll and start the sample sequence by sending START1 to the MCU
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.START1)
return ProtocolState.POLL, (ResourceAgentState.BUSY, None)
def _handler_command_start_calibrate(self):
"""
Move my FSM to calibrate and start the calibrate sequence by sending START1 to the MCU
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.START1)
return ProtocolState.CALIBRATE, (ResourceAgentState.BUSY, None)
def _handler_command_start_nafion_regen(self):
"""
Move my FSM to NAFION_REGEN and send NAFION_REGEN to the MCU
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.NAFREG)
return ProtocolState.REGEN, (ResourceAgentState.BUSY, None)
def _handler_command_start_ion_regen(self):
"""
Move my FSM to ION_REGEN and send ION_REGEN to the MCU
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.IONREG)
return ProtocolState.REGEN, (ResourceAgentState.BUSY, None)
def _handler_command_poweroff(self):
"""
Send POWEROFF to the MCU
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.POWEROFF)
return None, (None, None)
def _handler_command_start_manual(self):
"""
Move FSM to MANUAL OVERRIDE state
@return next_state, (next_agent_state, result)
"""
return ProtocolState.MANUAL_OVERRIDE, (ResourceAgentState.COMMAND, None)
########################################################################
# Error handlers.
########################################################################
def _handler_error(self):
"""
@return next_state, next_agent_state
"""
return ProtocolState.ERROR, ResourceAgentState.BUSY
def _handler_error_clear(self):
"""
Send the CLEAR event to any slave protocol in the error state and return this driver to COMMAND
@return next_state, (next_agent_state, result)
"""
for protocol in self._slave_protocols.values():
state = protocol.get_current_state()
if state == MASSP_STATE_ERROR:
# do this synchronously, to allow each slave protocol to complete the CLEAR action
# before transitioning states.
protocol._protocol_fsm.on_event(ProtocolEvent.CLEAR)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
########################################################################
# Autosample handlers.
########################################################################
def _handler_autosample_acquire_sample(self):
"""
Fire off a sample sequence while in the autosample state.
@return None, None
@throws InstrumentProtocolException
"""
slave_states = self._get_slave_states()
# verify the MCU is not already in a sequence
if slave_states[0] == ProtocolState.COMMAND:
result = self._send_event_to_slave(MCU, mcu.Capability.START1)
else:
raise InstrumentProtocolException("Attempted to acquire sample while sampling")
return None, None
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state. Forward to all slave protocols.
"""
self._send_event_to_all(ProtocolEvent.START_DIRECT)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_exit(self, *args, **kwargs):
"""
Exit direct access state. Check slave protocol states and verify they all
return to COMMAND, otherwise raise InstrumentProtocolException.
@throws InstrumentProtocolException
"""
for attempt in range(DA_EXIT_MAX_RETRIES):
slave_states = self._get_slave_states()
if ProtocolState.DIRECT_ACCESS in slave_states:
log.error('Slave protocol failed to return to command, attempt %d', attempt)
time.sleep(1)
else:
return
raise InstrumentProtocolException('Slave protocol never returned to command from DA.')
def _handler_direct_access_execute_direct(self, data):
"""
Execute a direct access command. For MASSP, this means passing the actual command to the
correct slave protocol. This is handled by _send_massp_direct_access.
@return next_state, (next_agent_state, result)
"""
self._send_massp_direct_access(data)
# add sent command to list for 'echo' filtering in callback
self._sent_cmds.append(data)
return None, (None, None)
def _handler_direct_access_stop_direct(self):
"""
@return next_state, (next_agent_state, result)
"""
self._send_event_to_all(ProtocolEvent.STOP_DIRECT)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
########################################################################
# Regen handlers.
########################################################################
def _handler_stop_regen(self):
"""
Abort the current regeneration sequence, return to COMMAND
@return next_state, (next_agent_state, result)
"""
self._send_event_to_slave(MCU, mcu.Capability.STANDBY)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
def _handler_regen_complete(self):
"""
Regeneration sequence is complete, return to COMMAND
@return next_state, (next_agent_state, result)
"""
self._async_agent_state_change(ResourceAgentState.COMMAND)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
def _handler_manual_override_stop(self):
"""
Exit manual override. Attempt to bring the slave drivers back to COMMAND.
"""
mcu_state, turbo_state, rga_state = self._get_slave_states()
if rga_state == rga.ProtocolState.SCAN:
self._slave_protocols[RGA]._protocol_fsm.on_event(rga.Capability.STOP_SCAN)
if turbo_state == turbo.ProtocolState.AT_SPEED:
self._slave_protocols[TURBO]._protocol_fsm.on_event(turbo.Capability.STOP_TURBO)
while rga_state not in [rga.ProtocolState.COMMAND, rga.ProtocolState.ERROR] or \
turbo_state not in [turbo.ProtocolState.COMMAND, turbo.ProtocolState.ERROR]:
time.sleep(.1)
mcu_state, turbo_state, rga_state = self._get_slave_states()
if mcu_state != mcu.ProtocolState.COMMAND:
self._slave_protocols[MCU]._protocol_fsm.on_event(mcu.Capability.STANDBY)
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
def _handler_manual_get_slave_states(self):
"""
Get the slave states and return them to the user
@return: next_state, (next_agent_state, result)
"""
mcu_state, turbo_state, rga_state = self._get_slave_states()
return None, (None, {MCU: mcu_state, RGA: rga_state, TURBO: turbo_state})
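# --- Hedged illustration (not part of the original driver) -------------------
# A standalone sketch of the direct-access routing convention handled by
# Protocol._send_massp_direct_access above: commands arrive as
# "target:command", where target is one of the slave protocol names
# (mcu, rga, turbo). The helper name below is made up; it only demonstrates
# the parsing step and does not talk to any instrument.
def _split_direct_access_command(command, delimiter=DA_COMMAND_DELIMITER):
    """Return (target, remainder), or (None, command) when no target prefix is present."""
    try:
        target, remainder = command.split(delimiter, 1)
        return target.lower(), remainder
    except ValueError:
        return None, command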
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Juliano Martinez, Locaweb.
import os
import inspect
import unittest
import mox
from quantum.agent.linux import iptables_manager
class IptablesManagerStateFulTestCase(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.root_helper = 'sudo'
self.iptables = (iptables_manager.
IptablesManager(root_helper=self.root_helper))
self.mox.StubOutWithMock(self.iptables, "execute")
def tearDown(self):
self.mox.UnsetStubs()
def test_binary_name(self):
self.assertEqual(iptables_manager.binary_name,
os.path.basename(inspect.stack()[-1][1])[:16])
def test_add_and_remove_chain(self):
bn = iptables_manager.binary_name
self.iptables.execute(['iptables-save', '-t', 'filter'],
root_helper=self.root_helper).AndReturn('')
nat_dump = (':%s-OUTPUT - [0:0]\n:%s-snat - [0:0]\n:%s-PREROUTING -'
' [0:0]\n:%s-float-snat - [0:0]\n:%s-POSTROUTING - [0:0]'
'\n:quantum-postrouting-bottom - [0:0]\n-A PREROUTING -j'
' %s-PREROUTING\n-A OUTPUT -j %s-OUTPUT\n-A POSTROUTING '
'-j %s-POSTROUTING\n-A POSTROUTING -j quantum-postroutin'
'g-bottom\n-A quantum-postrouting-bottom -j %s-snat\n-A '
'%s-snat -j %s-float-snat\n' % (bn, bn, bn, bn, bn, bn,
bn, bn, bn, bn, bn))
self.iptables.execute(['iptables-restore'],
process_input=(':%s-FORWARD - [0:0]\n:%s-INPUT'
' - [0:0]\n:%s-local - [0:0]\n:%s-filter - [0:'
'0]\n:%s-OUTPUT - [0:0]\n:quantum-filter-top -'
' [0:0]\n-A FORWARD -j quantum-filter-top\n-A '
'OUTPUT -j quantum-filter-top\n-A quantum-filt'
'er-top -j %s-local\n-A INPUT -j %s-INPUT\n-A '
'OUTPUT -j %s-OUTPUT\n-A FORWARD -j %s-FORWARD'
'\n' % (bn, bn, bn, bn, bn, bn, bn, bn, bn)
), root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'nat'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=nat_dump,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'filter'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=(':%s-FORWARD - [0:0]\n:%s-INPUT'
' - [0:0]\n:%s-local - [0:0]\n:%s-OUTPUT - [0:'
'0]\n:quantum-filter-top - [0:0]\n-A FORWARD -'
'j quantum-filter-top\n-A OUTPUT -j quantum-fi'
'lter-top\n-A quantum-filter-top -j %s-local\n'
'-A INPUT -j %s-INPUT\n-A OUTPUT -j %s-OUTPUT'
'\n-A FORWARD -j %s-FORWARD\n' % (bn, bn, bn, bn,
bn, bn, bn, bn)), root_helper=self.root_helper
).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'nat'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=nat_dump,
root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.apply()
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_filter_rule(self):
bn = iptables_manager.binary_name
self.iptables.execute(['iptables-save', '-t', 'filter'],
root_helper=self.root_helper).AndReturn('')
nat_dump = (':%s-OUTPUT - [0:0]\n:%s-snat - [0:0]\n:%s-PREROUTING -'
' [0:0]\n:%s-float-snat - [0:0]\n:%s-POSTROUTING - [0:0]'
'\n:quantum-postrouting-bottom - [0:0]\n-A PREROUTING -j'
' %s-PREROUTING\n-A OUTPUT -j %s-OUTPUT\n-A POSTROUTING '
'-j %s-POSTROUTING\n-A POSTROUTING -j quantum-postroutin'
'g-bottom\n-A quantum-postrouting-bottom -j %s-snat\n-A '
'%s-snat -j %s-float-snat\n' % (bn, bn, bn, bn, bn, bn,
bn, bn, bn, bn, bn))
self.iptables.execute(['iptables-restore'],
process_input=(':%s-FORWARD - [0:0]\n:%s-INPUT'
' - [0:0]\n:%s-local - [0:0]\n:%s-filter - [0:'
'0]\n:%s-OUTPUT - [0:0]\n:quantum-filter-top -'
' [0:0]\n-A FORWARD -j quantum-filter-top\n-A '
'OUTPUT -j quantum-filter-top\n-A quantum-filt'
'er-top -j %s-local\n-A INPUT -j %s-INPUT\n-A '
'OUTPUT -j %s-OUTPUT\n-A FORWARD -j %s-FORWARD'
'\n-A %s-filter -j DROP\n-A %s-INPUT -s 0/0 -d'
' 192.168.0.2 -j %s-filter\n' % (bn, bn, bn, bn,
bn, bn, bn, bn, bn, bn, bn, bn)),
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'nat'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=nat_dump,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'filter'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=(':%s-FORWARD - [0:0]\n:%s-INPUT -'
' [0:0]\n:%s-local - [0:0]\n:%s-OUTPUT - [0:0]\n'
':quantum-filter-top - [0:0]\n-A FORWARD -j quan'
'tum-filter-top\n-A OUTPUT -j quantum-filter-top'
'\n-A quantum-filter-top -j %s-local\n-A INPUT -'
'j %s-INPUT\n-A OUTPUT -j %s-OUTPUT\n-A FORWARD '
'-j %s-FORWARD\n' % (bn, bn, bn, bn, bn, bn, bn,
bn)), root_helper=self.root_helper
).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'nat'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=nat_dump,
root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['filter'].add_chain('filter')
self.iptables.ipv4['filter'].add_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].add_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %s-filter' %
(iptables_manager.binary_name))
self.iptables.apply()
self.iptables.ipv4['filter'].remove_rule('filter', '-j DROP')
self.iptables.ipv4['filter'].remove_rule('INPUT',
'-s 0/0 -d 192.168.0.2 -j'
' %s-filter' %
(iptables_manager.
binary_name))
self.iptables.ipv4['filter'].remove_chain('filter')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_nat_rule(self):
bn = iptables_manager.binary_name
filter_dump = (':%s-FORWARD - [0:0]\n:%s-INPUT - [0:0]\n:%s-local - '
'[0:0]\n:%s-OUTPUT - [0:0]\n:quantum-filter-top - [0:'
'0]\n-A FORWARD -j quantum-filter-top\n-A OUTPUT -j q'
'uantum-filter-top\n-A quantum-filter-top -j %s-local'
'\n-A INPUT -j %s-INPUT\n-A OUTPUT -j %s-OUTPUT\n-A F'
'ORWARD -j %s-FORWARD\n' % (bn, bn, bn, bn, bn,
bn, bn, bn))
self.iptables.execute(['iptables-save', '-t', 'filter'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=filter_dump,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'nat'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=(':%s-float-snat - [0:0]\n:%s-POS'
'TROUTING - [0:0]\n:%s-PREROUTING - [0:0]\n:%s-'
'nat - [0:0]\n:%s-OUTPUT - [0:0]\n:%s-snat - [0'
':0]\n:quantum-postrouting-bottom - [0:0]\n-A P'
'REROUTING -j %s-PREROUTING\n-A OUTPUT -j %s-OU'
'TPUT\n-A POSTROUTING -j %s-POSTROUTING\n-A POS'
'TROUTING -j quantum-postrouting-bottom\n-A qua'
'ntum-postrouting-bottom -j %s-snat\n-A %s-snat'
' -j %s-float-snat\n-A %s-PREROUTING -d 192.168'
'.0.3 -j %s-nat\n-A %s-nat -p tcp --dport 8080 '
'-j REDIRECT --to-port 80\n' % (bn, bn, bn, bn,
bn, bn, bn, bn, bn, bn, bn, bn, bn, bn, bn)),
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'filter'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=filter_dump,
root_helper=self.root_helper).AndReturn(None)
self.iptables.execute(['iptables-save', '-t', 'nat'],
root_helper=self.root_helper).AndReturn('')
self.iptables.execute(['iptables-restore'],
process_input=(':%s-float-snat - [0:0]\n:%s-POST'
'ROUTING - [0:0]\n:%s-PREROUTING - [0:0]\n:%s-OU'
'TPUT - [0:0]\n:%s-snat - [0:0]\n:quantum-postro'
'uting-bottom - [0:0]\n-A PREROUTING -j %s-PRERO'
'UTING\n-A OUTPUT -j %s-OUTPUT\n-A POSTROUTING -'
'j %s-POSTROUTING\n-A POSTROUTING -j quantum-pos'
'trouting-bottom\n-A quantum-postrouting-bottom '
'-j %s-snat\n-A %s-snat -j %s-float-snat\n' % (
bn, bn, bn, bn, bn, bn, bn, bn, bn, bn, bn)
), root_helper=self.root_helper).AndReturn(None)
self.mox.ReplayAll()
self.iptables.ipv4['nat'].add_chain('nat')
self.iptables.ipv4['nat'].add_rule('PREROUTING',
'-d 192.168.0.3 -j %s-nat' %
(iptables_manager.binary_name))
self.iptables.ipv4['nat'].add_rule('nat',
'-p tcp --dport 8080' +
' -j REDIRECT --to-port 80')
self.iptables.apply()
self.iptables.ipv4['nat'].remove_rule('nat',
'-p tcp --dport 8080 -j'
' REDIRECT --to-port 80')
self.iptables.ipv4['nat'].remove_rule('PREROUTING',
'-d 192.168.0.3 -j %s-nat' %
(iptables_manager.binary_name))
self.iptables.ipv4['nat'].remove_chain('nat')
self.iptables.apply()
self.mox.VerifyAll()
def test_add_rule_to_a_nonexistent_chain(self):
self.assertRaises(LookupError, self.iptables.ipv4['filter'].add_rule,
'nonexistent', '-j DROP')
def test_remove_nonexistent_chain(self):
self.mox.StubOutWithMock(iptables_manager, "LOG")
iptables_manager.LOG.warn(('Attempted to remove chain %s which does '
'not exist'), 'nonexistent')
self.mox.ReplayAll()
self.iptables.ipv4['filter'].remove_chain('nonexistent')
self.mox.VerifyAll()
def test_remove_nonexistent_rule(self):
self.mox.StubOutWithMock(iptables_manager, "LOG")
iptables_manager.LOG.warn('Tried to remove rule that was not there: '
'%(chain)r %(rule)r %(wrap)r %(top)r',
{'wrap': True, 'top': False,
'rule': '-j DROP',
'chain': 'nonexistent'})
self.mox.ReplayAll()
self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
self.mox.VerifyAll()
class IptablesManagerStateLessTestCase(unittest.TestCase):
def setUp(self):
self.iptables = (iptables_manager.IptablesManager(state_less=True))
def test_nat_not_found(self):
self.assertFalse('nat' in self.iptables.ipv4)
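# --- Hedged usage sketch (not part of the original tests) --------------------
# A minimal example of the IptablesManager calls exercised above: add a
# wrapped chain and rule, push the state with apply(), then clean up. The
# chain name is made up, and actually running this requires iptables and
# root/sudo privileges.
def _iptables_manager_usage_sketch():
    manager = iptables_manager.IptablesManager(root_helper='sudo')
    manager.ipv4['filter'].add_chain('example')
    manager.ipv4['filter'].add_rule('example', '-j DROP')
    manager.apply()
    manager.ipv4['filter'].remove_rule('example', '-j DROP')
    manager.ipv4['filter'].remove_chain('example')
    manager.apply()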
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for reduction operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class ReduceOpsTest(xla_test.XLATestCase):
def _testReduction(self,
tf_reduce_fn,
np_reduce_fn,
dtype,
test_inputs,
rtol=1e-4,
atol=1e-4):
"""Tests that the output of 'tf_reduce_fn' matches numpy's output."""
for test_input in test_inputs:
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = tf_reduce_fn(a, index)
result = sess.run(out, {a: test_input, index: [0]})
self.assertAllClose(
result, np_reduce_fn(test_input, axis=0), rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [1]})
self.assertAllClose(
result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol)
result = sess.run(out, {a: test_input, index: [-1]})
self.assertAllClose(
result, np_reduce_fn(test_input, axis=1), rtol=rtol, atol=atol)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [-33]})
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError, 'Invalid reduction dim'):
sess.run(out, {a: test_input, index: [2]})
REAL_DATA = [
np.zeros(shape=(2, 0)),
np.zeros(shape=(0, 30)),
np.arange(1, 7).reshape(2, 3),
np.arange(-10, -4).reshape(2, 3),
np.arange(-4, 2).reshape(2, 3),
]
COMPLEX_DATA = [
np.zeros(shape=(2, 0)).astype(np.complex64),
np.zeros(shape=(0, 30)).astype(np.complex64),
np.arange(1, 13, dtype=np.float32).view(np.complex64).reshape(2, 3),
np.arange(-14, -2, dtype=np.float32).view(np.complex64).reshape(2, 3),
np.arange(-4, 8, dtype=np.float32).view(np.complex64).reshape(2, 3),
]
NONEMPTY_REAL_DATA = [x for x in REAL_DATA if np.size(x) > 0]
NONEMPTY_COMPLEX_DATA = [x for x in COMPLEX_DATA if np.size(x) > 0]
BOOL_DATA = [
np.array([], dtype=np.bool).reshape(2, 0),
np.array([], dtype=np.bool).reshape(0, 3),
np.array([[False, True, False], [True, True, False]]),
]
def testReduceSumF32(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.float32, self.REAL_DATA)
def testReduceSumC64(self):
self._testReduction(math_ops.reduce_sum, np.sum, np.complex64,
self.COMPLEX_DATA)
def testReduceProdF32(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.float32,
self.REAL_DATA)
def testReduceProdC64(self):
self._testReduction(math_ops.reduce_prod, np.prod, np.complex64,
self.COMPLEX_DATA)
def testReduceMin(self):
def reference_min(dtype, inp, axis):
"""Wrapper around np.amin that returns +infinity for an empty input."""
if inp.shape[axis] == 0:
if np.issubdtype(dtype, np.floating):
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:], float('inf'))
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:],
np.iinfo(dtype).max)
return np.amin(inp, axis)
for dtype in set(self.all_types).intersection(
[np.float32, np.int32, np.int64]):
self._testReduction(math_ops.reduce_min,
functools.partial(reference_min, dtype), dtype,
self.REAL_DATA)
def testReduceMax(self):
def reference_max(dtype, inp, axis):
"""Wrapper around np.amax that returns -infinity for an empty input."""
if inp.shape[axis] == 0:
if np.issubdtype(dtype, np.floating):
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:],
float('-inf'))
return np.full(inp.shape[0:axis] + inp.shape[axis + 1:],
np.iinfo(dtype).min)
return np.amax(inp, axis)
for dtype in set(self.all_types).intersection(
[np.float32, np.int32, np.int64]):
self._testReduction(math_ops.reduce_max,
functools.partial(reference_max, dtype), dtype,
self.REAL_DATA)
def testReduceMeanF32(self):
# TODO(phawkins): mean on XLA currently returns 0 instead of NaN when
# reducing across zero inputs.
self._testReduction(math_ops.reduce_mean, np.mean, np.float32,
self.NONEMPTY_REAL_DATA)
def testReduceMeanC64(self):
self._testReduction(math_ops.reduce_mean, np.mean, np.complex64,
self.NONEMPTY_COMPLEX_DATA)
def testReduceAll(self):
self._testReduction(math_ops.reduce_all, np.all, np.bool, self.BOOL_DATA)
def testReduceAny(self):
self._testReduction(math_ops.reduce_any, np.any, np.bool, self.BOOL_DATA)
class ReduceOpPrecisionTest(xla_test.XLATestCase):
def _testReduceSum(self,
expected_result,
dtype,
test_inputs,
rtol=1e-3,
atol=1e-4):
"""Tests reduce sum on a list of input arrays.
For each array in test_inputs, check that performing reduce sum on the array
produces a value that is close to the expected result.
Args:
expected_result: the expected result.
dtype: the data type of the reduce sum operation.
test_inputs: a list of input arrays for the reduce sum operation.
rtol: the relative error.
atol: the absolute error.
"""
for test_input in test_inputs:
with self.test_session() as sess:
with self.test_scope():
a = array_ops.placeholder(dtype)
index = array_ops.placeholder(dtypes.int32)
out = math_ops.reduce_sum(a, index)
result = sess.run(out, {
a: np.array(test_input, dtype=dtype),
index: [0]
})
# Compare the results using float32 type.
self.assertAllClose(
np.float32(result),
np.float32(expected_result),
rtol=rtol,
atol=atol)
def testReduceSumF16(self):
"""Tests the reduce sum of float16 doesn't lose too much precision."""
if np.float16 not in self.all_types:
return
f16_max = np.finfo(np.float16).max
self._testReduceSum(
f16_max, np.float16,
itertools.permutations([f16_max, f16_max, f16_max * (-1.0)], 3))
def testReduceSumBF16(self):
"""Tests the reduce sum of bfloat16 doesn't lose too much precision."""
if dtypes.bfloat16.as_numpy_dtype not in self.all_types:
return
bf16_max = np.float32(dtypes.bfloat16.max)
f32_max = dtypes.float32.max
value = min(bf16_max, f32_max - bf16_max)
self._testReduceSum(
dtypes.bfloat16.as_numpy_dtype(value), dtypes.bfloat16.as_numpy_dtype,
itertools.permutations([bf16_max, value, bf16_max * (-1.0)], 3))
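# --- Hedged illustration (not part of the original tests) --------------------
# A numpy-only sketch of why testReduceSumF16 sums [f16_max, f16_max, -f16_max]
# in every order: a naive running float16 sum overflows to inf as soon as the
# two positive values meet, while accumulating in float32 and casting once at
# the end recovers f16_max exactly. The helper name is made up for illustration.
def _float16_reduce_sum_sketch():
  f16_max = np.finfo(np.float16).max
  values = [f16_max, f16_max, -f16_max]
  naive = np.float16(0)
  for v in values:
    naive = np.float16(naive + v)  # overflows to inf at the second addition
  accurate = np.float16(np.sum(np.array(values, dtype=np.float32)))
  return naive, accurate  # -> (inf, 65504.0)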
if __name__ == '__main__':
googletest.main()
|
|
import functools
import hashlib
from django import http
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.db.transaction import non_atomic_requests
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext, ugettext_lazy as _lazy
import six
from django_statsd.clients import statsd
from rest_framework import serializers
from rest_framework.viewsets import ModelViewSet
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.views import AccountViewSet
from olympia.addons.models import Addon
from olympia.addons.views import BaseFilter
from olympia.amo import messages
from olympia.amo.decorators import (
allow_mine, json_view, login_required, post_required, use_primary_db)
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import paginate, render, urlparams
from olympia.api.filters import OrderingAliasFilter
from olympia.api.permissions import (
AllOf, AllowReadOnlyIfPublic, AnyOf, PreventActionPermission)
from olympia.legacy_api.utils import addon_to_dict
from olympia.translations.query import order_by_translation
from olympia.users.decorators import process_user_id
from olympia.users.models import UserProfile
from . import forms
from .models import SPECIAL_SLUGS, Collection, CollectionAddon
from .permissions import (
AllowCollectionAuthor, AllowCollectionContributor, AllowContentCurators)
from .serializers import (
CollectionAddonSerializer, CollectionSerializer,
CollectionWithAddonsSerializer)
log = olympia.core.logger.getLogger('z.collections')
@non_atomic_requests
def get_collection(request, user_id, slug):
if (slug in SPECIAL_SLUGS.values() and request.user.is_authenticated and
request.user.id == user_id):
return getattr(request.user, slug + '_collection')()
else:
return get_object_or_404(Collection.objects,
author_id=user_id, slug=slug)
def owner_required(f=None, require_owner=True):
"""Requires collection to be owned, by someone."""
def decorator(func):
@functools.wraps(func)
def wrapper(request, user_id, slug, *args, **kw):
collection = get_collection(request, user_id, slug)
if acl.check_collection_ownership(request, collection,
require_owner=require_owner):
return func(request, collection, user_id, slug, *args, **kw)
else:
raise PermissionDenied
return wrapper
return decorator(f) if f else decorator
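# The decorator above supports both call styles used later in this module,
# e.g. the bare form on `edit_privacy` and the parametrized form on `edit`:
#
#   @owner_required                       # bare: require_owner defaults to True
#   def edit_privacy(request, collection, user_id, slug): ...
#
#   @owner_required(require_owner=False)  # contributors allowed through
#   def edit(request, collection, user_id, slug): ...
#
# Both work because `decorator(f) if f else decorator` applies the decorator
# immediately when it is used without parentheses.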
@non_atomic_requests
def legacy_redirect(request, uuid, edit=False):
# Nicknames have a limit of 30, so len == 36 implies a uuid.
key = 'uuid' if len(uuid) == 36 else 'nickname'
collection = get_object_or_404(Collection.objects, **{key: uuid})
if edit:
return http.HttpResponseRedirect(collection.edit_url())
to = collection.get_url_path()
params = request.GET.urlencode()
if params:
to += '?' + params
return http.HttpResponseRedirect(to)
@non_atomic_requests
def legacy_directory_redirects(request, page):
sorts = {'editors_picks': 'featured', 'popular': 'popular'}
loc = base = reverse('collections.list')
if page in sorts:
loc = urlparams(base, sort=sorts[page])
elif request.user.is_authenticated:
if page == 'mine':
loc = reverse('collections.user', args=[request.user.id])
return http.HttpResponseRedirect(loc)
@non_atomic_requests
def render_cat(request, template, data=None, extra=None):
if extra is None:
extra = {}
if data is None:
data = {}
data.update(dict(search_cat='collections'))
return render(request, template, data, **extra)
@non_atomic_requests
def collection_listing(request, base=None):
qs = (
Collection.objects.listed()
.filter(Q(application=request.APP.id) | Q(application=None))
.filter(type=amo.COLLECTION_FEATURED)
.exclude(addon_count=0)
)
# Counts are hard to cache automatically, and accuracy for this
# one is less important. Remember it for 5 minutes.
    countkey = hashlib.sha256(
        (str(qs.query) + '_count').encode('utf-8')).hexdigest()
count = cache.get(countkey)
if count is None:
count = qs.count()
cache.set(countkey, count, 300)
collections = paginate(request, qs, count=count)
return render_cat(request, 'bandwagon/impala/collection_listing.html',
{'collections': collections, 'src': 'co-hc-sidebar',
'dl_src': 'co-dp-sidebar'})
@allow_mine
@process_user_id
@non_atomic_requests
def user_listing(request, user_id):
author = get_object_or_404(UserProfile, id=user_id)
qs = (Collection.objects.filter(author_id=user_id)
.order_by('-created'))
mine = (request.user.is_authenticated and
request.user.id == user_id)
if mine:
page = 'mine'
else:
page = 'user'
qs = qs.filter(listed=True)
collections = paginate(request, qs)
return render_cat(request, 'bandwagon/user_listing.html',
{'collections': collections,
'page': page, 'author': author})
class CollectionAddonFilter(BaseFilter):
opts = (('added', _lazy(u'Added')),
('popular', _lazy(u'Popularity')),
('name', _lazy(u'Name')))
def filter_added(self):
return self.base_queryset.order_by('collectionaddon__created')
def filter_name(self):
return order_by_translation(self.base_queryset, 'name')
def filter_popular(self):
return self.base_queryset.order_by('-weekly_downloads')
@allow_mine
@process_user_id
@non_atomic_requests
def collection_detail(request, user_id, slug):
collection = get_collection(request, user_id, slug)
if not collection.listed:
if not request.user.is_authenticated:
return redirect_for_login(request)
if not acl.check_collection_ownership(request, collection):
raise PermissionDenied
base = Addon.objects.valid() & collection.addons.all()
filter = CollectionAddonFilter(request, base,
key='sort', default='popular')
notes = get_notes(collection)
# Go directly to CollectionAddon for the count to avoid joins.
count = CollectionAddon.objects.filter(
Addon.objects.all().valid_q(
amo.VALID_ADDON_STATUSES, prefix='addon__'),
collection=collection.id)
addons = paginate(request, filter.qs, per_page=15, count=count.count())
# `perms` is defined in django.contrib.auth.context_processors. Gotcha!
user_perms = {
'view_stats': acl.check_ownership(
request, collection, require_owner=False),
}
return render_cat(request, 'bandwagon/collection_detail.html',
{'collection': collection, 'filter': filter,
'addons': addons, 'notes': notes,
'user_perms': user_perms})
@json_view(has_trans=True)
@allow_mine
@process_user_id
@non_atomic_requests
def collection_detail_json(request, user_id, slug):
collection = get_collection(request, user_id, slug)
if not (collection.listed or acl.check_collection_ownership(
request, collection)):
raise PermissionDenied
# We evaluate the QuerySet with `list` to work around bug 866454.
addons_dict = [addon_to_dict(a) for a in list(collection.addons.valid())]
return {
'name': collection.name,
'url': collection.get_abs_url(),
'addons': addons_dict
}
def get_notes(collection, raw=False):
# This might hurt in a big collection with lots of notes.
# It's a generator so we don't evaluate anything by default.
notes = CollectionAddon.objects.filter(collection=collection,
comments__isnull=False)
rv = {}
for note in notes:
# Watch out for comments in a language we didn't pick up.
if note.comments:
rv[note.addon_id] = (note.comments.localized_string if raw
else note.comments)
yield rv
def initial_data_from_request(request):
return {'author': request.user, 'application': request.APP.id}
def collection_message(request, collection, option):
if option == 'add':
title = ugettext('Collection created!')
msg = ugettext(
'Your new collection is shown below. You can '
'<a href="%(url)s">edit additional settings</a> if you\'d '
'like.'
) % {'url': collection.edit_url()}
elif option == 'update':
title = ugettext('Collection updated!')
msg = ugettext(
'<a href="%(url)s">View your collection</a> to see the changes.'
) % {'url': collection.get_url_path()}
else:
raise ValueError('Incorrect option "%s", '
'takes only "add" or "update".' % option)
messages.success(request, title, msg, message_safe=True)
@use_primary_db
@login_required
def add(request):
"""Displays/processes a form to create a collection."""
ctx = {}
if request.method == 'POST':
form = forms.CollectionForm(
request.POST, request.FILES,
initial=initial_data_from_request(request))
aform = forms.AddonsForm(request.POST)
if form.is_valid():
collection = form.save(default_locale=request.LANG)
collection.save()
if aform.is_valid():
aform.save(collection)
collection_message(request, collection, 'add')
statsd.incr('collections.created')
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(collection.get_url_path())
else:
ctx['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
ctx['comments'] = aform.clean_addon_comment()
else:
form = forms.CollectionForm()
ctx['form'] = form
return render_cat(request, 'bandwagon/add.html', ctx)
@use_primary_db
@login_required(redirect=False)
def ajax_new(request):
form = forms.CollectionForm(
request.POST or None,
initial=initial_data_from_request(request))
if request.method == 'POST' and form.is_valid():
collection = form.save()
addon_id = request.POST['addon_id']
collection.add_addon(Addon.objects.get(pk=addon_id))
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(reverse('collections.ajax_list') +
'?addon_id=%s' % addon_id)
return render(request, 'bandwagon/ajax_new.html', {'form': form})
@login_required(redirect=False)
@non_atomic_requests
def ajax_list(request):
try:
addon_id = int(request.GET['addon_id'])
except (KeyError, ValueError):
return http.HttpResponseBadRequest()
qs = Collection.objects.owned_by(request.user).with_has_addon(addon_id)
return render(request, 'bandwagon/ajax_list.html',
{'collections': order_by_translation(qs, 'name')})
@use_primary_db
@login_required
@post_required
@process_user_id
def collection_alter(request, user_id, slug, action):
collection = get_collection(request, user_id, slug)
return change_addon(request, collection, action)
def change_addon(request, collection, action):
if not acl.check_collection_ownership(request, collection):
raise PermissionDenied
try:
addon = get_object_or_404(Addon.objects, pk=request.POST['addon_id'])
except (ValueError, KeyError):
return http.HttpResponseBadRequest()
getattr(collection, action + '_addon')(addon)
log.info(u'%s: %s %s to collection %s' %
(request.user, action, addon.id, collection.id))
if request.is_ajax():
url = '%s?addon_id=%s' % (reverse('collections.ajax_list'), addon.id)
else:
url = collection.get_url_path()
return http.HttpResponseRedirect(url)
@use_primary_db
@login_required
@post_required
def ajax_collection_alter(request, action):
try:
collection = get_object_or_404(
Collection.objects, pk=request.POST['id'])
except (ValueError, KeyError):
return http.HttpResponseBadRequest()
return change_addon(request, collection, action)
@use_primary_db
@login_required
@process_user_id
# Contributors are allowed to *see* the page, but there is another
# permission check below to prevent them from doing any modifications.
@owner_required(require_owner=False)
def edit(request, collection, user_id, slug):
is_admin = acl.action_allowed(request, amo.permissions.ADMIN_CURATION)
if not acl.check_collection_ownership(
request, collection, require_owner=True):
if request.method == 'POST':
raise PermissionDenied
form = None
elif request.method == 'POST':
initial = initial_data_from_request(request)
if collection.author_id: # Don't try to change the author.
initial['author'] = collection.author
form = forms.CollectionForm(request.POST, request.FILES,
initial=initial,
instance=collection)
if form.is_valid():
collection = form.save()
collection_message(request, collection, 'update')
log.info(u'%s edited collection %s' %
(request.user, collection.id))
return http.HttpResponseRedirect(collection.edit_url())
else:
form = forms.CollectionForm(instance=collection)
qs = (CollectionAddon.objects.using('default')
.filter(collection=collection))
meta = {c.addon_id: c for c in qs}
addons = collection.addons.all()
comments = next(get_notes(collection, raw=True))
data = {
'collection': collection,
'form': form,
'user_id': user_id,
'slug': slug,
'meta': meta,
'is_admin': is_admin,
'addons': addons,
'comments': comments
}
return render_cat(request, 'bandwagon/edit.html', data)
@use_primary_db
@login_required
@process_user_id
@owner_required(require_owner=False)
@post_required
def edit_addons(request, collection, user_id, slug):
if request.method == 'POST':
form = forms.AddonsForm(request.POST)
if form.is_valid():
form.save(collection)
collection_message(request, collection, 'update')
log.info(u'%s added add-ons to %s' %
(request.user, collection.id))
return http.HttpResponseRedirect(collection.edit_url() + '#addons-edit')
@use_primary_db
@login_required
@process_user_id
@owner_required
@post_required
def edit_privacy(request, collection, user_id, slug):
collection.listed = not collection.listed
collection.save()
log.info(u'%s changed privacy on collection %s' %
(request.user, collection.id))
return http.HttpResponseRedirect(collection.get_url_path())
@use_primary_db
@login_required
@process_user_id
def delete(request, user_id, slug):
collection = get_object_or_404(Collection, author_id=user_id, slug=slug)
if not acl.check_collection_ownership(request, collection, True):
log.info(u'%s is trying to delete collection %s'
% (request.user, collection.id))
raise PermissionDenied
data = dict(collection=collection, user_id=user_id, slug=slug)
if request.method == 'POST':
if request.POST['sure'] == '1':
collection.delete()
log.info(u'%s deleted collection %s' %
(request.user, collection.id))
url = reverse('collections.user', args=[user_id])
return http.HttpResponseRedirect(url)
else:
return http.HttpResponseRedirect(collection.get_url_path())
return render_cat(request, 'bandwagon/delete.html', data)
@login_required
@allow_mine
@non_atomic_requests
def mine(request, user_id=None, slug=None):
if slug is None:
return user_listing(request, user_id)
else:
return collection_detail(request, user_id, slug)
class CollectionViewSet(ModelViewSet):
# Note: CollectionAddonViewSet will call CollectionViewSet().get_object(),
# causing the has_object_permission() method of these permissions to be
# called. It will do so without setting an action however, bypassing the
# PreventActionPermission() parts.
permission_classes = [
AnyOf(
# Collection authors can do everything.
AllowCollectionAuthor,
            # Collection contributors can access the featured themes collection
            # (it's community-managed) and change its addons, but can't delete
            # or edit its details.
AllOf(AllowCollectionContributor,
PreventActionPermission(('create', 'list', 'update',
'destroy', 'partial_update'))),
# Content curators can modify existing mozilla collections as they
# see fit, but can't list or delete them.
AllOf(AllowContentCurators,
PreventActionPermission(('create', 'destroy', 'list'))),
# Everyone else can do read-only stuff, except list.
AllOf(AllowReadOnlyIfPublic,
PreventActionPermission('list'))),
]
lookup_field = 'slug'
def get_account_viewset(self):
if not hasattr(self, 'account_viewset'):
self.account_viewset = AccountViewSet(
request=self.request,
permission_classes=[], # We handled permissions already.
kwargs={'pk': self.kwargs['user_pk']})
return self.account_viewset
def get_serializer_class(self):
with_addons = ('with_addons' in self.request.GET and
self.action == 'retrieve')
return (CollectionSerializer if not with_addons
else CollectionWithAddonsSerializer)
def get_queryset(self):
return Collection.objects.filter(
author=self.get_account_viewset().get_object()).order_by(
'-modified')
def get_addons_queryset(self):
collection_addons_viewset = CollectionAddonViewSet(
request=self.request
)
# Set this to avoid a pointless lookup loop.
collection_addons_viewset.collection = self.get_object()
# This needs to be list to make the filtering work.
collection_addons_viewset.action = 'list'
qs = collection_addons_viewset.get_queryset()
# Now limit and sort
limit = settings.REST_FRAMEWORK['PAGE_SIZE']
sort = collection_addons_viewset.ordering[0]
return qs.order_by(sort)[:limit]
class TranslationAwareOrderingAliasFilter(OrderingAliasFilter):
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if len(ordering) > 1:
# We can't support multiple orderings easily because of
# how order_by_translation works.
raise serializers.ValidationError(
'You can only specify one "sort" argument. Multiple '
'orderings are not supported')
order_by = ordering[0]
if order_by in ('name', '-name'):
return order_by_translation(queryset, order_by, Addon)
sup = super(TranslationAwareOrderingAliasFilter, self)
return sup.filter_queryset(request, queryset, view)
class CollectionAddonViewSet(ModelViewSet):
permission_classes = [] # We don't need extra permissions.
serializer_class = CollectionAddonSerializer
lookup_field = 'addon'
filter_backends = (TranslationAwareOrderingAliasFilter,)
ordering_fields = ()
ordering_field_aliases = {'popularity': 'addon__weekly_downloads',
'name': 'name',
'added': 'created'}
ordering = ('-addon__weekly_downloads',)
def get_collection(self):
if not hasattr(self, 'collection'):
# We're re-using CollectionViewSet and making sure its get_object()
# method is called, which triggers the permission checks for that
# class so we don't need our own.
# Note that we don't pass `action`, so the PreventActionPermission
# part of the permission checks won't do anything.
self.collection = CollectionViewSet(
request=self.request,
kwargs={'user_pk': self.kwargs['user_pk'],
'slug': self.kwargs['collection_slug']}).get_object()
return self.collection
def get_object(self):
self.lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
lookup_value = self.kwargs.get(self.lookup_url_kwarg)
        # If the lookup is not a number, it's probably the slug instead.
if lookup_value and not six.text_type(lookup_value).isdigit():
self.lookup_field = '%s__slug' % self.lookup_field
return super(CollectionAddonViewSet, self).get_object()
def get_queryset(self):
qs = (
CollectionAddon.objects
.filter(collection=self.get_collection())
.prefetch_related('addon'))
filter_param = self.request.GET.get('filter')
# We only filter list action.
include_all_with_deleted = (filter_param == 'all_with_deleted' or
self.action != 'list')
# If deleted addons are requested, that implies all addons.
include_all = filter_param == 'all' or include_all_with_deleted
if not include_all:
qs = qs.filter(
addon__status=amo.STATUS_PUBLIC, addon__disabled_by_user=False)
elif not include_all_with_deleted:
qs = qs.exclude(addon__status=amo.STATUS_DELETED)
return qs
|
|
"""
Module that holds classes that represent agents.
"""
import sys
import string
import abc
import random
import pygame
import entity
import interaction
import interactionmemory
import events
from appstate import AppState
import settings
class Agent(entity.Entity):
"""
Class that represents an agent.
"""
color = (3, 124, 146, 255)
def __init__(self):
super(Agent, self).__init__()
self.setup_interaction_memory()
self.name = 'Agent ' + ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(6))
@abc.abstractmethod
def prepare_interaction(self):
"""
Prepare an interaction to enact.
Interaction enaction is split into two parts to better handle multiple
agents. First, all agents prepare the interaction to enact without
manipulating the world state, and afterwards all these interactions
are enacted (in randomized order). The world notifies the agent
of which interaction was actually enacted.
This ensures that an agent does not have access to world state
information it should not yet know at any point in time (especially
when it becomes more complex with e.g. a visual subsystem). If
preparation was immediately followed by enaction, an agent could
potentially respond to the interaction of another agent made "earlier"
at the same discrete point in time!
:return: The primitive interaction that is to be enacted and optionally
something that will be passed to enacted_interaction of this
agent.
"""
raise NotImplementedError("Should be implemented by child.")
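    # A rough sketch (hypothetical driver code; the real world/step loop lives
    # outside this module) of the two-phase protocol described above:
    #
    #   prepared = {agent: agent.prepare_interaction() for agent in agents}
    #   for agent in random.sample(list(prepared), len(prepared)):
    #       intended, data = split(prepared[agent])  # data is optional
    #       enacted = world.enact(agent, intended)   # may differ from intended
    #       agent.enacted_interaction(enacted, data)
    #
    # Separating preparation from enaction prevents any agent from reacting to
    # another agent's action within the same discrete time step.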
@abc.abstractmethod
def enacted_interaction(self, interaction, data):
"""
Tell the agent which primitive interaction was actually enacted.
:param interaction: The primitive interaction that was actually
enacted.
:param data: The data that was (optionally) returned by
prepare_interaction this step.
"""
raise NotImplementedError("Should be implemented by child.")
def setup_interaction_memory(self):
"""
Setup the interaction memory of this agent.
"""
self.interaction_memory = interactionmemory.InteractionMemory()
def get_name(self):
"""
Get this agent's name
:return: This agent's name
"""
return self.name
def get_perception(self, world):
if self.has_perception_handler():
return self.perception_handler.perceive(self, world)
else:
raise Exception("No perception handler has been set")
def set_perception_handler(self, perception_handler):
self.perception_handler = perception_handler
def has_perception_handler(self):
return hasattr(self, "perception_handler")
def add_primitives(self, primitives):
for primitive in primitives:
self.interaction_memory.add_interaction(primitive)
def add_motivations(self, motivation):
for primitive, valence in motivation.iteritems():
self.interaction_memory.set_valence(primitive, valence)
def get_interaction_memory(self):
return self.interaction_memory
def set_interaction_memory(self, interaction_memory):
self.interaction_memory = interaction_memory
def collidable(self):
return False
def to_json(self):
return {
'name': self.name,
'interaction_memory': self.interaction_memory
}
class SimpleAgent(Agent):
"""
An agent with a simple existence.
"""
enacted = None
def anticipate(self):
"""
Anticipate the possible interactions based on the current context.
:return: A list of possible (primitive) interactions.
"""
interactions = []
for composite_interaction in self.interaction_memory.get_composite_interactions():
if composite_interaction.get_pre() == self.enacted:
interactions.append(composite_interaction.get_post())
return interactions
def select_experiment(self, anticipations):
"""
Select the best interaction from a list of anticipated interactions.
        If the list of anticipated interactions is empty or if the best
        interaction has a non-positive valence, return a random primitive
        interaction.
:param anticipations: The list of interactions to choose an experiment
from.
:return: A chosen primitive interaction.
"""
anticipations.sort(
key = lambda x: self.interaction_memory.get_valence(x),
reverse = True
)
if len(anticipations) > 0 and self.interaction_memory.get_valence(anticipations[0]) > 0:
return anticipations[0]
else:
return random.choice(self.interaction_memory.get_primitive_interactions())
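    # Example with made-up valences: given anticipations [a, b, c] where the
    # memory reports valences {a: -1.0, b: 2.0, c: 0.5}, the sort yields
    # [b, c, a]; b has a positive valence, so b is returned. If every valence
    # were <= 0 (or the list were empty), a random primitive interaction would
    # be chosen instead.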
def learn_composite_interaction(self, context, enacted):
"""
Learn a composite interaction or reinforce it if it is already known.
:param context: The context (pre-interaction).
        :param enacted: The newly enacted interaction (post-interaction).
"""
composite = interaction.CompositeInteraction(context, enacted)
if composite not in self.interaction_memory.get_composite_interactions():
self.interaction_memory.add_interaction(composite)
else:
self.interaction_memory.increment_weight(composite)
def prepare_interaction(self):
anticipations = self.anticipate()
experiment = self.select_experiment(anticipations)
# Post interaction preparation event
AppState.state.get_event_manager().post_event(events.AgentPreparationEvent(
self,
experiment,
self.interaction_memory.get_valence(experiment)))
return experiment
def enacted_interaction(self, interaction, data):
# Learn interaction if it is not yet known
if interaction not in self.interaction_memory.get_primitive_interactions():
self.interaction_memory.add_interaction(interaction)
# Post enacted interaction event
AppState.state.get_event_manager().post_event(events.AgentEnactionEvent(
self,
interaction,
self.interaction_memory.get_valence(interaction)))
self.interaction_memory.add_interaction_to_history(interaction)
        if self.enacted is not None:
self.learn_composite_interaction(self.enacted, interaction)
self.enacted = interaction
class ConstructiveAgent(Agent):
"""
    An agent with a fully recursive existence. It considers all experiments as
    abstract and processes all experiments in the same way.
"""
def __init__(self):
super(ConstructiveAgent, self).__init__()
self.enacting_interaction = False
self.enacting_interaction_step = 0
self.enacting_interaction_sequence = []
self.enacted_sequence = []
self.context = []
self.history = []
def activate_interactions(self):
"""
Step 1 of the sequential system.
Known composite interactions whose pre-interaction belongs to the
context are activated.
"""
activated = []
for composite_interaction in self.interaction_memory.get_composite_interactions():
if composite_interaction.get_pre() in self.context:
activated.append(composite_interaction)
return activated
def propose_interactions(self):
"""
Step 2 of the sequential system.
Post-interactions of activated interactions are proposed together with
        the weight of the activated interaction (as a tuple).
"""
activated = self.activate_interactions()
# Keep track of added post interactions with the highest (composite
# pre and post) interaction weight, so that we do not add duplicate
# post interactions
added = {}
for interaction_ in activated:
weight = self.interaction_memory.get_weight(interaction_)
if interaction_ in added:
if weight > added[interaction_]:
added[interaction_] = weight
else:
added[interaction_] = weight
# Create the list of proposed interactions [(post interaction, weight)]
proposed = added.iteritems()
return proposed
def consider_alternative_interactions(self):
"""
Add-on to the sequential system, between steps 2 and 3.
Proposed post-interactions are scrutinized by looking at alternative
interactions that have occurred when trying to activate that interaction.
If the alternative interaction itself is also proposed, the context
indicates the alternative is likely to occur. Thus, the agent
anticipates that the alternative might happen instead of the intended
interaction. The intended interaction's proclivity is temporarily
adjusted to reflect this.
"""
proposed = self.propose_interactions()
proposed = map(lambda (proposed_interaction, weight):
(
proposed_interaction,
weight * self.interaction_memory.get_valence(proposed_interaction, process_boredom = True)
), proposed)
n = 0
proposed_interactions = map(lambda (proposed_interaction, proclivity): proposed_interaction, proposed)
for (proposed_interaction, proclivity) in proposed:
for alternative in self.interaction_memory.get_alternative_interactions(proposed_interaction):
if alternative in proposed_interactions:
AppState.get_state().get_logger().info("%s - Anticipating alternative %s for %s" % (self.name, alternative, proposed_interaction))
proclivity += self.interaction_memory.get_proclivity(alternative)
proposed[n] = (proposed_interaction, proclivity)
n += 1
return proposed
def select_intended_interaction(self):
"""
Step 3 of the sequential system.
The decisional mechanism; choose an interaction to enact (primitive
or composite).
The intended interaction is selected from the proposed interactions
based on the weight of the activated interactions and the values of the
proposed post interactions.
"""
proposed = self.consider_alternative_interactions()
proposed.sort(
key = lambda x: x[1],
reverse = True
)
proposed = map(lambda x: x[0], proposed)
"""
Without alternatives:
proposed = self.propose_interactions()
proposed.sort(
key = lambda x: x[1] * self.interaction_memory.get_valence(x[0], process_boredom = True),
reverse = True
)
proposed = map(lambda x: x[0], proposed)
"""
if len(proposed) > 0 and self.interaction_memory.get_proclivity(proposed[0]) > 0:
return proposed[0]
elif len(proposed) == 0:
# TODO: in Katja's implementation the activated interactions contain
# some set of default interactions. The paper itself does not seem
# to mention how to deal with an empty activated set.
AppState.get_state().get_logger().info("%s - No proposed interactions: exploring" % self.name)
return random.choice(self.interaction_memory.get_primitive_interactions())
else:
AppState.get_state().get_logger().info("%s - Negative proclivity: exploring" % self.name)
return random.choice(self.interaction_memory.get_primitive_interactions())
def update_context(self, enacted_interaction, learned_or_reinforced):
"""
Step 6 of the sequential system.
Add all learned / reinforced interactions to the context.
:param enacted_interaction: The interaction that was enacted (can be
different from the intended interaction)
:param learned_or_reinforced: A list of interactions that were just
learned or reinforced.
"""
self.context = []
"""
According to paper:
Update the context of the agent. The new context includes the enacted
interaction (e_d), the post-interaction of e_d if it exists, and the
interactions that were just learned or reinforced and that pass a
certain weight ("stabilized" interactions).
"""
for interaction_ in learned_or_reinforced:
if self.interaction_memory.get_weight(interaction_) > 3:
self.context.append(interaction_)
if isinstance(enacted_interaction, interaction.CompositeInteraction):
self.context.append(enacted_interaction.get_post())
self.context.append(enacted_interaction)
"""
Alternative context method:
"""
"""
self.context.append(enacted_interaction)
for interaction in learned_or_reinforced:
self.context.append(interaction.get_pre())
"""
def prepare_interaction(self):
if not self.enacting_interaction:
# Decisional mechanism.
# We are not currently enacting the primitives in a sequence of
# interactions. Choose a new interaction to enact (steps 1-3).
self.enacting_interaction = True
self.enacting_interaction_step = 0
self.enacted_sequence = []
# Exploration
if random.random() <= 0.1:
# Choose a random primitive interaction (not a primitive perception interaction)
self.intended_interaction = random.choice(filter(lambda x: isinstance(x, interaction.PrimitiveInteraction), self.interaction_memory.get_primitive_interactions()))
AppState.get_state().get_logger().info("%s - EXPLORING" % (self.name))
else:
self.intended_interaction = self.select_intended_interaction()
self.enacting_interaction_sequence = self.intended_interaction.unwrap()
AppState.get_state().get_logger().info("%s - Intending: %s" % (self.name, self.intended_interaction))
# Enact a primitive interaction from the sequence we are currently
# enacting.
intended_interaction = self.enacting_interaction_sequence[self.enacting_interaction_step]
AppState.get_state().get_logger().info("%s - > %s" % (self.name, intended_interaction))
# Step 4 of the sequential system, enact the interaction:
# Post interaction preparation event
AppState.state.get_event_manager().post_event(events.AgentPreparationEvent(
self,
intended_interaction,
self.interaction_memory.get_valence(intended_interaction, process_boredom = True)))
return (intended_interaction, intended_interaction)
def enacted_interaction(self, interaction_, data):
self.enacting_interaction_step += 1
intended_primitive_interaction = data
self.enacted_sequence.append(interaction_)
# Learn interaction if it is not yet known
if interaction_ not in self.interaction_memory.get_primitive_interactions():
self.interaction_memory.add_interaction(interaction_)
# Post enacted interaction event
AppState.state.get_event_manager().post_event(events.AgentEnactionEvent(
self,
interaction_,
self.interaction_memory.get_valence(interaction_)))
self.interaction_memory.add_interaction_to_history(interaction_)
if (
not interaction_ == intended_primitive_interaction
or
self.enacting_interaction_step >= len(self.enacting_interaction_sequence)
):
# Failed or done enacting
self.enacting_interaction = False
# Reconstruct enacted interaction from hierarchy of intended
# interaction
enacted = self.intended_interaction.reconstruct_from_hierarchy(self.enacted_sequence)
AppState.get_state().get_logger().info("%s - Enacted: %s" % (self.name, enacted))
# Add the interaction as an alternative interaction if the intended interaction failed
if enacted != self.intended_interaction:
if self.interaction_memory.add_alternative_interaction(self.intended_interaction, enacted):
AppState.get_state().get_logger().info("%s - Interaction added as alternative" % self.name)
# Step 5: add new or reinforce existing composite interactions
learned_or_reinforced = []
if isinstance(enacted, interaction.CompositeInteraction):
learned_or_reinforced.append(enacted)
if len(self.history) >= 1:
previous = self.history[-1]
# <interaction at t-1, enacted interaction>
t1enacted = interaction.CompositeInteraction(previous, enacted)
learned_or_reinforced.append(t1enacted)
if len(self.history) >= 2:
penultimate = self.history[-2]
# <interaction at t-2, interaction at t-1>
t2t1 = interaction.CompositeInteraction(penultimate, previous)
# <<interaction at t-2, interaction at t-1>, enacted interaction>
t2t1_enacted = interaction.CompositeInteraction(t2t1, enacted)
learned_or_reinforced.append(t2t1_enacted)
# <interaction at t-2, <interaction at t-1, enacted interaction>>
t2_t1enacted = interaction.CompositeInteraction(penultimate, t1enacted)
learned_or_reinforced.append(t2_t1enacted)
for composite in learned_or_reinforced:
if composite not in self.interaction_memory.get_composite_interactions():
self.interaction_memory.add_interaction(composite)
else:
self.interaction_memory.increment_weight(composite)
# Keep history of last 100 actions performed
if len(self.history) > 100:
self.history.pop(0)
self.history.append(enacted)
"""
According to the paper:
for pre_interaction in self.context:
composite = interaction.CompositeInteraction(pre_interaction, enacted)
learned_or_reinforced.append(composite)
if composite not in self.interaction_memory.get_composite_interactions():
self.interaction_memory.add_interaction(composite)
else:
self.interaction_memory.increment_weight(composite)
"""
# Step 6: update context
self.update_context(enacted, learned_or_reinforced)
else:
# Not done
pass
class HomeostaticConstructiveAgent(ConstructiveAgent):
"""
A homeostatic agent is a constructive agent where valences of interactions
are a function of internal energy levels of the agent (these homeostatic
values are not directly observable by the agent).
"""
def __init__(self):
super(HomeostaticConstructiveAgent, self).__init__()
self.homeostasis = {}
def set_homeostatic_value(self, homeostatic_property, value):
self.homeostasis[homeostatic_property] = value
def get_homeostatic_value(self, homeostatic_property):
return self.homeostasis[homeostatic_property]
def add_to_homeostatic_value(self, homeostatic_property, delta_value):
self.homeostasis[homeostatic_property] += delta_value
def setup_interaction_memory(self):
self.interaction_memory = interactionmemory.HomeostaticInteractionMemory(self)
class HumanAgent(Agent):
"""
An agent that is controlled by the user.
"""
color = (146, 124, 3, 255)
def prepare_interaction(self):
chosen = None
self.color_old = self.color # Temporarily change color to indicate this agent has to be controlled
self.color = (255,255,0,255)
# Secondary pygame loop to process events until the user made a decision
        while chosen is None:
if pygame.key.get_pressed()[pygame.K_LALT]:
# If left alt is pressed, use the regular controller(s)
AppState.get_state().get_event_manager().post_event(events.ControlEvent())
else:
# If left alt is not pressed, use this agent's controller
interaction = self.get_interaction_from_input()
                if interaction is not None:
self.color = self.color_old
chosen = interaction
# Draw views
AppState.get_state().get_event_manager().post_event(events.DrawEvent())
# Pygame tick control
AppState.get_state().get_clock().tick(settings.MAX_FPS)
# Post interaction preparation event
AppState.state.get_event_manager().post_event(events.AgentPreparationEvent(
self,
chosen,
-1))
AppState.get_state().get_logger().info("%s - > %s" % (self.name, chosen))
return chosen
def enacted_interaction(self, interaction, data):
# Post enacted interaction event
AppState.state.get_event_manager().post_event(events.AgentEnactionEvent(
self,
interaction,
-1))
self.interaction_memory.add_interaction_to_history(interaction)
AppState.get_state().get_logger().info("%s - Enacted: %s" % (self.name, interaction))
def get_interaction_from_input(self):
"""
Get the interaction the agent should enact from user input
"""
for event in pygame.event.get():
if event.type == pygame.QUIT:
quitEvent = events.QuitEvent()
AppState.get_state().get_event_manager().post_event(quitEvent)
return
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
return self.interaction_memory.find_interaction_by_name_and_result("Step")
elif event.key == pygame.K_LEFT:
return self.interaction_memory.find_interaction_by_name_and_result("Turn Left")
elif event.key == pygame.K_RIGHT:
return self.interaction_memory.find_interaction_by_name_and_result("Turn Right")
elif event.key == pygame.K_SLASH:
return self.choose_from_list()
def choose_from_list(self):
"""
Method to choose interaction from a list of all interactions known by
this agent.
"""
interactions = self.interaction_memory.get_primitive_interactions()
print "Choose an interaction from the following list:"
n = 1
for interaction in interactions:
print "%s. %s" % (n, interaction)
n += 1
choice = None
keydict = {
pygame.K_0: '0',
pygame.K_1: '1',
pygame.K_2: '2',
pygame.K_3: '3',
pygame.K_4: '4',
pygame.K_5: '5',
pygame.K_6: '6',
pygame.K_7: '7',
pygame.K_8: '8',
pygame.K_9: '9'
}
# Get user input until we have a valid choice
while not isinstance(choice, int) or choice < 1 or choice > len(interactions):
print "Please choose the number of an interaction and press [enter]: ",
inputstring = ""
while True:
# Process pygame key events to get user input
event = pygame.event.poll()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
# Stop and process the input retrieved so far
break
elif event.key == pygame.K_BACKSPACE:
# Remove one character
if len(inputstring) > 0:
inputstring = inputstring[:-1] # Delete one character from the input string
sys.stdout.write('\b \b') # Delete one character from the console output
elif event.key == pygame.K_ESCAPE:
# Stop
print ""
return None
try:
key = keydict[event.key]
except:
key = ""
inputstring += key
sys.stdout.write(key)
try:
choice = int(inputstring)
except ValueError:
choice = None
print ""
return interactions[choice - 1]
class ProgrammableAgent(Agent):
"""
An agent that can be programmed (with prior knowledge) to interact
with the world.
"""
color = (111, 3, 146, 255)
def __init__(self, program = None):
super(ProgrammableAgent, self).__init__()
self.program = program
def prepare_interaction(self):
        if self.program is None:
raise Exception("No program has been set for the programmable agent")
else:
if self.has_perception_handler():
percept = self.get_perception()
else:
percept = None
interaction_ = self.program.get_interaction(percept)
return interaction_
def enacted_interaction(self, interaction, data):
# Post enacted interaction event
AppState.state.get_event_manager().post_event(events.AgentEnactionEvent(
self,
interaction,
-1))
self.interaction_memory.add_interaction_to_history(interaction)
AppState.get_state().get_logger().info("%s - Enacted: %s" % (self.name, interaction))
def set_program(self, program):
"""
Set the programmable agent's program.
"""
self.program = program
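    # A program only needs to expose get_interaction(percept); a minimal
    # hand-written example (hypothetical, for illustration only):
    #
    #   class AlwaysStep(object):
    #       def __init__(self, step_interaction):
    #           self.step_interaction = step_interaction
    #       def get_interaction(self, percept):
    #           return self.step_interaction
    #
    #   agent = ProgrammableAgent()
    #   agent.set_program(AlwaysStep(step_interaction))
    #
    # When no perception handler is set, prepare_interaction passes
    # percept=None to the program.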
|
|
"""
Tests for rendering value as selection from list of options
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import unittest
import re
from annalist.views.fields.render_select import (
get_select_renderer, get_choice_renderer,
SelectValueMapper
)
from annalist.views.form_utils.fieldchoice import FieldChoice
from .field_rendering_support import FieldRendererTestSupport
# ---- support methods ----
def expect_render_option(choice_val, select_val, placeholder):
selected = ''' selected="selected"''' if choice_val == select_val else ''''''
if choice_val == "":
option_val = ""
choice_text = placeholder
value_text = ''' value=""'''
else:
option_val = "opt_type/"+choice_val
choice_text = "label "+choice_val
value_text = ''' value="%s"'''%option_val
fc = FieldChoice(id=option_val, label=choice_text, choice_value=(choice_val=="dup"))
return '''<option%s%s>%s</option>'''%(value_text, selected, fc.choice_html())
def expect_render_select(select_name, choice_list, select_val, placeholder):
sel = """<select name="%s">"""%select_name
if select_val not in choice_list:
val = "opt_type/"+select_val if select_val else ""
lab = "opt_type/"+select_val if select_val else placeholder
fc = FieldChoice(id=val, label=lab)
nopt = ['''<option value="%s" selected="selected">%s</option>'''%(val, fc.choice_html())]
else:
nopt = []
opts = [ expect_render_option(choice_val, select_val, placeholder) for choice_val in choice_list ]
end = "</select>\n"
return "\n".join([sel]+nopt+opts+[end])
# ---- test class ----
class SelectRenderingTest(FieldRendererTestSupport):
def setUp(self):
return
def tearDown(self):
return
def _make_select_test_context(self, valkey, vallink, valchoices, valplaceholder):
valtext = "opt_type/"+valkey if valkey else ""
if valkey not in valchoices:
vallink = None
# log.info(
# "_make_select_test_context: valtext %r, vallink %r, valchoices %r"%
# (valtext, vallink, valchoices)
# )
valoptions = []
for c in valchoices:
if c != "":
val = "opt_type/"+c
lab = "label "+c
else:
val = ""
lab = valplaceholder
lnk = vallink if val == valtext else "@@no_link@@"
valoptions.append(FieldChoice(val, label=lab, link=lnk))
# log.info(
# "_make_select_test_context: valoptions %r"%
# (valoptions,)
# )
return self._make_test_context(valtext, field_ref_type="ref_type", options=valoptions, field_link=vallink)
def test_RenderChoiceValue(self):
def expect_render(valkey, vallabel, vallink, valchoices):
valcont = "?continuation_url=test_cont"
if vallink and valkey in valchoices:
render_view = """<a href="%s">%s</a> """%(vallink+valcont, vallabel)
elif valkey == "":
render_view = """<span class="value-blank">%s</span> """%(vallabel)
else:
render_view = """<span class="value-missing">%s</span> """%(vallabel)
render_edit = expect_render_select(
"repeat_prefix_test_field",
valchoices,
valkey,
"(test placeholder)"
)
return {'view': render_view, 'edit': render_edit}
noval = "(No 'test label' selected)"
test_values = (
[ ( "aa", "label aa", "http://example.org/aa", ["aa", "bb", "cc"])
, ( "", noval, None, ["", "aa", "bb", "cc"])
, ( "dd", "opt_type/dd", "http://example.org/dd", ["aa", "bb", "cc"])
, ( "", noval, None, ["aa", "bb", "cc"])
, ( "dup", "label dup", "http://example.org/dup", ["aa", "bb", "cc", "dup", "dup"])
])
test_value_context_renders = (
[ ( self._make_select_test_context(valkey, vallink, valchoices, noval),
expect_render(valkey, vallabel, vallink, valchoices)
) for valkey, vallabel, vallink, valchoices in test_values
])
renderer = get_choice_renderer()
for render_context, expect_render in test_value_context_renders:
# log.info("test_RenderChoiceValue: value %(field_value)r"%render_context['field'])
# log.info("test_RenderChoiceValue: expect %s"%(expect_render,))
self._check_value_renderer_results(
renderer,
context=render_context,
expect_rendered_view=expect_render['view'],
expect_rendered_edit=expect_render['edit'],
collapse_whitespace=True
)
return
def test_RenderSelectValue(self):
def expect_render(valkey, vallabel, vallink, valchoices):
valcont = "?continuation_url=test_cont"
if vallink and valkey in valchoices:
render_view = (
"""<a href="%s">%s</a> """%
(vallink+valcont, vallabel)
)
elif valkey == "":
render_view = """<span class="value-blank">%s</span> """%(vallabel)
else:
render_view = """<span class="value-missing">%s</span> """%(vallabel)
select = expect_render_select(
"repeat_prefix_test_field",
valchoices,
valkey,
"(test placeholder)"
)
render_edit = (
'''<div class="row">\n'''+
''' <div class="small-10 columns view-value less-new-button">\n'''+
''' %s\n'''+
''' </div>\n'''+
''' <div class="small-2 columns view-value new-button left small-text-right">\n'''+
''' <button type="submit" \n'''+
''' name="%s__new_edit" \n'''+
''' value="New"\n'''+
''' title="Define new or edit %s"\n'''+
''' >\n'''+
''' <span class="select-edit-button-text">+✍</span>\n'''+
''' </button>\n'''+
''' </div>\n'''+
'''</div>\n'''
)%(select, "repeat_prefix_test_field", "test label")
return {'view': render_view, 'edit': render_edit}
noval = "(No 'test label' selected)"
test_values = (
[ ( "aa", "label aa", "http://example.org/aa", ["aa", "bb", "cc"])
, ( "", noval, None, ["", "aa", "bb", "cc"])
, ( "dd", "opt_type/dd", "http://example.org/dd", ["aa", "bb", "cc"])
, ( "", noval, None, ["aa", "bb", "cc"])
])
test_value_context_renders = (
[ ( self._make_select_test_context(valtext, vallink, valchoices, noval),
expect_render(valtext, vallabel, vallink, valchoices)
) for valtext, vallabel, vallink, valchoices in test_values
])
renderer = get_select_renderer()
for render_context, expect_render in test_value_context_renders:
# log.info(repr(render_context['field']['field_value']))
self._check_value_renderer_results(
renderer,
context=render_context,
expect_rendered_view=expect_render['view'],
expect_rendered_edit=expect_render['edit'],
collapse_whitespace=True
)
return
def test_DecodeSelectValue(self):
test_decode_values = (
{ None: None
, "": ""
, "text": "text"
})
for valtext, expect_valdata in test_decode_values.items():
valdata = SelectValueMapper.decode(valtext)
self.assertEqual(
valdata, expect_valdata,
"Value decode(%s) = %r, expect %r"%(valtext, valdata, expect_valdata)
)
return
# End.
if __name__ == "__main__":
# import django
# django.setup() # Needed for template loader
# Runtests in this module
# runner = unittest.TextTestRunner(verbosity=2)
# tests = unittest.TestSuite()
# tests = getSuite(select=sel)
# if tests: runner.run(tests)
unittest.main()
|
|
import os,coral,re
from RecoLuminosity.LumiDB import nameDealer
class correctionTerm(object):
    constfactor=1.141 # constant upshift, same for everyone
class nonlinearSingle(correctionTerm):
    t1=0.076 # slope
class nonlinearV2(correctionTerm):
    drift=0.01258 # drift
    t1=0.063 # slope1
    t2=-0.0037 # slope2
class nonlinearV3(correctionTerm):
    drift=0.00813 # drift
    t1=0.073 # slope1
    t2=-0.0037 # slope2
def afterglowByFillscheme(fillscheme,afterglowPatterns):
'''
search in the list of (pattern,afterglowfactor) for a match in regex
'''
for (apattern,cfactor) in afterglowPatterns:
if re.match(apattern,fillscheme):
return cfactor
return 1.0
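# Example (made-up patterns and factor values): with
#   afterglowPatterns = [(r'^50ns_.*', 0.984), (r'^25ns_.*', 0.964)]
# a fillscheme starting with '50ns_' matches the first pattern and returns
# 0.984, while an unmatched fillscheme falls through to the default 1.0.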
#=======================================================================================================
#below : correction formula version_2
#======================================================================================================
def driftcorrectionsForRange(schema,inputRange,correctionTerm,startrun=160403):
'''
select intglumi from intglumi where runnum=:runnum and startrun=:startrun
input : inputRange. str if a single run, [runs] if a list of runs
output: {run:driftcorrection}
'''
result={}
runs=[]
if isinstance(inputRange,str):
runs.append(int(inputRange))
else:
runs=inputRange
for r in runs:
defaultresult=1.0
intglumi=0.0
lint=0.0
if r<150008 :# no drift corrections for 2010 data
result[r]=defaultresult
continue
if r>189738: # no drift correction for 2012 data
result[r]=defaultresult
continue
qHandle=schema.newQuery()
try:
qHandle.addToTableList(nameDealer.intglumiTableName())
qResult=coral.AttributeList()
qResult.extend('INTGLUMI','float')
qHandle.addToOutputList('INTGLUMI')
qConditionStr='RUNNUM=:runnum AND STARTRUN=:startrun'
qCondition=coral.AttributeList()
qCondition.extend('runnum','unsigned int')
qCondition.extend('startrun','unsigned int')
qCondition['runnum'].setData(int(r))
qCondition['startrun'].setData(int(startrun))
qHandle.setCondition(qConditionStr,qCondition)
qHandle.defineOutput(qResult)
cursor=qHandle.execute()
while cursor.next():
intglumi=cursor.currentRow()['INTGLUMI'].data()
lint=intglumi*6.37*1.0e-9 #(convert to /fb)
#print lint
except :
del qHandle
raise
del qHandle
if not lint:
print '[WARNING] null intglumi for run ',r,' '
result[r]=defaultresult+correctionTerm.drift*lint
#print 'lint ',lint,' result ',result
return result
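# Worked example (made-up INTGLUMI, drift taken from nonlinearV2 above):
#   intglumi = 1.0e9  ->  lint = 1.0e9 * 6.37 * 1.0e-9 = 6.37 (/fb)
#   result[r] = 1.0 + 0.01258 * 6.37 = 1.0801 (approx.)
# Runs before 150008 (2010 data) and after 189738 (2012 data) keep the
# default correction of 1.0.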
def applyfinecorrectionBXV2(bxlumi,avglumi,norm,constfactor,afterglowfactor,ncollidingbx,nonlinear_1,nonlinear_2,driftfactor):
if bxlumi<=0:#do nothing about the negative bx lumi
return bxlumi
correctbxlumi=bxlumi*norm*constfactor*afterglowfactor*driftfactor
if ncollidingbx and ncollidingbx!=0:
dldt=avglumi/float(ncollidingbx)
nonlinearTerm=1.0+nonlinear_1*dldt+nonlinear_2*dldt*dldt
correctbxlumi=correctbxlumi/nonlinearTerm
#print 'avglumi,nonlinearfactor,nonlinearTerm ',avglumi,nonlinearfactor,nonlinearTerm
#print 'bxlumi,avglumi,norm,const,after',bxlumi,avglumi,norm,constfactor,afterglowfactor,correctbxlumi
return correctbxlumi
def applyfinecorrectionV2(avglumi,constfactor,afterglowfactor,ncollidingbx,nonlinear_1,nonlinear_2,driftfactor):
'''
input :
avglumi : normalized lumi with 6370
constfactor,afterglowfactor,ncollidingbx,nonlinear_1,nonlinear_2
driftfactor: default
'''
#print avglumi,constfactor,afterglowfactor,ncollidingbx,nonlinear_1,nonlinear_2,driftfactor
instlumi=avglumi*afterglowfactor*constfactor*driftfactor
if ncollidingbx and ncollidingbx!=0:
dldt=avglumi/float(ncollidingbx)
nonlinearTerm=1.0+nonlinear_1*dldt+nonlinear_2*dldt*dldt
instlumi=instlumi/nonlinearTerm
#print 'avglumi,const,after,nonlinear,instlumi ',avglumi,constfactor,afterglowfactor,nonlinearfactor,instlumi
return instlumi
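# Worked example (made-up luminosity; constants from correctionTerm/nonlinearV2
# above):
#   avglumi=1000.0, constfactor=1.141, afterglowfactor=0.99, driftfactor=1.0,
#   ncollidingbx=100, nonlinear_1=0.063, nonlinear_2=-0.0037
#   instlumi      = 1000.0 * 0.99 * 1.141 * 1.0 = 1129.59
#   dldt          = 1000.0 / 100 = 10.0
#   nonlinearTerm = 1.0 + 0.063*10.0 + (-0.0037)*10.0*10.0 = 1.26
#   corrected     = 1129.59 / 1.26 = 896.5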
def correctionsForRangeV2(schema,inputRange,correctionTerm):
'''
decide on the corrections to apply in the input range depending on amodetag,egev and runrange
select fillschemepattern,correctionfactor from fillscheme;
[(fillschemepattern,afterglow),...]
    select fillnum,runnum,fillscheme,ncollidingbunches,egev from cmsrunsummary where amodetag='PROTPHYS' and egev>3000
{runnum: (fillnum,fillscheme,ncollidingbunches),...}
input: correctionTerm correction terms used in the formula
output:
{runnum:(constantfactor,afterglowfactor,ncollidingbx,nonlinearfactor1,nonlinearfactor2)}
'''
runs=[]
result={}
constfactor=1.0 #default
afterglow=1.0 #default
ncollidingbunches=0 #default
nonlinear_1=1.0 #default
nonlinear_2=1.0 #default
if isinstance(inputRange,str):
runs.append(int(inputRange))
else:
runs=inputRange
for r in runs:
if r<150008 :
result[r]=(constfactor,afterglow,ncollidingbunches,nonlinear_1, nonlinear_2)
afterglows=[]
s=nameDealer.fillschemeTableName()
r=nameDealer.cmsrunsummaryTableName()
qHandle=schema.newQuery()
try:
qHandle.addToTableList(s)
qResult=coral.AttributeList()
qResult.extend('FILLSCHEMEPATTERN','string')
qResult.extend('CORRECTIONFACTOR','float')
qHandle.defineOutput(qResult)
qHandle.addToOutputList('FILLSCHEMEPATTERN')
qHandle.addToOutputList('CORRECTIONFACTOR')
cursor=qHandle.execute()
while cursor.next():
fillschemePattern=cursor.currentRow()['FILLSCHEMEPATTERN'].data()
afterglowfac=cursor.currentRow()['CORRECTIONFACTOR'].data()
afterglows.append((fillschemePattern,afterglowfac))
except :
del qHandle
raise
del qHandle
qHandle=schema.newQuery()
try:
qHandle.addToTableList(r)
qHandle.addToOutputList('FILLNUM', 'fillnum')
qHandle.addToOutputList('RUNNUM', 'runnum')
qHandle.addToOutputList('FILLSCHEME','fillscheme')
qHandle.addToOutputList('NCOLLIDINGBUNCHES','ncollidingbunches')
qResult=coral.AttributeList()
qResult.extend('fillnum','unsigned int')
qResult.extend('runnum','unsigned int')
qResult.extend('fillscheme','string')
qResult.extend('ncollidingbunches','unsigned int')
qConditionStr='AMODETAG=:amodetag AND EGEV>=:egev'#filter out lowenergy and non-proton runs
qCondition=coral.AttributeList()
qCondition.extend('amodetag','string')
qCondition.extend('egev','unsigned int')
qCondition['amodetag'].setData('PROTPHYS')
qCondition['egev'].setData(3000)
qHandle.defineOutput(qResult)
qHandle.setCondition(qConditionStr,qCondition)
cursor=qHandle.execute()
while cursor.next():
runnum=cursor.currentRow()['runnum'].data()
#print 'runnum ',runnum
if runnum not in runs or result.has_key(runnum):
continue
fillnum=cursor.currentRow()['fillnum'].data()
afterglow=1.0
constfactor=correctionTerm.constfactor
nonlinear_1=correctionTerm.t1
nonlinear_2=correctionTerm.t2
ncollidingbunches=0
if cursor.currentRow()['ncollidingbunches']:
ncollidingbunches=cursor.currentRow()['ncollidingbunches'].data()
fillscheme=''
if cursor.currentRow()['fillscheme']:
fillscheme=cursor.currentRow()['fillscheme'].data()
if fillscheme and len(fillscheme)!=0:
                if fillnum>=2124: # afterglow is already applied by lumidaq in hf root for fill<2124
afterglow=afterglowByFillscheme(fillscheme,afterglows)
result[runnum]=(constfactor,afterglow,ncollidingbunches,nonlinear_1,nonlinear_2)
except :
del qHandle
raise
del qHandle
for run in runs:
if run not in result.keys():
result[run]=(constfactor,afterglow,ncollidingbunches,nonlinear_1,nonlinear_2)
return result
#=======================================================================================================
#below : correction formula version_1, default until April 2012, no longer used.
#======================================================================================================
#def applyfinecorrectionBX(bxlumi,avglumi,norm,constfactor,afterglowfactor,nonlinearfactor):
# if bxlumi<=0:
# return bxlumi
# correctbxlumi=bxlumi*norm*constfactor*afterglowfactor
# if constfactor!=1.0 and nonlinearfactor!=0:
# if avglumi<0:
# avglumi=0.0
# nonlinearTerm=1.0+avglumi*nonlinearfactor#0.076/ncollidinbunches
# correctbxlumi=correctbxlumi/nonlinearTerm
# #print 'avglumi,nonlinearfactor,nonlinearTerm ',avglumi,nonlinearfactor,nonlinearTerm
# #print 'bxlumi,avglumi,norm,const,after',bxlumi,avglumi,norm,constfactor,afterglowfactor,correctbxlumi
# return correctbxlumi
#def applyfinecorrection(avglumi,constfactor,afterglowfactor,nonlinearfactor):
# instlumi=avglumi*afterglowfactor*constfactor
# if nonlinearfactor!=0 and constfactor!=1.0:
# nonlinearTerm=1.0+avglumi*nonlinearfactor#0.076/ncollidinbunches
# instlumi=instlumi/nonlinearTerm
# #print 'avglumi,const,after,nonlinear,instlumi ',avglumi,constfactor,afterglowfactor,nonlinearfactor,instlumi
# return instlumi
#def correctionsForRange(schema,inputRange,correctionTerm):
# '''
# select fillschemepattern,correctionfactor from fillscheme;
# [(fillschemepattern,afterglow),...]
# select fillnum,runnum,fillscheme,ncollidingbunches,egev from cmsrunsummary where amodetag='PROTPYHS' and egev>3000
# {runnum: (fillnum,fillscheme,ncollidingbunches),...}
# output:
# {runnum:(constantfactor,afterglowfactor,nonlinearfactor)}
# '''
# runs=[]
# result={}
# if isinstance(inputRange,str):
# runs.append(int(inputRange))
# else:
# runs=inputRange
# for r in runs:
# if r<150008 :
# result[r]=(1.0,1.0,0.0)
# afterglows=[]
# constfactor=correctionTerm.constfactor
# s=nameDealer.fillschemeTableName()
# r=nameDealer.cmsrunsummaryTableName()
# qHandle=schema.newQuery()
# try:
# qHandle.addToTableList(s)
# qResult=coral.AttributeList()
# qResult.extend('FILLSCHEMEPATTERN','string')
# qResult.extend('CORRECTIONFACTOR','float')
# qHandle.defineOutput(qResult)
# qHandle.addToOutputList('FILLSCHEMEPATTERN')
# qHandle.addToOutputList('CORRECTIONFACTOR')
# cursor=qHandle.execute()
# while cursor.next():
# fillschemePattern=cursor.currentRow()['FILLSCHEMEPATTERN'].data()
# afterglowfac=cursor.currentRow()['CORRECTIONFACTOR'].data()
# afterglows.append((fillschemePattern,afterglowfac))
# except :
# del qHandle
# raise
# del qHandle
# qHandle=schema.newQuery()
# try:
# qHandle.addToTableList(r)
# qHandle.addToOutputList('FILLNUM', 'fillnum')
# qHandle.addToOutputList('RUNNUM', 'runnum')
# qHandle.addToOutputList('FILLSCHEME','fillscheme')
# qHandle.addToOutputList('NCOLLIDINGBUNCHES','ncollidingbunches')
# qResult=coral.AttributeList()
# qResult.extend('fillnum','unsigned int')
# qResult.extend('runnum','unsigned int')
# qResult.extend('fillscheme','string')
# qResult.extend('ncollidingbunches','unsigned int')
# qConditionStr='AMODETAG=:amodetag AND EGEV>=:egev'
# qCondition=coral.AttributeList()
# qCondition.extend('amodetag','string')
# qCondition.extend('egev','unsigned int')
# qCondition['amodetag'].setData('PROTPHYS')
# qCondition['egev'].setData(3000)
# qHandle.defineOutput(qResult)
# qHandle.setCondition(qConditionStr,qCondition)
# cursor=qHandle.execute()
# while cursor.next():
# runnum=cursor.currentRow()['runnum'].data()
# #print 'runnum ',runnum
# if runnum not in runs or result.has_key(runnum):
# continue
# fillnum=cursor.currentRow()['fillnum'].data()
# constfactor=correctionTerm.constfactor
# afterglow=1.0
# nonlinear=correctionTerm.t1
# nonlinearPerBX=0.0
# ncollidingbunches=0
# if cursor.currentRow()['ncollidingbunches']:
# ncollidingbunches=cursor.currentRow()['ncollidingbunches'].data()
# fillscheme=''
# if cursor.currentRow()['fillscheme']:
# fillscheme=cursor.currentRow()['fillscheme'].data()
# if fillscheme and len(fillscheme)!=0:
# afterglow=afterglowByFillscheme(fillscheme,afterglows)
# if ncollidingbunches and ncollidingbunches!=0:
# nonlinearPerBX=float(1)/float(ncollidingbunches)
# nonlinear=nonlinearPerBX*nonlinear
# result[runnum]=(constfactor,afterglow,nonlinear)
# except :
# del qHandle
# raise
# del qHandle
# for run in runs:
# if run not in result.keys():
# result[run]=(1.0,1.0,0.0) #those have no fillscheme 2011 runs
# return result
#=======================================================================================================
#below : correction on pixellumi, afterglow only
#======================================================================================================
def pixelcorrectionsForRange(schema,inputRange):
'''
select fillschemepattern,correctionfactor from fillscheme;
[(fillschemepattern,afterglow),...]
select fillnum,runnum,fillscheme from cmsrunsummary where amodetag='PROTPHYS'
{runnum: (fillnum,fillscheme),...}
output:
{runnum:(afterglowfactor)}
'''
runs=[]
result={}
if isinstance(inputRange,str):
runs.append(int(inputRange))
else:
runs=inputRange
afterglows=[]
s=nameDealer.fillschemeTableName()
r=nameDealer.cmsrunsummaryTableName()
qHandle=schema.newQuery()
try:
qHandle.addToTableList(s)
qResult=coral.AttributeList()
qResult.extend('FILLSCHEMEPATTERN','string')
qResult.extend('PIXELCORRECTIONFACTOR','float')
qHandle.defineOutput(qResult)
qHandle.addToOutputList('FILLSCHEMEPATTERN')
qHandle.addToOutputList('PIXELCORRECTIONFACTOR')
cursor=qHandle.execute()
while cursor.next():
fillschemePattern=cursor.currentRow()['FILLSCHEMEPATTERN'].data()
afterglowfac=cursor.currentRow()['PIXELCORRECTIONFACTOR'].data()
afterglows.append((fillschemePattern,afterglowfac))
except :
del qHandle
raise
del qHandle
qHandle=schema.newQuery()
try:
qConditionStr='FILLNUM>:minfillnum'
qCondition=coral.AttributeList()
qCondition.extend('minfillnum','unsigned int')
qCondition['minfillnum'].setData(1600)
qHandle.addToTableList(r)
qHandle.addToOutputList('FILLNUM', 'fillnum')
qHandle.addToOutputList('RUNNUM', 'runnum')
qHandle.addToOutputList('FILLSCHEME','fillscheme')
qResult=coral.AttributeList()
qResult.extend('fillnum','unsigned int')
qResult.extend('runnum','unsigned int')
qResult.extend('fillscheme','string')
qHandle.setCondition(qConditionStr,qCondition)
qHandle.defineOutput(qResult)
cursor=qHandle.execute()
while cursor.next():
runnum=cursor.currentRow()['runnum'].data()
if runnum not in runs or result.has_key(runnum):
continue
fillnum=cursor.currentRow()['fillnum'].data()
afterglow=1.0
fillscheme=''
if cursor.currentRow()['fillscheme']:
fillscheme=cursor.currentRow()['fillscheme'].data()
if fillscheme and len(fillscheme)!=0:
afterglow=afterglowByFillscheme(fillscheme,afterglows)
result[runnum]=afterglow
except :
del qHandle
raise
del qHandle
for run in runs:
if run not in result.keys():
result[run]=1.0 #those have no fillscheme
return result
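#=======================================================================================================
# Illustrative usage sketch (added for exposition, not part of the original module). The auth
# path and run numbers below are placeholders; see the __main__ block underneath for the
# equivalent drift-correction call. The result maps each run number to its pixel afterglow
# factor (1.0 for runs without a fillscheme entry).
#
# svc=sessionManager.sessionManager('oracle://cms_orcoff_prep/cms_lumi_dev_offline',authpath='/path/to/authdir',debugON=False)
# session=svc.openSession(isReadOnly=True,cpp2sqltype=[('unsigned int','NUMBER(10)'),('unsigned long long','NUMBER(20)')])
# session.transaction().start(True)
# pixelresult=pixelcorrectionsForRange(session.nominalSchema(),[160467,152611])
# session.transaction().commit()
#=======================================================================================================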
if __name__ == "__main__":
import sessionManager
#myconstr='oracle://cms_orcoff_prod/cms_lumi_prod'
myconstr='oracle://cms_orcoff_prep/cms_lumi_dev_offline'
svc=sessionManager.sessionManager(myconstr,authpath='/afs/cern.ch/user/x/xiezhen',debugON=False)
session=svc.openSession(isReadOnly=True,cpp2sqltype=[('unsigned int','NUMBER(10)'),('unsigned long long','NUMBER(20)')])
#runrange=[163337,163387,163385,163664,163757,163269,1234,152611]
schema=session.nominalSchema()
session.transaction().start(True)
driftresult=driftcorrectionsForRange(schema,[160467,152611])
print driftresult
#result=correctionsForRange(schema,runrange)
session.transaction().commit()
del session
|
|
###################################################################
# Numexpr - Fast numerical array expression evaluator for NumPy.
#
# License: MIT
# Author: See AUTHORS.txt
#
# See LICENSE.txt and LICENSES/*.txt for details about copyright and
# rights to use.
####################################################################
__all__ = ['E']
import ast
import operator
import sys
import threading
import numpy
# Declare a double type that does not exist in Python space
double = numpy.double
# The default kind for undeclared variables
default_kind = 'double'
if sys.version_info[0] < 3:
int_ = int
long_ = long
else:
int_ = numpy.int32
long_ = numpy.int64
type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float',
double: 'double', complex: 'complex', bytes: 'bytes'}
kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float,
'double': double, 'complex': complex, 'bytes': bytes}
kind_rank = ['bool', 'int', 'long', 'float', 'double', 'complex', 'none']
scalar_constant_types = [bool, int_, long_, float, double, complex, bytes]
# Final corrections for Python 3 (mainly for PyTables needs)
if sys.version_info[0] > 2:
type_to_kind[str] = 'str'
kind_to_type['str'] = str
scalar_constant_types.append(str)
scalar_constant_types = tuple(scalar_constant_types)
class Expression(object):
def __init__(self):
object.__init__(self)
def __getattr__(self, name):
if name.startswith('_'):
return self.__dict__[name]
else:
return VariableNode(name, default_kind)
E = Expression()
class Context(threading.local):
initialized = False
def __init__(self, dict_):
if self.initialized:
raise SystemError('__init__ called too many times')
self.initialized = True
self.__dict__.update(dict_)
def get(self, value, default):
return self.__dict__.get(value, default)
def get_current_context(self):
return self.__dict__
def set_new_context(self, dict_):
self.__dict__.update(dict_)
# This will be called each time the local object is used in a separate thread
_context = Context({})
def get_optimization():
return _context.get('optimization', 'none')
# helper functions for creating __magic__ methods
def ophelper(f):
def func(*args):
args = list(args)
for i, x in enumerate(args):
if isConstant(x):
args[i] = x = ConstantNode(x)
if not isinstance(x, ExpressionNode):
raise TypeError("unsupported object type: %s" % type(x))
return f(*args)
func.__name__ = f.__name__
func.__doc__ = f.__doc__
func.__dict__.update(f.__dict__)
return func
def allConstantNodes(args):
"returns True if args are all ConstantNodes."
for x in args:
if not isinstance(x, ConstantNode):
return False
return True
def isConstant(ex):
"Returns True if ex is a constant scalar of an allowed type."
return isinstance(ex, scalar_constant_types)
def commonKind(nodes):
node_kinds = [node.astKind for node in nodes]
str_count = node_kinds.count('bytes') + node_kinds.count('str')
if 0 < str_count < len(node_kinds): # some args are strings, but not all
raise TypeError("strings can only be operated with strings")
if str_count > 0: # if there are some, all of them must be
return 'bytes'
n = -1
for x in nodes:
n = max(n, kind_rank.index(x.astKind))
return kind_rank[n]
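# Illustrative sketch (added for exposition, not part of the original module): kind promotion
# follows the kind_rank ordering above, so mixing an 'int' node with a 'double' node yields
# 'double', while mixing string and numeric kinds raises TypeError. VariableNode is defined
# further down in this module; 'x' and 'y' are hypothetical variable names.
def _demo_common_kind():
    nodes = [VariableNode('x', 'int'), VariableNode('y', 'double')]
    assert commonKind(nodes) == 'double'
    return commonKind(nodes)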
max_int32 = 2147483647
min_int32 = -max_int32 - 1
def bestConstantType(x):
# ``numpy.string_`` is a subclass of ``bytes``
if isinstance(x, (bytes, str)):
return bytes
# Numeric conversion to boolean values is not tried because
# ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be
# interpreted as booleans when ``False`` and ``True`` are already
# supported.
if isinstance(x, (bool, numpy.bool_)):
return bool
# ``long`` objects are kept as is to allow the user to force
# promotion of results by using long constants, e.g. by operating
# a 32-bit array with a long (64-bit) constant.
if isinstance(x, (long_, numpy.int64)):
return long_
# ``double`` objects are kept as is to allow the user to force
# promotion of results by using double constants, e.g. by operating
# a float (32-bit) array with a double (64-bit) constant.
if isinstance(x, double):
return double
if isinstance(x, (int, numpy.integer)):
# Constants needing more than 32 bits are always
# considered ``long``, *regardless of the platform*, so we
# can clearly tell 32- and 64-bit constants apart.
if not (min_int32 <= x <= max_int32):
return long_
return int_
# The duality of float and double in Python avoids that we have to list
# ``double`` too.
for converter in float, complex:
try:
y = converter(x)
        except Exception:
continue
if y == x:
return converter
def getKind(x):
converter = bestConstantType(x)
return type_to_kind[converter]
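# Illustrative sketch (added for exposition): how constants are classified by
# bestConstantType/getKind. Integers wider than 32 bits map to 'long' regardless of platform,
# and plain Python floats map to 'float' (they are promoted to 'double' when wrapped in a
# ConstantNode, see ConstantNode.__init__ below).
def _demo_constant_kinds():
    assert getKind(3) == 'int'
    assert getKind(2 ** 40) == 'long'
    assert getKind(1.5) == 'float'
    assert getKind(1j) == 'complex'
    assert getKind(b'abc') == 'bytes'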
def binop(opname, reversed=False, kind=None):
# Getting the named method from self (after reversal) does not
# always work (e.g. int constants do not have a __lt__ method).
opfunc = getattr(operator, "__%s__" % opname)
@ophelper
def operation(self, other):
if reversed:
self, other = other, self
if allConstantNodes([self, other]):
return ConstantNode(opfunc(self.value, other.value))
else:
return OpNode(opname, (self, other), kind=kind)
return operation
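# Illustrative sketch (added for exposition): binary operators fold when both operands are
# constants and otherwise build an OpNode with the promoted kind. ConstantNode and the
# operator overloads are defined further down; 'x' is a hypothetical variable name.
def _demo_binop_folding():
    folded = ConstantNode(2) + 3
    assert folded.astType == 'constant' and folded.value == 5
    mixed = E.x + 3
    assert mixed.astType == 'op' and mixed.astKind == 'double'
    return mixed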
def func(func, minkind=None, maxkind=None):
@ophelper
def function(*args):
if allConstantNodes(args):
return ConstantNode(func(*[x.value for x in args]))
kind = commonKind(args)
if kind in ('int', 'long'):
# Exception for following NumPy casting rules
#FIXME: this is not always desirable. The following
# functions which return ints (for int inputs) on numpy
# but not on numexpr: copy, abs, fmod, ones_like
kind = 'double'
else:
# Apply regular casting rules
if minkind and kind_rank.index(minkind) > kind_rank.index(kind):
kind = minkind
if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind):
kind = maxkind
name = func.__name__
if name == "absolute":
name = "abs"
return FuncNode(name, args, kind)
return function
@ophelper
def where_func(a, b, c):
if isinstance(a, ConstantNode):
#FIXME: This prevents where(True, a, b)
raise ValueError("too many dimensions")
if allConstantNodes([a,b,c]):
return ConstantNode(numpy.where(a, b, c))
return FuncNode('where', [a,b,c])
def encode_axis(axis):
if axis is not None:
if axis < 0:
            raise ValueError("negative axes are not supported")
if axis > 254:
raise ValueError("cannot encode axis")
return ConstantNode(axis)
#XXX: use ophelper (to convert args to ConstantNode)?
def sum_func(a, axis=None):
axis = encode_axis(axis)
    #XXX: I suspect these two ifs are swapped (see prod_func below)
if isinstance(a, ConstantNode):
return a
if isinstance(a, (bool, int_, long_, float, double, complex)):
a = ConstantNode(a)
return FuncNode('sum', [a, axis], kind=a.astKind)
def prod_func(a, axis=None):
axis = encode_axis(axis)
if isinstance(a, (bool, int_, long_, float, double, complex)):
a = ConstantNode(a)
if isinstance(a, ConstantNode):
return a
return FuncNode('prod', [a, axis], kind=a.astKind)
@ophelper
def div_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1./b.value)])
return OpNode('div', [a,b])
def cast(node, dtype):
# Assumes there is a function named like each type. For double, this is
# the case.
return FuncNode(dtype, (node,), dtype)
@ophelper
def truediv_op(a, b):
if get_optimization() in ('moderate', 'aggressive'):
if (isinstance(b, ConstantNode) and
(a.astKind == b.astKind) and
a.astKind in ('float', 'double', 'complex')):
return OpNode('mul', [a, ConstantNode(1./b.value)])
kind = commonKind([a, b])
if kind in ('bool', 'int', 'long'):
kind = 'double'
# force a cast
a = cast(a, 'double')
b = cast(b, 'double')
return OpNode('div', [a, b], kind=kind)
@ophelper
def rtruediv_op(a, b):
return truediv_op(b, a)
@ophelper
def pow_op(a, b):
if allConstantNodes([a, b]):
return ConstantNode(a**b)
if isinstance(b, ConstantNode):
x = b.value
if get_optimization() == 'aggressive':
RANGE = 50 # Approximate break even point with pow(x,y)
# Optimize all integral and half integral powers in [-RANGE, RANGE]
# Note: for complex numbers RANGE could be larger.
if (int(2*x) == 2*x) and (-RANGE <= abs(x) <= RANGE):
n = int_(abs(x))
ishalfpower = int_(abs(2*x)) % 2
def multiply(x, y):
if x is None: return y
return OpNode('mul', [x, y])
r = None
p = a
mask = 1
while True:
if (n & mask):
r = multiply(r, p)
mask <<= 1
if mask > n:
break
p = OpNode('mul', [p,p])
if ishalfpower:
kind = commonKind([a])
if kind in ('int', 'long'):
kind = 'double'
r = multiply(r, OpNode('sqrt', [a], kind))
if r is None:
r = OpNode('ones_like', [a])
if x < 0:
r = OpNode('div', [ConstantNode(1), r])
return r
if get_optimization() in ('moderate', 'aggressive'):
if x == -1:
return OpNode('div', [ConstantNode(1),a])
if x == 0:
return OpNode('ones_like', [a])
if x == 0.5:
kind = a.astKind
if kind in ('int', 'long'): kind = 'double'
return FuncNode('sqrt', [a], kind=kind)
if x == 1:
return a
if x == 2:
return OpNode('mul', [a,a])
return OpNode('pow', [a,b])
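# Illustrative sketch (added for exposition): under 'aggressive' optimization, small integral
# powers are expanded by square-and-multiply into 'mul' opcodes instead of a single 'pow'
# opcode. The context handling below mirrors how the optimization level is set; 'x' is a
# hypothetical variable name.
def _demo_pow_expansion():
    _context.set_new_context({'optimization': 'aggressive'})
    try:
        ex = E.x ** 4
        # The resulting tree contains only 'mul' op nodes, no 'pow'.
        assert all(node.value != 'pow' for node in ex.allOf('op'))
    finally:
        _context.set_new_context({'optimization': 'none'})
    return ex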
# The functions and the minimum and maximum types accepted
func_defs = {
'copy': (numpy.copy,),
'ones_like': (numpy.ones_like,),
'sqrt': (numpy.sqrt, 'float'),
'sin': (numpy.sin, 'float'),
'cos': (numpy.cos, 'float'),
'tan': (numpy.tan, 'float'),
'arcsin': (numpy.arcsin, 'float'),
'arccos': (numpy.arccos, 'float'),
'arctan': (numpy.arctan, 'float'),
'sinh': (numpy.sinh, 'float'),
'cosh': (numpy.cosh, 'float'),
'tanh': (numpy.tanh, 'float'),
'arcsinh': (numpy.arcsinh, 'float'),
'arccosh': (numpy.arccosh, 'float'),
'arctanh': (numpy.arctanh, 'float'),
'fmod': (numpy.fmod, 'float'),
'arctan2': (numpy.arctan2, 'float'),
'log': (numpy.log, 'float'),
'log1p': (numpy.log1p, 'float'),
'log10': (numpy.log10, 'float'),
'exp': (numpy.exp, 'float'),
'expm1': (numpy.expm1, 'float'),
'abs': (numpy.absolute, 'float'),
'complex': (complex, 'complex'),
}
functions = dict((k, func(*v)) for k, v in func_defs.items())
functions.update({
    'double': func(numpy.double),
    'real': func(numpy.real, 'double', 'double'),
    'imag': func(numpy.imag, 'double', 'double'),
'where' : where_func,
'sum' : sum_func,
'prod' : prod_func,
})
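# Illustrative sketch (added for exposition): entries in the `functions` table either fold
# constant arguments into a ConstantNode or build a FuncNode whose kind follows the casting
# rules implemented in func() above. 'counts' is a hypothetical variable name.
def _demo_function_table():
    folded = functions['sqrt'](4.0)
    assert folded.astType == 'constant' and folded.value == 2.0
    node = functions['log'](E.counts)
    assert node.astKind == 'double'
    return node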
class ExpressionNode(object):
"""An object that represents a generic number object.
This implements the number special methods so that we can keep
track of how this object has been used.
"""
astType = 'generic'
def __init__(self, value=None, kind=None, children=None):
object.__init__(self)
self.value = value
if kind is None:
kind = 'none'
self.astKind = kind
if children is None:
self.children = ()
else:
self.children = tuple(children)
def get_real(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).real)
return OpNode('real', (self,), 'double')
real = property(get_real)
def get_imag(self):
if self.astType == 'constant':
return ConstantNode(complex(self.value).imag)
return OpNode('imag', (self,), 'double')
imag = property(get_imag)
def __str__(self):
return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value,
self.astKind, self.children)
def __repr__(self):
return self.__str__()
def __neg__(self):
return OpNode('neg', (self,))
def __invert__(self):
return OpNode('invert', (self,))
def __pos__(self):
return self
# The next check is commented out. See #24 for more info.
def __nonzero__(self):
raise TypeError("You can't use Python's standard boolean operators in "
"NumExpr expressions. You should use their bitwise "
"counterparts instead: '&' instead of 'and', "
"'|' instead of 'or', and '~' instead of 'not'.")
__add__ = __radd__ = binop('add')
__sub__ = binop('sub')
__rsub__ = binop('sub', reversed=True)
__mul__ = __rmul__ = binop('mul')
if sys.version_info[0] < 3:
__div__ = div_op
__rdiv__ = binop('div', reversed=True)
__truediv__ = truediv_op
__rtruediv__ = rtruediv_op
__pow__ = pow_op
__rpow__ = binop('pow', reversed=True)
__mod__ = binop('mod')
__rmod__ = binop('mod', reversed=True)
# boolean operations
__and__ = binop('and', kind='bool')
__or__ = binop('or', kind='bool')
__gt__ = binop('gt', kind='bool')
__ge__ = binop('ge', kind='bool')
__eq__ = binop('eq', kind='bool')
__ne__ = binop('ne', kind='bool')
__lt__ = binop('gt', reversed=True, kind='bool')
__le__ = binop('ge', reversed=True, kind='bool')
def postorderWalk(self):
for c in self.children:
for w in c.postorderWalk():
yield w
yield self
def allOf(self, *astTypes):
astTypes = set(astTypes)
for w in self.postorderWalk():
if w.astType in astTypes:
yield w
class LeafNode(ExpressionNode):
leafNode = True
class VariableNode(LeafNode):
astType = 'variable'
def __init__(self, value=None, kind=None, children=None):
LeafNode.__init__(self, value=value, kind=kind)
def toPython(self):
return ast.Name(self.value, ast.Load())
class ConstantNode(LeafNode):
astType = 'constant'
def __init__(self, value=None, children=None):
if value is not None:
kind = getKind(value)
# Python float constants are double precision by default
if kind == 'float':
kind = 'double'
else:
kind = None
LeafNode.__init__(self, value=value, kind=kind)
def __neg__(self):
return ConstantNode(-self.value)
def __invert__(self):
return ConstantNode(~self.value)
def toPython(self):
return ast.Num(self.value)
class OpNode(ExpressionNode):
astType = 'op'
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
ExpressionNode.__init__(self, value=opcode, kind=kind, children=args)
def toPython(self):
# Invert | Not | UAdd | USub
unaryops = {
'invert': ast.Not,
# 'invert': ast.Invert, # use bitwise op, not logical op
'neg': ast.USub
}
# And | Or
boolops = {
'and': ast.And,
'or': ast.Or
}
# Eq | NotEq | Lt | LtE | Gt | GtE # Is | IsNot | In | NotIn
cmpops = {
'gt': ast.Gt,
'ge': ast.GtE,
'eq': ast.Eq,
'ne': ast.NotEq
# lt and le are handled by their reverse operation
}
binops = {
'add': ast.Add,
'sub': ast.Sub,
'mul': ast.Mult,
'div': ast.Div,
'mod': ast.Mod,
'pow': ast.Pow,
# translate to bit ops not "logical" ops
# 'and': ast.BitAnd,
# 'or': ast.BitOr
}
op = self.value
ch = self.children
args = [c.toPython() for c in ch]
if op in unaryops:
arg = args[0]
# force conversion to int to workaround numba #238
# it fixes age * ~gender but breaks count(~gender) :(
# if op == 'invert':
# arg = ast.BinOp(arg, ast.Add(), ast.Num(0))
return ast.UnaryOp(unaryops[op](), arg)
elif op in boolops:
return ast.BoolOp(boolops[op](), args)
elif op in cmpops:
# Transform "var != var" to "not (var == var)". This is a
# workaround for numba issue #247 "(a != a) is False for nan"
# This is a weak workaround because it will only work for simple
# variable, while it should work for all expressions that return
# nans on both sides.
if (ch[0].astType == 'variable' and ch[1].astType == 'variable' and
ch[0].value == ch[1].value and op == 'ne'):
comparison = ast.Compare(args[0], [cmpops['eq']()], [args[1]])
return ast.UnaryOp(ast.Not(), comparison)
else:
return ast.Compare(args[0], [cmpops[op]()], [args[1]])
elif op == 'ones_like':
return ast.Num(1)
elif op == 'real':
return ast.Attribute(args[0], 'real', ast.Load())
elif op == 'imag':
return ast.Attribute(args[0], 'imag', ast.Load())
else:
binop = ast.BinOp(args[0], binops[op](), args[1])
# shield against integer division by 0
if (op == 'div' and ch[1].astKind in ('int', 'long') and
ch[1].astType != 'constant'):
return ast.IfExp(args[1], binop, ast.Num(0))
return binop
class FuncNode(OpNode):
def __init__(self, opcode=None, args=None, kind=None):
if (kind is None) and (args is not None):
kind = commonKind(args)
OpNode.__init__(self, opcode, args, kind)
def toPython(self):
args = [c.toPython() if c is not None else None for c in self.children]
if self.value == "where":
return ast.IfExp(*args)
elif self.value == "real":
return ast.Attribute(args[0], 'real', ast.Load())
elif self.value == "imag":
return ast.Attribute(args[0], 'imag', ast.Load())
elif self.value == 'copy':
assert len(args) == 1
# implement copy as a no-op
return args[0]
elif self.value == 'ones_like':
return ast.Num(1)
else:
return ast.Call(ast.Name(self.value, ast.Load()), args, [],
None, None)
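# Illustrative sketch (added for exposition, not part of the original module): build an
# expression tree through the E namespace and lower it to a Python AST with toPython().
# 'age' and 'weight' are hypothetical variable names.
def _demo_to_python():
    ex = functions['where'](E.age > 30, E.weight * 2, E.weight)
    tree = ex.toPython()  # an ast.IfExp whose test is an ast.Compare
    return ast.dump(tree)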
|
|
from rti_python.Writer.rti_sql import rti_sql
from rti_python.Ensemble import Ensemble
from datetime import datetime, date, time
class RtiProjects:
"""
Handle the projects.
Create projects and add data to the projects.
"""
def __init__(self,
host='localhost',
port=5432,
dbname='postgres',
user='user',
pw='pw'):
# Construct connection string
self.sql_conn_string = "host=\'{0}\' port=\'{1}\' dbname=\'{2}\' user=\'{3}\' password=\'{4}\'".format(host, port, dbname, user, pw)
# Sql connection when doing batch inserts
self.batch_sql = None
self.batch_prj_id = 0
self.batch_count = 0
def add_prj_sql(self, prj_name, prj_file_path):
"""
Add the given project name to the projects table.
:param prj_name: Project name
        :param prj_file_path: Path of the project file.
        :return: New project index if the project was added. -1 = Project already exists and could not be added.
"""
# Check if the project exist
project_exist = self.check_project_exist(prj_name)
if project_exist == 0:
# Add project to database
dt = datetime.now()
sql = rti_sql(self.sql_conn_string)
query = 'INSERT INTO projects (name, path, created, modified) VALUES (%s,%s,%s,%s) RETURNING ID;'
sql.cursor.execute(query, (prj_name, prj_file_path, dt, dt))
prj_idx = sql.cursor.fetchone()[0]
sql.conn.commit()
print(prj_idx)
sql.close()
return prj_idx
elif project_exist > 0:
# Send a warning and make them give a new name
return -1
def check_project_exist(self, prj_name):
"""
        Check if the given project name exists in the projects table.
        :param prj_name: Project Name.
        :return: Project index if it exists. 0 = Project does not exist. Negative = Database error.
"""
idx = -1
# Make connection
try:
sql = rti_sql(self.sql_conn_string)
        except Exception as e:
            # No sql connection object exists to close if the connection failed
            print("Unable to connect to the database", e)
            return -1
# Check if the project exists
try:
result = sql.query('SELECT id FROM projects WHERE name = \'{0}\' LIMIT 1;'.format(prj_name))
# Check for a result
if not result:
idx = 0 # No project found
else:
idx = result[0][0] # Index found
except Exception as e:
print("Unable to run query", e)
sql.close()
return -2
# Close connection
sql.close()
return idx
def get_all_projects(self):
"""
Select all the projects from the database.
        :return: All the projects in the projects table.
"""
result = None
# Make connection
try:
sql = rti_sql(self.sql_conn_string)
except Exception as e:
print("Unable to connect to the database")
return result
# Get all projects
try:
result = sql.query('SELECT * FROM projects;')
except Exception as e:
            print("Unable to run query", e)
            sql.close()
            return result
# Close connection
sql.close()
return result
def begin_batch(self, prj_name):
# Make connection
try:
self.batch_sql = rti_sql(self.sql_conn_string)
        except Exception as e:
            print("Unable to connect to the database", e)
            return
# Get the index for the given project name
self.batch_prj_id = self.batch_sql.query('SELECT id FROM projects WHERE name=\'{0}\''.format(prj_name))
print("Project ID: " + str(self.batch_prj_id))
def end_batch(self):
# Commit the batch
        self.batch_sql.commit()
# Close connection
self.batch_sql.close()
# Set the connection to none
self.batch_sql = None
def add_ensemble(self, ens, burst_num=0):
'''
Add the ensemble to the database.
:param ens: Ensemble to store data.
:param burst_num: Burst number if a waves deployment.
:return:
'''
if self.batch_sql is not None:
# Ensemble and Ancillary dataset
try:
ens_idx = self.add_ensemble_ds(ens, burst_num)
except Exception as ex:
print("Error adding Ensemble, Ancillary and System Setup Dataset to project.", ex)
return
# Correlation
try:
if ens.IsCorrelation:
self.add_dataset("correlation",
ens.Correlation.Correlation,
ens.Correlation.num_elements,
ens.Correlation.element_multiplier,
ens_idx)
except Exception as ex:
print("Error adding Correlation to project.", ex)
# Amplitude
try:
if ens.IsAmplitude:
self.add_dataset("amplitude",
ens.Amplitude.Amplitude,
ens.Amplitude.num_elements,
ens.Amplitude.element_multiplier,
ens_idx)
except Exception as ex:
print("Error adding Amplitude to project.", ex)
# Beam Velocity
try:
if ens.IsBeamVelocity:
self.add_dataset("beamvelocity",
ens.BeamVelocity.Velocities,
ens.BeamVelocity.num_elements,
ens.BeamVelocity.element_multiplier,
ens_idx)
except Exception as ex:
print("Error adding Beam Velocity to project.", ex)
# Instrument Velocity
try:
if ens.IsInstrumentVelocity:
self.add_dataset("instrumentvelocity",
ens.InstrumentVelocity.Velocities,
ens.InstrumentVelocity.num_elements,
ens.InstrumentVelocity.element_multiplier,
ens_idx)
except Exception as ex:
print("Error adding Instrument Velocity to project.", ex)
# Earth Velocity
try:
if ens.IsEarthVelocity:
self.add_dataset("earthvelocity",
ens.EarthVelocity.Velocities,
ens.EarthVelocity.num_elements,
ens.EarthVelocity.element_multiplier,
ens_idx)
except Exception as ex:
print("Error adding Earth Velocity to project.", ex)
# Good Beam Ping
try:
if ens.IsGoodBeam:
self.add_dataset("goodbeamping",
ens.GoodBeam.GoodBeam,
ens.GoodBeam.num_elements,
ens.GoodBeam.element_multiplier,
ens_idx,
bad_val=0)
except Exception as ex:
print("Error adding Good Beam to project.", ex)
# Good Earth Ping
try:
if ens.IsGoodEarth:
self.add_dataset("goodearthping",
ens.GoodEarth.GoodEarth,
ens.GoodEarth.num_elements,
ens.GoodEarth.element_multiplier,
ens_idx,
bad_val=0)
except Exception as ex:
print("Error adding Good Earth to project.", ex)
# Bottom Track
try:
if ens.IsBottomTrack:
self.add_bottomtrack_ds(ens, ens_idx)
except Exception as ex:
print("Error adding Bottom Track to project.", ex)
# Range Tracking
try:
if ens.IsRangeTracking:
self.add_rangetracking_ds(ens, ens_idx)
except Exception as ex:
print("Error adding Range Tracking to project.", ex)
# NMEA
try:
if ens.IsNmeaData:
year = 2017
month = 1
day = 1
if ens.IsEnsembleData:
year = ens.EnsembleData.Year
month = ens.EnsembleData.Month
day = ens.EnsembleData.Day
self.add_nmea_ds(ens, ens_idx, year=year, month=month, day=day)
except Exception as ex:
print("Error adding NMEA to project.", ex)
else:
print("Batch import not started. Please call begin_batch() first.")
def add_ensemble_ds(self, ens, burst_num=0):
"""
Add the Ensemble dataset to the database.
"""
if not ens.IsEnsembleData or not ens.IsAncillaryData or not ens.IsSystemSetup:
return
# Get Date and time for created and modified
dt = datetime.now()
# Add line for each dataset type
ens_query = "INSERT INTO ensembles (" \
"ensnum, " \
"numbins, " \
"numbeams, " \
"desiredpings, " \
"actualpings, " \
"status, " \
"datetime, " \
"serialnumber, " \
"firmware, " \
"subsystemCode, " \
"subsystemConfig, " \
'rangeFirstBin, ' \
'binSize, ' \
'firstPingTime, ' \
'lastPingTime, ' \
'heading, ' \
'pitch, ' \
'roll, ' \
'waterTemp, ' \
'sysTemp, ' \
'salinity, ' \
'pressure, ' \
'xdcrDepth, ' \
'sos, ' \
'rawMagFieldStrength,' \
'pitchGravityVector, ' \
'rollGravityVector, ' \
'verticalGravityVector, ' \
'BtSamplesPerSecond, ' \
'BtSystemFreqHz, ' \
'BtCPCE, ' \
'BtNCE, ' \
'BtRepeatN, ' \
'WpSamplesPerSecond, ' \
'WpSystemFreqHz, ' \
'WpCPCE, ' \
'WpNCE, ' \
'WpRepeatN, ' \
'WpLagSamples, ' \
'Voltage, ' \
'XmtVoltage, ' \
'BtBroadband, ' \
'BtLagLength, ' \
'BtNarrowband, ' \
'BtBeamMux, ' \
'WpBroadband, ' \
'WpLagLength, ' \
'WpTransmitBandwidth, ' \
'WpReceiveBandwidth, ' \
'burstNum, ' \
'project_id, ' \
'created, ' \
'modified)' \
'VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ' \
'RETURNING ID;'
self.batch_sql.cursor.execute(ens_query, (ens.EnsembleData.EnsembleNumber,
ens.EnsembleData.NumBins,
ens.EnsembleData.NumBeams,
ens.EnsembleData.DesiredPingCount,
ens.EnsembleData.ActualPingCount,
ens.EnsembleData.Status,
ens.EnsembleData.datetime(),
ens.EnsembleData.SerialNumber,
ens.EnsembleData.firmware_str(),
ens.EnsembleData.SysFirmwareSubsystemCode,
ens.EnsembleData.SubsystemConfig,
ens.AncillaryData.FirstBinRange,
ens.AncillaryData.BinSize,
ens.AncillaryData.FirstPingTime,
ens.AncillaryData.LastPingTime,
ens.AncillaryData.Heading,
ens.AncillaryData.Pitch,
ens.AncillaryData.Roll,
ens.AncillaryData.WaterTemp,
ens.AncillaryData.SystemTemp,
ens.AncillaryData.Salinity,
ens.AncillaryData.Pressure,
ens.AncillaryData.TransducerDepth,
ens.AncillaryData.SpeedOfSound,
ens.AncillaryData.RawMagFieldStrength,
ens.AncillaryData.PitchGravityVector,
ens.AncillaryData.RollGravityVector,
ens.AncillaryData.VerticalGravityVector,
ens.SystemSetup.BtSamplesPerSecond,
ens.SystemSetup.BtSystemFreqHz,
ens.SystemSetup.BtCPCE,
ens.SystemSetup.BtNCE,
ens.SystemSetup.BtRepeatN,
ens.SystemSetup.WpSamplesPerSecond,
ens.SystemSetup.WpSystemFreqHz,
ens.SystemSetup.WpCPCE,
ens.SystemSetup.WpNCE,
ens.SystemSetup.WpRepeatN,
ens.SystemSetup.WpLagSamples,
ens.SystemSetup.Voltage,
ens.SystemSetup.XmtVoltage,
ens.SystemSetup.BtBroadband,
ens.SystemSetup.BtLagLength,
ens.SystemSetup.BtNarrowband,
ens.SystemSetup.BtBeamMux,
ens.SystemSetup.WpBroadband,
ens.SystemSetup.WpLagLength,
ens.SystemSetup.WpTransmitBandwidth,
ens.SystemSetup.WpReceiveBandwidth,
burst_num,
self.batch_prj_id[0][0],
dt,
dt))
ens_idx = self.batch_sql.cursor.fetchone()[0]
#print("rti_projects:add_ensemble_ds() Ens Index: " + str(ens_idx))
# Monitor how many inserts have been done so it does not get too big
self.batch_count += 1
if self.batch_count > 10:
self.batch_sql.commit()
self.batch_count = 0
return ens_idx
def add_bottomtrack_ds(self, ens, ens_idx):
if not ens.IsBottomTrack:
return
# Get Date and time for created and modified
dt = datetime.now()
query_range_label = ""
query_range_val = ""
query_snr_label = ""
query_snr_val = ""
query_amp_label = ""
query_amp_val = ""
query_corr_label = ""
query_corr_val = ""
query_beam_vel_label = ""
query_beam_vel_val = ""
query_beam_ping_label = ""
query_beam_ping_val = ""
query_instr_vel_label = ""
query_instr_vel_val = ""
query_instr_good_label = ""
query_instr_good_val = ""
query_earth_vel_label = ""
query_earth_vel_val = ""
query_earth_good_label = ""
query_earth_good_val = ""
query_snr_pc_label = ""
query_snr_pc_val = ""
query_amp_pc_label = ""
query_amp_pc_val = ""
query_vel_pc_label = ""
query_vel_pc_val = ""
query_noise_pc_label = ""
query_noise_pc_val = ""
query_corr_pc_label = ""
query_corr_pc_val = ""
for beam in range(int(ens.BottomTrack.NumBeams)):
query_range_label += "rangeBeam{0}, ".format(beam)
query_range_val += "{0}, ".format(ens.BottomTrack.Range[beam])
query_snr_label += "snrBeam{0}, ".format(beam)
query_snr_val += "{0}, ".format(ens.BottomTrack.SNR[beam])
query_amp_label += "ampBeam{0}, ".format(beam)
query_amp_val += "{0}, ".format(ens.BottomTrack.Amplitude[beam])
query_corr_label += "corrBeam{0}, ".format(beam)
query_corr_val += "{0}, ".format(ens.BottomTrack.Correlation[beam])
query_beam_vel_label += "beamVelBeam{0}, ".format(beam)
query_beam_vel_val += "{0}, ".format(ens.BottomTrack.BeamVelocity[beam])
query_beam_ping_label += "beamGoodBeam{0}, ".format(beam)
query_beam_ping_val += "{0}, ".format(int(ens.BottomTrack.BeamGood[beam]))
query_instr_vel_label += "instrVelBeam{0}, ".format(beam)
query_instr_vel_val += "{0}, ".format(ens.BottomTrack.InstrumentVelocity[beam])
query_instr_good_label += "instrGoodBeam{0}, ".format(beam)
query_instr_good_val += "{0}, ".format(int(ens.BottomTrack.InstrumentGood[beam]))
query_earth_vel_label += "earthVelBeam{0}, ".format(beam)
query_earth_vel_val += "{0}, ".format(ens.BottomTrack.EarthVelocity[beam])
query_earth_good_label += "earthGoodBeam{0}, ".format(beam)
query_earth_good_val += "{0}, ".format(int(ens.BottomTrack.EarthGood[beam]))
query_snr_pc_label += "snrPulseCoherentBeam{0}, ".format(beam)
query_snr_pc_val += "{0}, ".format(ens.BottomTrack.SNR_PulseCoherent[beam])
query_amp_pc_label += "ampPulseCoherentBeam{0}, ".format(beam)
query_amp_pc_val += "{0}, ".format(ens.BottomTrack.Amp_PulseCoherent[beam])
query_vel_pc_label += "velPulseCoherentBeam{0}, ".format(beam)
query_vel_pc_val += "{0}, ".format(ens.BottomTrack.Vel_PulseCoherent[beam])
query_noise_pc_label += "noisePulseCoherentBeam{0}, ".format(beam)
query_noise_pc_val += "{0}, ".format(ens.BottomTrack.Noise_PulseCoherent[beam])
query_corr_pc_label += "corrPulseCoherentBeam{0}, ".format(beam)
query_corr_pc_val += "{0}, ".format(ens.BottomTrack.Corr_PulseCoherent[beam])
query_range_label = query_range_label[:-2] # Remove final comma
query_range_val = query_range_val[:-2] # Remove final comma
query_snr_label = query_snr_label[:-2] # Remove final comma
query_snr_val = query_snr_val[:-2] # Remove final comma
query_amp_label = query_amp_label[:-2] # Remove final comma
query_amp_val = query_amp_val[:-2] # Remove final comma
query_corr_label = query_corr_label[:-2] # Remove final comma
query_corr_val = query_corr_val[:-2] # Remove final comma
query_beam_vel_label = query_beam_vel_label[:-2] # Remove final comma
query_beam_vel_val = query_beam_vel_val[:-2] # Remove final comma
query_beam_ping_label = query_beam_ping_label[:-2] # Remove final comma
query_beam_ping_val = query_beam_ping_val[:-2] # Remove final comma
query_instr_vel_label = query_instr_vel_label[:-2] # Remove final comma
query_instr_vel_val = query_instr_vel_val[:-2] # Remove final comma
query_instr_good_label = query_instr_good_label[:-2] # Remove final comma
query_instr_good_val = query_instr_good_val[:-2] # Remove final comma
query_earth_vel_label = query_earth_vel_label[:-2] # Remove final comma
query_earth_vel_val = query_earth_vel_val[:-2] # Remove final comma
query_earth_good_label = query_earth_good_label[:-2] # Remove final comma
query_earth_good_val = query_earth_good_val[:-2] # Remove final comma
query_snr_pc_label = query_snr_pc_label[:-2] # Remove final comma
query_snr_pc_val = query_snr_pc_val[:-2] # Remove final comma
query_amp_pc_label = query_amp_pc_label[:-2] # Remove final comma
query_amp_pc_val = query_amp_pc_val[:-2] # Remove final comma
query_vel_pc_label = query_vel_pc_label[:-2] # Remove final comma
query_vel_pc_val = query_vel_pc_val[:-2] # Remove final comma
query_noise_pc_label = query_noise_pc_label[:-2] # Remove final comma
query_noise_pc_val = query_noise_pc_val[:-2] # Remove final comma
query_corr_pc_label = query_corr_pc_label[:-2] # Remove final comma
query_corr_pc_val = query_corr_pc_val[:-2] # Remove final comma
# Add line for each dataset type
query = "INSERT INTO bottomtrack (" \
'ensIndex, ' \
'firstPingTime, ' \
'lastPingTime, ' \
'heading, ' \
'pitch, ' \
'roll, ' \
'waterTemp, ' \
'salinity, ' \
'xdcrDepth, ' \
'pressure, ' \
'sos, ' \
'status, ' \
'numBeams, ' \
'pingCount, ' \
'{0}, ' \
'{1}, ' \
'{2}, ' \
'{3}, ' \
'{4}, ' \
'{5}, ' \
'{6}, ' \
'{7}, ' \
'{8}, ' \
'{9}, ' \
'{10}, ' \
'{11}, ' \
'{12}, ' \
'{13}, ' \
'{14}, ' \
'created, ' \
"modified)" \
"VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s," \
"{15}," \
"{16}," \
"{17}," \
"{18}," \
"{19}," \
"{20}," \
"{21}," \
"{22}," \
"{23}," \
"{24}," \
"{25}," \
"{26}," \
"{27}," \
"{28}," \
"{29}," \
"%s,%s);".format(query_range_label,
query_snr_label,
query_amp_label,
query_corr_label,
query_beam_vel_label,
query_beam_ping_label,
query_instr_vel_label,
query_instr_good_label,
query_earth_vel_label,
query_earth_good_label,
query_snr_pc_label,
query_amp_pc_label,
query_vel_pc_label,
query_noise_pc_label,
query_corr_pc_label,
query_range_val,
query_snr_val,
query_amp_val,
query_corr_val,
query_beam_vel_val,
query_beam_ping_val,
query_instr_vel_val,
query_instr_good_val,
query_earth_vel_val,
query_earth_good_val,
query_snr_pc_val,
query_amp_pc_val,
query_vel_pc_val,
query_noise_pc_val,
query_corr_pc_val)
self.batch_sql.cursor.execute(query, (ens_idx,
ens.BottomTrack.FirstPingTime,
ens.BottomTrack.LastPingTime,
ens.BottomTrack.Heading,
ens.BottomTrack.Pitch,
ens.BottomTrack.Roll,
ens.BottomTrack.WaterTemp,
ens.BottomTrack.Salinity,
ens.BottomTrack.TransducerDepth,
ens.BottomTrack.Pressure,
ens.BottomTrack.SpeedOfSound,
int(ens.BottomTrack.Status),
int(ens.BottomTrack.NumBeams),
int(ens.BottomTrack.ActualPingCount),
dt,
dt))
# Monitor how many inserts have been done so it does not get too big
self.batch_count += 1
if self.batch_count > 10:
self.batch_sql.commit()
self.batch_count = 0
def add_rangetracking_ds(self, ens, ens_idx):
if not ens.IsRangeTracking:
return
# Get Date and time for created and modified
dt = datetime.now()
query_range_label = ""
query_range_val = ""
query_snr_label = ""
query_snr_val = ""
query_pings_label = ""
query_pings_val = ""
query_amp_label = ""
query_amp_val = ""
query_corr_label = ""
query_corr_val = ""
query_beam_vel_label = ""
query_beam_vel_val = ""
query_instr_vel_label = ""
query_instr_vel_val = ""
query_earth_vel_label = ""
query_earth_vel_val = ""
for beam in range(int(ens.RangeTracking.NumBeams)):
query_range_label += "rangeBeam{0}, ".format(beam)
query_range_val += "{0}, ".format(ens.RangeTracking.Range[beam])
query_snr_label += "snrBeam{0}, ".format(beam)
query_snr_val += "{0}, ".format(ens.RangeTracking.SNR[beam])
query_amp_label += "ampBeam{0}, ".format(beam)
query_amp_val += "{0}, ".format(ens.RangeTracking.Amplitude[beam])
query_corr_label += "corrBeam{0}, ".format(beam)
query_corr_val += "{0}, ".format(ens.RangeTracking.Correlation[beam])
query_beam_vel_label += "beamVelBeam{0}, ".format(beam)
query_beam_vel_val += "{0}, ".format(ens.RangeTracking.BeamVelocity[beam])
query_pings_label += "pingsBeam{0}, ".format(beam)
query_pings_val += "{0}, ".format(int(ens.RangeTracking.Pings[beam]))
query_instr_vel_label += "instrVelBeam{0}, ".format(beam)
            query_instr_vel_val += "{0}, ".format(ens.RangeTracking.InstrumentVelocity[beam])
            query_earth_vel_label += "earthVelBeam{0}, ".format(beam)
            query_earth_vel_val += "{0}, ".format(ens.RangeTracking.EarthVelocity[beam])
query_range_label = query_range_label[:-2] # Remove final comma
query_range_val = query_range_val[:-2] # Remove final comma
query_snr_label = query_snr_label[:-2] # Remove final comma
query_snr_val = query_snr_val[:-2] # Remove final comma
query_amp_label = query_amp_label[:-2] # Remove final comma
query_amp_val = query_amp_val[:-2] # Remove final comma
query_corr_label = query_corr_label[:-2] # Remove final comma
query_corr_val = query_corr_val[:-2] # Remove final comma
query_beam_vel_label = query_beam_vel_label[:-2] # Remove final comma
query_beam_vel_val = query_beam_vel_val[:-2] # Remove final comma
query_beam_ping_label = query_pings_label[:-2] # Remove final comma
query_beam_ping_val = query_pings_val[:-2] # Remove final comma
query_instr_vel_label = query_instr_vel_label[:-2] # Remove final comma
query_instr_vel_val = query_instr_vel_val[:-2] # Remove final comma
query_earth_vel_label = query_earth_vel_label[:-2] # Remove final comma
query_earth_vel_val = query_earth_vel_val[:-2] # Remove final comma
# Add line for each dataset type
query = "INSERT INTO rangetracking (" \
'ensIndex, ' \
'numBeams, ' \
'{0}, ' \
'{1}, ' \
'{2}, ' \
'{3}, ' \
'{4}, ' \
'{5}, ' \
'{6}, ' \
'{7}, ' \
                'created, ' \
                "modified)" \
                "VALUES(%s,%s," \
"{8}," \
"{9}," \
"{10}," \
"{11}," \
"{12}," \
"{13}," \
"{14}," \
"{15}," \
"%s,%s);".format(query_range_label,
query_snr_label,
query_amp_label,
query_corr_label,
query_beam_vel_label,
                                 query_beam_ping_label,
query_instr_vel_label,
query_earth_vel_label,
query_range_val,
query_snr_val,
query_amp_val,
query_corr_val,
query_beam_vel_val,
query_beam_ping_val,
query_instr_vel_val,
query_earth_vel_val)
self.batch_sql.cursor.execute(query, (ens_idx,
                                              int(ens.RangeTracking.NumBeams),
dt,
dt))
# Monitor how many inserts have been done so it does not get too big
self.batch_count += 1
if self.batch_count > 10:
self.batch_sql.commit()
self.batch_count = 0
def add_nmea_ds(self, ens, ens_idx, year=2017, month=1, day=1):
"""
Add the NMEA dataset to the database.
"""
if not ens.IsNmeaData:
return
# Get Date and time for created and modified
dt = datetime.now()
# GPS DateTime
ens_date = date(year, month, day)
gps_time = ens.NmeaData.datetime
gps_datetime = datetime.combine(ens_date, gps_time)
# Set null if does not exist
gga = str(ens.NmeaData.GPGGA)
if ens.NmeaData.GPGGA is None:
gga = None
vtg = str(ens.NmeaData.GPVTG)
if ens.NmeaData.GPVTG is None:
vtg = None
rmc = str(ens.NmeaData.GPRMC)
if ens.NmeaData.GPRMC is None:
rmc = None
rmf = str(ens.NmeaData.GPRMF)
if ens.NmeaData.GPRMF is None:
rmf = None
gll = str(ens.NmeaData.GPGLL)
if ens.NmeaData.GPGLL is None:
gll = None
gsv = str(ens.NmeaData.GPGSV)
if ens.NmeaData.GPGSV is None:
gsv = None
gsa = str(ens.NmeaData.GPGSA)
if ens.NmeaData.GPGSA is None:
gsa = None
hdt = str(ens.NmeaData.GPHDT)
if ens.NmeaData.GPHDT is None:
hdt = None
hdg = str(ens.NmeaData.GPHDG)
if ens.NmeaData.GPHDG is None:
hdg = None
# Add line for each dataset type
query = "INSERT INTO nmea (" \
"ensIndex, " \
"nmea, " \
"GPGGA, " \
"GPVTG, " \
"GPRMC, " \
"GPRMF, " \
"GPGLL, " \
"GPGSV, " \
"GPGSA, " \
"GPHDT, " \
"GPHDG, " \
"latitude, " \
"longitude, "\
"speed_knots, " \
"heading, " \
"datetime, " \
"created, " \
"modified) " \
"VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);"
#print(query)
self.batch_sql.cursor.execute(query, (ens_idx,
ens.NmeaData.nmea_sentences,
gga,
vtg,
rmc,
rmf,
gll,
gsv,
gsa,
hdt,
hdg,
ens.NmeaData.latitude,
ens.NmeaData.longitude,
ens.NmeaData.speed_knots,
ens.NmeaData.heading,
gps_datetime,
dt,
dt))
# Monitor how many inserts have been done so it does not get too big
self.batch_count += 1
if self.batch_count > 10:
self.batch_sql.commit()
self.batch_count = 0
return ens_idx
def add_dataset(self, table, data, num_elements, element_multiplier, ens_idx, bad_val=Ensemble.Ensemble.BadVelocity):
"""
Add a dataset to the database. Give the table name, data, number of beams and bins and the ensemble index.
:param table: Table name as a string.
:param data: 2D Array of the data.
:param num_elements: Number of bins.
:param element_multiplier: Number of beams.
:param ens_idx: Ensemble index in Ensembles table.
:param bad_val: If a value is bad or missing, replace it with this value.
"""
# Get Date and time for created and modified
dt = datetime.now()
beam0_avail = False
beam1_avail = False
beam2_avail = False
beam3_avail = False
query_b0_label = ""
query_b0_val = ""
query_b1_label = ""
query_b1_val = ""
query_b2_label = ""
query_b2_val = ""
query_b3_label = ""
query_b3_val = ""
for bin_num in range(num_elements):
if element_multiplier > 0:
query_b0_label += "Bin{0}, ".format(bin_num)
if data[bin_num][0]:
query_b0_val += "{0}, ".format(data[bin_num][0])
else:
query_b0_val += "{0}, ".format(bad_val)
beam0_avail = True
if element_multiplier > 1:
query_b1_label += "Bin{0}, ".format(bin_num)
if data[bin_num][1]:
query_b1_val += "{0}, ".format(data[bin_num][1])
else:
query_b1_val += "{0}, ".format(bad_val)
beam1_avail = True
if element_multiplier > 2:
query_b2_label += "Bin{0}, ".format(bin_num)
if data[bin_num][2]:
query_b2_val += "{0}, ".format(data[bin_num][2])
else:
query_b2_val += "{0}, ".format(bad_val)
beam2_avail = True
if element_multiplier > 3:
query_b3_label += "Bin{0}, ".format(bin_num)
if data[bin_num][3]:
query_b3_val += "{0}, ".format(data[bin_num][3])
else:
query_b3_val += "{0}, ".format(bad_val)
beam3_avail = True
query_b0_label = query_b0_label[:-2] # Remove final comma
query_b0_val = query_b0_val[:-2] # Remove final comma
query_b1_label = query_b1_label[:-2] # Remove final comma
query_b1_val = query_b1_val[:-2] # Remove final comma
query_b2_label = query_b2_label[:-2] # Remove final comma
query_b2_val = query_b2_val[:-2] # Remove final comma
query_b3_label = query_b3_label[:-2] # Remove final comma
query_b3_val = query_b3_val[:-2] # Remove final comma
# Add line for each beam
if beam0_avail:
query = "INSERT INTO {0} (" \
"ensIndex, " \
"beam, " \
"{1}, " \
"created, " \
"modified) " \
"VALUES ( %s, %s, {2}, %s, %s);".format(table, query_b0_label, query_b0_val)
#print(query)
self.batch_sql.cursor.execute(query, (ens_idx, 0, dt, dt))
if beam1_avail:
query = "INSERT INTO {0} (" \
"ensIndex, " \
"beam, " \
"{1}, " \
"created, " \
"modified) " \
"VALUES ( %s, %s, {2}, %s, %s);".format(table, query_b1_label, query_b1_val)
#print(query)
self.batch_sql.cursor.execute(query, (ens_idx, 1, dt, dt))
if beam2_avail:
query = "INSERT INTO {0} (" \
"ensIndex, " \
"beam, " \
"{1}, " \
"created, " \
"modified) " \
"VALUES ( %s, %s, {2}, %s, %s);".format(table, query_b2_label, query_b2_val)
#print(query)
self.batch_sql.cursor.execute(query, (ens_idx, 2, dt, dt))
if beam3_avail:
query = "INSERT INTO {0} (" \
"ensIndex, " \
"beam, " \
"{1}, " \
"created, " \
"modified) " \
"VALUES ( %s, %s, {2}, %s, %s);".format(table, query_b3_label, query_b3_val)
#print(query)
self.batch_sql.cursor.execute(query, (ens_idx, 3, dt, dt))
# Monitor how many inserts have been done so it does not get too big
self.batch_count += 1
if self.batch_count > 10:
self.batch_sql.commit()
self.batch_count = 0
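# Illustrative sketch (added for exposition, not part of the original module): a typical
# batch-import workflow. Host, credentials, project name and the ensemble source are
# placeholders; ensembles would normally come from a decoded ensemble file or serial stream.
def _example_batch_import(ensembles, prj_name="demo_prj", prj_path="/tmp/demo_prj"):
    projects = RtiProjects(host='localhost', dbname='postgres', user='user', pw='pw')
    if projects.check_project_exist(prj_name) == 0:
        projects.add_prj_sql(prj_name, prj_path)
    projects.begin_batch(prj_name)              # One connection for all inserts
    try:
        for ens in ensembles:
            projects.add_ensemble(ens)          # Ensemble plus per-beam datasets
    finally:
        projects.end_batch()                    # Commit and close the connection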
|
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides a class used to describe the elastic tensor,
including methods used to fit the elastic tensor from linear response
stress-strain data
"""
import itertools
import warnings
from collections import OrderedDict
import numpy as np
import sympy as sp
from monty.dev import deprecated
from scipy.integrate import quad
from scipy.optimize import root
from scipy.special import factorial
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.analysis.elasticity.stress import Stress
from pymatgen.core.tensors import (
DEFAULT_QUAD,
SquareTensor,
Tensor,
TensorCollection,
get_uvec,
)
from pymatgen.core.units import Unit
__author__ = "Joseph Montoya"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Maarten de Jong, Ian Winter, Shyam Dwaraknath, Mark Asta, Anubhav Jain"
__version__ = "1.0"
__maintainer__ = "Joseph Montoya"
__email__ = "montoyjh@lbl.gov"
__status__ = "Production"
__date__ = "July 24, 2018"
class NthOrderElasticTensor(Tensor):
"""
An object representing an nth-order tensor expansion
of the stress-strain constitutive equations
"""
GPa_to_eV_A3 = Unit("GPa").get_conversion_factor(Unit("eV ang^-3"))
symbol = "C"
def __new__(cls, input_array, check_rank=None, tol=1e-4):
"""
Args:
            input_array (): array-like representing the tensor expansion term
            check_rank (): expected rank of the input array, if it should be verified
            tol (): tolerance for the Voigt symmetry check
"""
obj = super().__new__(cls, input_array, check_rank=check_rank)
if obj.rank % 2 != 0:
raise ValueError("ElasticTensor must have even rank")
if not obj.is_voigt_symmetric(tol):
warnings.warn("Input elastic tensor does not satisfy standard voigt symmetries")
return obj.view(cls)
@property
def order(self):
"""
Order of the elastic tensor
"""
return self.rank // 2
def calculate_stress(self, strain):
"""
        Calculates a given elastic tensor's contribution to the
stress using Einstein summation
Args:
strain (3x3 array-like): matrix corresponding to strain
"""
strain = np.array(strain)
if strain.shape == (6,):
strain = Strain.from_voigt(strain)
assert strain.shape == (3, 3), "Strain must be 3x3 or voigt-notation"
stress_matrix = self.einsum_sequence([strain] * (self.order - 1)) / factorial(self.order - 1)
return Stress(stress_matrix)
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
e_density = np.sum(self.calculate_stress(strain) * strain) / self.order
if convert_GPa_to_eV:
e_density *= self.GPa_to_eV_A3 # Conversion factor for GPa to eV/A^3
return e_density
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None, order=2, tol=1e-10):
"""
Args:
            strains (): list of Strain objects or 3x3 array-likes
            stresses (): list of Stress objects or 3x3 array-likes corresponding to strains
            eq_stress (): equilibrium stress to use in the fit, if any
            order (): order of the tensor expansion to fit
            tol (): tolerance used in the fitting procedure
Returns:
"""
return cls(diff_fit(strains, stresses, eq_stress, order, tol)[order - 2])
def raise_error_if_unphysical(f):
"""
Wrapper for functions or properties that should raise an error
if tensor is unphysical.
"""
def wrapper(self, *args, **kwargs):
"""
Args:
self ():
*args ():
**kwargs ():
Returns:
"""
if self.k_vrh < 0 or self.g_vrh < 0:
raise ValueError("Bulk or shear modulus is negative, property cannot be determined")
return f(self, *args, **kwargs)
return wrapper
class ElasticTensor(NthOrderElasticTensor):
"""
This class extends Tensor to describe the 3x3x3x3
second-order elastic tensor, C_{ijkl}, with various
methods for estimating other properties derived from
the second order elastic tensor
"""
def __new__(cls, input_array, tol=1e-4):
"""
Create an ElasticTensor object. The constructor throws an error if
the shape of the input_matrix argument is not 3x3x3x3, i. e. in true
tensor notation. Issues a warning if the input_matrix argument does
not satisfy standard symmetries. Note that the constructor uses
__new__ rather than __init__ according to the standard method of
subclassing numpy ndarrays.
Args:
input_array (3x3x3x3 array-like): the 3x3x3x3 array-like
representing the elastic tensor
tol (float): tolerance for initial symmetry test of tensor
"""
obj = super().__new__(cls, input_array, check_rank=4, tol=tol)
return obj.view(cls)
@property
def compliance_tensor(self):
"""
returns the Voigt-notation compliance tensor,
which is the matrix inverse of the
Voigt-notation elastic tensor
"""
s_voigt = np.linalg.inv(self.voigt)
return ComplianceTensor.from_voigt(s_voigt)
@property
def k_voigt(self):
"""
returns the K_v bulk modulus
"""
return self.voigt[:3, :3].mean()
@property
def g_voigt(self):
"""
returns the G_v shear modulus
"""
return (
2.0 * self.voigt[:3, :3].trace() - np.triu(self.voigt[:3, :3]).sum() + 3 * self.voigt[3:, 3:].trace()
) / 15.0
@property
def k_reuss(self):
"""
returns the K_r bulk modulus
"""
return 1.0 / self.compliance_tensor.voigt[:3, :3].sum()
@property
def g_reuss(self):
"""
returns the G_r shear modulus
"""
return 15.0 / (
8.0 * self.compliance_tensor.voigt[:3, :3].trace()
- 4.0 * np.triu(self.compliance_tensor.voigt[:3, :3]).sum()
+ 3.0 * self.compliance_tensor.voigt[3:, 3:].trace()
)
@property
def k_vrh(self):
"""
returns the K_vrh (Voigt-Reuss-Hill) average bulk modulus
"""
return 0.5 * (self.k_voigt + self.k_reuss)
@property
def g_vrh(self):
"""
returns the G_vrh (Voigt-Reuss-Hill) average shear modulus
"""
return 0.5 * (self.g_voigt + self.g_reuss)
@property
def y_mod(self):
"""
Calculates Young's modulus (in SI units) using the
Voigt-Reuss-Hill averages of bulk and shear moduli
"""
return 9.0e9 * self.k_vrh * self.g_vrh / (3.0 * self.k_vrh + self.g_vrh)
def directional_poisson_ratio(self, n, m, tol=1e-8):
"""
Calculates the poisson ratio for a specific direction
relative to a second, orthogonal direction
Args:
n (3-d vector): principal direction
m (3-d vector): secondary direction orthogonal to n
tol (float): tolerance for testing of orthogonality
"""
n, m = get_uvec(n), get_uvec(m)
if not np.abs(np.dot(n, m)) < tol:
raise ValueError("n and m must be orthogonal")
v = self.compliance_tensor.einsum_sequence([n] * 2 + [m] * 2)
v *= -1 / self.compliance_tensor.einsum_sequence([n] * 4)
return v
def directional_elastic_mod(self, n):
"""
Calculates directional elastic modulus for a specific vector
"""
n = get_uvec(n)
return self.einsum_sequence([n] * 4)
@raise_error_if_unphysical
def trans_v(self, structure):
"""
Calculates transverse sound velocity (in SI units) using the
Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: transverse sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
if self.g_vrh < 0:
raise ValueError("k_vrh or g_vrh is negative, sound velocity is undefined")
return (1e9 * self.g_vrh / mass_density) ** 0.5
@raise_error_if_unphysical
def long_v(self, structure):
"""
Calculates longitudinal sound velocity (in SI units)
using the Voigt-Reuss-Hill average bulk modulus
Args:
structure: pymatgen structure object
Returns: longitudinal sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
if self.g_vrh < 0:
raise ValueError("k_vrh or g_vrh is negative, sound velocity is undefined")
return (1e9 * (self.k_vrh + 4.0 / 3.0 * self.g_vrh) / mass_density) ** 0.5
@raise_error_if_unphysical
def snyder_ac(self, structure):
"""
Calculates Snyder's acoustic sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's acoustic sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
natoms = structure.composition.num_atoms
num_density = 1e30 * nsites / volume
tot_mass = sum(e.atomic_mass for e in structure.species)
avg_mass = 1.6605e-27 * tot_mass / natoms
return (
0.38483
* avg_mass
* ((self.long_v(structure) + 2.0 * self.trans_v(structure)) / 3.0) ** 3.0
/ (300.0 * num_density ** (-2.0 / 3.0) * nsites ** (1.0 / 3.0))
)
@raise_error_if_unphysical
def snyder_opt(self, structure):
"""
Calculates Snyder's optical sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's optical sound velocity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return (
1.66914e-23
* (self.long_v(structure) + 2.0 * self.trans_v(structure))
/ 3.0
/ num_density ** (-2.0 / 3.0)
* (1 - nsites ** (-1.0 / 3.0))
)
@raise_error_if_unphysical
def snyder_total(self, structure):
"""
Calculates Snyder's total sound velocity (in SI units)
Args:
structure: pymatgen structure object
Returns: Snyder's total sound velocity (in SI units)
"""
return self.snyder_ac(structure) + self.snyder_opt(structure)
@raise_error_if_unphysical
def clarke_thermalcond(self, structure):
"""
Calculates Clarke's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Clarke's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
tot_mass = sum(e.atomic_mass for e in structure.species)
natoms = structure.composition.num_atoms
weight = float(structure.composition.weight)
avg_mass = 1.6605e-27 * tot_mass / natoms
mass_density = 1.6605e3 * nsites * weight / (natoms * volume)
return 0.87 * 1.3806e-23 * avg_mass ** (-2.0 / 3.0) * mass_density ** (1.0 / 6.0) * self.y_mod ** 0.5
@raise_error_if_unphysical
def cahill_thermalcond(self, structure):
"""
Calculates Cahill's thermal conductivity (in SI units)
Args:
structure: pymatgen structure object
Returns: Cahill's thermal conductivity (in SI units)
"""
nsites = structure.num_sites
volume = structure.volume
num_density = 1e30 * nsites / volume
return 1.3806e-23 / 2.48 * num_density ** (2.0 / 3.0) * (self.long_v(structure) + 2 * self.trans_v(structure))
@raise_error_if_unphysical
def debye_temperature(self, structure):
"""
Estimates the debye temperature from longitudinal and
transverse sound velocities
Args:
structure: pymatgen structure object
Returns: debye temperature (in SI units)
"""
v0 = structure.volume * 1e-30 / structure.num_sites
vl, vt = self.long_v(structure), self.trans_v(structure)
vm = 3 ** (1.0 / 3.0) * (1 / vl ** 3 + 2 / vt ** 3) ** (-1.0 / 3.0)
td = 1.05457e-34 / 1.38065e-23 * vm * (6 * np.pi ** 2 / v0) ** (1.0 / 3.0)
return td
    @deprecated(
        "debye_temperature_from_sound_velocities is now the default "
        "debye_temperature function, this one will be removed."
)
@raise_error_if_unphysical
def debye_temperature_from_sound_velocities(self, structure):
"""
Estimates Debye temperature from sound velocities
"""
return self.debye_temperature(structure)
@property
def universal_anisotropy(self):
"""
returns the universal anisotropy value
"""
return 5.0 * self.g_voigt / self.g_reuss + self.k_voigt / self.k_reuss - 6.0
@property
def homogeneous_poisson(self):
"""
returns the homogeneous poisson ratio
"""
return (1.0 - 2.0 / 3.0 * self.g_vrh / self.k_vrh) / (2.0 + 2.0 / 3.0 * self.g_vrh / self.k_vrh)
def green_kristoffel(self, u):
"""
Returns the Green-Kristoffel tensor for a second-order tensor
"""
return self.einsum_sequence([u, u], "ijkl,i,l")
@property
def property_dict(self):
"""
returns a dictionary of properties derived from the elastic tensor
"""
props = [
"k_voigt",
"k_reuss",
"k_vrh",
"g_voigt",
"g_reuss",
"g_vrh",
"universal_anisotropy",
"homogeneous_poisson",
"y_mod",
]
return {prop: getattr(self, prop) for prop in props}
def get_structure_property_dict(self, structure, include_base_props=True, ignore_errors=False):
"""
returns a dictionary of properties derived from the elastic tensor
and an associated structure
Args:
structure (Structure): structure object for which to calculate
associated properties
include_base_props (bool): whether to include base properties,
like k_vrh, etc.
ignore_errors (bool): if set to true, will set problem properties
that depend on a physical tensor to None, defaults to False
"""
s_props = [
"trans_v",
"long_v",
"snyder_ac",
"snyder_opt",
"snyder_total",
"clarke_thermalcond",
"cahill_thermalcond",
"debye_temperature",
]
if ignore_errors and (self.k_vrh < 0 or self.g_vrh < 0):
sp_dict = {prop: None for prop in s_props}
else:
sp_dict = {prop: getattr(self, prop)(structure) for prop in s_props}
sp_dict["structure"] = structure
if include_base_props:
sp_dict.update(self.property_dict)
return sp_dict
@classmethod
def from_pseudoinverse(cls, strains, stresses):
"""
Class method to fit an elastic tensor from stress/strain
data. Method uses Moore-Penrose pseudoinverse to invert
the s = C*e equation with elastic tensor, stress, and
strain in voigt notation
Args:
stresses (Nx3x3 array-like): list or array of stresses
strains (Nx3x3 array-like): list or array of strains
"""
# convert the stress/strain to Nx6 arrays of voigt-notation
warnings.warn(
"Pseudoinverse fitting of Strain/Stress lists may yield "
"questionable results from vasp data, use with caution."
)
stresses = np.array([Stress(stress).voigt for stress in stresses])
with warnings.catch_warnings(record=True):
strains = np.array([Strain(strain).voigt for strain in strains])
voigt_fit = np.transpose(np.dot(np.linalg.pinv(strains), stresses))
return cls.from_voigt(voigt_fit)
@classmethod
def from_independent_strains(cls, strains, stresses, eq_stress=None, vasp=False, tol=1e-10):
"""
Constructs the elastic tensor least-squares fit of independent strains
Args:
strains (list of Strains): list of strain objects to fit
stresses (list of Stresses): list of stress objects to use in fit
corresponding to the list of strains
eq_stress (Stress): equilibrium stress to use in fitting
vasp (boolean): flag for whether the stress tensor should be
converted based on vasp units/convention for stress
tol (float): tolerance for removing near-zero elements of the
resulting tensor
"""
strain_states = [tuple(ss) for ss in np.eye(6)]
ss_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress)
if not set(strain_states) <= set(ss_dict.keys()):
raise ValueError(f"Missing independent strain states: {set(strain_states) - set(ss_dict)}")
if len(set(ss_dict.keys()) - set(strain_states)) > 0:
warnings.warn("Extra strain states in strain-stress pairs are neglected in independent strain fitting")
c_ij = np.zeros((6, 6))
for i in range(6):
istrains = ss_dict[strain_states[i]]["strains"]
istresses = ss_dict[strain_states[i]]["stresses"]
for j in range(6):
c_ij[i, j] = np.polyfit(istrains[:, i], istresses[:, j], 1)[0]
if vasp:
c_ij *= -0.1 # Convert units/sign convention of vasp stress tensor
c = cls.from_voigt(c_ij)
c = c.zeroed(tol)
return c
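# Minimal usage sketch for from_independent_strains (assumptions: this module is
# importable as pymatgen.analysis.elasticity.elastic so Strain and Stress come
# from the sibling strain/stress modules; c_true below is a hypothetical
# diagonal stiffness used only to generate synthetic data):
#
#     import numpy as np
#     from pymatgen.analysis.elasticity.strain import Strain
#     from pymatgen.analysis.elasticity.stress import Stress
#
#     c_true = np.eye(6) * 100.0  # hypothetical stiffness matrix in GPa
#     strains, stresses = [], []
#     for i in range(6):
#         for mag in (-0.01, -0.005, 0.005, 0.01):
#             v = np.zeros(6)
#             v[i] = mag
#             strains.append(Strain.from_voigt(v))
#             stresses.append(Stress.from_voigt(c_true @ v))
#     et = ElasticTensor.from_independent_strains(strains, stresses)
#     # The fitted Voigt matrix should recover the synthetic stiffness:
#     assert np.allclose(et.voigt, c_true, atol=1e-6)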
class ComplianceTensor(Tensor):
"""
This class represents the compliance tensor, and exists
primarily to keep the voigt-conversion scheme consistent
since the compliance tensor has a unique vscale
"""
def __new__(cls, s_array):
"""
Args:
s_array (array-like): compliance tensor array used to construct the object
"""
vscale = np.ones((6, 6))
vscale[3:] *= 2
vscale[:, 3:] *= 2
obj = super().__new__(cls, s_array, vscale=vscale)
return obj.view(cls)
class ElasticTensorExpansion(TensorCollection):
"""
This class is a sequence of elastic tensors corresponding
to an elastic tensor expansion, which can be used to
calculate stress and energy density and inherits all
of the list-based properties of TensorCollection
(e. g. symmetrization, voigt conversion, etc.)
"""
def __init__(self, c_list):
"""
Initialization method for ElasticTensorExpansion
Args:
c_list (list or tuple): sequence of Tensor inputs
or tensors from which the elastic tensor
expansion is constructed.
"""
c_list = [NthOrderElasticTensor(c, check_rank=4 + i * 2) for i, c in enumerate(c_list)]
super().__init__(c_list)
@classmethod
def from_diff_fit(cls, strains, stresses, eq_stress=None, tol=1e-10, order=3):
"""
Generates an elastic tensor expansion via the fitting function
defined below in diff_fit
"""
c_list = diff_fit(strains, stresses, eq_stress, order, tol)
return cls(c_list)
@property
def order(self):
"""
Order of the elastic tensor expansion, i. e. the order of the
highest included set of elastic constants
"""
return self[-1].order
def calculate_stress(self, strain):
"""
Calculates the stress due to a given strain by summing each
tensor's contribution via Einstein summation
"""
return sum(c.calculate_stress(strain) for c in self)
def energy_density(self, strain, convert_GPa_to_eV=True):
"""
Calculates the elastic energy density due to a strain
"""
return sum(c.energy_density(strain, convert_GPa_to_eV) for c in self)
def get_ggt(self, n, u):
"""
Gets the Generalized Gruneisen tensor for a given
third-order elastic tensor expansion.
Args:
n (3x1 array-like): normal mode direction
u (3x1 array-like): polarization direction
"""
gk = self[0].einsum_sequence([n, u, n, u])
result = -(
2 * gk * np.outer(u, u) + self[0].einsum_sequence([n, n]) + self[1].einsum_sequence([n, u, n, u])
) / (2 * gk)
return result
def get_tgt(self, temperature=None, structure=None, quad=None):
"""
Gets the thermodynamic Gruneisen tensor (TGT) via an
integration of the GGT weighted by the directional heat
capacity.
See refs:
R. N. Thurston and K. Brugger, Phys. Rev. 133, A1604 (1964).
K. Brugger Phys. Rev. 137, A1826 (1965).
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be a
dictionary with "points" and "weights" keys; defaults
to quadpy.sphere.Lebedev(19) as read from file
"""
if temperature and not structure:
raise ValueError("If using temperature input, you must also include structure")
quad = quad if quad else DEFAULT_QUAD
points = quad["points"]
weights = quad["weights"]
num, denom, c = np.zeros((3, 3)), 0, 1
for p, w in zip(points, weights):
gk = ElasticTensor(self[0]).green_kristoffel(p)
rho_wsquareds, us = np.linalg.eigh(gk)
us = [u / np.linalg.norm(u) for u in np.transpose(us)]
for u in us:
# TODO: this should be benchmarked
if temperature:
c = self.get_heat_capacity(temperature, structure, p, u)
num += c * self.get_ggt(p, u) * w
denom += c * w
return SquareTensor(num / denom)
def get_gruneisen_parameter(self, temperature=None, structure=None, quad=None):
"""
Gets the single average gruneisen parameter from the TGT.
Args:
temperature (float): Temperature in kelvin, if not specified
will return non-cv-normalized value
structure (Structure): Structure to be used in directional heat
capacity determination, only necessary if temperature
is specified
quad (dict): quadrature for integration, should be a
dictionary with "points" and "weights" keys; defaults
to quadpy.sphere.Lebedev(19) as read from file
"""
return np.trace(self.get_tgt(temperature, structure, quad)) / 3.0
def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):
"""
Gets the directional heat capacity for a higher order tensor
expansion as a function of direction and polarization.
Args:
temperature (float): Temperature in kelvin
structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
cutoff (float): cutoff ratio; if hbar * omega exceeds
cutoff * kt the mode's contribution is returned as 0
"""
k = 1.38065e-23
kt = k * temperature
hbar_w = 1.05457e-34 * self.omega(structure, n, u)
if hbar_w > kt * cutoff:
return 0.0
c = k * (hbar_w / kt) ** 2
c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1) ** 2
return c * 6.022e23
def omega(self, structure, n, u):
"""
Finds directional frequency contribution to the heat
capacity from direction and polarization
Args:
structure (Structure): Structure to be used in directional heat
capacity determination
n (3x1 array-like): direction for Cv determination
u (3x1 array-like): polarization direction, note that
no attempt for verification of eigenvectors is made
"""
l0 = np.dot(np.sum(structure.lattice.matrix, axis=0), n)
l0 *= 1e-10  # convert from Angstrom to m
weight = float(structure.composition.weight) * 1.66054e-27 # in kg
vol = structure.volume * 1e-30 # in m^3
vel = (1e9 * self[0].einsum_sequence([n, u, n, u]) / (weight / vol)) ** 0.5
return vel / l0
def thermal_expansion_coeff(self, structure, temperature, mode="debye"):
"""
Gets thermal expansion coefficient from third-order constants.
Args:
structure (Structure): Structure to be used in directional heat
capacity determination
temperature (float): Temperature in kelvin
mode (string): mode for finding average heat-capacity,
current supported modes are 'debye' and 'dulong-petit'
"""
soec = ElasticTensor(self[0])
v0 = structure.volume * 1e-30 / structure.num_sites
if mode == "debye":
td = soec.debye_temperature(structure)
t_ratio = temperature / td
def integrand(x):
return (x ** 4 * np.exp(x)) / (np.exp(x) - 1) ** 2
cv = 9 * 8.314 * t_ratio ** 3 * quad(integrand, 0, t_ratio ** -1)[0]
elif mode == "dulong-petit":
cv = 3 * 8.314
else:
raise ValueError("Mode must be debye or dulong-petit")
tgt = self.get_tgt(temperature, structure)
alpha = np.einsum("ijkl,ij", soec.compliance_tensor, tgt)
alpha *= cv / (1e9 * v0 * 6.022e23)
return SquareTensor(alpha)
def get_compliance_expansion(self):
"""
Gets a compliance tensor expansion from the elastic
tensor expansion.
"""
# TODO: this might have a general form
if not self.order <= 4:
raise ValueError("Compliance tensor expansion only supported for fourth-order and lower")
ce_exp = [ElasticTensor(self[0]).compliance_tensor]
einstring = "ijpq,pqrsuv,rskl,uvmn->ijklmn"
ce_exp.append(np.einsum(einstring, -ce_exp[-1], self[1], ce_exp[-1], ce_exp[-1]))
if self.order == 4:
# Four terms in the Fourth-Order compliance tensor
# pylint: disable=E1130
einstring_1 = "pqab,cdij,efkl,ghmn,abcdefgh"
tensors_1 = [ce_exp[0]] * 4 + [self[-1]]
temp = -np.einsum(einstring_1, *tensors_1)
einstring_2 = "pqab,abcdef,cdijmn,efkl"
einstring_3 = "pqab,abcdef,efklmn,cdij"
einstring_4 = "pqab,abcdef,cdijkl,efmn"
for es in [einstring_2, einstring_3, einstring_4]:
temp -= np.einsum(es, ce_exp[0], self[-2], ce_exp[1], ce_exp[0])
ce_exp.append(temp)
return TensorCollection(ce_exp)
def get_strain_from_stress(self, stress):
"""
Gets the strain from a stress state according
to the compliance expansion corresponding to the
tensor expansion.
"""
compl_exp = self.get_compliance_expansion()
strain = 0
for n, compl in enumerate(compl_exp):
strain += compl.einsum_sequence([stress] * (n + 1)) / factorial(n + 1)
return strain
def get_effective_ecs(self, strain, order=2):
"""
Returns the effective elastic constants
from the elastic tensor expansion.
Args:
strain (Strain or 3x3 array-like): strain condition
under which to calculate the effective constants
order (int): order of the ecs to be returned
"""
ec_sum = 0
for n, ecs in enumerate(self[order - 2 :]):
ec_sum += ecs.einsum_sequence([strain] * n) / factorial(n)
return ec_sum
def get_wallace_tensor(self, tau):
"""
Gets the Wallace Tensor for determining yield strength
criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor
"""
b = 0.5 * (
np.einsum("ml,kn->klmn", tau, np.eye(3))
+ np.einsum("km,ln->klmn", tau, np.eye(3))
+ np.einsum("nl,km->klmn", tau, np.eye(3))
+ np.einsum("kn,lm->klmn", tau, np.eye(3))
+ -2 * np.einsum("kl,mn->klmn", tau, np.eye(3))
)
strain = self.get_strain_from_stress(tau)
b += self.get_effective_ecs(strain)
return b
def get_symmetric_wallace_tensor(self, tau):
"""
Gets the symmetrized wallace tensor for determining
yield strength criteria.
Args:
tau (3x3 array-like): stress at which to evaluate
the wallace tensor.
"""
wallace = self.get_wallace_tensor(tau)
return Tensor(0.5 * (wallace + np.transpose(wallace, [2, 3, 0, 1])))
def get_stability_criteria(self, s, n):
"""
Gets the stability criteria from the symmetric
Wallace tensor for a given stress value and
direction of applied stress.
Args:
s (float): Stress value at which to evaluate
the stability criteria
n (3x1 array-like): direction of the applied
stress
"""
n = get_uvec(n)
stress = s * np.outer(n, n)
sym_wallace = self.get_symmetric_wallace_tensor(stress)
return np.linalg.det(sym_wallace.voigt)
def get_yield_stress(self, n):
"""
Gets the yield stress for a given direction
Args:
n (3x1 array-like): direction for which to find the
yield stress
"""
# TODO: root finding could be more robust
comp = root(self.get_stability_criteria, -1, args=n)
tens = root(self.get_stability_criteria, 1, args=n)
return (comp.x, tens.x)
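# Usage sketch for the expansion (assumptions: second- and third-order tensors
# c2 and c3 are available, e.g. from diff_fit defined below, and `strain` is a
# Strain or 3x3 array-like):
#
#     exp = ElasticTensorExpansion([c2, c3])
#     stress = exp.calculate_stress(strain)   # sums the per-order contributions
#     u = exp.energy_density(strain)          # converted from GPa to eV/A^3 by default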
# TODO: abstract this for other tensor fitting procedures
def diff_fit(strains, stresses, eq_stress=None, order=2, tol=1e-10):
"""
nth order elastic constant fitting function based on
central-difference derivatives with respect to distinct
strain states. The algorithm is summarized as follows:
1. Identify distinct strain states as sets of indices
for which nonzero strain values exist, typically
[(0), (1), (2), (3), (4), (5), (0, 1) etc.]
2. For each strain state, find and sort strains and
stresses by strain value.
3. Find first, second .. nth derivatives of each stress
with respect to scalar variable corresponding to
the smallest perturbation in the strain.
4. Use the pseudoinverse of a matrix-vector expression
corresponding to the parameterized stress-strain
relationship and multiply that matrix by the respective
calculated first or second derivatives from the
previous step.
5. Place the calculated nth-order elastic
constants appropriately.
Args:
order (int): order of the elastic tensor set to return
strains (nx3x3 array-like): Array of 3x3 strains
to use in fitting of ECs
stresses (nx3x3 array-like): Array of 3x3 stresses
to use in fitting ECs. These should be PK2 stresses.
eq_stress (3x3 array-like): stress corresponding to
equilibrium strain (i. e. "0" strain state).
If not specified, function will try to find
the state in the list of provided stresses
and strains. If not found, defaults to 0.
tol (float): value for which strains below
are ignored in identifying strain states.
Returns:
Set of tensors corresponding to nth order expansion of
the stress/strain relation
"""
strain_state_dict = get_strain_state_dict(strains, stresses, eq_stress=eq_stress, tol=tol, add_eq=True, sort=True)
# Collect derivative data
c_list = []
dei_dsi = np.zeros((order - 1, 6, len(strain_state_dict)))
for n, (strain_state, data) in enumerate(strain_state_dict.items()):
hvec = data["strains"][:, strain_state.index(1)]
for i in range(1, order):
coef = get_diff_coeff(hvec, i)
dei_dsi[i - 1, :, n] = np.dot(coef, data["stresses"])
m, absent = generate_pseudo(list(strain_state_dict.keys()), order)
for i in range(1, order):
cvec, carr = get_symbol_list(i + 1)
svec = np.ravel(dei_dsi[i - 1].T)
cmap = dict(zip(cvec, np.dot(m[i - 1], svec)))
c_list.append(v_subs(carr, cmap))
return [Tensor.from_voigt(c) for c in c_list]
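# Sketch of the intended call pattern (assumption: `strains` and `stresses` are
# Nx3x3 arrays collected from perturbed-structure calculations, e.g. via
# pymatgen's DeformedStructureSet):
#
#     c2, c3 = diff_fit(strains, stresses, eq_stress=eq_stress, order=3)
#
# c2 is the usual rank-4 stiffness tensor and c3 the rank-6 third-order
# correction; wrapping the result as ElasticTensorExpansion([c2, c3]) gives
# access to the Gruneisen and thermal-expansion machinery above.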
def find_eq_stress(strains, stresses, tol=1e-10):
"""
Finds stress corresponding to zero strain state in stress-strain list
Args:
strains (Nx3x3 array-like): array corresponding to strains
stresses (Nx3x3 array-like): array corresponding to stresses
tol (float): tolerance to find zero strain state
"""
stress_array = np.array(stresses)
strain_array = np.array(strains)
eq_stress = stress_array[np.all(abs(strain_array) < tol, axis=(1, 2))]
if eq_stress.size != 0:
all_same = (abs(eq_stress - eq_stress[0]) < 1e-8).all()
if len(eq_stress) > 1 and not all_same:
raise ValueError(
"Multiple stresses found for equilibrium strain"
" state, please specify equilibrium stress or "
" remove extraneous stresses."
)
eq_stress = eq_stress[0]
else:
warnings.warn("No eq state found, returning zero voigt stress")
eq_stress = Stress(np.zeros((3, 3)))
return eq_stress
def get_strain_state_dict(strains, stresses, eq_stress=None, tol=1e-10, add_eq=True, sort=True):
"""
Creates a dictionary of voigt-notation stress-strain sets
keyed by "strain state", i. e. a tuple corresponding to
the non-zero entries in ratios to the lowest nonzero value,
e.g. [0, 0.1, 0, 0.2, 0, 0] -> (0,1,0,2,0,0)
This allows strains to be collected in stencils as to
evaluate parameterized finite difference derivatives
Args:
strains (Nx3x3 array-like): strain matrices
stresses (Nx3x3 array-like): stress matrices
eq_stress (3x3 array-like): equilibrium stress
tol (float): tolerance for sorting strain states
add_eq (bool): flag for whether to add eq_strain
to stress-strain sets for each strain state
sort (bool): flag for whether to sort strain states
Returns:
OrderedDict with strain state keys and dictionaries
with stress-strain data corresponding to strain state
"""
# Recast stress/strains
vstrains = np.array([Strain(s).zeroed(tol).voigt for s in strains]) # pylint: disable=E1101
vstresses = np.array([Stress(s).zeroed(tol).voigt for s in stresses]) # pylint: disable=E1101
# Collect independent strain states:
independent = {tuple(np.nonzero(vstrain)[0].tolist()) for vstrain in vstrains}
strain_state_dict = OrderedDict()
if add_eq:
if eq_stress is not None:
veq_stress = Stress(eq_stress).voigt
else:
veq_stress = find_eq_stress(strains, stresses).voigt
for n, ind in enumerate(independent):
# match strains with templates
template = np.zeros(6, dtype=bool)
np.put(template, ind, True)
template = np.tile(template, [vstresses.shape[0], 1])
mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
mstresses = vstresses[mode]
mstrains = vstrains[mode]
# Get "strain state", i.e. ratio of each value to minimum strain
min_nonzero_ind = np.argmin(np.abs(np.take(mstrains[-1], ind)))
min_nonzero_val = np.take(mstrains[-1], ind)[min_nonzero_ind]
strain_state = mstrains[-1] / min_nonzero_val
strain_state = tuple(strain_state)
if add_eq:
# add zero strain state
mstrains = np.vstack([mstrains, np.zeros(6)])
mstresses = np.vstack([mstresses, veq_stress])
# sort strains/stresses by strain values
if sort:
mstresses = mstresses[mstrains[:, ind[0]].argsort()]
mstrains = mstrains[mstrains[:, ind[0]].argsort()]
strain_state_dict[strain_state] = {"strains": mstrains, "stresses": mstresses}
return strain_state_dict
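# Small illustration of the strain-state keying described above (sketch; Strain
# and Stress are assumed importable as in the rest of this module):
#
#     vs = np.zeros(6)
#     vs[[1, 3]] = [0.01, 0.02]
#     ssd = get_strain_state_dict([Strain.from_voigt(vs)], [Stress(np.eye(3))])
#     list(ssd) == [(0.0, 1.0, 0.0, 2.0, 0.0, 0.0)]   # ratios to the smallest strain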
def generate_pseudo(strain_states, order=3):
"""
Generates the pseudoinverse for a given set of strains.
Args:
strain_states (Nx6 array-like): a list of voigt-notation
"strain-states", i. e. perturbed strain entries expressed
as multiples of the smallest strain, e. g. (0, 1, 0, 0, 1, 0)
order (int): order of pseudoinverse to calculate
Returns:
mis: pseudo inverses for each order tensor, these can
be multiplied by the central difference derivative
of the stress with respect to the strain state
absent_syms: symbols of the tensor absent from the PI
expression
"""
s = sp.Symbol("s")
nstates = len(strain_states)
ni = np.array(strain_states) * s
mis, absent_syms = [], []
for degree in range(2, order + 1):
cvec, carr = get_symbol_list(degree)
sarr = np.zeros((nstates, 6), dtype=object)
for n, strain_v in enumerate(ni):
# Get expressions
exps = carr.copy()
for i in range(degree - 1):
exps = np.dot(exps, strain_v)
exps /= np.math.factorial(degree - 1)
sarr[n] = [sp.diff(exp, s, degree - 1) for exp in exps]
svec = sarr.ravel()
present_syms = set.union(*[exp.atoms(sp.Symbol) for exp in svec])
absent_syms += [set(cvec) - present_syms]
m = np.zeros((6 * nstates, len(cvec)))
for n, c in enumerate(cvec):
m[:, n] = v_diff(svec, c)
mis.append(np.linalg.pinv(m))
return mis, absent_syms
def get_symbol_list(rank, dim=6):
"""
Returns a symbolic representation of the voigt-notation
tensor that places identical symbols for entries related
by index transposition, i. e. C_1121 = C_1211 etc.
Args:
dim (int): dimension of matrix/tensor, e. g. 6 for
voigt notation and 3 for standard
rank (int): rank of tensor, e. g. 3 for third-order ECs
Returns:
c_vec (array): array representing distinct indices
c_arr (array): array representing tensor with equivalent
indices assigned as above
"""
indices = list(itertools.combinations_with_replacement(range(dim), r=rank))
c_vec = np.zeros(len(indices), dtype=object)
c_arr = np.zeros([dim] * rank, dtype=object)
for n, idx in enumerate(indices):
c_vec[n] = sp.Symbol("c_" + "".join([str(i) for i in idx]))
for perm in itertools.permutations(idx):
c_arr[perm] = c_vec[n]
return c_vec, c_arr
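# For example (sketch), the rank-2 case reproduces the 21 independent entries of
# a symmetric 6x6 Voigt matrix, with transposed indices sharing one Symbol:
#
#     c_vec, c_arr = get_symbol_list(2)
#     len(c_vec) == 21
#     c_arr[0, 1] is c_arr[1, 0]   # c_01 reused for both positions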
def subs(entry, cmap):
"""
Sympy substitution function, primarily for the purposes
of numpy vectorization
Args:
entry (symbol or exp): sympy expr to undergo subs
cmap (dict): map for symbols to values to use in subs
Returns:
Evaluated expression with substitution
"""
return entry.subs(cmap)
# Vectorized functions
v_subs = np.vectorize(subs)
v_diff = np.vectorize(sp.diff)
def get_diff_coeff(hvec, n=1):
"""
Helper function to find the finite-difference coefficients of a
derivative on an arbitrary mesh.
Args:
hvec (1D array-like): sampling stencil
n (int): degree of derivative to find
"""
hvec = np.array(hvec, dtype=np.float_)
acc = len(hvec)
exp = np.column_stack([np.arange(acc)] * acc)
a = np.vstack([hvec] * acc) ** exp
b = np.zeros(acc)
b[n] = factorial(n)
return np.linalg.solve(a, b)
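# Quick check of the stencil solve above (sketch): for a symmetric three-point
# stencil the first-derivative coefficients reduce to the familiar central
# difference (f(+h) - f(-h)) / (2h):
#
#     get_diff_coeff([-0.01, 0.0, 0.01], n=1)   # ~ array([-50., 0., 50.])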
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
__author__ = "Kyle Bystrom"
__copyright__ = "Copyright 2019, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Kyle Bystrom"
__email__ = "kylebystrom@gmail.com"
__date__ = "5/29/2019"
import unittest
from pymatgen.analysis.interface import Interface, InterfaceBuilder
from pymatgen.analysis.substrate_analyzer import ZSLGenerator
from pymatgen.util.testing import PymatgenTest
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.structure import Structure
import numpy as np
class InterfaceTest(PymatgenTest):
@classmethod
def setUpClass(cls):
si_struct = cls.get_structure('Si')
sio2_struct = cls.get_structure('SiO2')
sga = SpacegroupAnalyzer(si_struct)
si_conventional = sga.get_conventional_standard_structure()
sga = SpacegroupAnalyzer(sio2_struct)
sio2_conventional = sga.get_conventional_standard_structure()
cls.ib = InterfaceBuilder(si_conventional, sio2_conventional)
cls.ib.generate_interfaces(substrate_millers=[[1, 0, 0]], film_layers=3, substrate_layers=3)
def test_offset_vector(self):
interface = self.ib.interfaces[0]
init_lattice = interface.lattice.matrix.copy()
self.assertArrayAlmostEqual(interface.offset_vector, np.array([0,0,2.5]))
init_film = interface.film
init_sub = interface.substrate
tst = Structure.from_sites(interface.sites)
interface.z_shift += 1
self.assertArrayAlmostEqual(interface.offset_vector, np.array([0,0,3.5]))
tdm, idm = tst.distance_matrix, interface.distance_matrix
diff = tdm - idm
assert (tdm <= idm + 1e-10).all()
assert (tdm + 0.5 < idm).any()
self.assertArrayAlmostEqual(init_film.distance_matrix, interface.film.distance_matrix)
self.assertArrayAlmostEqual(init_sub.distance_matrix, interface.substrate.distance_matrix)
interface.z_shift -= 1
self.assertArrayAlmostEqual(interface.offset_vector, np.array([0,0,2.5]))
idm = interface.distance_matrix
assert (np.abs(tdm - idm) < 1e-10).all()
interface.ab_shift += np.array([0.2,0.2])
self.assertArrayAlmostEqual(interface.ab_shift, np.array([0.2,0.2]))
idm = interface.distance_matrix
assert (np.abs(tdm - idm) > 0.9).any()
self.assertArrayAlmostEqual(init_lattice, interface.lattice.matrix)
self.assertArrayAlmostEqual(init_film.distance_matrix, interface.film.distance_matrix)
self.assertArrayAlmostEqual(init_sub.distance_matrix, interface.substrate.distance_matrix)
interface.ab_shift -= np.array([0.2,0.2])
self.assertArrayAlmostEqual(interface.offset_vector, np.array([0,0,2.5]))
idm = interface.distance_matrix
assert (np.abs(tdm - idm) < 1e-10).all()
self.assertArrayAlmostEqual(init_film.distance_matrix, interface.film.distance_matrix)
self.assertArrayAlmostEqual(init_sub.distance_matrix, interface.substrate.distance_matrix)
def test_labels(self):
interface = self.ib.interfaces[0]
film = interface.film
substrate = interface.substrate
film_sites = [site for i, site in enumerate(interface)\
if 'film' in interface.site_properties['interface_label'][i]]
substrate_sites = [site for i, site in enumerate(interface)\
if 'substrate' in interface.site_properties['interface_label'][i]]
assert film.sites == film_sites
assert substrate.sites == substrate_sites
assert len(film) == len(interface.modified_film_structure)
assert len(substrate) == len(interface.modified_sub_structure)
def test_vertical_spacing(self):
interface = self.ib.interfaces[0]
self.assertAlmostEqual(interface.z_shift,
np.min(interface.film.cart_coords[:,2]) - np.max(interface.substrate.cart_coords[:,2]))
self.assertAlmostEqual(interface.lattice.c, interface.vacuum_thickness + interface.z_shift\
+ np.max(interface.film.cart_coords[:,2])\
- np.min(interface.film.cart_coords[:,2])\
+ np.max(interface.substrate.cart_coords[:,2])\
- np.min(interface.substrate.cart_coords[:,2]))
def test_inplane_spacing(self):
delta = np.array([0, 1.5, 0])
interface = self.ib.interfaces[0]
old_coords = interface.film.frac_coords.copy()
interface.offset_vector += delta
self.assertArrayAlmostEqual(interface.offset_vector, [0, 1.5, 2.5])
new_coords = interface.film.frac_coords.copy()
for i in range(new_coords.shape[0]):
self.assertAlmostEqual(interface.lattice.get_distance_and_image(old_coords[i], new_coords[i])[0], 1.5)
interface.offset_vector -= delta
self.assertArrayAlmostEqual(interface.offset_vector, [0, 0, 2.5])
new_coords = interface.film.frac_coords.copy()
for i in range(new_coords.shape[0]):
self.assertAlmostEqual(interface.lattice.get_distance_and_image(old_coords[i], new_coords[i])[0], 0.0)
def test_copy(self):
interface = self.ib.interfaces[0]
copy = interface.copy()
for attr in ['lattice', 'cart_coords', 'sub_plane', 'film_plane',\
'modified_film_structure', 'modified_sub_structure',\
'strained_film_structure', 'strained_sub_structure',\
'sub_init_cell', 'film_init_cell', 'site_properties',\
'offset_vector', 'ab_shift', 'z_shift', 'vacuum_thickness']:
if type(copy.__getattribute__(attr)) == np.ndarray:
self.assertArrayAlmostEqual(copy.__getattribute__(attr), interface.__getattribute__(attr))
else:
assert copy.__getattribute__(attr) == interface.__getattribute__(attr)
def test_serialization(self):
interface = self.ib.interfaces[0]
interface_dict = interface.as_dict()
interface_from_dict = Interface.from_dict(interface_dict)
for attr in ['lattice', 'cart_coords', 'sub_plane', 'film_plane',\
'modified_film_structure', 'modified_sub_structure',\
'strained_film_structure', 'strained_sub_structure',\
'sub_init_cell', 'film_init_cell', 'site_properties',\
'offset_vector', 'ab_shift', 'z_shift', 'vacuum_thickness']:
if type(interface_from_dict.__getattribute__(attr)) == np.ndarray:
self.assertArrayAlmostEqual(interface_from_dict.__getattribute__(attr), interface.__getattribute__(attr))
else:
self.assertAlmostEqual(interface_from_dict.__getattribute__(attr), interface.__getattribute__(attr))
# Shift film and check equality
interface = self.ib.interfaces[0].copy()
interface.z_shift = 4
interface.ab_shift = [0.5, 0.5]
interface_dict = interface.as_dict()
interface_from_dict = Interface.from_dict(interface_dict)
for attr in ['lattice', 'cart_coords', 'sub_plane', 'film_plane',\
'modified_film_structure', 'modified_sub_structure',\
'strained_film_structure', 'strained_sub_structure',\
'sub_init_cell', 'film_init_cell', 'site_properties',\
'offset_vector', 'ab_shift', 'z_shift', 'vacuum_thickness']:
if type(interface_from_dict.__getattribute__(attr)) == np.ndarray:
self.assertArrayAlmostEqual(interface_from_dict.__getattribute__(attr), interface.__getattribute__(attr))
else:
self.assertAlmostEqual(interface_from_dict.__getattribute__(attr), interface.__getattribute__(attr))
class InterfaceBuilderTest(PymatgenTest):
@classmethod
def setUpClass(cls):
si_struct = cls.get_structure('Si')
sio2_struct = cls.get_structure('SiO2')
sga = SpacegroupAnalyzer(si_struct)
si_conventional = sga.get_conventional_standard_structure()
sga = SpacegroupAnalyzer(sio2_struct)
sio2_conventional = sga.get_conventional_standard_structure()
cls.ibs = []
cls.ibs.append(cls.make_ib(si_conventional, sio2_conventional, [1,0,0]))
cls.ibs.append(cls.make_ib(sio2_conventional, si_conventional, [1,0,0]))
cls.ibs.append(cls.make_ib(si_struct, sio2_struct, [1,0,0]))
cls.ibs.append(cls.make_ib(sio2_struct, si_struct, [1,0,0]))
cls.ibs.append(cls.make_ib(si_struct, sio2_struct, [1,1,1]))
cls.ibs.append(cls.make_ib(sio2_struct, si_struct, [1,1,1]))
@staticmethod
def make_ib(substrate, film, miller):
ib = InterfaceBuilder(substrate, film)
ib.generate_interfaces(substrate_millers=[miller])
return ib
def test_film_and_substrate_sites(self):
for ib in self.ibs:
interface = ib.interfaces[0]
assert len(interface.film) == len(interface.modified_film_structure)
assert len(interface.substrate) == len(interface.modified_sub_structure)
def test_lattice(self):
zsl = ZSLGenerator()
for ib in self.ibs:
interface = ib.interfaces[0]
assert zsl.is_same_vectors(interface.modified_sub_structure.lattice.matrix[:2],
interface.modified_film_structure.lattice.matrix[:2])
def test_structure_preservation(self):
for ib in self.ibs:
for interface in ib.interfaces[:2]:
# assumes test structures are SiO2 and Si
substrate, film = interface.substrate, interface.film
if substrate.ntypesp == 1:
si = substrate
sio2 = film
else:
si = film
sio2 = substrate
sidm = si.distance_matrix
sidm = sidm[sidm > 0]
sio2dm = sio2.distance_matrix
sio2dm = sio2dm[sio2dm > 0]
assert si.is_valid(tol=2.32)
assert sio2.is_valid(tol=1.6)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition of Kubernetes TFX runner."""
import datetime
import json
from typing import List, Optional, Type
from absl import logging
from tfx.dsl.component.experimental import container_component
from tfx.dsl.components.base import base_node
from tfx.orchestration import data_types
from tfx.orchestration import metadata
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import tfx_runner
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import config_utils
from tfx.orchestration.config import pipeline_config
from tfx.orchestration.experimental.kubernetes import kubernetes_remote_runner
from tfx.orchestration.experimental.kubernetes import node_wrapper
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import in_process_component_launcher
from tfx.orchestration.launcher import kubernetes_component_launcher
from tfx.utils import json_utils
from tfx.utils import kube_utils
from tfx.utils import name_utils
from google.protobuf import json_format
from ml_metadata.proto import metadata_store_pb2
_CONTAINER_COMMAND = [
'python', '-m',
'tfx.orchestration.experimental.kubernetes.container_entrypoint'
]
# Suffix added to the component id to avoid MLMD conflict when
# registering this component.
_WRAPPER_SUFFIX = '.Wrapper'
_TFX_IMAGE = 'tensorflow/tfx'
def get_default_kubernetes_metadata_config(
) -> metadata_store_pb2.ConnectionConfig:
"""Returns the default metadata connection config for a kubernetes cluster.
Returns:
A config proto that will be serialized as JSON and passed to the running
container so the TFX component driver is able to communicate with MLMD in
a kubernetes cluster.
"""
connection_config = metadata_store_pb2.ConnectionConfig()
connection_config.mysql.host = 'mysql'
connection_config.mysql.port = 3306
connection_config.mysql.database = 'mysql'
connection_config.mysql.user = 'root'
connection_config.mysql.password = ''
return connection_config
def launch_container_component(
component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
component_config: base_component_config.BaseComponentConfig,
pipeline: tfx_pipeline.Pipeline):
"""Use the kubernetes component launcher to launch the component.
Args:
component: Container component to be executed.
component_launcher_class: The class of the launcher to launch the component.
component_config: component config to launch the component.
pipeline: Logical pipeline that contains pipeline related information.
"""
driver_args = data_types.DriverArgs(enable_cache=pipeline.enable_cache)
metadata_connection = metadata.Metadata(pipeline.metadata_connection_config)
component_launcher = component_launcher_class.create(
component=component,
pipeline_info=pipeline.pipeline_info,
driver_args=driver_args,
metadata_connection=metadata_connection,
beam_pipeline_args=pipeline.beam_pipeline_args,
additional_pipeline_args=pipeline.additional_pipeline_args,
component_config=component_config)
logging.info('Component %s is running.', component.id)
component_launcher.launch()
logging.info('Component %s is finished.', component.id)
class KubernetesDagRunnerConfig(pipeline_config.PipelineConfig):
"""Runtime configuration parameters specific to execution on Kubernetes."""
def __init__(self,
tfx_image: Optional[str] = None,
supported_launcher_classes: Optional[List[Type[
base_component_launcher.BaseComponentLauncher]]] = None,
**kwargs):
"""Creates a KubernetesDagRunnerConfig object.
Args:
tfx_image: The TFX container image to use in the pipeline.
supported_launcher_classes: Optional list of component launcher classes
that are supported by the current pipeline. List sequence determines the
order in which launchers are chosen for each component being run.
**kwargs: keyword args for PipelineConfig.
"""
supported_launcher_classes = supported_launcher_classes or [
in_process_component_launcher.InProcessComponentLauncher,
kubernetes_component_launcher.KubernetesComponentLauncher,
]
super().__init__(
supported_launcher_classes=supported_launcher_classes, **kwargs)
self.tfx_image = tfx_image or _TFX_IMAGE
class KubernetesDagRunner(tfx_runner.TfxRunner):
"""TFX runner on Kubernetes."""
def __init__(self, config: Optional[KubernetesDagRunnerConfig] = None):
"""Initializes KubernetesDagRunner as a TFX orchestrator.
Args:
config: Optional pipeline config for customizing the launching of each
component. Defaults to pipeline config that supports
InProcessComponentLauncher and KubernetesComponentLauncher.
"""
if config is None:
config = KubernetesDagRunnerConfig()
super().__init__(config)
def run(self, pipeline: tfx_pipeline.Pipeline) -> None:
"""Deploys given logical pipeline on Kubernetes.
Args:
pipeline: Logical pipeline containing pipeline args and components.
"""
if not pipeline.pipeline_info.run_id:
pipeline.pipeline_info.run_id = datetime.datetime.now().isoformat()
if not kube_utils.is_inside_cluster():
kubernetes_remote_runner.run_as_kubernetes_job(
pipeline=pipeline, tfx_image=self._config.tfx_image)
return
# TODO(ericlege): Support running components in parallel.
ran_components = set()
# Runs component in topological order.
for component in pipeline.components:
# Verify that components are in topological order.
if hasattr(component, 'upstream_nodes') and component.upstream_nodes:
for upstream_node in component.upstream_nodes:
assert upstream_node in ran_components, ('Components are not in '
'topological order')
(component_launcher_class,
component_config) = config_utils.find_component_launch_info(
self._config, component)
# Check if the component is launchable as a container component.
if kubernetes_component_launcher.KubernetesComponentLauncher.can_launch(
component.executor_spec, component_config):
launch_container_component(component, component_launcher_class,
component_config, pipeline)
# Otherwise, the component should be launchable with the in-process
# component launcher. Wrap the component into a container component.
elif in_process_component_launcher.InProcessComponentLauncher.can_launch(
component.executor_spec, component_config):
wrapped_component = self._wrap_container_component(
component=component,
component_launcher_class=component_launcher_class,
component_config=component_config,
pipeline=pipeline)
# Component launch info is updated by wrapping the component into a
# container component. Therefore, these properties need to be reloaded.
(wrapped_component_launcher_class,
wrapped_component_config) = config_utils.find_component_launch_info(
self._config, wrapped_component)
launch_container_component(wrapped_component,
wrapped_component_launcher_class,
wrapped_component_config, pipeline)
else:
raise ValueError('Can not find suitable launcher for component.')
ran_components.add(component)
def _wrap_container_component(
self,
component: base_node.BaseNode,
component_launcher_class: Type[
base_component_launcher.BaseComponentLauncher],
component_config: Optional[base_component_config.BaseComponentConfig],
pipeline: tfx_pipeline.Pipeline,
) -> base_node.BaseNode:
"""Wrapper for container component.
Args:
component: Component to be executed.
component_launcher_class: The class of the launcher to launch the
component.
component_config: component config to launch the component.
pipeline: Logical pipeline that contains pipeline related information.
Returns:
A container component that runs the wrapped component upon execution.
"""
component_launcher_class_path = name_utils.get_full_name(
component_launcher_class)
serialized_component = json_utils.dumps(node_wrapper.NodeWrapper(component))
arguments = [
'--pipeline_name',
pipeline.pipeline_info.pipeline_name,
'--pipeline_root',
pipeline.pipeline_info.pipeline_root,
'--run_id',
pipeline.pipeline_info.run_id,
'--metadata_config',
json_format.MessageToJson(
message=get_default_kubernetes_metadata_config(),
preserving_proto_field_name=True),
'--beam_pipeline_args',
json.dumps(pipeline.beam_pipeline_args),
'--additional_pipeline_args',
json.dumps(pipeline.additional_pipeline_args),
'--component_launcher_class_path',
component_launcher_class_path,
'--serialized_component',
serialized_component,
'--component_config',
json_utils.dumps(component_config),
]
# Outputs/Parameters fields are not used as they are contained in
# the serialized component.
return container_component.create_container_component(
name=component.__class__.__name__,
outputs={},
parameters={},
image=self._config.tfx_image,
command=_CONTAINER_COMMAND + arguments)().with_id(component.id +
_WRAPPER_SUFFIX)
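# Usage sketch (assumption: `my_pipeline` is a tfx_pipeline.Pipeline assembled
# elsewhere with its metadata_connection_config set, e.g. to
# get_default_kubernetes_metadata_config()):
#
#     runner = KubernetesDagRunner(
#         config=KubernetesDagRunnerConfig(tfx_image='tensorflow/tfx:latest'))
#     runner.run(my_pipeline)
#
# Outside the cluster this submits the pipeline as a Kubernetes job; inside the
# cluster it launches each component in topological order as described in run().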
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for align_transforms."""
import gin
import tensorflow as tf
from dedal import vocabulary
from dedal.data import align_transforms
class AlignTransformsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
gin.clear_config()
tf.random.set_seed(0)
self.vocab = vocabulary.alternative
self.sampler = vocabulary.Sampler(vocab=self.vocab)
def test_project_msa_rows(self):
token = '-'
seq1 = tf.convert_to_tensor(self.vocab.encode('XX--X-XX-X'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('Y-Y-Y-YYYY'), tf.int32)
mc = tf.convert_to_tensor([False] + 8 * [True] + [False], tf.bool)
proj_msa_fn = align_transforms.ProjectMSARows(token=token, vocab=self.vocab)
out1, out2 = proj_msa_fn.call(seq1, seq2)
self.assertAllEqual(out1, self.vocab.encode('XX-XXX-X'))
self.assertAllEqual(out2, self.vocab.encode('Y-YYYYYY'))
out1, out2 = proj_msa_fn.call(seq1, seq2, mc)
self.assertAllEqual(out1, self.vocab.encode('X-XXX-'))
self.assertAllEqual(out2, self.vocab.encode('-YYYYY'))
def test_pid1(self):
token = '-'
pid_fn = align_transforms.PID(definition=1, token=token, vocab=self.vocab)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 1.0)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('YXXXYXYX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.5)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXYYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('YXYXXYYX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.0)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('---X-XXY'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.8)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('---X-YYX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.2)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY----'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('---X-YYXYXXX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.2)
def test_pid3(self):
token = '-'
pid_fn = align_transforms.PID(definition=3, token=token, vocab=self.vocab)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 1.0)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('YXXXYXYX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.5)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXYYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('YXYXXYYX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.0)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('---X-XXY'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 1.0)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('---X-YYX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.25)
seq1 = tf.convert_to_tensor(self.vocab.encode('XYXXYXXY----'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('---X-YYXYXXX'), tf.int32)
pid = pid_fn.call(seq1, seq2)
self.assertEqual(pid, 0.125)
def test_create_alignment_targets(self):
gap_token = '-'
n_prepend_tokens = 0
align_fn = align_transforms.CreateAlignmentTargets(
gap_token=gap_token,
n_prepend_tokens=n_prepend_tokens,
vocab=self.vocab)
seq1 = tf.convert_to_tensor(self.vocab.encode('XX-XXXX'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('YYYY-YY'), tf.int32)
expected_output = tf.convert_to_tensor([[1, 2, 2, 3, 4, 5, 6],
[1, 2, 3, 4, 4, 5, 6],
[0, 1, 4, 2, 6, 3, 1]], tf.int32)
output = align_fn.call(seq1, seq2)
self.assertAllEqual(output, expected_output)
seq1 = tf.convert_to_tensor(self.vocab.encode('--XXXXXX'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('YYYY-YY-'), tf.int32)
expected_output = tf.convert_to_tensor([[1, 2, 3, 4, 5],
[3, 4, 4, 5, 6],
[0, 1, 6, 3, 1]], tf.int32)
output = align_fn.call(seq1, seq2)
self.assertAllEqual(output, expected_output)
seq1 = tf.convert_to_tensor(self.vocab.encode('X-X-X-X-'), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode('-Y-Y-Y-Y'), tf.int32)
expected_output = tf.zeros([3, 0], tf.int32)
output = align_fn.call(seq1, seq2)
self.assertAllEqual(output, expected_output)
def test_create_homology_targets(self):
hom_fn = align_transforms.CreateHomologyTargets()
values = tf.repeat([0, 1, 2, 3], 2)
expected_output = tf.convert_to_tensor([1, 1, 1, 1, 0, 0, 0, 0], tf.float32)
expected_output = expected_output[:, tf.newaxis]
self.assertAllEqual(hom_fn.call(values), expected_output)
values = tf.repeat([0, 1, 2, 0], 2)
expected_output = tf.convert_to_tensor([1, 1, 1, 1, 1, 0, 0, 0], tf.float32)
expected_output = expected_output[:, tf.newaxis]
self.assertAllEqual(hom_fn.call(values), expected_output)
hom_fn = align_transforms.CreateHomologyTargets(process_negatives=False)
self.assertAllEqual(hom_fn.call(values),
tf.ones([len(values) // 2, 1], tf.float32))
def test_create_batched_weights(self):
b, l = 32, 64
mock_targets = self.sampler.sample([b])
weights_fn = align_transforms.CreateBatchedWeights()
weights = weights_fn.call(mock_targets)
self.assertAllEqual(weights, tf.ones(b, tf.float32))
mock_targets = self.sampler.sample([b, 3, l])
weights_fn = align_transforms.CreateBatchedWeights()
weights = weights_fn.call(mock_targets)
self.assertAllEqual(weights, tf.ones(b, tf.float32))
def test_pad_negative_pairs(self):
b, l = 32, 64
pad_fn = align_transforms.PadNegativePairs()
mock_targets = self.sampler.sample([b])
targets = pad_fn.call(mock_targets)
self.assertAllEqual(targets[:b], mock_targets)
self.assertAllEqual(targets[b:], tf.zeros_like(mock_targets))
mock_targets = self.sampler.sample([b, 3, l])
targets = pad_fn.call(mock_targets)
self.assertAllEqual(targets[:b], mock_targets)
self.assertAllEqual(targets[b:], tf.zeros_like(mock_targets))
value = -1
pad_fn = align_transforms.PadNegativePairs(value=value)
mock_targets = tf.ones([b], tf.float32)
targets = pad_fn.call(mock_targets)
self.assertAllEqual(targets[:b], mock_targets)
self.assertAllEqual(targets[b:], value * tf.ones_like(mock_targets))
def test_add_random_tails(self):
seq1 = 'ACG----AATGGCACC--CTAA---'
seq2 = '---GGGTAA-GGTACCTACT--TCG'
seq1 = tf.convert_to_tensor(self.vocab.encode(seq1), tf.int32)
seq2 = tf.convert_to_tensor(self.vocab.encode(seq2), tf.int32)
add_random_tails = align_transforms.AddRandomTails()
out_seq1, out_seq2 = add_random_tails.call(seq1, seq2)
start_pos1 = self.vocab.decode(out_seq1).find(self.vocab.decode(seq1))
start_pos2 = self.vocab.decode(out_seq2).find(self.vocab.decode(seq2))
# Verifies that seq1 (resp. seq2) is contained in out_seq1 (resp. out_seq2).
self.assertNotEqual(start_pos1, -1)
self.assertNotEqual(start_pos2, -1)
# Verifies alignment targets are shifted by the right offset.
create_alignment_targets = align_transforms.CreateAlignmentTargets()
alg_tar = create_alignment_targets.call(seq1, seq2)
out_alg_tar = create_alignment_targets.call(out_seq1, out_seq2)
self.assertAllEqual(out_alg_tar[0] - alg_tar[0],
alg_tar.shape[1] * [start_pos1])
self.assertAllEqual(out_alg_tar[1] - alg_tar[1],
alg_tar.shape[1] * [start_pos2])
self.assertAllEqual(out_alg_tar[2] - alg_tar[2],
alg_tar.shape[1] * [0]) # States unchanged.
def test_add_alignment_context(self):
sequence_1 = 'AATGGCACC--CT'
sequence_2 = 'AA-GGTACCTACT'
full_sequence_1 = 'ACG' + sequence_1.replace('-', '') + 'AA'
full_sequence_2 = 'GGGT' + sequence_2.replace('-', '') + 'TCG'
sequence_1 = tf.convert_to_tensor(self.vocab.encode(sequence_1), tf.int32)
sequence_2 = tf.convert_to_tensor(self.vocab.encode(sequence_2), tf.int32)
full_sequence_1 = tf.convert_to_tensor(
self.vocab.encode(full_sequence_1), tf.int32)
full_sequence_2 = tf.convert_to_tensor(
self.vocab.encode(full_sequence_2), tf.int32)
start_1, end_1 = 4, 14
start_2, end_2 = 5, 16
add_alignment_context = align_transforms.AddAlignmentContext()
sequence_with_ctx_1, sequence_with_ctx_2 = add_alignment_context.call(
sequence_1, sequence_2, full_sequence_1, full_sequence_2,
start_1, start_2, end_1, end_2)
self.assertEqual(len(sequence_with_ctx_1), len(sequence_with_ctx_2))
self.assertIn(self.vocab.decode(sequence_with_ctx_1),
self.vocab.decode(full_sequence_1))
self.assertIn(self.vocab.decode(sequence_with_ctx_2),
self.vocab.decode(full_sequence_2))
create_alignment_targets = align_transforms.CreateAlignmentTargets()
targets = create_alignment_targets.call(sequence_1, sequence_2)
targets_with_ctx = create_alignment_targets.call(
sequence_with_ctx_1, sequence_with_ctx_2)
find_1 = self.vocab.decode(sequence_with_ctx_1).find(
self.vocab.decode(sequence_1))
find_2 = self.vocab.decode(sequence_with_ctx_2).find(
self.vocab.decode(sequence_2))
self.assertAllEqual(targets_with_ctx[0], targets[0] + find_1)
self.assertAllEqual(targets_with_ctx[1], targets[1] + find_2)
self.assertAllEqual(targets_with_ctx[2], targets[2])
def test_trim_alignment(self):
sequence_1 = 'ACG----AATGGCACC--CTAA---'
sequence_2 = '---GGGTAA-GGTACCTACT--TCG'
sequence_1 = tf.convert_to_tensor(self.vocab.encode(sequence_1), tf.int32)
sequence_2 = tf.convert_to_tensor(self.vocab.encode(sequence_2), tf.int32)
trim_alignment = align_transforms.TrimAlignment(p_trim=1.0)
trimmed_sequence_1, trimmed_sequence_2 = trim_alignment.call(
sequence_1, sequence_2)
self.assertEqual(len(trimmed_sequence_1), len(trimmed_sequence_2))
self.assertIn(self.vocab.decode(trimmed_sequence_1),
self.vocab.decode(sequence_1))
self.assertIn(self.vocab.decode(trimmed_sequence_2),
self.vocab.decode(sequence_2))
def get_match_subsequences(seq1, seq2):
mask = tf.logical_and(self.vocab.compute_mask(seq1, '-'),
self.vocab.compute_mask(seq2, '-'))
match_indices = tf.reshape(tf.where(mask), [-1])
match_seq_1 = self.vocab.decode(tf.gather(seq1, match_indices))
match_seq_2 = self.vocab.decode(tf.gather(seq2, match_indices))
return match_seq_1, match_seq_2
matches_1, matches_2 = get_match_subsequences(sequence_1, sequence_2)
trimmed_matches_1, trimmed_matches_2 = get_match_subsequences(
trimmed_sequence_1, trimmed_sequence_2)
self.assertIn(trimmed_matches_1, matches_1)
self.assertIn(trimmed_matches_2, matches_2)
trim_alignment = align_transforms.TrimAlignment(p_trim=0.0)
trimmed_sequence_1, trimmed_sequence_2 = trim_alignment.call(
sequence_1, sequence_2)
self.assertAllEqual(trimmed_sequence_1, sequence_1)
self.assertAllEqual(trimmed_sequence_2, sequence_2)
class AlignDatasetTransformsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
tf.random.set_seed(0)
self.vocab = vocabulary.alternative
self.sampler = vocabulary.Sampler(vocab=self.vocab)
def test_stratified_sampling_pairing(self):
n_steps = 25
seq = self.sampler.sample([24, 30])
cla_key = tf.repeat([0, 1, 2], 8)
fam_key = tf.repeat([0, 1, 2, 3, 4, 5], 4)
clu_key = tf.concat(
[tf.tile([0, 0, 1, 1], [3]), tf.tile([0, 1, 2, 3], [3])], 0)
ds_in = tf.data.Dataset.from_tensor_slices({
'seq': seq,
'cla_key': cla_key,
'fam_key': fam_key,
'clu_key': clu_key
})
pair_examples = align_transforms.StratifiedSamplingPairing(
index_keys=('fam_key', 'clu_key'), branch_key='clu_key')
ds_out = ds_in.apply(pair_examples)
for ex in ds_out.take(n_steps):
self.assertEqual(ex['cla_key_1'], ex['cla_key_2'])
self.assertEqual(ex['fam_key_1'], ex['fam_key_2'])
self.assertNotEqual(ex['clu_key_1'], ex['clu_key_2'])
pair_examples = align_transforms.StratifiedSamplingPairing(
index_keys=('cla_key', 'fam_key', 'clu_key'), branch_key='clu_key')
ds_out = ds_in.apply(pair_examples)
for ex in ds_out.take(n_steps):
self.assertEqual(ex['cla_key_1'], ex['cla_key_2'])
self.assertEqual(ex['fam_key_1'], ex['fam_key_2'])
self.assertNotEqual(ex['clu_key_1'], ex['clu_key_2'])
pair_examples = align_transforms.StratifiedSamplingPairing(
index_keys=('cla_key', 'fam_key', 'clu_key'), branch_key='fam_key')
ds_out = ds_in.apply(pair_examples)
for ex in ds_out.take(n_steps):
self.assertEqual(ex['cla_key_1'], ex['cla_key_2'])
self.assertNotEqual(ex['fam_key_1'], ex['fam_key_2'])
pair_examples = align_transforms.StratifiedSamplingPairing(
index_keys=('cla_key', 'fam_key', 'clu_key'), branch_key='cla_key')
ds_out = ds_in.apply(pair_examples)
for ex in ds_out.take(n_steps):
self.assertNotEqual(ex['cla_key_1'], ex['cla_key_2'])
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import numpy as np
import pylab as pl
from pylab import *
from scipy import *
from scipy import optimize
#v = np.array(v)
#m_inf = np.array(m_inf)
#tau = np.array(tau)
#alpha = m_inf/tau
#beta = (1-tau*alpha)/(tau)
#v= [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60.]
#alpha = [0.12, 0.19, 0.28, 0.36, 0.44, 0.51, 0.59, 0.67, 0.74, 0.82]
#beta = [0.54, 0.36, 0.24, 0.15, 0.08, 0.04, 0.02, 0.01, 0.01, 0. ]
v= [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60. ]
alpha= [0., 0., 0.02, 0.11, 0.31, 0.57, 0.82, 1.13, 1.42, 1.7]
beta= [3.95, 2.36, 1.42, 0.85, 0.43, 0.25, 0.18, 0.11, 0.07, 0.04]
tau= [0.25, 0.42, 0.69, 1.05, 1.35, 1.21, 1., 0.81, 0.67, 0.57]
inf= [0., 0., 0.02, 0.11, 0.42, 0.69, 0.82, 0.91, 0.95, 0.98]
v= [-100. , -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60. ]
alpha= [0., 0., 0.04, 0.14, 0.31, 0.5, 0.73, 1.06, 1.34, 1.7]
beta= [4.34, 2.36, 1.42, 0.7, 0.37, 0.22, 0.17, 0.11, 0.06, 0.04]
#Tau: [0.23 0.42 0.68 1.19 1.46 1.38 1.11 0.86 0.71 0.57]
#Inf: [0. 0. 0.03 0.16 0.46 0.69 0.81 0.91 0.95 0.98]
Index= [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60. ]
Alpha= [0., 0., 0.04, 0.2, 0.68, 1.95, 3.11, 4.69, 6.36, 8. ]
Beta= [4.08, 3.98, 3.94, 3.79, 2.98, 1.05, 0.32, 0.09, 0., 0.03]
Index= [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60. ]
Alpha= [ 9.61e+00, 2.53e+00, 9.44e-01, 3.43e-01, 5.89e-02, 1.24e-02, 1.51e-03, 0.00e+00, 0.00e+00, 0.00e+00]
Beta= [0., 0., 0.02, 0.04, 0.13, 0.28, 0.69, 2.73, 3.9, 4. ]
Index= [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60. ]
Alpha= [ 2.85e+00, 1.67e+00, 9.44e-01, 3.43e-01, 8.38e-02, 1.48e-02, -1.18e-03, 0.00e+00, 0.00e+00, 0.00e+00]
Beta= [0., 0., 0.02, 0.04, 0.13, 0.28, 0.69, 2.73, 3.9, 4. ]
Index = [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60.]
Alpha = [ 8.95e+00, 1.85e+00, 9.58e-01, 4.45e-01, 1.01e-01, 2.02e-02, 2.02e-02, 6.75e-03, 6.75e-03, 3.33e-16]
Beta = [0., 0., 0., 0.01, 0.11, 0.32, 0.73, 2.75, 3.67, 4.]
Index= [-100., -82.22, -64.44, -46.67, -28.89, -11.11, 6.67, 24.44, 42.22, 60.]
Alpha= [0., 0., 0.04, 0.2, 0.72, 2.24, 5.24, 7.63, 8.33, 8.33]
Beta= [3.85, 3.85, 3.81, 3.8, 3.44, 2.31, 0.64, 0.06, 0., 0.]
inf= """
-100.00 1.00
-82.22 1.00
-64.44 1.00
-46.67 0.98
-28.89 0.49
-11.11 0.06
6.67 0.03
24.44 0.00
42.22 0.00
60.00 0.00
"""
tau = """
-100.00 0.11
-82.22 0.53
-64.44 1.04
-46.67 2.21
-28.89 4.82
-11.11 2.96
6.67 1.33
24.44 0.36
42.22 0.27
60.00 0.25
"""
inf = [float(i) for i in inf.split()]
tau = [float(i) for i in tau.split()]
assert len(inf) == len(tau) == 20
v = inf[0::2]
inf = inf[1::2]
tau = tau[1::2]
Alpha = np.array(inf)/np.array(tau)
Beta = (1-np.array(tau) * Alpha) /np.array(tau)
v = Index
alpha = Alpha
beta = Beta
v = np.array(v)
alpha = np.array(alpha)
beta = np.array(beta)
m = v > -80
v =v[m]
alpha=alpha[m]
beta=beta[m]
v_fit = np.linspace(-100, 60, 100)
#print alpha,
#print beta
#assert False
def fitfunc(p, x):
# Sigmoid-like form used for both the alpha and beta fits:
return (p[0])/(p[2] + np.exp((p[3]+x)/p[4]))
# Alternative form with a linear numerator, kept for reference but unreachable:
#return (p[0] + p[1]*x)/(p[2] + np.exp((p[3]+x)/p[4]))
def errfunc(p, x, y):
dist = fitfunc(p, x) - y
#print fitfunc(p, v_fit)
if np.isnan(fitfunc(p, v_fit)).any():
assert False
return 1000
r = np.dot(dist.T, dist)
return r
# Distance to the target function
#p0_alpha = [0.095, 0.000, 1.0, 1.0, -8]
#p0_alpha = [0.461973318, 0.00820458521, 4.59367292, -4.20812882, -11.9678988]
p0_alpha=[5.06, 0.07, 5.12, -18.4, -25.42]
p0_alpha = [ 1.23807353e-01, 1.63897392e-03, -1.05356548e-01, 2.98171398e+01, 2.02269962e+01]
p0_alpha = [0.18881081, -0.00578191, 0.05032903, 0.00791992, 3.19731029]
p0_alpha = [1.26585951e-05, -7.64869304e-03, 1.00209483e-06, 1.29657042e-02, 6.77722948e+00]
p0_alpha = [1.22452064e-02, -1.62282471e-03, -1.41915248e-03, 2.22988192e+00, 2.95280879e+01]
p1_alpha = optimize.fmin(errfunc, np.array(p0_alpha), args=(v, alpha))
print('P1-Alpha', p1_alpha)
p0_beta = [ 0.2, 0.003, -8.95334802e-02, -1.0, 25]
#p0_beta = [0.12, 0.0, 2.0, 30, 9.0]
p0_beta = [ 1.23807353e-01, 1.63897392e-03, -1.05356548e-01, 2.98171398e+01, 2.02269962e+01]
p0_beta=[5.06, 0.07, 5.12, -18.4, -25.42]
p0_beta=[5.06, 0.00, 1, 0, 1]
p0_beta = [0.18881081, -0.00578191, 0.05032903, 0.00791992, 3.19731029]
p0_beta = [ 4.69173146e-01, 2.95636835e-03, 1.17652953e-01, 4.80687239e-02, -8.43859689e+00]
p1_beta = optimize.fmin(errfunc, np.array(p0_beta), args=(v, beta))
print('P1-Beta', p1_beta)
v_fit = np.linspace(-100, 60, 100)
alpha_fit = fitfunc(p1_alpha, v_fit)
beta_fit = fitfunc(p1_beta, v_fit)
pl.plot(v, alpha, 'r', label='Orig')
pl.plot(v_fit, alpha_fit, 'b', label='Fit')
pl.legend()
pl.figure()
pl.plot(v, beta, 'r', label='Orig')
pl.plot(v_fit, beta_fit, 'b', label='Fit')
pl.legend()
pl.show()
#pl.show()
|
|
import json, re, shutil, errno, os
from os.path import join
from twisted.internet.defer import Deferred
from twisted.web.resource import NoResource, ErrorPage
from twisted.web.server import NOT_DONE_YET
from .errors import BaseError, BaseHTTPError, BadRequest
from .projecttemplates import templates
from .resource import SlydJsonResource
from .utils.copy import FileSystemSpiderCopier
from .utils.download import FileSystemProjectArchiver
# stick to alphanum . and _. Do not allow only .'s (so safe for FS path)
_INVALID_PROJECT_RE = re.compile(r'[^A-Za-z0-9._]|^\.*$')
def create_projects_manager_resource(spec_manager):
return ProjectsManagerResource(spec_manager)
class ProjectsManagerResource(SlydJsonResource):
def __init__(self, spec_manager):
SlydJsonResource.__init__(self)
self.spec_manager = spec_manager
def getChildWithDefault(self, project_path_element, request):
auth_info = request.auth_info
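        # Staff users and users without an explicit project whitelist may
        # access any project; everyone else is limited to authorized_projects.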
if ('authorized_projects' not in auth_info or
auth_info.get('staff', False) or
project_path_element in auth_info['authorized_projects']):
request.project = project_path_element
try:
next_path_element = request.postpath.pop(0)
except IndexError:
next_path_element = None
if next_path_element not in self.children:
raise NoResource("No such child resource.")
request.prepath.append(project_path_element)
return self.children[next_path_element]
else:
return ErrorPage(
403, "Forbidden", "You don't have access to this project.")
def handle_project_command(self, projects_manager, command_spec):
command = command_spec.get('cmd')
dispatch_func = projects_manager.project_commands.get(command)
if dispatch_func is None:
self.bad_request(
"Unrecognised command %s, available commands: %s." %
(command, ', '.join(projects_manager.project_commands.keys())))
args = command_spec.get('args', [])
try:
retval = dispatch_func(*args)
except TypeError:
self.bad_request("Incorrect arguments for command %s." % command)
except OSError as ex:
if ex.errno == errno.ENOENT:
self.not_found()
elif ex.errno == errno.EEXIST or ex.errno == errno.ENOTEMPTY:
self.bad_request("A project with that name already exists.")
raise
except BaseError as ex:
self.error(ex.status, ex.title, ex.body)
else:
return retval or ''
return ''
def render_GET(self, request):
project_manager = self.spec_manager.project_manager(request.auth_info)
request.write(json.dumps(sorted(project_manager.list_projects())))
return '\n'
def render_POST(self, request):
def finish_request(val):
if modifier:
val = modifier(request, obj, val)
val and request.write(val)
request.finish()
def request_failed(failure):
request.setResponseCode(500)
request.write(failure.getErrorMessage())
request.finish()
return failure
project_manager = self.spec_manager.project_manager(request.auth_info)
project_manager.request = request
obj = self.read_json(request)
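        # Commands may complete asynchronously: when a Deferred is returned,
        # the response is finished from the callbacks defined above.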
try:
retval = self.handle_project_command(project_manager, obj)
modifier = project_manager.modify_request.get(obj.get('cmd'))
if isinstance(retval, Deferred):
retval.addCallbacks(finish_request, request_failed)
return NOT_DONE_YET
else:
if modifier:
retval = modifier(request, obj, retval)
return retval
except BaseHTTPError as ex:
self.error(ex.status, ex.title, ex.body)
def allowed_project_name(name):
return not _INVALID_PROJECT_RE.search(name)
class ProjectsManager(object):
base_dir = '.'
@classmethod
def setup(cls, location):
cls.base_dir = location
def __init__(self, auth_info):
self.auth_info = auth_info
self.user = auth_info['username']
self.projectsdir = ProjectsManager.base_dir
self.modify_request = {
'download': self._render_file
}
self.project_commands = {
'create': self.create_project,
'mv': self.rename_project,
'rm': self.remove_project,
'copy': self.copy_data,
'download': self.download_project
}
def all_projects(self):
try:
for fname in os.listdir(self.projectsdir):
if os.path.isdir(join(self.projectsdir, fname)):
yield fname
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
def list_projects(self):
if 'authorized_projects' in self.auth_info:
return self.auth_info['authorized_projects']
else:
return self.all_projects()
def create_project(self, name):
self.validate_project_name(name)
project_filename = self.project_filename(name)
os.makedirs(project_filename)
with open(join(project_filename, 'project.json'), 'wb') as outf:
outf.write(templates['PROJECT'])
with open(join(project_filename, 'scrapy.cfg'), 'w') as outf:
outf.write(templates['SCRAPY'])
with open(join(project_filename, 'setup.py'), 'w') as outf:
outf.write(templates['SETUP'] % name)
with open(join(project_filename, 'items.json'), 'w') as outf:
outf.write(templates['ITEMS'])
os.makedirs(join(project_filename, 'spiders'))
init_py = join(project_filename, 'spiders', '__init__.py')
with open(init_py, 'w') as outf:
outf.write('')
settings_py = join(project_filename, 'spiders', 'settings.py')
with open(settings_py, 'w') as outf:
outf.write(templates['SETTINGS'])
def rename_project(self, from_name, to_name):
self.validate_project_name(from_name)
self.validate_project_name(to_name)
os.rename(self.project_filename(from_name),
self.project_filename(to_name))
def remove_project(self, name):
shutil.rmtree(self.project_filename(name))
def project_filename(self, name):
return join(self.projectsdir, name)
def validate_project_name(self, name):
if not allowed_project_name(name):
raise BadRequest('Bad Request', 'Invalid project name %s.' % name)
def copy_data(self, source, destination, spiders, items):
copier = FileSystemSpiderCopier(source, destination, self.projectsdir)
return json.dumps(copier.copy(spiders, items))
def download_project(self, name, spiders=None, version=None):
archiver = FileSystemProjectArchiver(name, base_dir=self.projectsdir)
return archiver.archive(spiders).read()
def _render_file(self, request, request_data, body):
name = request_data.get('args')[0].encode('utf-8')
request.setHeader('Content-Type', 'application/zip')
request.setHeader('Content-Disposition', 'attachment; '
'filename="%s.zip"' % name)
request.setHeader('Content-Length', len(body))
return body
|
|
from __future__ import absolute_import
import responses
import sentry
from mock import MagicMock
from six.moves.urllib.parse import urlencode, urlparse
from sentry.constants import ObjectStatus
from sentry.integrations.github import GitHubIntegrationProvider
from sentry.models import Integration, OrganizationIntegration, Repository, Project
from sentry.plugins import plugins
from sentry.testutils import IntegrationTestCase
from tests.sentry.plugins.testutils import register_mock_plugins, unregister_mock_plugins
class GitHubIntegrationTest(IntegrationTestCase):
provider = GitHubIntegrationProvider
base_url = "https://api.github.com"
def setUp(self):
super(GitHubIntegrationTest, self).setUp()
self.installation_id = "install_1"
self.user_id = "user_1"
self.app_id = "app_1"
self.access_token = "xxxxx-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"
self.expires_at = "3000-01-01T00:00:00Z"
self._stub_github()
register_mock_plugins()
def tearDown(self):
unregister_mock_plugins()
super(GitHubIntegrationTest, self).tearDown()
def _stub_github(self):
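        # Reset previously registered mocks and stub the GitHub API endpoints
        # used during the install flow: access tokens, installation
        # repositories, installation details and repository hooks.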
responses.reset()
sentry.integrations.github.integration.get_jwt = MagicMock(return_value="jwt_token_1")
sentry.integrations.github.client.get_jwt = MagicMock(return_value="jwt_token_1")
responses.add(
responses.POST,
self.base_url + "/installations/{}/access_tokens".format(self.installation_id),
json={"token": self.access_token, "expires_at": self.expires_at},
)
responses.add(
responses.GET,
self.base_url + "/installation/repositories",
json={
"repositories": [
{"id": 1296269, "name": "foo", "full_name": "Test-Organization/foo"},
{"id": 9876574, "name": "bar", "full_name": "Test-Organization/bar"},
]
},
)
responses.add(
responses.GET,
self.base_url + "/app/installations/{}".format(self.installation_id),
json={
"id": self.installation_id,
"app_id": self.app_id,
"account": {
"login": "Test Organization",
"avatar_url": "http://example.com/avatar.png",
"html_url": "https://github.com/Test-Organization",
"type": "Organization",
},
},
)
responses.add(responses.GET, self.base_url + "/repos/Test-Organization/foo/hooks", json=[])
def assert_setup_flow(self):
resp = self.client.get(self.init_path)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "github.com"
assert redirect.path == "/apps/sentry-test-app"
# App installation ID is provided
resp = self.client.get(
u"{}?{}".format(self.setup_path, urlencode({"installation_id": self.installation_id}))
)
auth_header = responses.calls[0].request.headers["Authorization"]
assert auth_header == "Bearer jwt_token_1"
self.assertDialogSuccess(resp)
return resp
@responses.activate
def test_plugin_migration(self):
accessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name="Test-Organization/foo",
url="https://github.com/Test-Organization/foo",
provider="github",
external_id=123,
config={"name": "Test-Organization/foo"},
)
inaccessible_repo = Repository.objects.create(
organization_id=self.organization.id,
name="Not-My-Org/other",
provider="github",
external_id=321,
config={"name": "Not-My-Org/other"},
)
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
# Updates the existing Repository to belong to the new Integration
assert Repository.objects.get(id=accessible_repo.id).integration_id == integration.id
# Doesn't touch Repositories not accessible by the new Integration
assert Repository.objects.get(id=inaccessible_repo.id).integration_id is None
@responses.activate
def test_basic_flow(self):
with self.tasks():
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
assert integration.external_id == self.installation_id
assert integration.name == "Test Organization"
assert integration.metadata == {
"access_token": None,
# The metadata doesn't get saved with the timezone "Z" character
# for some reason, so just compare everything but that.
"expires_at": None,
"icon": "http://example.com/avatar.png",
"domain_name": "github.com/Test-Organization",
"account_type": "Organization",
}
oi = OrganizationIntegration.objects.get(
integration=integration, organization=self.organization
)
assert oi.config == {}
@responses.activate
def test_reinstall_flow(self):
self._stub_github()
self.assert_setup_flow()
integration = Integration.objects.get(provider=self.provider.key)
integration.update(status=ObjectStatus.DISABLED)
assert integration.status == ObjectStatus.DISABLED
assert integration.external_id == self.installation_id
resp = self.client.get(
u"{}?{}".format(self.init_path, urlencode({"reinstall_id": integration.id}))
)
assert resp.status_code == 302
redirect = urlparse(resp["Location"])
assert redirect.scheme == "https"
assert redirect.netloc == "github.com"
assert redirect.path == "/apps/sentry-test-app"
# New Installation
self.installation_id = "install_2"
self._stub_github()
resp = self.client.get(
u"{}?{}".format(self.setup_path, urlencode({"installation_id": self.installation_id}))
)
assert resp.status_code == 200
auth_header = responses.calls[0].request.headers["Authorization"]
assert auth_header == "Bearer jwt_token_1"
integration = Integration.objects.get(provider=self.provider.key)
assert integration.status == ObjectStatus.VISIBLE
assert integration.external_id == self.installation_id
@responses.activate
def test_disable_plugin_when_fully_migrated(self):
self._stub_github()
project = Project.objects.create(organization_id=self.organization.id)
plugin = plugins.get("github")
plugin.enable(project)
# Accessible to new Integration - mocked in _stub_github
Repository.objects.create(
organization_id=self.organization.id,
name="Test-Organization/foo",
url="https://github.com/Test-Organization/foo",
provider="github",
external_id="123",
config={"name": "Test-Organization/foo"},
)
# Enabled before
assert "github" in [p.slug for p in plugins.for_project(project)]
with self.tasks():
self.assert_setup_flow()
# Disabled after Integration installed
assert "github" not in [p.slug for p in plugins.for_project(project)]
@responses.activate
def test_get_repositories_search_param(self):
with self.tasks():
self.assert_setup_flow()
responses.add(
responses.GET,
self.base_url + "/search/repositories?q=org:test%20ex",
json={
"items": [
{"name": "example", "full_name": "test/example"},
{"name": "exhaust", "full_name": "test/exhaust"},
]
},
)
integration = Integration.objects.get(provider=self.provider.key)
installation = integration.get_installation(self.organization)
result = installation.get_repositories("ex")
assert result == [
{"identifier": "test/example", "name": "example"},
{"identifier": "test/exhaust", "name": "exhaust"},
]
|
|
# -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame, Series
import pandas as pd
from numpy import nan
import numpy as np
from pandas.util.testing import assert_frame_equal
from pandas import get_dummies
import pandas.util.testing as tm
from pandas.compat import u
class TestGetDummies(object):
@pytest.fixture
def df(self):
return DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
@pytest.fixture(params=['uint8', 'i8', np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=['dense', 'sparse'])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == 'sparse'
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype='object')
def test_basic(self, sparse, dtype):
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype))
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
assert_frame_equal(result, expected)
def test_basic_types(self, sparse, dtype):
# GH 10531
s_list = list('abc')
s_series = Series(s_list)
s_df = DataFrame({'a': [0, 1, 0, 1, 2],
'b': ['A', 'A', 'B', 'C', 'C'],
'c': [2, 3, 3, 3, 2]})
expected = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list('abc'))
if not sparse:
compare = tm.assert_frame_equal
else:
expected = expected.to_sparse(fill_value=0, kind='integer')
compare = tm.assert_sp_frame_equal
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
compare(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
compare(result, expected)
result = get_dummies(s_df, columns=s_df.columns,
sparse=sparse, dtype=dtype)
tm.assert_series_equal(result.get_dtype_counts(),
Series({dtype.name: 8}))
result = get_dummies(s_df, columns=['a'], sparse=sparse, dtype=dtype)
dtype_name = self.effective_dtype(dtype).name
expected_counts = {'int64': 1, 'object': 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_values()
tm.assert_series_equal(result.get_dtype_counts().sort_values(),
expected)
def test_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=['A'])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ['A']
def test_include_na(self, sparse, dtype):
if sparse:
pytest.xfail(reason='nan in index is problematic (GH 16894)')
s = ['a', 'b', np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame({'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame({nan: [0, 0, 1],
'a': [1, 0, 0],
'b': [0, 1, 0]},
dtype=self.effective_dtype(dtype))
exp_na = exp_na.reindex(['a', 'b', nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True,
sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(Series(1, index=[0]), columns=[nan],
dtype=self.effective_dtype(dtype))
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = 'e'
eacute = unicodedata.lookup('LATIN SMALL LETTER E WITH ACUTE')
s = [e, eacute, eacute]
res = get_dummies(s, prefix='letter', sparse=sparse)
exp = DataFrame({'letter_e': [1, 0, 0],
u('letter_%s') % eacute: [0, 1, 1]},
dtype=np.uint8)
assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, sparse=sparse)
expected = DataFrame({'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_mix_default(self, df, sparse, dtype):
result = get_dummies(df, sparse=sparse, dtype=dtype)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1]})
cols = ['A_a', 'A_b', 'B_b', 'B_c']
expected[cols] = expected[cols].astype(dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_list(self, df, sparse):
prefixes = ['from_A', 'from_B']
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'from_A_a', 'from_A_b',
'from_B_b', 'from_B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_str(self, df, sparse):
# not that you should do this...
result = get_dummies(df, prefix='bad', sparse=sparse)
bad_columns = ['bad_a', 'bad_b', 'bad_b', 'bad_c']
expected = DataFrame([[1, 1, 0, 1, 0],
[2, 0, 1, 1, 0],
[3, 1, 0, 0, 1]],
columns=['C'] + bad_columns,
dtype=np.uint8)
expected = expected.astype({"C": np.int64})
assert_frame_equal(result, expected)
def test_dataframe_dummies_subset(self, df, sparse):
result = get_dummies(df, prefix=['from_A'], columns=['A'],
sparse=sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]}, dtype=np.uint8)
expected[['C']] = df[['C']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_sep(self, df, sparse):
result = get_dummies(df, prefix_sep='..', sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A..a': [1, 0, 1],
'A..b': [0, 1, 0],
'B..b': [1, 1, 0],
'B..c': [0, 0, 1]},
dtype=np.uint8)
expected[['C']] = df[['C']]
expected = expected[['C', 'A..a', 'A..b', 'B..b', 'B..c']]
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep=['..', '__'], sparse=sparse)
expected = expected.rename(columns={'B..b': 'B__b', 'B..c': 'B__c'})
assert_frame_equal(result, expected)
result = get_dummies(df, prefix_sep={'A': '..', 'B': '__'},
sparse=sparse)
assert_frame_equal(result, expected)
def test_dataframe_dummies_prefix_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix=['too few'], sparse=sparse)
def test_dataframe_dummies_prefix_sep_bad_length(self, df, sparse):
with pytest.raises(ValueError):
get_dummies(df, prefix_sep=['bad'], sparse=sparse)
def test_dataframe_dummies_prefix_dict(self, sparse):
prefixes = {'A': 'from_A', 'B': 'from_B'}
df = DataFrame({'A': ['a', 'b', 'a'],
'B': ['b', 'b', 'c'],
'C': [1, 2, 3]})
result = get_dummies(df, prefix=prefixes, sparse=sparse)
expected = DataFrame({'from_A_a': [1, 0, 1],
'from_A_b': [0, 1, 0],
'from_B_b': [1, 1, 0],
'from_B_c': [0, 0, 1],
'C': [1, 2, 3]})
columns = ['from_A_a', 'from_A_b', 'from_B_b', 'from_B_c']
expected[columns] = expected[columns].astype(np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_na(self, df, sparse, dtype):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True,
sparse=sparse, dtype=dtype).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_a': [1, 0, 1, 0],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_b': [1, 1, 0, 0],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]}).sort_index(axis=1)
e_dtype = self.effective_dtype(dtype)
columns = ['A_a', 'A_b', 'A_nan', 'B_b', 'B_c', 'B_nan']
expected[columns] = expected[columns].astype(e_dtype)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, sparse=sparse, dtype=dtype)
expected = expected[['C', 'A_a', 'A_b', 'B_b', 'B_c']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_with_categorical(self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, sparse=sparse, dtype=dtype).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3],
'A_a': [1, 0, 1],
'A_b': [0, 1, 0],
'B_b': [1, 1, 0],
'B_c': [0, 0, 1],
'cat_x': [1, 0, 0],
'cat_y': [0, 1, 1]}).sort_index(axis=1)
columns = ['A_a', 'A_b', 'B_b', 'B_c', 'cat_x', 'cat_y']
effective_dtype = self.effective_dtype(dtype)
expected[columns] = expected[columns].astype(effective_dtype)
expected.sort_index(axis=1)
assert_frame_equal(result, expected)
def test_basic_drop_first(self, sparse):
# GH12402 Add a new parameter `drop_first` to avoid collinearity
# Basic case
s_list = list('abc')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame({'b': [0, 1, 0],
'c': [0, 0, 1]},
dtype=np.uint8)
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected.index = list('ABC')
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_one_level(self, sparse):
# Test the case that categorical variable only has one level.
s_list = list('aaa')
s_series = Series(s_list)
s_series_index = Series(s_list, list('ABC'))
expected = DataFrame(index=np.arange(3))
result = get_dummies(s_list, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
result = get_dummies(s_series, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
expected = DataFrame(index=list('ABC'))
result = get_dummies(s_series_index, drop_first=True, sparse=sparse)
assert_frame_equal(result, expected)
def test_basic_drop_first_NA(self, sparse):
        # Test NA handling together with drop_first
s_NA = ['a', 'b', np.nan]
res = get_dummies(s_NA, drop_first=True, sparse=sparse)
exp = DataFrame({'b': [0, 1, 0]}, dtype=np.uint8)
assert_frame_equal(res, exp)
res_na = get_dummies(s_NA, dummy_na=True, drop_first=True,
sparse=sparse)
exp_na = DataFrame(
{'b': [0, 1, 0],
nan: [0, 0, 1]},
dtype=np.uint8).reindex(['b', nan], axis=1)
assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([nan], dummy_na=True, drop_first=True,
sparse=sparse)
exp_just_na = DataFrame(index=np.arange(1))
assert_frame_equal(res_just_na, exp_just_na)
def test_dataframe_dummies_drop_first(self, df, sparse):
df = df[['A', 'B']]
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'A_b': [0, 1, 0],
'B_c': [0, 0, 1]},
dtype=np.uint8)
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_categorical(
self, df, sparse, dtype):
df['cat'] = pd.Categorical(['x', 'y', 'y'])
result = get_dummies(df, drop_first=True, sparse=sparse)
expected = DataFrame({'C': [1, 2, 3],
'A_b': [0, 1, 0],
'B_c': [0, 0, 1],
'cat_y': [0, 1, 1]})
cols = ['A_b', 'B_c', 'cat_y']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected[['C', 'A_b', 'B_c', 'cat_y']]
assert_frame_equal(result, expected)
def test_dataframe_dummies_drop_first_with_na(self, df, sparse):
df.loc[3, :] = [np.nan, np.nan, np.nan]
result = get_dummies(df, dummy_na=True, drop_first=True,
sparse=sparse).sort_index(axis=1)
expected = DataFrame({'C': [1, 2, 3, np.nan],
'A_b': [0, 1, 0, 0],
'A_nan': [0, 0, 0, 1],
'B_c': [0, 0, 1, 0],
'B_nan': [0, 0, 0, 1]})
cols = ['A_b', 'A_nan', 'B_c', 'B_nan']
expected[cols] = expected[cols].astype(np.uint8)
expected = expected.sort_index(axis=1)
assert_frame_equal(result, expected)
result = get_dummies(df, dummy_na=False, drop_first=True,
sparse=sparse)
expected = expected[['C', 'A_b', 'B_c']]
assert_frame_equal(result, expected)
def test_int_int(self):
data = Series([1, 2, 1])
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=[1, 2],
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
data = Series(pd.Categorical(['a', 'b', 'a']))
result = pd.get_dummies(data)
expected = DataFrame([[1, 0],
[0, 1],
[1, 0]],
columns=pd.Categorical(['a', 'b']),
dtype=np.uint8)
tm.assert_frame_equal(result, expected)
def test_int_df(self, dtype):
data = DataFrame(
{'A': [1, 2, 1],
'B': pd.Categorical(['a', 'b', 'a']),
'C': [1, 2, 1],
'D': [1., 2., 1.]
}
)
columns = ['C', 'D', 'A_1', 'A_2', 'B_a', 'B_b']
expected = DataFrame([
[1, 1., 1, 0, 1, 0],
[2, 2., 0, 1, 0, 1],
[1, 1., 1, 0, 1, 0]
], columns=columns)
expected[columns[2:]] = expected[columns[2:]].astype(dtype)
result = pd.get_dummies(data, columns=['A', 'B'], dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_dataframe_dummies_preserve_categorical_dtype(self, dtype):
# GH13854
for ordered in [False, True]:
cat = pd.Categorical(list("xy"), categories=list("xyz"),
ordered=ordered)
result = get_dummies(cat, dtype=dtype)
data = np.array([[1, 0, 0], [0, 1, 0]],
dtype=self.effective_dtype(dtype))
cols = pd.CategoricalIndex(cat.categories,
categories=cat.categories,
ordered=ordered)
expected = DataFrame(data, columns=cols,
dtype=self.effective_dtype(dtype))
tm.assert_frame_equal(result, expected)
class TestMakeAxisDummies(object):
def test_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
cidx = pd.CategoricalIndex(list("xyz"), ordered=ordered)
midx = pd.MultiIndex(levels=[['a'], cidx],
labels=[[0, 0], [0, 1]])
df = DataFrame([[10, 11]], index=midx)
expected = DataFrame([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
index=midx, columns=cidx)
from pandas.core.reshape.reshape import make_axis_dummies
result = make_axis_dummies(df)
tm.assert_frame_equal(result, expected)
result = make_axis_dummies(df, transform=lambda x: x)
tm.assert_frame_equal(result, expected)
|
|
# coding=utf-8
# Copyright 2022 The ML Fairness Gym Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Testing utilities for ML fairness gym."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import attr
import core
import run_util
from agents import random_agents
from spaces import batch
import gin
import gym
import numpy as np
import simplejson as json
from six.moves import range
@attr.s(cmp=False)
class DummyState(core.State):
x = attr.ib()
rng = attr.ib()
params = attr.ib()
@gin.configurable
class DummyEnv(core.FairnessEnv):
"""Simple Dummy Environment used for testing."""
hidden_state_vars = ['rng']
observable_state_vars = {
'x': gym.spaces.Box(low=0, high=1, shape=(1,), dtype=np.float32)
}
def __init__(self, params=None):
self.action_space = gym.spaces.Discrete(2)
if params is None:
params = core.Params() # Empty params.
super(DummyEnv, self).__init__(params)
self._state_init()
def _state_init(self, rng=None):
self.state = DummyState(
rng=rng or np.random.RandomState(),
x=np.array([0.5]),
params=self.initial_params)
def reset(self):
self._state_init(self.state.rng)
return super(DummyEnv, self).reset()
def _step_impl(self, state, action):
"""Run one timestep of the environment's dynamics.
At each timestep, x is resampled from a uniform distribution.
Args:
state: A `State` object containing the current state.
action: An action in `action_space`.
Returns:
A `State` object containing the updated state.
"""
del action # Unused.
state.x = state.rng.rand(1)
return state
@attr.s
class DummyParams(core.Params):
dim = attr.ib(default=1)
class DeterministicDummyEnv(core.FairnessEnv):
"""Simple Dummy Environment with alternating binary state used for testing."""
observable_state_vars = {'x': batch.Batch(gym.spaces.Discrete(2))}
def __init__(self, params=None):
if params is None:
params = DummyParams()
self.action_space = gym.spaces.Discrete(2)
super(DeterministicDummyEnv, self).__init__(params)
self._state_init()
def _state_init(self):
self.state = DummyState(
params=copy.deepcopy(self.initial_params),
rng=None,
x=[0 for _ in range(self.initial_params.dim)])
def _step_impl(self, state, action):
"""Run one timestep of the environment's dynamics.
At each timestep, x is flipped from zero to one or one to zero.
Args:
state: A `State` object containing the current state.
action: An action in `action_space`.
Returns:
A `State` object containing the updated state.
"""
del action # Unused.
state.x = [1 - x for x in state.x]
return state
# TODO(): There isn't actually anything to configure in DummyMetric,
# but we mark it as configurable so that we can refer to it on the
# right-hand-side of expressions in gin configurations. Find out whether
# there's a better way of indicating that than gin.configurable.
@gin.configurable
class DummyMetric(core.Metric):
"""Simple metric for testing.
Measurement returns the length of the history.
"""
def measure(self, env):
"""Returns the length of history."""
history = self._extract_history(env)
return len(history)
def setup_test_simulation(env=None, agent=None, metric=None, return_copy=False):
"""Create an environment, agent, and metric for testing purposes.
Arguments that are left as None will be replaced by dummy versions defined
in this file.
Args:
env: A `core.FairnessEnv` or None.
agent: A `core.Agent` or None.
metric: A `core.Metric` or None.
return_copy: If True, copies of the environment, agent, and auditors are
returned rather than the originals.
Returns:
An (environment, agent, metric) tuple.
"""
if env is None:
env = DummyEnv()
if agent is None:
agent = random_agents.RandomAgent(env.action_space, None,
env.observation_space)
if metric is None:
metric = DummyMetric(env)
if return_copy:
return copy.deepcopy(env), copy.deepcopy(agent), copy.deepcopy(metric)
return env, agent, metric
def run_test_simulation(env=None,
agent=None,
metric=None,
num_steps=10,
seed=100,
stackelberg=False,
check_reproducibility=True):
"""Perform a simple test simulation and return a measurement.
Arguments that are left as None will be replaced by dummy versions defined
in this file.
Args:
env: A `core.FairnessEnv` or None.
agent: A `core.Agent` or None.
metric: A `core.Metric` or None.
num_steps: An integer indicating the number of steps to simulate.
seed: An integer indicating a random seed.
stackelberg: Bool. if true, run a two player stackelberg game else run the
default simulation.
check_reproducibility: Bool. If true, run the simulation twice and check
that the same histories are produced.
Raises:
core.NotReproducibleError if the histories of multiple runs do not match.
Returns:
A measurement result.
"""
env, agent, metric = setup_test_simulation(
env=env, agent=agent, metric=metric)
# Create the clones before any simulation is run.
if check_reproducibility:
# Env doesn't need to be cloned because run_simulation will re-seed and
# reset the environment.
clones = copy.deepcopy((agent, metric))
simulator = (
run_util.run_stackelberg_simulation
if stackelberg else run_util.run_simulation)
result = simulator(env, agent, metric, num_steps, seed=seed, agent_seed=seed)
if check_reproducibility:
base_history = env.serialize_history()
cloned_agent, cloned_metric = clones
simulator(
env, cloned_agent, cloned_metric, num_steps, seed=seed, agent_seed=seed)
cloned_history = env.serialize_history()
    # Check reproducibility by comparing histories of the cloned run with the
    # original. They should be identical.
base_history = json.loads(base_history)['history']
cloned_history = json.loads(cloned_history)['history']
for step, ((state_a, action_a),
(state_b,
action_b)) in enumerate(zip(base_history, cloned_history)):
if state_a != state_b:
raise core.NotReproducibleError('Step %d. State mismatch: %s vs %s' %
(step, state_a, state_b))
if action_a != action_b:
raise core.NotReproducibleError('Step %d. Action mismatch: %s vs %s' %
(step, action_a, action_b))
return result
# In keeping with the style of DummyEnv and DummyMetric, alias DummyAgent as
# well.
# pylint: disable=invalid-name
DummyAgent = gin.external_configurable(
random_agents.RandomAgent, name='test_util.DummyAgent')
|
|
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
from __future__ import absolute_import
from six import unichr
import six
__all__ = [
'HTMLParserTreeBuilder',
]
from six.moves.html_parser import HTMLParser
if six.PY2:
from six.moves.html_parser import HTMLParseError
else:
class HTMLParseError(Exception):
pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError) as e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, six.text_type):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError as e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
|
import json
import logging
from go.apps.jsbox.log import LogManager
from go.vumitools.api import VumiApiCommand
from go.vumitools.tests.helpers import djangotest_imports
with djangotest_imports(globals()):
from go.apps.jsbox.view_definition import (
JSBoxReportsView, ConversationReportsView)
from go.apps.tests.view_helpers import AppViewsHelper
from go.base.utils import get_conversation_view_definition
from go.base.tests.helpers import GoDjangoTestCase
class TestJsBoxViews(GoDjangoTestCase):
def setUp(self):
self.app_helper = self.add_helper(AppViewsHelper(u'jsbox'))
self.client = self.app_helper.get_client()
def setup_conversation(self, with_group=True, with_channel=True, **kw):
groups = []
if with_group:
groups.append(
self.app_helper.create_group_with_contacts(u'test_group', 0))
channel = None
if with_channel:
channel = self.app_helper.create_channel(
supports_generic_sends=True)
return self.app_helper.create_conversation_helper(
channel=channel, groups=groups, **kw)
def test_action_send_jsbox_get(self):
conv_helper = self.setup_conversation(started=True)
response = self.client.get(
conv_helper.get_action_view_url('send_jsbox'))
self.assertEqual([], self.app_helper.get_api_commands_sent())
self.assertContains(response, '<h1>Trigger push messages</h1>')
def test_action_send_jsbox_post(self):
conv_helper = self.setup_conversation(started=True)
response = self.client.post(
conv_helper.get_action_view_url('send_jsbox'), {}, follow=True)
self.assertRedirects(response, conv_helper.get_view_url('show'))
[send_jsbox_cmd] = self.app_helper.get_api_commands_sent()
conversation = conv_helper.get_conversation()
self.assertEqual(send_jsbox_cmd, VumiApiCommand.command(
'%s_application' % (conversation.conversation_type,),
'send_jsbox', command_id=send_jsbox_cmd["command_id"],
user_account_key=conversation.user_account.key,
conversation_key=conversation.key,
batch_id=conversation.batch.key))
def test_action_send_jsbox_no_group(self):
conv_helper = self.setup_conversation(started=True, with_group=False)
response = self.client.post(
conv_helper.get_action_view_url('send_jsbox'), {}, follow=True)
self.assertRedirects(response, conv_helper.get_view_url('show'))
[msg] = response.context['messages']
self.assertEqual(
str(msg), "Action disabled: This action needs a contact group.")
self.assertEqual([], self.app_helper.get_api_commands_sent())
def test_action_send_jsbox_not_running(self):
conv_helper = self.setup_conversation(started=False)
response = self.client.post(
conv_helper.get_action_view_url('send_jsbox'), {},
follow=True)
self.assertRedirects(response, conv_helper.get_view_url('show'))
[msg] = response.context['messages']
self.assertEqual(
str(msg),
"Action disabled: This action needs a running conversation.")
self.assertEqual([], self.app_helper.get_api_commands_sent())
def test_action_send_jsbox_no_channel(self):
conv_helper = self.setup_conversation(started=True, with_channel=False)
response = self.client.post(
conv_helper.get_action_view_url('send_jsbox'), {}, follow=True)
self.assertRedirects(response, conv_helper.get_view_url('show'))
[msg] = response.context['messages']
self.assertEqual(
str(msg),
"Action disabled: This action needs channels capable"
" of sending messages attached to this conversation.")
self.assertEqual([], self.app_helper.get_api_commands_sent())
def test_show_stopped(self):
conv_helper = self.setup_conversation(started=False, name=u"myconv")
response = self.client.get(conv_helper.get_view_url('show'))
conversation = response.context[0].get('conversation')
self.assertEqual(conversation.name, u"myconv")
self.assertContains(response, '<h1>myconv</h1>')
self.assertNotContains(
response, conv_helper.get_action_view_url('send_jsbox'))
def test_show_running(self):
conv_helper = self.setup_conversation(started=True, name=u"myconv")
response = self.client.get(conv_helper.get_view_url('show'))
conversation = response.context[0].get('conversation')
self.assertEqual(conversation.name, u"myconv")
self.assertContains(response, '<h1>myconv</h1>')
self.assertContains(response,
conv_helper.get_action_view_url('send_jsbox'))
def setup_and_save_conversation(self, app_config):
conv_helper = self.setup_conversation()
# render the form
response = self.client.get(conv_helper.get_view_url('edit'))
self.assertEqual(response.status_code, 200)
# create the form data
form_data = {
'jsbox-javascript': 'x = 1;',
'jsbox-source_url': '',
'jsbox-update_from_source': '0',
'jsbox_app_config-TOTAL_FORMS': str(len(app_config)),
'jsbox_app_config-INITIAL_FORMS': '0',
'jsbox_app_config-MAX_NUM_FORMS': u''
}
for i, (key, cfg) in enumerate(app_config.items()):
form_data['jsbox_app_config-%d-key' % i] = key
form_data['jsbox_app_config-%d-value' % i] = cfg["value"]
form_data['jsbox_app_config-%d-source_url' % i] = cfg["source_url"]
# post the form
response = self.client.post(
conv_helper.get_view_url('edit'), form_data)
self.assertRedirects(response, conv_helper.get_view_url('show'))
return conv_helper.get_conversation()
def test_edit_conversation(self):
conversation = self.setup_and_save_conversation({})
self.assertEqual(conversation.config, {
'jsbox': {
'javascript': 'x = 1;',
'source_url': '',
},
'jsbox_app_config': {},
})
self.assertEqual(list(conversation.extra_endpoints), [])
def test_edit_conversation_with_v2_extra_endpoints(self):
app_config = {
"config": {
"value": json.dumps({
"endpoints": {
"endpoint1": {},
"endpoint2": {},
},
}),
"source_url": u"",
}
}
conversation = self.setup_and_save_conversation(app_config)
self.assertEqual(conversation.config, {
'jsbox': {
'javascript': 'x = 1;',
'source_url': '',
},
'jsbox_app_config': app_config,
})
self.assertEqual(list(conversation.extra_endpoints),
['endpoint1', 'endpoint2'])
def test_edit_conversation_with_v1_extra_endpoints(self):
app_config = {
"config": {
"value": json.dumps({
"sms_tag": ["foo", "bar"],
}),
"source_url": u"",
}
}
conversation = self.setup_and_save_conversation(app_config)
self.assertEqual(conversation.config, {
'jsbox': {
'javascript': 'x = 1;',
'source_url': '',
},
'jsbox_app_config': app_config,
})
self.assertEqual(list(conversation.extra_endpoints), ['foo:bar'])
def test_jsbox_logs(self):
conv_helper = self.setup_conversation()
campaign_key = conv_helper.get_conversation().user_account.key
log_manager = LogManager(
self.app_helper.vumi_helper.get_vumi_api().redis)
for i in range(10):
log_manager.add_log(campaign_key, conv_helper.conversation_key,
"test %d" % i, logging.INFO)
response = self.client.get(conv_helper.get_view_url('jsbox_logs'))
self.assertEqual(response.status_code, 200)
for i in range(10):
self.assertContains(response, "INFO] test %d" % i)
def test_jsbox_empty_logs(self):
conv_helper = self.setup_conversation()
response = self.client.get(conv_helper.get_view_url('jsbox_logs'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No logs yet.")
def test_jsbox_logs_action(self):
conv_helper = self.setup_conversation()
response = self.client.get(
conv_helper.get_action_view_url('view_logs'))
self.assertRedirects(response, conv_helper.get_view_url('jsbox_logs'))
def test_jsbox_report_layout_building(self):
conv_helper = self.setup_conversation()
conversation = conv_helper.get_conversation()
conversation.config['jsbox_app_config'] = {
'reports': {
'key': 'reports',
'value': json.dumps({
'layout': [{
'type': 'diamondash.widgets.lvalue.LValueWidget',
'time_range': '1d',
'name': 'Messages Received (24h)',
'target': {
'metric_type': 'conversation',
'name': 'messages_received',
}
}]
})
}
}
view = JSBoxReportsView()
layout = view.build_layout(conversation)
self.assertEqual(layout.get_config(), [{
'type': 'diamondash.widgets.lvalue.LValueWidget',
'name': 'Messages Received (24h)',
'time_range': '1d',
'target': (
"go.campaigns.%s.conversations.%s.messages_received.avg" %
(conversation.user_account.key, conversation.key))
}])
def test_jsbox_report_layout_building_for_no_report_config(self):
conv_helper = self.setup_conversation()
conversation = conv_helper.get_conversation()
view_def = get_conversation_view_definition(
conversation.conversation_type)
default_reports_view = ConversationReportsView(view_def=view_def)
default_layout = default_reports_view.build_layout(conversation)
view = JSBoxReportsView(view_def=view_def)
layout = view.build_layout(conversation)
self.assertEqual(layout.get_config(), default_layout.get_config())
|
|
import sys
import os
import shutil
import collections
import pprint
from subprocess import call
IMG_VIEW_HOME = os.environ.get('XNATIMAGEVIEWER_HOME')
LOAD_PATH = IMG_VIEW_HOME + \
"/src/main/scripts/viewer/gxnat"
PROVIDE_PREFIX = 'goog.provide('
REQUIRE_PREFIX = 'goog.require('
# NOTE: the list values below are assumed placeholders inferred from how they
# are used in the functions further down; adjust them to the project's actual
# namespaces and filter lists.
DEPS_PREFIXES = ['goog.', 'nrg.', 'gxnat.', 'xiv.']
SKIP_SUBSTRS = []
SAVE_FOR_LATERS = []
LEFT_SPLITTERS = ['(', ')', '[', ']', ',', ';', ' ']
GOOG_SKIPPERS = []
PPRINT = pprint.PrettyPrinter(indent=2)
def filterLastCapital(depLine):
"""
Filters an isolated dependency line by breaking it apart by the periods,
    and then finding the last capitalized sub-property.
-----------------
EXAMPLES:
-----------------
filterLastCapital('nrg.ui.Component.getElement')
>>> 'nrg.ui.Component'
filterLastCapital('nrg.ui.Component.SubComponent')
>>> 'nrg.ui.Component.SubComponent'
-----------------
EXCEPTIONS:
-----------------
# Does not filter 2-level dependency lines
filterLastCapital('nrg.string')
>>> 'nrg.string'
    # Returns the property/dependency before the last if no capitals
filterLastCapital('nrg.fx.fadeIn')
>>> 'nrg.fx'
@type depLine: string
@param depLine: The dependency line
@rtype: string
    @return: The filtered dependency line.
"""
lastCap = -1;
count = 0;
filteredDepLine = ''
#---------------
# split the dependencies
#---------------
depArr = depLine.split('.')
for depSub in depArr:
if len(depSub) > 0 and depSub[0].isupper():
lastCap = count
count += 1
#print 'DEP ARR:', depArr[-1], depArr[-1].isupper()
#---------------
    # If we have a two-level dependency (i.e. 'goog.math') then
# we just keep the entire line.
#
# We can filter any unneeded things manually.
#---------------
if len(depArr) == 2:
filteredDepLine = depLine
#---------------
# If there are no capital letters in the dependency line, we filter
# to the second to last property
#---------------
elif lastCap == -1:
filteredDepLine = depLine.rpartition('.')[0]
#---------------
# ALL caps
#---------------
elif len(depArr) > 1 and depArr[-1].isupper():
filteredDepLine = depLine.rpartition('.')[0]
#---------------
    # Otherwise we create a new dependency based on the last capital letter.
# For instance, if we have 'nrg.ui.Component.getElement' we crop this to
# 'nrg.ui.Component'
#---------------
else:
filteredDepLine = ''
count = 0;
for depSub in depArr:
filteredDepLine += depSub + '.'
#print filteredDepLine
if count == lastCap:
filteredDepLine = filteredDepLine[:-1]
break
count += 1
#print '\ndepLine: ', depLine
#print lastCap
#print 'depArr:', depArr
#print 'filtered:', filteredDepLine
return filteredDepLine
def stripRequireLine(line):
"""
@type line: string
@param line: The line to strip
@rtype: string, string
@return: The stripped line, the store key for the reqLine
"""
reqLine = line.strip(REQUIRE_PREFIX).\
replace(')', '').\
replace("'",'').\
replace('"', '').\
replace(';', '').\
replace('//', '').strip()
return reqLine.split('.')[0] + '.', reqLine
def getProvides(fileLines):
"""
@type fileLines: array.<string>
@param fileLines: the filelines to parse
@rtype: array.<string>
@return: the provides
"""
provides = []
for line in fileLines:
if PROVIDE_PREFIX in line:
prov = line.split(PROVIDE_PREFIX + '\'')[1]
prov = prov.replace(' ' , '').split('\'')[0]
#print 'PROV', prov
if not prov in provides:
provides.append(prov.strip())
return provides
def stripDepsPrefixLine(line, depsPrefix):
"""
@type line: string
@param line: The line to strip
@rtype: string, string
@return: The stripped line, the store key for the reqLine
"""
skip = False
line = line.strip().strip(';').strip('\n')
savers = {}
#---------------
# Skippables
# ---------------
for skipSubstr in SKIP_SUBSTRS:
if skipSubstr in line:
skip = True
#---------------
# Savers
#---------------
for saver in SAVE_FOR_LATERS:
if saver in line:
savers[saver] = line;
skip = True;
#---------------
# Now we need to break apart the lines
#---------------
foundArr = depsPrefix + line.split(depsPrefix)[1]
foundArr = foundArr.rsplit(" ")[0].rsplit('.')
if not skip:
return foundArr[0] + '.', line
else:
return None, None
def tallyDependencies(foundDeps):
"""
@type foundDeps: dict
@param foundDeps: The found dependency dictionary
"""
talliedDeps = {}
#---------------
    # Loop through every found dependency line
#---------------
for dep, item in foundDeps.iteritems():
for line in item:
#print '\n\nline:', line
#---------------
# Split line by dep
#---------------
depSplit = line
if dep in depSplit:
depSplit = depSplit.split(dep)[1]
#---------------
# Split line by left splitters
#---------------
for splitter in LEFT_SPLITTERS:
if splitter in depSplit:
depSplit = depSplit.split(splitter)[0]
#---------------
# Split line by EventType
#---------------
if 'EventType' in depSplit:
depSplit = depSplit.split('.EventType')[0]
#---------------
            # Skip all that have nothing after the split
#---------------
if (len(depSplit) == 0):
continue
#---------------
# filtered the last capital
#---------------
key = filterLastCapital(dep + depSplit)
#
# tally the dep
#
if not talliedDeps.has_key(key):
talliedDeps[key] = 0
talliedDeps[key] += 1
return talliedDeps
def getRawDependency(line):
"""
@type line: string
@param line: The line to get the raw dependencies from
@rtype: ?string, ?string
@return: A dictionary of the raw dependencies
"""
#---------------
# store 'goog.require(' lines
#---------------
if REQUIRE_PREFIX in line:
return stripRequireLine(line)
#---------------
# find a line with a DEPS_PREFIX (see above) in it
#---------------
for depsPrefix in DEPS_PREFIXES:
if depsPrefix in line:
depsLineKey, depsLine = stripDepsPrefixLine(line, depsPrefix)
if depsLineKey != None:
return depsLineKey, depsLine
return None, None
def getDependenciesByRoot(talliedDeps, skippables = []):
"""
@type talliedDeps: array.<string> | dict.<string, string>
    @param talliedDeps: The tallied dependencies
@rtype: dict
@return: A dictionary of the dependencies by root.
"""
depsByRoot = {}
for dep in talliedDeps:
#---------------
# skip anything ending with a period
#---------------
if dep.endswith('.'): continue
#---------------
# Derive the root dependency key
#---------------
depPrefix = dep.split('.')[0] + '.'
if not depsByRoot.has_key(depPrefix):
depsByRoot[depPrefix] = []
#---------------
# Derive the root dependency key
#---------------
isSkipper = False
for skippable in skippables:
if skippable == dep.strip():
isSkipper = True
isGoogSkipper = False
for skip in GOOG_SKIPPERS:
if skip in dep:
isGoogSkipper = True
#print isProvider, dep
if isSkipper or isGoogSkipper:
continue
else:
depsByRoot[depPrefix].append(dep)
return depsByRoot
def getRawDependencies(fileLines):
"""
@type fileLines: array.<string>
@param fileLines: the filelines to parse
@rtype: dict
@return: A dictionary of the raw dependencies
"""
rawDeps = dict((key, []) for key in DEPS_PREFIXES)
for line in fileLines:
key, val = getRawDependency(line)
if key:
rawDeps[key].append(val)
return rawDeps
def getDependencies(filename):
"""
@type filename: string
@param filename: the file to parse
"""
fileLines = [line for line in open(filename)]
#---------------
# Get the provides
#---------------
provides = getProvides(fileLines)
#print "PROVIDES"
#PPRINT.pprint(provides)
#---------------
    # Get the raw dependencies dictionary
#---------------
rawDeps = getRawDependencies(fileLines)
PPRINT.pprint(rawDeps)
#---------------
# Tally the dependencies from the raw dependencies
#---------------
talliedDeps = tallyDependencies(rawDeps)
#---------------
# Filter the tallied dependencies
#---------------
depsByRoot = getDependenciesByRoot(talliedDeps, provides)
#---------------
# PRINT!!
#---------------
depLines = []
for depPrefix in DEPS_PREFIXES:
if depsByRoot.has_key(depPrefix):
depLines.append('\n// ' + depPrefix.split('.')[0] + '\n')
for req in depsByRoot[depPrefix]:
depLines.append(REQUIRE_PREFIX + '\'' + req + '\');\n')
continue
depLines.append('\n//-----------\n\n')
return depLines
def moveProvide(filename):
    """
    @type filename: string
    @param filename: the file to parse
    """
    #---------------
    # Get the dependencies as lines
    #---------------
    depsAsLines = getDependencies(filename)
    for l in depsAsLines: print l
    #---------------
    # Add dependencies to the top of the file's lines
    #---------------
    fileLines = depsAsLines + [line for line in open(filename)]
    #---------------
    # Re-write the file
    #---------------
    _file = open(filename, "w")
    for line in fileLines:
        _file.write(line)
    _file.close()
def openFile(filename):
"""
@type filename: string
@param filename: the file to parse
"""
os.system("open " + filename)
def main():
#---------------
# Walk through LOAD_PATH, modifying each file
#---------------
files = []
for (dirpath, dirnames, filenames) in os.walk(LOAD_PATH):
for f in filenames:
if dirpath == LOAD_PATH and not f.startswith('.'):
filename = os.path.join(dirpath, f)
files.append(filename)
                # Prepend the computed dependency lines and re-write the file.
                moveProvide(filename)
#---------------
# Open each file using the default system editor
#---------------
for f in files:
openFile(f)
if __name__ == "__main__":
main()
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.utils.py3 import httplib
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import ComputeFileFixtures
from libcloud.compute.types import Provider
from libcloud.compute.types import NodeState
from libcloud.compute.providers import get_driver
from libcloud.test import unittest
from libcloud.test.secrets import PROFIT_BRICKS_PARAMS
class ProfitBricksTests(unittest.TestCase):
def setUp(self):
ProfitBricks = get_driver(Provider.PROFIT_BRICKS)
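        # Swap in the mock HTTP connection so every API call is answered from local XML fixtures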
ProfitBricks.connectionCls.conn_classes = (None, ProfitBricksMockHttp)
self.driver = ProfitBricks(*PROFIT_BRICKS_PARAMS)
''' Server Function Tests
'''
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes), 3)
node = nodes[0]
self.assertEqual(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246")
self.assertEqual(node.name, "server001")
self.assertEqual(node.state, NodeState.RUNNING)
self.assertEqual(node.public_ips, ['162.254.25.197'])
self.assertEqual(node.private_ips, ['10.10.108.12', '10.13.198.11'])
self.assertEqual(node.extra['datacenter_id'], "e1e8ec0d-b47f-4d39-a91b-6e885483c899")
self.assertEqual(node.extra['datacenter_version'], "5")
self.assertEqual(node.extra['provisioning_state'], NodeState.RUNNING)
self.assertEqual(node.extra['creation_time'], "2014-07-14T20:52:20.839Z")
self.assertEqual(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z")
self.assertEqual(node.extra['os_type'], "LINUX")
self.assertEqual(node.extra['availability_zone'], "ZONE_1")
    def test_create_node(self):
image = type('NodeImage', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9",
name="Debian-7-server-2014-07-01"))
size = type('NodeSize', (object,),
dict(id="2",
name="Small Instance",
ram=2048,
disk=50,
extra={'cores': 1}))
node = self.driver.create_node(name="SPC-Server",
image=image,
size=size)
self.assertEqual(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750")
def test_reboot_node(self):
node = type('Node', (object,),
dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
reboot = self.driver.reboot_node(node=node)
self.assertTrue(reboot)
def test_ex_stop_node(self):
node = type('Node', (object,),
dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
stop = self.driver.ex_stop_node(node=node)
self.assertTrue(stop)
def test_ex_start_node(self):
node = type('Node', (object,),
dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
start = self.driver.ex_start_node(node=node)
self.assertTrue(start)
def test_destroy_node(self):
node = type('Node', (object,),
dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
destroy = self.driver.destroy_node(node=node)
self.assertTrue(destroy)
def test_ex_update_node(self):
node = type('Node', (object,),
dict(id="c8e57d7b-e731-46ad-a913-1828c0562246"))
zone = type('ExProfitBricksAvailabilityZone', (object,),
dict(name="ZONE_2"))
update = self.driver.ex_update_node(node=node, ram=2048, cores=2, name="server002", availability_zone=zone)
self.assertTrue(update)
''' Volume Function Tests
'''
def test_list_volumes(self):
volumes = self.driver.list_volumes()
self.assertEqual(len(volumes), 4)
volume = volumes[0]
self.assertEqual(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232")
self.assertEqual(volume.name, "storage001")
self.assertEqual(volume.size, 50)
self.assertEqual(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29")
self.assertEqual(volume.extra['provisioning_state'], NodeState.RUNNING)
self.assertEqual(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z")
self.assertEqual(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z")
self.assertEqual(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9")
self.assertEqual(volume.extra['image_name'], "CentOS-6-server-2014-07-01")
self.assertEqual(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292")
def test_create_volume(self):
datacenter = type('Datacenter', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
image = type('NodeImage', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
create = self.driver.create_volume(name="StackPointCloudStorage001",
size=50,
ex_datacenter=datacenter,
ex_image=image)
self.assertTrue(create)
def test_attach_volume_general(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
node = type('Node', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
attach = self.driver.attach_volume(node=node,
volume=volume,
device=None, ex_bus_type=None)
self.assertTrue(attach)
def test_attach_volume_device_defined(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
node = type('Node', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
attach = self.driver.attach_volume(node=node, volume=volume, device=1, ex_bus_type=None)
self.assertTrue(attach)
def test_attach_volume_bus_type_defined(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
node = type('Node', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
attach = self.driver.attach_volume(node=node,
volume=volume,
device=None,
ex_bus_type="IDE")
self.assertTrue(attach)
def test_attach_volume_options_defined(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
node = type('Node', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
attach = self.driver.attach_volume(node=node, volume=volume,
device=1, ex_bus_type="IDE")
self.assertTrue(attach)
def test_detach_volume(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476",
extra={'server_id': "cd59b162-0289-11e4-9f63-52540066fee9"}
))
        detach = self.driver.detach_volume(volume=volume)
        self.assertTrue(detach)
def test_destroy_volume(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
destroy = self.driver.destroy_volume(volume=volume)
self.assertTrue(destroy)
def test_update_volume(self):
volume = type('StorageVolume', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
        update = self.driver.ex_update_volume(volume=volume)
        self.assertTrue(update)
def test_ex_describe_volume(self):
describe = self.driver.ex_describe_volume(volume_id="8669a69f-2274-4520-b51e-dbdf3986a476")
self.assertEqual(describe.id, "00d0b9e7-e016-456f-85a0-517aa9a34bf5")
self.assertEqual(describe.size, 50)
self.assertEqual(describe.name, "StackPointCloud-Volume")
self.assertEqual(describe.extra['provisioning_state'], NodeState.RUNNING)
''' Image Function Tests
'''
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images), 3)
image = images[0]
self.assertEqual(image.extra['cpu_hotpluggable'], "false")
self.assertEqual(image.id, "03b6c3e7-f2ad-11e3-a036-52540066fee9")
self.assertEqual(image.name, "windows-2012-r2-server-2014-06")
self.assertEqual(image.extra['image_size'], "11264")
self.assertEqual(image.extra['image_type'], "HDD")
self.assertEqual(image.extra['memory_hotpluggable'], "false")
self.assertEqual(image.extra['os_type'], "WINDOWS")
self.assertEqual(image.extra['public'], "true")
self.assertEqual(image.extra['location'], None)
self.assertEqual(image.extra['writeable'], "true")
''' Datacenter Function Tests
'''
def test_ex_create_datacenter(self):
datacenter = self.driver.ex_create_datacenter(name="StackPointCloud",
location="us/la")
self.assertEqual(datacenter.id, '0c793dd1-d4cd-4141-86f3-8b1a24b2d604')
self.assertEqual(datacenter.extra['location'], 'us/las')
self.assertEqual(datacenter.version, '1')
def test_ex_destroy_datacenter(self):
datacenter = type('Datacenter', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
destroy = self.driver.ex_destroy_datacenter(datacenter=datacenter)
self.assertTrue(destroy)
def test_ex_describe_datacenter(self):
datacenter = type('Datacenter', (object,),
dict(id="d96dfafc-9a8c-4c0e-8a0c-857a15db572d"))
describe = self.driver.ex_describe_datacenter(datacenter_id=datacenter.id)
self.assertEqual(describe.id, 'a3e6f83a-8982-4d6a-aebc-60baf5755ede')
self.assertEqual(describe.name, 'StackPointCloud')
self.assertEqual(describe.version, '1')
self.assertEqual(describe.extra['location'], 'us/las')
self.assertEqual(describe.extra['provisioning_state'], NodeState.RUNNING)
def test_ex_clear_datacenter(self):
datacenter = type('Datacenter', (object,),
dict(id="8669a69f-2274-4520-b51e-dbdf3986a476"))
clear = self.driver.ex_clear_datacenter(datacenter=datacenter)
self.assertTrue(clear)
def test_ex_list_datacenters(self):
datacenters = self.driver.ex_list_datacenters()
self.assertEqual(len(datacenters), 2)
dc1 = datacenters[0]
self.assertEqual(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede")
self.assertEqual(dc1.name, "StackPointCloud")
self.assertEqual(dc1.version, "1")
def test_ex_rename_datacenter(self):
datacenter = type('Datacenter', (object,),
dict(id="d96dfafc-9a8c-4c0e-8a0c-857a15db572d"))
update = self.driver.ex_rename_datacenter(datacenter=datacenter,
name="StackPointCloud")
self.assertTrue(update)
def test_list_locations(self):
locations = self.driver.list_locations()
self.assertEqual(len(locations), 3)
locationNamesResult = sorted(list(a.name for a in locations))
locationNamesExpected = ['de/fkb', 'de/fra', 'us/las']
self.assertEqual(locationNamesResult, locationNamesExpected)
''' Availability Zone Tests
'''
def test_ex_list_availability_zones(self):
zones = self.driver.ex_list_availability_zones()
self.assertEqual(len(zones), 3)
zoneNamesResult = sorted(list(a.name for a in zones))
zoneNamesExpected = ['AUTO', 'ZONE_1', 'ZONE_2']
self.assertEqual(zoneNamesResult, zoneNamesExpected)
''' Interface Tests
'''
def test_ex_list_interfaces(self):
interfaces = self.driver.ex_list_network_interfaces()
self.assertEqual(len(interfaces), 3)
interface = interfaces[0]
self.assertEqual(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727")
self.assertEqual(interface.name, "StackPointCloud")
self.assertEqual(interface.state, NodeState.RUNNING)
self.assertEqual(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116")
self.assertEqual(interface.extra['lan_id'], '3')
self.assertEqual(interface.extra['internet_access'], 'false')
self.assertEqual(interface.extra['mac_address'], "02:01:40:47:90:04")
self.assertEqual(interface.extra['dhcp_active'], "true")
self.assertEqual(interface.extra['gateway_ip'], None)
self.assertEqual(interface.extra['ips'], ['10.14.96.11', '162.254.26.14', '162.254.26.15'])
def test_ex_create_network_interface(self):
node = type('Node', (object,),
dict(id="cd59b162-0289-11e4-9f63-52540066fee9"))
interface = self.driver.ex_create_network_interface(node=node)
self.assertEqual(interface.id, '6b38a4f3-b851-4614-9e3a-5ddff4727727')
def test_ex_destroy_network_interface(self):
network_interface = type('ProfitBricksNetworkInterface', (object,),
dict(
id="cd59b162-0289-11e4-9f63-52540066fee9"))
destroy = self.driver.ex_destroy_network_interface(
network_interface=network_interface)
self.assertTrue(destroy)
def test_ex_update_network_interface(self):
network_interface = type('ProfitBricksNetworkInterface', (object,),
dict(
id="cd59b162-0289-11e4-9f63-52540066fee9"))
create = self.driver.ex_update_network_interface(
network_interface=network_interface)
self.assertTrue(create)
def test_ex_describe_network_interface(self):
network_interface = type('ProfitBricksNetworkInterface', (object,),
dict(
id="cd59b162-0289-11e4-9f63-52540066fee9"))
describe = self.driver.ex_describe_network_interface(network_interface=network_interface)
self.assertEqual(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3")
self.assertEqual(describe.name, None)
self.assertEqual(describe.state, NodeState.RUNNING)
self.assertEqual(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee")
self.assertEqual(describe.extra['datacenter_version'], "6")
self.assertEqual(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408")
self.assertEqual(describe.extra['lan_id'], "1")
self.assertEqual(describe.extra['internet_access'], "false")
self.assertEqual(describe.extra['mac_address'], "02:01:96:d7:60:e0")
self.assertEqual(describe.extra['dhcp_active'], "true")
self.assertEqual(describe.extra['gateway_ip'], None)
self.assertEqual(describe.extra['ips'], ['10.10.38.12'])
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes), 7)
class ProfitBricksMockHttp(MockHttp):
fixtures = ComputeFileFixtures('profitbricks')
def _1_3_clearDataCenter(self, method, url, body, headers):
body = self.fixtures.load('ex_clear_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_createDataCenter(self, method, url, body, headers):
body = self.fixtures.load('ex_create_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_deleteDataCenter(self, method, url, body, headers):
body = self.fixtures.load('ex_destroy_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getDataCenter(self, method, url, body, headers):
body = self.fixtures.load('ex_describe_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getAllDataCenters(self, method, url, body, headers):
body = self.fixtures.load('ex_list_datacenters.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_updateDataCenter(self, method, url, body, headers):
body = self.fixtures.load('ex_update_datacenter.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getAllImages(self, method, url, body, headers):
body = self.fixtures.load('list_images.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getAllServers(self, method, url, body, headers):
body = self.fixtures.load('list_nodes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_resetServer(self, method, url, body, headers):
body = self.fixtures.load('reboot_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_stopServer(self, method, url, body, headers):
body = self.fixtures.load('ex_stop_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_startServer(self, method, url, body, headers):
body = self.fixtures.load('ex_start_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_deleteServer(self, method, url, body, headers):
body = self.fixtures.load('destroy_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getAllStorages(self, method, url, body, headers):
body = self.fixtures.load('list_volumes.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_createStorage(self, method, url, body, headers):
body = self.fixtures.load('create_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_connectStorageToServer(self, method, url, body, headers):
body = self.fixtures.load('attach_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_disconnectStorageFromServer(self, method, url, body, headers):
body = self.fixtures.load('detach_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_deleteStorage(self, method, url, body, headers):
body = self.fixtures.load('destroy_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_updateStorage(self, method, url, body, headers):
body = self.fixtures.load('ex_update_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_updateServer(self, method, url, body, headers):
body = self.fixtures.load('ex_update_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getNic(self, method, url, body, headers):
body = self.fixtures.load('ex_describe_network_interface.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getAllNic(self, method, url, body, headers):
body = self.fixtures.load('ex_list_network_interfaces.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_createNic(self, method, url, body, headers):
body = self.fixtures.load('ex_list_network_interfaces.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_deleteNic(self, method, url, body, headers):
body = self.fixtures.load('ex_destroy_network_interface.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_updateNic(self, method, url, body, headers):
body = self.fixtures.load('ex_update_network_interface.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getServer(self, method, url, body, headers):
body = self.fixtures.load('ex_describe_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_getStorage(self, method, url, body, headers):
body = self.fixtures.load('ex_describe_volume.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _1_3_createServer(self, method, url, body, headers):
body = self.fixtures.load('create_node.xml')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
|
|
import bpy
from math import floor, ceil
from mathutils import Vector, Quaternion
from mathutils.geometry import distance_point_to_plane
import sprytile_utils
import sprytile_uv
import sprytile_preview
import sprytile_modal
class ToolBuild:
modal = None
left_down = False
start_coord = None
can_build = False
def __init__(self, modal, rx_source):
self.modal = modal
rx_source.filter(
lambda modal_evt: modal_evt.paint_mode == 'MAKE_FACE'
).subscribe(
on_next=lambda modal_evt: self.process_tool(modal_evt),
on_error=lambda err: self.handle_error(err),
on_completed=lambda: self.handle_complete()
)
def process_tool(self, modal_evt):
if self.modal.rx_data is None:
return
# get the context arguments
context = self.modal.rx_data.context
scene = context.scene
ray_origin = self.modal.rx_data.ray_origin
ray_vector = self.modal.rx_data.ray_vector
if modal_evt.left_down:
is_start = self.left_down is False
self.left_down = True
self.execute(context, scene, ray_origin, ray_vector, is_start)
elif self.left_down:
self.left_down = False
self.start_coord = None
# self.modal.virtual_cursor.clear()
bpy.ops.ed.undo_push()
#if modal_evt.build_preview:
# self.build_preview(context, scene, ray_origin, ray_vector)
def execute(self, context, scene, ray_origin, ray_vector, is_start):
data = scene.sprytile_data
grid = sprytile_utils.get_grid(context, context.object.sprytile_gridid)
tile_xy = (grid.tile_selection[0], grid.tile_selection[1])
# Get vectors for grid, without rotation
up_vector, right_vector, plane_normal = sprytile_utils.get_current_grid_vectors(
scene,
with_rotation=False
)
# If building on decal layer, modify plane normal to the one under mouse
if data.work_layer == 'DECAL_1' and data.lock_normal is False:
location, hit_normal, face_index, distance = self.modal.raycast_object(context.object,
ray_origin,
ray_vector)
if hit_normal is not None:
                face_up, face_right = sprytile_modal.VIEW3D_OP_SprytileModalTool.get_face_up_vector(context, face_index, 0.4, bias_right=True)
if face_up is not None and face_right is not None:
plane_normal = hit_normal
up_vector = face_up
right_vector = face_right
# Rotate the vectors
rotation = Quaternion(plane_normal, data.mesh_rotate)
up_vector = rotation @ up_vector
right_vector = rotation @ right_vector
# raycast grid to get the grid position under the mouse
grid_coord, grid_right, grid_up, plane_pos = sprytile_utils.raycast_grid(
scene, context,
up_vector, right_vector, plane_normal,
ray_origin, ray_vector,
as_coord=True
)
# Record starting grid position of stroke
if is_start:
self.start_coord = grid_coord
        # Not starting a stroke; filter out positions where a tile cannot be built
elif self.start_coord is not None:
start_offset = (grid_coord[0] - self.start_coord[0],
grid_coord[1] - self.start_coord[1])
coord_mod = (start_offset[0] % grid.tile_selection[2],
start_offset[1] % grid.tile_selection[3])
            # Not at an exact multiple of the tile selection size (using start_coord as the origin)
if coord_mod[0] > 0 or coord_mod[1] > 0:
# Try to snap grid_coord
tolerance_min = (floor(grid.tile_selection[2] * 0.25),
floor(grid.tile_selection[3] * 0.25))
tolerance_max = (grid.tile_selection[2] - tolerance_min[0],
grid.tile_selection[3] - tolerance_min[1])
allow_snap_x = tolerance_min[0] <= coord_mod[0] <= tolerance_max[0]
allow_snap_y = tolerance_min[1] <= coord_mod[1] <= tolerance_max[1]
# If neither x or y can be snapped, return
if not allow_snap_x and not allow_snap_y:
return
coord_frac = [start_offset[0] / grid.tile_selection[2],
start_offset[1] / grid.tile_selection[3]]
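                # Round the fractional offset to the nearest whole tile so the new face snaps to the selection grid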
if coord_mod[0] > (grid.tile_selection[2] / 2.0):
coord_frac[0] = ceil(coord_frac[0])
else:
coord_frac[0] = floor(coord_frac[0])
if coord_mod[1] > (grid.tile_selection[3] / 2.0):
coord_frac[1] = ceil(coord_frac[1])
else:
coord_frac[1] = floor(coord_frac[1])
grid_coord = (self.start_coord[0] + (coord_frac[0] * grid.tile_selection[2]),
self.start_coord[1] + (coord_frac[1] * grid.tile_selection[3]))
# Get the area to build
offset_tile_id, offset_grid, coord_min, coord_max = sprytile_utils.get_grid_area(
grid.tile_selection[2],
grid.tile_selection[3],
data.uv_flip_x, data.uv_flip_y
)
# Check if joining multi tile faces
grid_no_spacing = sprytile_utils.grid_no_spacing(grid)
is_single_pixel = sprytile_utils.grid_is_single_pixel(grid)
do_join = is_single_pixel
if do_join is False:
do_join = grid_no_spacing and data.auto_join
# 1x1 tile selections cannot be auto joined
tile_area = grid.tile_selection[2] * grid.tile_selection[3]
if do_join and tile_area == 1:
do_join = False
# Store vertices of constructed faces for cursor flow
faces_verts = []
require_base_layer = data.work_layer != 'BASE'
# Get the work layer filter, based on layer settings
work_layer_mask = sprytile_utils.get_work_layer_data(data)
# Build mode with join multi
if do_join:
origin_coord = ((grid_coord[0] + coord_min[0]),
(grid_coord[1] + coord_min[1]))
size_x = (coord_max[0] - coord_min[0]) + 1
size_y = (coord_max[1] - coord_min[1]) + 1
tile_origin = (grid.tile_selection[0],
grid.tile_selection[1])
tile_coord = (tile_origin[0] + grid.tile_selection[2],
tile_origin[1] + grid.tile_selection[3])
face_index = self.modal.construct_face(context, origin_coord, [size_x, size_y],
tile_coord, tile_origin,
grid_up, grid_right,
up_vector, right_vector, plane_normal,
require_base_layer=require_base_layer,
work_layer_mask=work_layer_mask)
if face_index is not None:
face_verts = self.modal.face_to_world_verts(context, face_index)
faces_verts.extend(face_verts)
# Build mode without auto join, try operation on each build coordinate
else:
virtual_cursor = scene.cursor.location + \
(grid_coord[0] * grid_right) + \
(grid_coord[1] * grid_up)
self.modal.add_virtual_cursor(virtual_cursor)
# Loop through grid coordinates to build
for i in range(len(offset_grid)):
grid_offset = offset_grid[i]
tile_offset = offset_tile_id[i]
grid_pos = [grid_coord[0] + grid_offset[0], grid_coord[1] + grid_offset[1]]
tile_pos = [tile_xy[0] + tile_offset[0], tile_xy[1] + tile_offset[1]]
face_index = self.modal.construct_face(context, grid_pos, [1, 1],
tile_pos, tile_xy,
grid_up, grid_right,
up_vector, right_vector, plane_normal,
require_base_layer=require_base_layer,
work_layer_mask=work_layer_mask)
if face_index is not None:
face_verts = self.modal.face_to_world_verts(context, face_index)
faces_verts.extend(face_verts)
if plane_pos is not None:
self.modal.add_virtual_cursor(plane_pos)
if data.cursor_flow and data.work_layer == "BASE" and len(faces_verts) > 0:
# Find which vertex the cursor should flow to
new_cursor_pos = self.modal.flow_cursor_verts(context, faces_verts, plane_pos)
if new_cursor_pos is not None:
# Not base layer, move position back by offset
if data.work_layer != 'BASE':
new_cursor_pos -= plane_normal * data.mesh_decal_offset
# Calculate the world position of old start_coord
old_start_pos = scene.cursor.location + (self.start_coord[0] * grid_right) + (self.start_coord[1] * grid_up)
# find the offset of the old start position from the new cursor position
new_start_offset = old_start_pos - new_cursor_pos
# get how much the grid x/y vectors need to scale by to normalize
scale_right = 1.0 / grid_right.magnitude
scale_up = 1.0 / grid_up.magnitude
                # scale the offset by the grid x/y magnitudes so a normalized dot product
                # gives the grid coordinates of the start position relative to the new cursor
new_start_coord = Vector((
(new_start_offset * scale_right).dot(grid_right.normalized()),
(new_start_offset * scale_up).dot(grid_up.normalized())
))
# Record the new offset starting coord,
# for the nice painting snap
self.start_coord = new_start_coord
scene.cursor.location = new_cursor_pos
@staticmethod
def build_preview(context, scene, ray_origin, ray_vector):
obj = context.object
data = scene.sprytile_data
grid_id = obj.sprytile_gridid
target_grid = sprytile_utils.get_grid(context, grid_id)
if target_grid is None:
return
# Reset can build flag
ToolBuild.can_build = False
target_img = sprytile_utils.get_grid_texture(obj, target_grid)
if target_img is None:
sprytile_preview.clear_preview_data()
return
# If building on base layer, get from current virtual grid
up_vector, right_vector, plane_normal = sprytile_utils.get_current_grid_vectors(scene, False)
# Building on decal layer, get from face under mouse
if data.work_layer == 'DECAL_1' and data.lock_normal is False:
location, hit_normal, face_index, distance = sprytile_modal.VIEW3D_OP_SprytileModalTool.raycast_object(context.object,
ray_origin,
ray_vector)
# For decals, if not hitting the object don't draw preview
if hit_normal is None:
sprytile_preview.clear_preview_data()
return
# Do a coplanar check between hit location and cursor
grid_origin = scene.cursor.location.copy()
grid_origin += hit_normal * data.mesh_decal_offset
check_coplanar = distance_point_to_plane(location, grid_origin, hit_normal)
check_coplanar = abs(check_coplanar) < 0.05
if check_coplanar is False:
sprytile_preview.clear_preview_data()
return
            face_up, face_right = sprytile_modal.VIEW3D_OP_SprytileModalTool.get_face_up_vector(context, face_index, 0.4, bias_right=True)
if face_up is not None and face_right is not None:
plane_normal = hit_normal
up_vector = face_up
right_vector = face_right
else:
sprytile_preview.clear_preview_data()
return
rotation = Quaternion(plane_normal, data.mesh_rotate)
up_vector = rotation @ up_vector
right_vector = rotation @ right_vector
# Raycast to the virtual grid
face_position, x_vector, y_vector, plane_cursor = sprytile_utils.raycast_grid(
scene, context,
up_vector, right_vector, plane_normal,
ray_origin, ray_vector
)
if face_position is None:
sprytile_preview.clear_preview_data()
return
# Passed can build checks, set flag to true
ToolBuild.can_build = True
offset_tile_id, offset_grid, coord_min, coord_max = sprytile_utils.get_grid_area(
target_grid.tile_selection[2],
target_grid.tile_selection[3],
data.uv_flip_x,
data.uv_flip_y)
grid_no_spacing = sprytile_utils.grid_no_spacing(target_grid)
# No spacing in grid, automatically join the preview together
if grid_no_spacing:
origin_coord = face_position + coord_min[0] * x_vector + coord_min[1] * y_vector
size_x = (coord_max[0] - coord_min[0]) + 1
size_y = (coord_max[1] - coord_min[1]) + 1
size_x *= target_grid.grid[0]
size_y *= target_grid.grid[1]
x_vector *= size_x / target_grid.grid[0]
y_vector *= size_y / target_grid.grid[1]
preview_verts = sprytile_utils.get_build_vertices(origin_coord,
x_vector, y_vector,
up_vector, right_vector)
vtx_center = Vector((0, 0, 0))
for vtx in preview_verts:
vtx_center += vtx
vtx_center /= len(preview_verts)
origin_xy = (target_grid.tile_selection[0],
target_grid.tile_selection[1])
preview_uvs = sprytile_uv.get_uv_pos_size(data, target_img.size, target_grid,
origin_xy, size_x, size_y,
up_vector, right_vector,
preview_verts, vtx_center)
sprytile_preview.set_preview_data(preview_verts, preview_uvs)
return
# Spaced grids need to be tiled
preview_verts = []
preview_uvs = []
for i in range(len(offset_tile_id)):
grid_offset = offset_grid[i]
tile_offset = offset_tile_id[i]
x_offset = x_vector * grid_offset[0]
y_offset = y_vector * grid_offset[1]
coord_position = face_position + x_offset + y_offset
coord_verts = sprytile_utils.get_build_vertices(coord_position, x_vector, y_vector,
up_vector, right_vector)
# Get the center of the preview verts
vtx_center = Vector((0, 0, 0))
for vtx in coord_verts:
vtx_center += vtx
vtx_center /= len(coord_verts)
# Calculate the tile with offset
tile_xy = (target_grid.tile_selection[0] + tile_offset[0],
target_grid.tile_selection[1] + tile_offset[1])
coord_uvs = sprytile_uv.get_uv_positions(data, target_img.size, target_grid,
up_vector, right_vector, tile_xy,
coord_verts, vtx_center)
preview_verts.extend(coord_verts)
preview_uvs.extend(coord_uvs)
sprytile_preview.set_preview_data(preview_verts, preview_uvs)
def handle_error(self, err):
print("Error in build mode: {0}".format(err))
pass
def handle_complete(self):
pass
def register():
pass
def unregister():
pass
if __name__ == '__main__':
register()
|
|
import datetime
import mongoengine as mongo
import urllib2
import redis
from django.conf import settings
from apps.social.models import MSharedStory
from apps.profile.models import Profile
from apps.statistics.rstats import RStats, round_time
from utils import json_functions as json
from utils import db_functions
class MStatistics(mongo.Document):
key = mongo.StringField(unique=True)
value = mongo.DynamicField()
meta = {
'collection': 'statistics',
'allow_inheritance': False,
'indexes': ['key'],
}
def __unicode__(self):
return "%s: %s" % (self.key, self.value)
@classmethod
def get(cls, key, default=None):
obj = cls.objects.filter(key=key).first()
if not obj:
return default
return obj.value
@classmethod
def set(cls, key, value):
obj, _ = cls.objects.get_or_create(key=key)
obj.value = value
obj.save()
@classmethod
def all(cls):
stats = cls.objects.all()
values = dict([(stat.key, stat.value) for stat in stats])
for key, value in values.items():
if key in ('avg_time_taken', 'sites_loaded', 'stories_shared'):
values[key] = json.decode(value)
elif key in ('feeds_fetched', 'premium_users', 'standard_users', 'latest_sites_loaded',
'max_sites_loaded', 'max_stories_shared'):
values[key] = int(value)
elif key in ('latest_avg_time_taken', 'max_avg_time_taken'):
values[key] = float(value)
values['total_sites_loaded'] = sum(values['sites_loaded']) if 'sites_loaded' in values else 0
values['total_stories_shared'] = sum(values['stories_shared']) if 'stories_shared' in values else 0
return values
@classmethod
def collect_statistics(cls):
now = datetime.datetime.now()
cls.collect_statistics_premium_users()
print "Premiums: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_standard_users()
print "Standard users: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_sites_loaded()
print "Sites loaded: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_stories_shared()
print "Stories shared: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_for_db()
print "DB Stats: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_feeds_fetched()
print "Feeds Fetched: %s" % (datetime.datetime.now() - now)
@classmethod
def collect_statistics_feeds_fetched(cls):
feeds_fetched = RStats.count('feed_fetch', hours=24)
cls.objects(key='feeds_fetched').update_one(upsert=True,
set__key='feeds_fetched',
set__value=feeds_fetched)
return feeds_fetched
@classmethod
def collect_statistics_premium_users(cls):
last_day = datetime.datetime.now() - datetime.timedelta(hours=24)
premium_users = Profile.objects.filter(last_seen_on__gte=last_day, is_premium=True).count()
cls.objects(key='premium_users').update_one(upsert=True, set__key='premium_users', set__value=premium_users)
return premium_users
@classmethod
def collect_statistics_standard_users(cls):
last_day = datetime.datetime.now() - datetime.timedelta(hours=24)
standard_users = Profile.objects.filter(last_seen_on__gte=last_day, is_premium=False).count()
cls.objects(key='standard_users').update_one(upsert=True, set__key='standard_users', set__value=standard_users)
return standard_users
@classmethod
def collect_statistics_sites_loaded(cls):
now = round_time(datetime.datetime.now(), round_to=60)
sites_loaded = []
avg_time_taken = []
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour+1)
pipe = r.pipeline()
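            # Each minute bucket has a ':s' (count) key and an ':a' (timing) key; batch all 60 reads into one pipeline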
for m in range(60):
minute = start_hours_ago + datetime.timedelta(minutes=m)
key = "%s:%s" % (RStats.stats_type('page_load'), minute.strftime('%s'))
pipe.get("%s:s" % key)
pipe.get("%s:a" % key)
times = pipe.execute()
counts = [int(c) for c in times[::2] if c]
avgs = [float(a) for a in times[1::2] if a]
if counts and avgs:
count = sum(counts)
avg = round(sum(avgs) / count, 3)
else:
count = 0
avg = 0
sites_loaded.append(count)
avg_time_taken.append(avg)
sites_loaded.reverse()
avg_time_taken.reverse()
values = (
('sites_loaded', json.encode(sites_loaded)),
('avg_time_taken', json.encode(avg_time_taken)),
('latest_sites_loaded', sites_loaded[-1]),
('latest_avg_time_taken', avg_time_taken[-1]),
('max_sites_loaded', max(sites_loaded)),
('max_avg_time_taken', max(1, max(avg_time_taken))),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
@classmethod
def collect_statistics_stories_shared(cls):
now = datetime.datetime.now()
stories_shared = []
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour)
end_hours_ago = now - datetime.timedelta(hours=hour+1)
shares = MSharedStory.objects.filter(
shared_date__lte=start_hours_ago,
shared_date__gte=end_hours_ago
).count()
stories_shared.append(shares)
stories_shared.reverse()
values = (
('stories_shared', json.encode(stories_shared)),
('latest_stories_shared', stories_shared[-1]),
('max_stories_shared', max(stories_shared)),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
@classmethod
def collect_statistics_for_db(cls):
lag = db_functions.mongo_max_replication_lag(settings.MONGODB)
cls.set('mongodb_replication_lag', lag)
now = round_time(datetime.datetime.now(), round_to=60)
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
db_times = {}
latest_db_times = {}
for db in ['sql', 'mongo', 'redis']:
db_times[db] = []
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour+1)
pipe = r.pipeline()
for m in range(60):
minute = start_hours_ago + datetime.timedelta(minutes=m)
key = "DB:%s:%s" % (db, minute.strftime('%s'))
pipe.get("%s:c" % key)
pipe.get("%s:t" % key)
times = pipe.execute()
counts = [int(c or 0) for c in times[::2]]
avgs = [float(a or 0) for a in times[1::2]]
if counts and avgs:
count = sum(counts)
avg = round(sum(avgs) / count, 3) if count else 0
else:
count = 0
avg = 0
if hour == 0:
latest_count = float(counts[-1]) if len(counts) else 0
latest_avg = float(avgs[-1]) if len(avgs) else 0
latest_db_times[db] = latest_avg / latest_count if latest_count else 0
db_times[db].append(avg)
db_times[db].reverse()
values = (
('avg_sql_times', json.encode(db_times['sql'])),
('avg_mongo_times', json.encode(db_times['mongo'])),
('avg_redis_times', json.encode(db_times['redis'])),
('latest_sql_avg', latest_db_times['sql']),
('latest_mongo_avg', latest_db_times['mongo']),
('latest_redis_avg', latest_db_times['redis']),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
class MFeedback(mongo.Document):
date = mongo.StringField()
summary = mongo.StringField()
subject = mongo.StringField()
url = mongo.StringField()
style = mongo.StringField()
order = mongo.IntField()
meta = {
'collection': 'feedback',
'allow_inheritance': False,
'indexes': ['style'],
'ordering': ['order'],
}
def __unicode__(self):
return "%s: (%s) %s" % (self.style, self.date, self.subject)
@classmethod
def collect_feedback(cls):
data = urllib2.urlopen('https://getsatisfaction.com/newsblur/topics.widget').read()
data = json.decode(data[1:-1])
i = 0
if len(data):
cls.objects.delete()
for feedback in data:
feedback['order'] = i
i += 1
for removal in ['about', 'less than']:
if removal in feedback['date']:
feedback['date'] = feedback['date'].replace(removal, '')
for feedback in data:
# Convert unicode to strings.
fb = dict([(str(k), v) for k, v in feedback.items()])
fb['url'] = fb['url'].replace('?utm_medium=widget&utm_source=widget_newsblur', "")
cls.objects.create(**fb)
@classmethod
def all(cls):
feedbacks = cls.objects.all()[:4]
return feedbacks
class MAnalyticsFetcher(mongo.Document):
date = mongo.DateTimeField(default=datetime.datetime.now)
feed_id = mongo.IntField()
feed_fetch = mongo.FloatField()
feed_process = mongo.FloatField()
page = mongo.FloatField()
icon = mongo.FloatField()
total = mongo.FloatField()
server = mongo.StringField()
feed_code = mongo.IntField()
meta = {
'db_alias': 'nbanalytics',
'collection': 'feed_fetches',
'allow_inheritance': False,
'indexes': ['date', 'feed_id', 'server', 'feed_code'],
'ordering': ['date'],
}
def __unicode__(self):
return "%s: %.4s+%.4s+%.4s+%.4s = %.4ss" % (self.feed_id, self.feed_fetch,
self.feed_process,
self.page,
self.icon,
self.total)
@classmethod
def add(cls, feed_id, feed_fetch, feed_process,
page, icon, total, feed_code):
server_name = settings.SERVER_NAME
if 'app' in server_name: return
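        # Timings are treated as cumulative here: subtract the preceding phase so each field keeps only its own duration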
if icon and page:
icon -= page
if page and feed_process:
page -= feed_process
elif page and feed_fetch:
page -= feed_fetch
if feed_process and feed_fetch:
feed_process -= feed_fetch
cls.objects.create(feed_id=feed_id, feed_fetch=feed_fetch,
feed_process=feed_process,
page=page, icon=icon, total=total,
server=server_name, feed_code=feed_code)
@classmethod
def calculate_stats(cls, stats):
return cls.aggregate(**stats)
|
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import ctypes
import platform
import re
import subprocess
import time
try:
import pywintypes # pylint: disable=F0401
import win32api # pylint: disable=F0401
import win32con # pylint: disable=F0401
import win32process # pylint: disable=F0401
except ImportError:
pywintypes = None
win32api = None
win32con = None
win32process = None
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core.platform import desktop_platform_backend
from telemetry.core.platform import platform_backend
class WinPlatformBackend(desktop_platform_backend.DesktopPlatformBackend):
# pylint: disable=W0613
def StartRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def StopRawDisplayFrameRateMeasurement(self):
raise NotImplementedError()
def GetRawDisplayFrameRateMeasurements(self):
raise NotImplementedError()
def IsThermallyThrottled(self):
raise NotImplementedError()
def HasBeenThermallyThrottled(self):
raise NotImplementedError()
def GetSystemCommitCharge(self):
performance_info = self._GetPerformanceInfo()
return performance_info.CommitTotal * performance_info.PageSize / 1024
@decorators.Cache
def GetSystemTotalPhysicalMemory(self):
performance_info = self._GetPerformanceInfo()
return performance_info.PhysicalTotal * performance_info.PageSize / 1024
def GetCpuStats(self, pid):
cpu_info = self._GetWin32ProcessInfo(win32process.GetProcessTimes, pid)
# Convert 100 nanosecond units to seconds
cpu_time = (cpu_info['UserTime'] / 1e7 +
cpu_info['KernelTime'] / 1e7)
return {'CpuProcessTime': cpu_time}
def GetCpuTimestamp(self):
"""Return current timestamp in seconds."""
return {'TotalTime': time.time()}
def GetMemoryStats(self, pid):
memory_info = self._GetWin32ProcessInfo(
win32process.GetProcessMemoryInfo, pid)
return {'VM': memory_info['PagefileUsage'],
'VMPeak': memory_info['PeakPagefileUsage'],
'WorkingSetSize': memory_info['WorkingSetSize'],
'WorkingSetSizePeak': memory_info['PeakWorkingSetSize']}
def GetIOStats(self, pid):
io_stats = self._GetWin32ProcessInfo(win32process.GetProcessIoCounters, pid)
return {'ReadOperationCount': io_stats['ReadOperationCount'],
'WriteOperationCount': io_stats['WriteOperationCount'],
'ReadTransferCount': io_stats['ReadTransferCount'],
'WriteTransferCount': io_stats['WriteTransferCount']}
def KillProcess(self, pid, kill_process_tree=False):
    # os.kill on Windows is only available in Python 2.7+, so shell out to taskkill instead.
cmd = ['taskkill', '/F', '/PID', str(pid)]
if kill_process_tree:
cmd.append('/T')
subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
def GetSystemProcessInfo(self):
# [3:] To skip 2 blank lines and header.
lines = subprocess.Popen(
['wmic', 'process', 'get',
'CommandLine,CreationDate,Name,ParentProcessId,ProcessId',
'/format:csv'],
stdout=subprocess.PIPE).communicate()[0].splitlines()[3:]
process_info = []
for line in lines:
if not line:
continue
parts = line.split(',')
pi = {}
pi['ProcessId'] = int(parts[-1])
pi['ParentProcessId'] = int(parts[-2])
pi['Name'] = parts[-3]
creation_date = None
if parts[-4]:
creation_date = float(re.split('[+-]', parts[-4])[0])
pi['CreationDate'] = creation_date
pi['CommandLine'] = ','.join(parts[1:-4])
process_info.append(pi)
return process_info
def GetChildPids(self, pid):
"""Retunds a list of child pids of |pid|."""
ppid_map = collections.defaultdict(list)
creation_map = {}
for pi in self.GetSystemProcessInfo():
ppid_map[pi['ParentProcessId']].append(pi['ProcessId'])
if pi['CreationDate']:
creation_map[pi['ProcessId']] = pi['CreationDate']
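    # Recurse through the parent->child map, skipping children that predate their parent (a sign the pid was reused)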
def _InnerGetChildPids(pid):
if not pid or pid not in ppid_map:
return []
ret = [p for p in ppid_map[pid] if creation_map[p] >= creation_map[pid]]
for child in ret:
if child == pid:
continue
ret.extend(_InnerGetChildPids(child))
return ret
return _InnerGetChildPids(pid)
def GetCommandLine(self, pid):
for pi in self.GetSystemProcessInfo():
if pid == pi['ProcessId']:
return pi['CommandLine']
raise exceptions.ProcessGoneException()
def GetOSName(self):
return 'win'
@decorators.Cache
def GetOSVersionName(self):
os_version = platform.uname()[3]
if os_version.startswith('5.1.'):
return platform_backend.OSVersion('xp', 5.1)
if os_version.startswith('6.0.'):
return platform_backend.OSVersion('vista', 6.0)
if os_version.startswith('6.1.'):
return platform_backend.OSVersion('win7', 6.1)
if os_version.startswith('6.2.'):
return platform_backend.OSVersion('win8', 6.2)
raise NotImplementedError('Unknown win version %s.' % os_version)
def CanFlushIndividualFilesFromSystemCache(self):
return True
def GetFlushUtilityName(self):
return 'clear_system_cache.exe'
def _GetWin32ProcessInfo(self, func, pid):
mask = (win32con.PROCESS_QUERY_INFORMATION |
win32con.PROCESS_VM_READ)
handle = None
try:
handle = win32api.OpenProcess(mask, False, pid)
return func(handle)
except pywintypes.error, e:
errcode = e[0]
if errcode == 87:
raise exceptions.ProcessGoneException()
raise
finally:
if handle:
win32api.CloseHandle(handle)
def _GetPerformanceInfo(self):
class PerformanceInfo(ctypes.Structure):
"""Struct for GetPerformanceInfo() call
http://msdn.microsoft.com/en-us/library/ms683210
"""
_fields_ = [('size', ctypes.c_ulong),
('CommitTotal', ctypes.c_size_t),
('CommitLimit', ctypes.c_size_t),
('CommitPeak', ctypes.c_size_t),
('PhysicalTotal', ctypes.c_size_t),
('PhysicalAvailable', ctypes.c_size_t),
('SystemCache', ctypes.c_size_t),
('KernelTotal', ctypes.c_size_t),
('KernelPaged', ctypes.c_size_t),
('KernelNonpaged', ctypes.c_size_t),
('PageSize', ctypes.c_size_t),
('HandleCount', ctypes.c_ulong),
('ProcessCount', ctypes.c_ulong),
('ThreadCount', ctypes.c_ulong)]
def __init__(self):
self.size = ctypes.sizeof(self)
super(PerformanceInfo, self).__init__()
performance_info = PerformanceInfo()
ctypes.windll.psapi.GetPerformanceInfo(
ctypes.byref(performance_info), performance_info.size)
return performance_info
|
|
from __future__ import print_function
import platform
PY3 = False
if platform.python_version().startswith('3'):
PY3 = True
import sys
import textwrap
import re
import math
from time import sleep
from io import StringIO
from subprocess import (
check_output,
CalledProcessError)
if PY3:
get_input = input
else:
get_input = raw_input
from . import COMPACT_TITLES
from .color import colors, THEME
from .banner import bannerize
from .album import gen_art
from .api import Client
# TODO
# windows detect terminal size
# leverage Radio/Station class to simplify menu "other stations"
# aka station switch selections
#
def stream_list(streams):
exploded = []
name_re = re.compile("name=(.*),url")
desc_re = re.compile("desc=\"(.*)\",art")
for s in streams:
name_m = name_re.search(s)
desc_m = desc_re.search(s)
s_exp = {
'name': name_m.group(1),
'desc': desc_m.group(1),
'repr': s
}
exploded.append(s_exp)
return exploded
def print_streams(station, streams, stations):
(term_w, term_h) = term_wh()
line_cnt = 0
if len(streams) == 0:
print("Exiting, empty station file, delete it and rerun")
sys.exit(1)
# set up to pretty print station data
# get left column width
name_len = max([len(s['name']) for s in streams]) + 1
# the first line has the name
# each subsequent line has whitespace up to column begin mark
# print the stations
i = 0
for s in streams:
prefix = (" %2d" % i + " ) " + s['name'] +
' ' * (name_len - len(s['name'])))
(w, h) = print_blockify(
prefix, THEME['ui_names'],
s['desc'], THEME['ui_desc'])
line_cnt += h
i += 1
# TODO get rid of hard coded access to the other stations
prefix = (" %2d" % len(streams) + " ) SomaFM" +
' ' * (name_len - len("SomaFM")))
desc = ("Enter " + str(len(streams)) +
" or 's' to show SomaFM streams")
if station == "soma":
prefix = (" %2d" % len(streams) + " ) Favorites" +
' ' * (name_len - len("Favorites")))
desc = ("Enter " + str(len(streams)) +
" or 'f' to show favorite streams")
(w, h) = print_blockify(
prefix, THEME['ui_names'],
desc, THEME['ui_desc'])
line_cnt += h
return line_cnt - 1
# \033[A moves cursor up 1 line
# ' ' overwrites text
# '\b' resets cursor to start of line
# if the term is narrow enough, you need to go up multiple lines
def del_prompt(num_chars):
# determine lines to move up, there is at least 1
# bc user pressed enter to give input
# when they pressed Enter, the cursor went to beginning of the line
(term_w, term_h) = term_wh()
move_up = int(math.ceil(float(num_chars) / float(term_w)))
print("\033[A" * move_up + ' ' * num_chars + '\b' * (num_chars), end='')
def term_wh():
(w, h) = (80, 40)
try:
# TODO os agnostic tty size
# *nix get terminal/console width
outp = check_output('stty size', shell=True)
    except CalledProcessError:
        return (w, h)
    outp = outp.decode('ascii').strip()
try:
rows, columns = outp.split()
except ValueError:
return (w, h)
try:
w = int(columns)
h = int(rows)
except ValueError:
pass
# print("term width: %d"% width)
return (w, h)
def read_input():
try:
stream_num = get_input("\nPlease select a stream [q to quit]: ")
except SyntaxError:
return
if not stream_num:
return
stream_num = str(stream_num).strip().lower()
if len(stream_num) == 0:
return
return stream_num
def try_as_int(stream_num, station, max_val):
try:
stream_num = int(stream_num)
except ValueError:
return None
# keys[len] is the other station
if stream_num < 0 or stream_num > max_val:
return None
# the final row is not a stream, but a station change
if stream_num == max_val:
if station == 'favs':
return (None, 'soma')
# else station == 'soma'
return (None, 'favs')
return (stream_num, station)
def get_choice(station, streams):
"""Get user choice of stream to play, or station to change"""
while True:
stream_num = read_input()
if stream_num is None:
continue
ctrl_char = stream_num[0]
if ctrl_char not in ['q', 'e', 's', 'f']:
retval = try_as_int(stream_num, station, len(streams))
if retval is None:
continue
else:
return retval
if (ctrl_char == 'q' or ctrl_char == 'e'):
return (None, 'q')
if ctrl_char == 'f':
return (None, 'favs')
if ctrl_char == 's':
return (None, 'soma')
# should never be here
def ui():
# set term title
sys.stdout.write("\x1b]0;" + "~=radio tuner=~" + "\x07")
c = Client()
do_another = True
next_st = 'favs'
while do_another:
try:
next_st = ui_loop(c, next_st)
except KeyboardInterrupt:
do_another = False
if next_st == 'q':
do_another = False
# clear term title
sys.stdout.write("\x1b]0;" + "\x07")
def ui_loop(client, station='favs'):
"""list possible stations, read user input, and call player"""
# when the player is exited, this loop happens again
c = client
if station is None:
station = c.stations()[0]
deets = c.station(station)
streams = stream_list(c.streams(station))
stations = c.stations()
# streams.sort() # put in alpha order
# ######
# print stations
(term_w, term_h) = term_wh()
banner_txt = deets['ui_name'] + ' Tuner'
with colors(THEME['ui_banner']):
(banner, font) = bannerize(banner_txt, term_w)
b_IO = StringIO(banner)
b_h = len(b_IO.readlines())
print(banner)
b_h += 1
line_cnt = print_streams(station, streams, stations)
loop_line_cnt = line_cnt + b_h + 2
loop_line_cnt += 1
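    # Pad with blank lines so the banner, station list, and prompt fill the terminal from the bottom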
if term_h > loop_line_cnt:
print('\n' * (term_h - loop_line_cnt - 1))
(stream_num, station) = get_choice(station, streams)
if station == 'q':
return 'q'
# no stream given, must have been a station change, refresh list
if stream_num is None:
return station
# ######
# otherwise stream num specified, so call player
##
# get the stream name only
to_stream = streams[stream_num]
# convert the name only into more details
stream = c.stream(station, to_stream['name'])
if stream is None:
print('Error, could not get stream details')
return station
display_album(stream['art'])
display_banner(stream['name'])
# this play->pause->play loop should never accumulate lines
# in the output (except for the first Enter they press
# at a prompt and even then, it's just an empty line)
i = 0
do_another = True
while do_another:
display_info()
try:
if display_metadata(c, stream):
c.stop()
do_another = False
# TODO poll user input for q to stop
except KeyboardInterrupt:
c.pause()
# clear ctrl+c
print('\b' * 5 + ' ' * 5 + '\b' * 5, end='')
if COMPACT_TITLES:
# clear info, name, song
to_del = term_wh()[0]
for i in range(3):
print("\033[A" + ' ' * to_del + '\b' * to_del, end='')
sys.stdout.flush()
prompt = "Paused. Press enter to Resume; m for menu. "
with colors(THEME['stream_exit_confirm']):
reloop = get_input(prompt)
del_prompt(len(prompt) + len(reloop))
# any key, not just m, takes you to the menu
if len(reloop) != 0:
c.stop()
do_another = False
i += 1
# you can't use mpg123's 'pause' cmd (spacebar) bc it'll
# fail a minute or two after resuming (buffer errors)
# it literally pauses the music,
# buffering the stream until unpaused, but the
# behavior we want is to stop recving the stream
# (like turning off a radio)
return station
def display_info():
msg1 = "Playing stream, enjoy..."
msg2 = "[ctrl-c for pause/options]"
with colors(THEME['stream_name_confirm']):
if term_wh()[0] <= (len(msg1) + len(msg2)):
print(msg1)
else:
print(msg1 + ' ' + msg2)
def display_metadata(client, stream):
# to test these updates against another stream
# without conflicting audio:
# mpg123 -f 0 -C -@ <url>
c = client
station_name = stream['station']
stream_name = stream['name']
if COMPACT_TITLES:
print()
print()
# don't assume that it's not playing from another client
if not c.play(station_name, stream_name):
print('Error, already playing %s' % c.status())
# TODO ignore if playing what was requested
return False
showed_name = False
i = 0
disp_name = stream['meta_name']
# disp names of '', like DEF CON Radio will escape loop
while i < 10 and disp_name is None:
stream = c.stream(station_name, stream_name)
disp_name = stream['meta_name']
sleep(0.5)
i += 1
if disp_name is None:
disp_name = stream_name
if disp_name is not None and disp_name.strip() != '':
showed_name = True
if COMPACT_TITLES:
print("\033[A" * 2, end='')
print_blockify(
THEME['meta_prefix_str'], THEME['meta_prefix'],
disp_name, THEME['meta_stream_name'],
wrap=False)
if COMPACT_TITLES:
print()
# wait for initial song
i = 0
song_len = 0
song_name = stream['meta_song']
# song names of '', like WCPE will escape loop
while i < 10 and song_name is None:
stream = c.stream(station_name, stream_name)
song_name = stream['meta_song']
sleep(0.5)
i += 1
showed_song = False
if song_name is not None and song_name.strip() != '':
showed_song = True
if COMPACT_TITLES:
print("\033[A", end='')
if not showed_name:
print("\033[A", end='')
song_len = print_blockify(
THEME['meta_prefix_str'], THEME['meta_prefix'],
song_name, THEME['meta_song_name'],
wrap=False)[0]
if COMPACT_TITLES and not showed_name:
print()
# keep polling for song title changes
do_another = True
while do_another:
status = c.status()
song_now = status['song']
if (song_now != song_name and
song_now is not None and song_now.strip() != ''):
if COMPACT_TITLES:
if not showed_name:
print("\033[A", end='')
if not showed_song:
print("\033[A", end='')
showed_song = True
if song_len > 0:
del_prompt(song_len)
song_len = print_blockify(
THEME['meta_prefix_str'], THEME['meta_prefix'],
song_now, THEME['meta_song_name'],
wrap=False)[0]
song_name = song_now
is_playing = status['currently_streaming']
if not is_playing:
return True
sleep(1)
return True
def print_blockify(prefix='', prefix_color='endc',
blk='', blk_color='endc',
wrap=True):
# NOTE won't print only prefix without blk
if len(blk) == 0:
return (0, 0)
p_len = len(prefix)
with colors(prefix_color):
# sys.stdout.write && flush
print(prefix, end='')
(term_w, term_h) = term_wh()
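    # Wrap the block text to the width remaining after the prefix column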
lines = textwrap.wrap(blk, term_w - p_len)
max_blk_len = len(lines[0])
with colors(blk_color):
print(lines[0])
if not wrap:
return (len(prefix) + max_blk_len, 1)
# prefix only appears on 1st line, justifying remainder
prefix = ' ' * p_len
for line in lines[1:]:
if len(line) > max_blk_len:
max_blk_len = len(line)
with colors(blk_color):
print(prefix + line)
return (max_blk_len, len(lines))
def display_album(art_url):
if art_url is None or art_url == '':
return
(term_w, term_h) = term_wh()
art = gen_art(art_url, term_w, term_h)
if art is None:
return
print("ASCII Printout of Station's Logo:")
print(art)
def display_banner(stream_name):
unhappy = True
while unhappy:
(term_w, term_h) = term_wh()
font = "unknown"
with colors(THEME['stream_name_banner']):
(banner, font) = bannerize(stream_name, term_w)
b_IO = StringIO(banner)
b_height = len(b_IO.readlines())
if term_h > (b_height + 3): # Playing, Station Name, Song Title
print('\n' * (term_h - b_height - 2))
print(banner, end='')
with colors(THEME['stream_name_confirm']):
prompt = "Press enter if you like banner"
prompt += " (font: " + font + "), else any char then enter "
try:
happiness = get_input(prompt)
except SyntaxError:
happiness = ''
del_prompt(len(prompt) + len(happiness))
if len(happiness) == 0:
unhappy = False
else:
print("") # empty line for pretty factor
|
|
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common import for all tests."""
import base64
import contextlib
import json
import logging
import optparse
import os
import shlex
import shutil
import signal
import socket
import subprocess
import sys
import time
import unittest
import urllib2
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
from vtdb import vtgate_client
import environment
from mysql_flavor import mysql_flavor
from mysql_flavor import set_mysql_flavor
import MySQLdb
from protocols_flavor import protocols_flavor
from topo_flavor.server import set_topo_server_flavor
from vtctl import vtctl_client
from vtdb import keyrange_constants
from vtgate_gateway_flavor.gateway import set_vtgate_gateway_flavor
from vtgate_gateway_flavor.gateway import vtgate_gateway_flavor
from vtproto import topodata_pb2
options = None
devnull = open('/dev/null', 'w')
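# Resolve this machine's fully qualified canonical host name once at import time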
hostname = socket.getaddrinfo(
socket.getfqdn(), None, 0, 0, 0, socket.AI_CANONNAME)[0][3]
class TestError(Exception):
pass
class Break(Exception):
pass
environment.setup()
class LoggingStream(object):
def __init__(self):
self.line = ''
def write(self, value):
if value == '\n':
# we already printed it
self.line = ''
return
self.line += value
logging.info('===== ' + self.line)
if value.endswith('\n'):
self.line = ''
def writeln(self, value):
self.write(value)
self.line = ''
def flush(self):
pass
def add_options(parser):
environment.add_options(parser)
parser.add_option('-d', '--debug', action='store_true',
help='utils.pause() statements will wait for user input')
parser.add_option('-k', '--keep-logs', action='store_true',
help='Do not delete log files on teardown.')
parser.add_option(
'-q', '--quiet', action='store_const', const=0, dest='verbose', default=1)
parser.add_option(
'-v', '--verbose', action='store_const', const=2, dest='verbose',
default=1)
parser.add_option('--skip-build', action='store_true',
help='Do not build the go binaries when running the test.')
parser.add_option(
'--skip-teardown', action='store_true',
help='Leave the global processes running after the test is done.')
parser.add_option('--mysql-flavor')
parser.add_option('--protocols-flavor', default='grpc')
parser.add_option('--topo-server-flavor', default='zk2')
parser.add_option('--vtgate-gateway-flavor', default='discoverygateway')
def set_options(opts):
global options
options = opts
set_mysql_flavor(options.mysql_flavor)
environment.setup_protocol_flavor(options.protocols_flavor)
set_topo_server_flavor(options.topo_server_flavor)
set_vtgate_gateway_flavor(options.vtgate_gateway_flavor)
environment.skip_build = options.skip_build
# main executes the test classes contained in the passed module, or
# __main__ if empty.
def main(mod=None, test_options=None):
"""The replacement main method, which parses args and runs tests.
Args:
mod: module that contains the test methods.
test_options: a function which adds OptionParser options that are specific
to a test file.
"""
if mod is None:
mod = sys.modules['__main__']
global options
parser = optparse.OptionParser(usage='usage: %prog [options] [test_names]')
add_options(parser)
if test_options:
test_options(parser)
(options, args) = parser.parse_args()
set_log_level(options.verbose)
logging.basicConfig(
format='-- %(asctime)s %(module)s:%(lineno)d %(levelname)s %(message)s')
set_options(options)
run_tests(mod, args)
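# Example entry point for a test module that uses this helper (sketch; the
# extra option is illustrative only):
#   if __name__ == '__main__':
#     utils.main(test_options=lambda p: p.add_option('--my-test-flag'))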
def run_tests(mod, args):
try:
suite = unittest.TestSuite()
if not args:
# this will run the setup and teardown
suite.addTests(unittest.TestLoader().loadTestsFromModule(mod))
else:
if args[0] == 'teardown':
mod.tearDownModule()
elif args[0] == 'setup':
mod.setUpModule()
else:
for arg in args:
# this will run the setup and teardown
suite.addTests(unittest.TestLoader().loadTestsFromName(arg, mod))
if suite.countTestCases() > 0:
logger = LoggingStream()
result = unittest.TextTestRunner(
stream=logger, verbosity=options.verbose, failfast=True).run(suite)
if not result.wasSuccessful():
sys.exit(-1)
except KeyboardInterrupt:
logging.warning('======== Tests interrupted, cleaning up ========')
mod.tearDownModule()
# If you interrupt a test, you probably want to stop evaluating the rest.
sys.exit(1)
finally:
if options.keep_logs:
logging.warning('Leaving temporary files behind (--keep-logs), please '
'clean up before next run: ' + os.environ['VTDATAROOT'])
def remove_tmp_files():
if options.keep_logs:
return
try:
shutil.rmtree(environment.tmproot)
except OSError as e:
logging.debug('remove_tmp_files: %s', str(e))
def pause(prompt):
if options.debug:
raw_input(prompt)
# sub-process management
pid_map = {}
already_killed = []
def _add_proc(proc):
pid_map[proc.pid] = proc
with open(environment.tmproot+'/test-pids', 'a') as f:
print >> f, proc.pid, os.path.basename(proc.args[0])
def required_teardown():
"""Required cleanup steps that can't be skipped with --skip-teardown."""
# We can't skip closing of gRPC connections, because the Python interpreter
# won't let us die if any connections are left open.
global vtctld_connection
if vtctld_connection:
vtctld_connection.close()
vtctld_connection = None
def kill_sub_processes():
for proc in pid_map.values():
if proc.pid and proc.returncode is None:
proc.kill()
if not os.path.exists(environment.tmproot+'/test-pids'):
return
with open(environment.tmproot+'/test-pids') as f:
for line in f:
try:
parts = line.strip().split()
pid = int(parts[0])
proc = pid_map.get(pid)
if not proc or (proc and proc.pid and proc.returncode is None):
if pid not in already_killed:
os.kill(pid, signal.SIGTERM)
except OSError as e:
logging.debug('kill_sub_processes: %s', str(e))
def kill_sub_process(proc, soft=False):
if proc is None:
return
pid = proc.pid
if soft:
proc.terminate()
else:
proc.kill()
if pid and pid in pid_map:
del pid_map[pid]
already_killed.append(pid)
# run in foreground, possibly capturing output
def run(cmd, trap_output=False, raise_on_error=True, **kargs):
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
if trap_output:
kargs['stdout'] = subprocess.PIPE
kargs['stderr'] = subprocess.PIPE
logging.debug(
'run: %s %s', str(cmd),
', '.join('%s=%s' % x for x in kargs.iteritems()))
proc = subprocess.Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
if proc.returncode:
if raise_on_error:
pause('cmd fail: %s, pausing...' % (args))
raise TestError('cmd fail:', args, proc.returncode, stdout, stderr)
else:
logging.debug('cmd fail: %s %d %s %s',
str(args), proc.returncode, stdout, stderr)
return stdout, stderr
# run sub-process, expects failure
def run_fail(cmd, **kargs):
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
kargs['stdout'] = subprocess.PIPE
kargs['stderr'] = subprocess.PIPE
if options.verbose == 2:
logging.debug(
'run: (expect fail) %s %s', cmd,
', '.join('%s=%s' % x for x in kargs.iteritems()))
proc = subprocess.Popen(args, **kargs)
proc.args = args
stdout, stderr = proc.communicate()
if proc.returncode == 0:
logging.info('stdout:\n%sstderr:\n%s', stdout, stderr)
raise TestError('expected fail:', args, stdout, stderr)
return stdout, stderr
# run a daemon - kill when this script exits
def run_bg(cmd, **kargs):
if options.verbose == 2:
logging.debug(
'run: %s %s', cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
if 'extra_env' in kargs:
kargs['env'] = os.environ.copy()
if kargs['extra_env']:
kargs['env'].update(kargs['extra_env'])
del kargs['extra_env']
if isinstance(cmd, str):
args = shlex.split(cmd)
else:
args = cmd
proc = subprocess.Popen(args=args, **kargs)
proc.args = args
_add_proc(proc)
return proc
def wait_procs(proc_list, raise_on_error=True):
for proc in proc_list:
pid = proc.pid
if pid:
already_killed.append(pid)
for proc in proc_list:
proc.wait()
for proc in proc_list:
if proc.returncode:
if options.verbose >= 1 and proc.returncode not in (-9,):
sys.stderr.write('proc failed: %s %s\n' % (proc.returncode, proc.args))
if raise_on_error:
raise subprocess.CalledProcessError(proc.returncode,
' '.join(proc.args))
def validate_topology(ping_tablets=False):
if ping_tablets:
run_vtctl(['Validate', '-ping-tablets'])
else:
run_vtctl(['Validate'])
def zk_ls(path):
out, _ = run(environment.binary_argstr('zk')+' ls '+path, trap_output=True)
return sorted(out.splitlines())
def zk_cat(path):
out, _ = run(environment.binary_argstr('zk')+' cat '+path, trap_output=True)
return out
def zk_cat_json(path):
data = zk_cat(path)
return json.loads(data)
# wait_step is a helper for looping until a condition is true.
# use as follow:
# timeout = 10
# while True:
# <step>
# if <done>:
# break
# timeout = utils.wait_step('description of condition', timeout)
def wait_step(msg, timeout, sleep_time=0.1):
timeout -= sleep_time
if timeout <= 0:
raise TestError('timeout waiting for condition "%s"' % msg)
logging.debug('Sleeping for %f seconds waiting for condition "%s"',
sleep_time, msg)
time.sleep(sleep_time)
return timeout
# vars helpers
def get_vars(port):
"""Returns the dict for vars from a vtxxx process. None if not available."""
try:
url = 'http://localhost:%d/debug/vars' % int(port)
f = urllib2.urlopen(url)
data = f.read()
f.close()
except urllib2.URLError:
return None
try:
return json.loads(data)
except ValueError:
print data
raise
def wait_for_vars(name, port, var=None, key=None, value=None, timeout=10.0):
"""Waits for the vars of a process, and optional values.
Args:
name: nickname for the process.
port: process port to look at.
var: if specified, waits for var in vars.
key: if specified, waits for vars[var][key]==value.
value: if key is specified, waits for vars[var][key]==value.
timeout: how long to wait.
"""
text = 'waiting for http://localhost:%d/debug/vars of %s' % (port, name)
if var:
text += ' value %s' % var
if key:
text += ' key %s:%s' % (key, value)
while True:
display_text = text
v = get_vars(port)
if v:
if var is None:
break
if var in v:
if key is None:
break
if key in v[var]:
if v[var][key] == value:
break
else:
display_text += ' (current value:%s)' % v[var][key]
else:
display_text += ' (no current value)'
else:
display_text += ' (%s not in vars)' % var
else:
display_text += ' (no vars yet)'
timeout = wait_step(display_text, timeout)
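# Illustrative use of wait_for_vars (the var/key names below are assumptions
# for the sketch, not guarantees about any particular binary):
#   wait_for_vars('vtgate', vtgate_port)  # just wait for /debug/vars to exist
#   wait_for_vars('vtgate', vtgate_port, var='HealthcheckConnections',
#                 key='test_keyspace.0.master', value=2, timeout=30.0)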
def poll_for_vars(
name, port, condition_msg, timeout=60.0, condition_fn=None,
require_vars=False):
"""Polls for debug variables to exist or match specific conditions.
This function polls in a tight loop, with no sleeps. This is useful for
variables that are expected to be short-lived (e.g., a 'Done' state
immediately before a process exits).
Args:
name: the name of the process that we're trying to poll vars from.
port: the port number that we should poll for variables.
condition_msg: string describing the conditions that we're polling for,
used for error messaging.
timeout: number of seconds that we should attempt to poll for.
condition_fn: a function that takes the debug vars dict as input, and
returns a truthy value if it matches the success conditions.
require_vars: True iff we expect the vars to always exist. If
True, and the vars don't exist, we'll raise a TestError. This
can be used to differentiate between a timeout waiting for a
particular condition vs if the process that you're polling has
already exited.
Raises:
TestError: if the conditions aren't met within the given timeout, or
if vars are required and don't exist.
Returns:
dict of debug variables
"""
start_time = time.time()
while True:
if (time.time() - start_time) >= timeout:
raise TestError(
'Timed out polling for vars from %s; condition "%s" not met' %
(name, condition_msg))
v = get_vars(port)
if v is None:
if require_vars:
raise TestError(
'Expected vars to exist on %s, but they do not; '
'process probably exited earlier than expected.' % (name,))
continue
if condition_fn is None:
return v
elif condition_fn(v):
return v
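# Illustrative use of poll_for_vars with a condition function (the var name
# 'WorkerState' is an assumption for the sketch):
#   v = poll_for_vars('vtworker', worker_port, 'worker state is done',
#                     condition_fn=lambda v: v.get('WorkerState') == 'done',
#                     require_vars=True)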
def apply_vschema(vschema):
for k, v in vschema.iteritems():
fname = os.path.join(environment.tmproot, 'vschema.json')
with open(fname, 'w') as f:
f.write(v)
run_vtctl(['ApplyVSchema', '-vschema_file', fname, k])
def wait_for_tablet_type(tablet_alias, expected_type, timeout=10):
"""Waits for a given tablet's SlaveType to become the expected value.
Args:
tablet_alias: Alias of the tablet.
expected_type: Type of the tablet e.g. "replica".
timeout: Timeout in seconds.
Raises:
TestError: SlaveType did not become expected_type within timeout seconds.
"""
type_as_int = topodata_pb2.TabletType.Value(expected_type.upper())
while True:
if run_vtctl_json(['GetTablet', tablet_alias])['type'] == type_as_int:
logging.debug('tablet %s went to expected type: %s',
tablet_alias, expected_type)
break
timeout = wait_step(
"%s's SlaveType to be %s" % (tablet_alias, expected_type),
timeout)
def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0):
"""Waits for tablet B to catch up to the replication position of tablet A.
Args:
tablet_a: tablet Object for tablet A.
tablet_b: tablet Object for tablet B.
timeout: Timeout in seconds.
Raises:
TestError: replication position did not catch up within timeout seconds.
"""
replication_pos_a = mysql_flavor().master_position(tablet_a)
while True:
replication_pos_b = mysql_flavor().master_position(tablet_b)
if mysql_flavor().position_at_least(replication_pos_b, replication_pos_a):
break
timeout = wait_step(
"%s's replication position to catch up %s's; "
'currently at: %s, waiting to catch up to: %s' % (
tablet_b.tablet_alias, tablet_a.tablet_alias, replication_pos_b,
replication_pos_a),
timeout, sleep_time=0.1)
# Save the first running instance of vtgate. It is saved when 'start'
# is called, and cleared when kill is called.
vtgate = None
class VtGate(object):
"""VtGate object represents a vtgate process."""
def __init__(self, port=None, mysql_server=False):
"""Creates the Vtgate instance and reserve the ports if necessary."""
self.port = port or environment.reserve_ports(1)
if protocols_flavor().vtgate_protocol() == 'grpc':
self.grpc_port = environment.reserve_ports(1)
self.proc = None
self.mysql_port = None
if mysql_server:
self.mysql_port = environment.reserve_ports(1)
def start(self, cell='test_nj', retry_count=2,
topo_impl=None, cache_ttl='1s',
healthcheck_conn_timeout='2s',
extra_args=None, tablets=None,
tablet_types_to_wait='MASTER,REPLICA',
l2vtgates=None):
"""Start vtgate. Saves it into the global vtgate variable if not set yet."""
args = environment.binary_args('vtgate') + [
'-port', str(self.port),
'-cell', cell,
'-retry-count', str(retry_count),
'-log_dir', environment.vtlogroot,
'-srv_topo_cache_ttl', cache_ttl,
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
'-stderrthreshold', get_log_level(),
'-normalize_queries',
]
if l2vtgates:
args.extend([
'-gateway_implementation', 'l2vtgategateway',
'-l2vtgategateway_addrs', ','.join(l2vtgates),
])
else:
args.extend([
'-healthcheck_conn_timeout', healthcheck_conn_timeout,
'-gateway_implementation', vtgate_gateway_flavor().flavor(),
])
args.extend(vtgate_gateway_flavor().flags(cell=cell, tablets=tablets))
if tablet_types_to_wait:
args.extend(['-tablet_types_to_wait', tablet_types_to_wait])
if protocols_flavor().vtgate_protocol() == 'grpc':
args.extend(['-grpc_port', str(self.grpc_port)])
args.extend(['-grpc_max_message_size', str(environment.grpc_max_message_size)])
if protocols_flavor().service_map():
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
if topo_impl:
args.extend(['-topo_implementation', topo_impl])
else:
args.extend(environment.topo_server().flags())
if extra_args:
args.extend(extra_args)
if self.mysql_port:
args.extend(['-mysql_server_port', str(self.mysql_port)])
self.proc = run_bg(args)
wait_for_vars('vtgate', self.port)
global vtgate
if not vtgate:
vtgate = self
def kill(self):
"""Terminates the vtgate process, and waits for it to exit.
If this process is the one saved in the global vtgate variable,
clears it.
Note: if the test uses just one global vtgate process, started with
the test and killed at the end of it, there is no need to call this
kill() method; utils.kill_sub_processes() will do a good enough job.
"""
if self.proc is None:
return
kill_sub_process(self.proc, soft=True)
self.proc.wait()
self.proc = None
global vtgate
if vtgate == self:
vtgate = None
def addr(self):
"""Returns the address of the vtgate process, for web access."""
return 'localhost:%d' % self.port
def rpc_endpoint(self, python=False):
"""Returns the protocol and endpoint to use for RPCs."""
if python:
protocol = protocols_flavor().vtgate_python_protocol()
else:
protocol = protocols_flavor().vtgate_protocol()
if protocol == 'grpc':
return protocol, 'localhost:%d' % self.grpc_port
return protocol, self.addr()
def get_status(self):
"""Returns the status page for this process."""
return get_status(self.port)
def get_vars(self):
"""Returns the vars for this process."""
return get_vars(self.port)
@contextlib.contextmanager
def create_connection(self):
"""Connects to vtgate and allows to create a cursor to execute queries.
This method is preferred over the two other methods ("vtclient", "execute")
to execute a query in tests.
Yields:
A vtgate connection object.
Example:
with self.vtgate.create_connection() as conn:
c = conn.cursor(keyspace=KEYSPACE, shards=[SHARD], tablet_type='master',
writable=self.writable)
c.execute('SELECT * FROM buffer WHERE id = :id', {'id': 1})
"""
protocol, endpoint = self.rpc_endpoint(python=True)
# Use a very long timeout to account for slow tests.
conn = vtgate_client.connect(protocol, endpoint, 600.0)
yield conn
conn.close()
@contextlib.contextmanager
def write_transaction(self, **kwargs):
"""Begins a write transaction and commits automatically.
Note that each transaction contextmanager will create a new connection.
Args:
**kwargs: vtgate cursor args. See vtgate_cursor.VTGateCursor.
Yields:
A writable vtgate cursor.
Example:
with utils.vtgate.write_transaction(keyspace=KEYSPACE, shards=[SHARD],
tablet_type='master') as tx:
tx.execute('INSERT INTO table1 (id, msg) VALUES (:id, :msg)',
{'id': 1, 'msg': 'msg1'})
"""
with self.create_connection() as conn:
cursor = conn.cursor(writable=True, **kwargs)
cursor.begin()
yield cursor
cursor.commit()
def vtclient(self, sql, keyspace=None, tablet_type='master',
bindvars=None, streaming=False,
verbose=False, raise_on_error=True, json_output=False):
"""Uses the vtclient binary to send a query to vtgate."""
protocol, addr = self.rpc_endpoint()
args = environment.binary_args('vtclient') + [
'-server', addr,
'-vtgate_protocol', protocol]
if json_output:
args.append('-json')
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
if streaming:
args.append('-streaming')
if keyspace:
args.extend(['-target', '%s@%s' % (keyspace, tablet_type)])
else:
args.extend(['-target', '@'+tablet_type])
if verbose:
args.append('-alsologtostderr')
args.append(sql)
out, err = run(args, raise_on_error=raise_on_error, trap_output=True)
if json_output:
return json.loads(out), err
return out, err
def execute(self, sql, tablet_type='master', bindvars=None,
execute_options=None):
"""Uses 'vtctl VtGateExecute' to execute a command.
Args:
sql: the command to execute.
tablet_type: the tablet_type to use.
bindvars: a dict of bind variables.
execute_options: proto-encoded ExecuteOptions object.
Returns:
the result of running vtctl command.
"""
_, addr = self.rpc_endpoint()
args = ['VtGateExecute', '-json',
'-server', addr,
'-target', '@'+tablet_type]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
if execute_options:
args.extend(['-options', execute_options])
args.append(sql)
return run_vtctl_json(args)
def execute_shards(self, sql, keyspace, shards, tablet_type='master',
bindvars=None):
"""Uses 'vtctl VtGateExecuteShards' to execute a command."""
_, addr = self.rpc_endpoint()
args = ['VtGateExecuteShards', '-json',
'-server', addr,
'-keyspace', keyspace,
'-shards', shards,
'-tablet_type', tablet_type]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
args.append(sql)
return run_vtctl_json(args)
def split_query(self, sql, keyspace, split_count, bindvars=None):
"""Uses 'vtctl VtGateSplitQuery' to cut a query up in chunks."""
_, addr = self.rpc_endpoint()
args = ['VtGateSplitQuery',
'-server', addr,
'-keyspace', keyspace,
'-split_count', str(split_count)]
if bindvars:
args.extend(['-bind_variables', json.dumps(bindvars)])
args.append(sql)
return run_vtctl_json(args)
def wait_for_endpoints(self, name, count, timeout=20.0):
"""waits until vtgate gets endpoints.
Args:
name: name of the endpoint, in the form: 'keyspace.shard.type'.
count: how many endpoints to wait for.
timeout: how long to wait.
"""
wait_for_vars('vtgate', self.port,
var=vtgate_gateway_flavor().connection_count_vars(),
key=name, value=count, timeout=timeout)
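# Typical VtGate lifecycle in a test (sketch only; keyspace, shard and tablet
# names are illustrative assumptions):
#   utils.VtGate().start(tablets=[shard_0_master, shard_0_replica])
#   utils.vtgate.wait_for_endpoints('test_keyspace.0.master', 1)
#   result = utils.vtgate.execute('select 1 from dual')
#   utils.vtgate.kill()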
class L2VtGate(object):
"""L2VtGate object represents a l2vtgate process."""
def __init__(self, port=None):
"""Creates the L2VTGate instance and reserve the ports if necessary."""
self.port = port or environment.reserve_ports(1)
if protocols_flavor().vtgate_protocol() == 'grpc':
self.grpc_port = environment.reserve_ports(1)
self.proc = None
def start(self, cell='test_nj', retry_count=2,
topo_impl=None, cache_ttl='1s',
healthcheck_conn_timeout='2s',
extra_args=None, tablets=None,
tablet_types_to_wait='MASTER,REPLICA',
tablet_filters=None):
"""Start l2vtgate."""
args = environment.binary_args('l2vtgate') + [
'-port', str(self.port),
'-cell', cell,
'-retry-count', str(retry_count),
'-log_dir', environment.vtlogroot,
'-srv_topo_cache_ttl', cache_ttl,
'-healthcheck_conn_timeout', healthcheck_conn_timeout,
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
'-gateway_implementation', vtgate_gateway_flavor().flavor(),
]
args.extend(vtgate_gateway_flavor().flags(cell=cell, tablets=tablets))
if tablet_types_to_wait:
args.extend(['-tablet_types_to_wait', tablet_types_to_wait])
if tablet_filters:
args.extend(['-tablet_filters', tablet_filters])
if protocols_flavor().vtgate_protocol() == 'grpc':
args.extend(['-grpc_port', str(self.grpc_port)])
if protocols_flavor().service_map():
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
if topo_impl:
args.extend(['-topo_implementation', topo_impl])
else:
args.extend(environment.topo_server().flags())
if extra_args:
args.extend(extra_args)
self.proc = run_bg(args)
wait_for_vars('l2vtgate', self.port)
def kill(self):
"""Terminates the l2vtgate process, and waits for it to exit.
"""
if self.proc is None:
return
kill_sub_process(self.proc, soft=True)
self.proc.wait()
self.proc = None
def addr(self):
"""Returns the address of the l2vtgate process, for web access."""
return 'localhost:%d' % self.port
def rpc_endpoint(self):
"""Returns the protocol and endpoint to use for RPCs."""
protocol = protocols_flavor().vtgate_protocol()
if protocol == 'grpc':
return protocol, 'localhost:%d' % self.grpc_port
return protocol, self.addr()
def get_status(self):
"""Returns the status page for this process."""
return get_status(self.port)
def get_vars(self):
"""Returns the vars for this process."""
return get_vars(self.port)
def wait_for_endpoints(self, name, count, timeout=20.0):
"""waits until l2vtgate gets endpoints.
Args:
name: name of the endpoint, in the form: 'keyspace.shard.type'.
count: how many endpoints to wait for.
timeout: how long to wait.
"""
wait_for_vars('l2vtgate', self.port,
var=vtgate_gateway_flavor().connection_count_vars(),
key=name, value=count, timeout=timeout)
def verify_no_endpoint(self, name):
"""verifies the l2vtgate doesn't have any enpoint of the given name.
Args:
name: name of the endpoint, in the form: 'keyspace.shard.type'.
"""
def condition(v):
return (v.get(vtgate_gateway_flavor().connection_count_vars())
.get(name, None)) is None
poll_for_vars('l2vtgate', self.port,
'no endpoint named ' + name,
timeout=5.0,
condition_fn=condition)
# vtctl helpers
# The modes are not all equivalent, and we don't really strive for them to be.
# If a client needs to rely on vtctl's command line behavior, make
# sure to use mode=utils.VTCTL_VTCTL
VTCTL_AUTO = 0
VTCTL_VTCTL = 1
VTCTL_VTCTLCLIENT = 2
VTCTL_RPC = 3
def run_vtctl(clargs, auto_log=False, expect_fail=False,
mode=VTCTL_AUTO, **kwargs):
if mode == VTCTL_AUTO:
if not expect_fail and vtctld:
mode = VTCTL_RPC
else:
mode = VTCTL_VTCTL
if mode == VTCTL_VTCTL:
return run_vtctl_vtctl(clargs, auto_log=auto_log,
expect_fail=expect_fail, **kwargs)
elif mode == VTCTL_VTCTLCLIENT:
result = vtctld.vtctl_client(clargs)
return result, ''
elif mode == VTCTL_RPC:
if auto_log:
logging.debug('vtctl: %s', ' '.join(clargs))
result = vtctl_client.execute_vtctl_command(vtctld_connection, clargs,
info_to_debug=True,
action_timeout=120)
return result, ''
raise Exception('Unknown mode: %s' % mode)
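# Example (sketch): force the command-line vtctl binary instead of the RPC
# path when a test depends on vtctl's CLI behavior, per the note above.
# 'ListAllTablets' and the 'test_nj' cell are illustrative values:
#   out, err = run_vtctl(['ListAllTablets', 'test_nj'], mode=VTCTL_VTCTL)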
def run_vtctl_vtctl(clargs, auto_log=False, expect_fail=False,
**kwargs):
args = environment.binary_args('vtctl') + [
'-log_dir', environment.vtlogroot,
'-enable_queries',
]
args.extend(environment.topo_server().flags())
args.extend(['-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol()])
args.extend(['-tablet_protocol', protocols_flavor().tabletconn_protocol()])
args.extend(['-throttler_client_protocol',
protocols_flavor().throttler_client_protocol()])
args.extend(['-vtgate_protocol', protocols_flavor().vtgate_protocol()])
# TODO(b/26388813): Remove the next two lines once vtctl WaitForDrain is
# integrated in the vtctl MigrateServed* commands.
args.extend(['--wait_for_drain_sleep_rdonly', '0s'])
args.extend(['--wait_for_drain_sleep_replica', '0s'])
if auto_log:
args.append('--stderrthreshold=%s' % get_log_level())
if isinstance(clargs, str):
cmd = ' '.join(args) + ' ' + clargs
else:
cmd = args + clargs
if expect_fail:
return run_fail(cmd, **kwargs)
return run(cmd, **kwargs)
# run_vtctl_json runs the provided vtctl command and returns the result
# parsed as json
def run_vtctl_json(clargs, auto_log=True):
stdout, _ = run_vtctl(clargs, trap_output=True, auto_log=auto_log)
return json.loads(stdout)
def get_log_level():
if options.verbose == 2:
return '0'
elif options.verbose == 1:
return '1'
else:
return '2'
def set_log_level(verbose):
level = logging.DEBUG
if verbose == 0:
level = logging.WARNING
elif verbose == 1:
level = logging.INFO
logging.getLogger().setLevel(level)
# vtworker helpers
def run_vtworker(clargs, auto_log=False, expect_fail=False, **kwargs):
"""Runs a vtworker process, returning the stdout and stderr."""
cmd, _, _ = _get_vtworker_cmd(clargs, auto_log)
if expect_fail:
return run_fail(cmd, **kwargs)
return run(cmd, **kwargs)
def run_vtworker_bg(clargs, auto_log=False, **kwargs):
"""Starts a background vtworker process."""
cmd, port, rpc_port = _get_vtworker_cmd(clargs, auto_log)
return run_bg(cmd, **kwargs), port, rpc_port
def _get_vtworker_cmd(clargs, auto_log=False):
"""Assembles the command that is needed to run a vtworker.
Args:
clargs: Command line arguments passed to vtworker.
auto_log: If true, set --stderrthreshold according to the test log level.
Returns:
cmd - list of cmd arguments, can be passed to any `run`-like functions
port - int with the port number that the vtworker is running with
rpc_port - int with the port number of the RPC interface
"""
port = environment.reserve_ports(1)
rpc_port = port
args = environment.binary_args('vtworker') + [
'-log_dir', environment.vtlogroot,
'-port', str(port),
'-executefetch_retry_time', '1s',
'-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol(),
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
]
args.extend(environment.topo_server().flags())
if protocols_flavor().service_map():
args.extend(['-service_map',
','.join(protocols_flavor().service_map())])
if protocols_flavor().vtworker_client_protocol() == 'grpc':
rpc_port = environment.reserve_ports(1)
args.extend(['-grpc_port', str(rpc_port)])
if auto_log:
args.append('--stderrthreshold=%s' % get_log_level())
cmd = args + clargs
return cmd, port, rpc_port
# vtworker client helpers
def run_vtworker_client_bg(args, rpc_port):
"""Runs vtworkerclient to execute a command on a remote vtworker.
Args:
args: Full vtworker command.
rpc_port: Port number.
Returns:
proc: process returned by subprocess.Popen
"""
return run_bg(
environment.binary_args('vtworkerclient') + [
'-log_dir', environment.vtlogroot,
'-vtworker_client_protocol',
protocols_flavor().vtworker_client_protocol(),
'-server', 'localhost:%d' % rpc_port,
'-stderrthreshold', get_log_level(),
] + args)
def run_automation_server(auto_log=False):
"""Starts a background automation_server process.
Args:
auto_log: True to log.
Returns:
rpc_port - int with the port number of the RPC interface
"""
rpc_port = environment.reserve_ports(1)
args = environment.binary_args('automation_server') + [
'-log_dir', environment.vtlogroot,
'-port', str(rpc_port),
'-vtctl_client_protocol',
protocols_flavor().vtctl_client_protocol(),
'-vtworker_client_protocol',
protocols_flavor().vtworker_client_protocol(),
]
if auto_log:
args.append('--stderrthreshold=%s' % get_log_level())
return run_bg(args), rpc_port
# mysql helpers
def mysql_query(uid, dbname, query):
conn = MySQLdb.Connect(
user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
cursor.execute(query)
try:
return cursor.fetchall()
finally:
conn.close()
def mysql_write_query(uid, dbname, query):
conn = MySQLdb.Connect(
user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
db=dbname)
cursor = conn.cursor()
conn.begin()
cursor.execute(query)
conn.commit()
try:
return cursor.fetchall()
finally:
conn.close()
def check_db_var(uid, name, value):
conn = MySQLdb.Connect(
user='vt_dba',
unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid))
cursor = conn.cursor()
cursor.execute("show variables like '%s'" % name)
row = cursor.fetchone()
if row != (name, value):
raise TestError('variable not set correctly', name, row)
conn.close()
def check_db_read_only(uid):
return check_db_var(uid, 'read_only', 'ON')
def check_db_read_write(uid):
return check_db_var(uid, 'read_only', 'OFF')
def wait_db_read_only(uid):
for _ in xrange(3):
try:
check_db_read_only(uid)
return
except TestError as e:
logging.warning('wait_db_read_only: %s', str(e))
time.sleep(1.0)
raise e
def check_srv_keyspace(cell, keyspace, expected, keyspace_id_type='uint64',
sharding_column_name='keyspace_id'):
ks = run_vtctl_json(['GetSrvKeyspace', cell, keyspace])
result = ''
pmap = {}
for partition in ks['partitions']:
tablet_type = topodata_pb2.TabletType.Name(partition['served_type']).lower()
if tablet_type == 'batch':
tablet_type = 'rdonly'
r = 'Partitions(%s):' % tablet_type
for shard in partition['shard_references']:
s = ''
e = ''
if 'key_range' in shard and shard['key_range']:
if 'start' in shard['key_range']:
s = shard['key_range']['start']
s = base64.b64decode(s).encode('hex') if s else ''
if 'end' in shard['key_range']:
e = shard['key_range']['end']
e = base64.b64decode(e).encode('hex') if e else ''
r += ' %s-%s' % (s, e)
pmap[tablet_type] = r + '\n'
for tablet_type in sorted(pmap):
result += pmap[tablet_type]
logging.debug('Cell %s keyspace %s has data:\n%s', cell, keyspace, result)
if expected != result:
raise Exception(
'Mismatch in srv keyspace for cell %s keyspace %s, '
'expected:\n%s\ngot:\n%s' % (
cell, keyspace, expected, result))
if sharding_column_name != ks.get('sharding_column_name'):
raise Exception('Got wrong sharding_column_name in SrvKeyspace: %s' %
str(ks))
if keyspace_id_type != keyrange_constants.PROTO3_KIT_TO_STRING[
ks.get('sharding_column_type')]:
raise Exception('Got wrong sharding_column_type in SrvKeyspace: %s' %
str(ks))
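# Shape of the 'expected' string checked above (derived from the code; the
# -80/80- ranges are illustrative): one line per served tablet type, each
# shard rendered as 'start-end':
#   check_srv_keyspace('test_nj', 'test_keyspace',
#                      'Partitions(master): -80 80-\n'
#                      'Partitions(rdonly): -80 80-\n'
#                      'Partitions(replica): -80 80-\n')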
def check_shard_query_service(
testcase, shard_name, tablet_type, expected_state):
"""Checks DisableQueryService in the shard record's TabletControlMap."""
# We assume that query service should be enabled unless
# DisableQueryService is explicitly True
query_service_enabled = True
tablet_controls = run_vtctl_json(
['GetShard', shard_name]).get('tablet_controls')
if tablet_controls:
for tc in tablet_controls:
if tc['tablet_type'] == tablet_type:
if tc.get('disable_query_service', False):
query_service_enabled = False
testcase.assertEqual(
query_service_enabled,
expected_state,
'shard %s does not have the correct query service state: '
'got %s but expected %s' %
(shard_name, query_service_enabled, expected_state)
)
def check_shard_query_services(
testcase, shard_names, tablet_type, expected_state):
for shard_name in shard_names:
check_shard_query_service(
testcase, shard_name, tablet_type, expected_state)
def check_tablet_query_service(
testcase, tablet, serving, tablet_control_disabled):
"""Check that the query service is enabled or disabled on the tablet."""
tablet_vars = get_vars(tablet.port)
if serving:
expected_state = 'SERVING'
else:
expected_state = 'NOT_SERVING'
testcase.assertEqual(
tablet_vars['TabletStateName'], expected_state,
'tablet %s (%s/%s, %s) is not in the right serving state: got %s'
' expected %s' % (tablet.tablet_alias, tablet.keyspace, tablet.shard,
tablet.tablet_type,
tablet_vars['TabletStateName'], expected_state))
status = tablet.get_status()
tc_dqs = 'Query Service disabled: TabletControl.DisableQueryService set'
if tablet_control_disabled:
testcase.assertIn(tc_dqs, status)
else:
testcase.assertNotIn(tc_dqs, status)
if tablet.tablet_type == 'rdonly':
# Run RunHealthCheck to be sure the tablet doesn't change its serving state.
run_vtctl(['RunHealthCheck', tablet.tablet_alias],
auto_log=True)
tablet_vars = get_vars(tablet.port)
testcase.assertEqual(
tablet_vars['TabletStateName'], expected_state,
'tablet %s is not in the right serving state after health check: '
'got %s expected %s' %
(tablet.tablet_alias, tablet_vars['TabletStateName'], expected_state))
def check_tablet_query_services(
testcase, tablets, serving, tablet_control_disabled):
for tablet in tablets:
check_tablet_query_service(
testcase, tablet, serving, tablet_control_disabled)
def get_status(port):
return urllib2.urlopen(
'http://localhost:%d%s' % (port, environment.status_url)).read()
def curl(url, request=None, data=None, background=False, retry_timeout=0,
**kwargs):
args = [environment.curl_bin, '--silent', '--no-buffer', '--location']
if not background:
args.append('--show-error')
if request:
args.extend(['--request', request])
if data:
args.extend(['--data', data])
args.append(url)
if background:
return run_bg(args, **kwargs)
if retry_timeout > 0:
while True:
try:
return run(args, trap_output=True, **kwargs)
except TestError as e:
retry_timeout = wait_step(
'cmd: %s, error: %s' % (str(args), str(e)), retry_timeout)
return run(args, trap_output=True, **kwargs)
class VtctldError(Exception):
pass
# save the first running instance, and an RPC connection to it,
# so we can use it to run remote vtctl commands
vtctld = None
vtctld_connection = None
class Vtctld(object):
def __init__(self):
self.port = environment.reserve_ports(1)
self.schema_change_dir = os.path.join(
environment.tmproot, 'schema_change_test')
if protocols_flavor().vtctl_client_protocol() == 'grpc':
self.grpc_port = environment.reserve_ports(1)
def start(self, enable_schema_change_dir=False, extra_flags=None):
# Note the vtctld2 web dir is set to 'dist', which is populated
# when a toplevel 'make build_web' is run. This is meant to test
# the development version of the UI. The real checked-in app is in
# app/.
args = environment.binary_args('vtctld') + [
'-enable_queries',
'-cell', 'test_nj',
'-web_dir', environment.vttop + '/web/vtctld',
'-web_dir2', environment.vttop + '/web/vtctld2/dist',
'--log_dir', environment.vtlogroot,
'--port', str(self.port),
'-tablet_manager_protocol',
protocols_flavor().tablet_manager_protocol(),
'-tablet_protocol', protocols_flavor().tabletconn_protocol(),
'-throttler_client_protocol',
protocols_flavor().throttler_client_protocol(),
'-vtgate_protocol', protocols_flavor().vtgate_protocol(),
'-workflow_manager_init',
'-workflow_manager_use_election',
'-schema_swap_delay_between_errors', '1s',
] + environment.topo_server().flags()
if extra_flags:
args += extra_flags
# TODO(b/26388813): Remove the next two lines once vtctl WaitForDrain is
# integrated in the vtctl MigrateServed* commands.
args.extend(['--wait_for_drain_sleep_rdonly', '0s'])
args.extend(['--wait_for_drain_sleep_replica', '0s'])
if enable_schema_change_dir:
args += [
'--schema_change_dir', self.schema_change_dir,
'--schema_change_controller', 'local',
'--schema_change_check_interval', '1',
]
if protocols_flavor().service_map():
args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
if protocols_flavor().vtctl_client_protocol() == 'grpc':
args.extend(['-grpc_port', str(self.grpc_port)])
stdout_fd = open(os.path.join(environment.tmproot, 'vtctld.stdout'), 'w')
stderr_fd = open(os.path.join(environment.tmproot, 'vtctld.stderr'), 'w')
self.proc = run_bg(args, stdout=stdout_fd, stderr=stderr_fd)
# wait for the process to listen to RPC
timeout = 30
while True:
v = get_vars(self.port)
if v:
break
if self.proc.poll() is not None:
raise TestError('vtctld died while starting')
timeout = wait_step('waiting for vtctld to start', timeout,
sleep_time=0.2)
# save the running instance so vtctl commands can be remote executed now
global vtctld, vtctld_connection
if not vtctld:
vtctld = self
protocol, endpoint = self.rpc_endpoint(python=True)
vtctld_connection = vtctl_client.connect(protocol, endpoint, 30)
return self.proc
def rpc_endpoint(self, python=False):
"""RPC endpoint to vtctld.
The RPC endpoint may differ from the webinterface URL e.g. because gRPC
requires a dedicated port.
Args:
python: boolean, True iff this is for access with Python (as opposed to
Go).
Returns:
protocol - string e.g. 'grpc'
endpoint - string e.g. 'localhost:15001'
"""
if python:
protocol = protocols_flavor().vtctl_python_client_protocol()
else:
protocol = protocols_flavor().vtctl_client_protocol()
rpc_port = self.port
if protocol == 'grpc':
rpc_port = self.grpc_port
return (protocol, '%s:%d' % (socket.getfqdn(), rpc_port))
def process_args(self):
return ['-vtctld_addr', 'http://localhost:%d/' % self.port]
def vtctl_client(self, args):
if options.verbose == 2:
log_level = 'INFO'
elif options.verbose == 1:
log_level = 'WARNING'
else:
log_level = 'ERROR'
protocol, endpoint = self.rpc_endpoint()
out, _ = run(
environment.binary_args('vtctlclient') +
['-vtctl_client_protocol', protocol,
'-server', endpoint,
'-stderrthreshold', log_level] + args,
trap_output=True)
return out
def uint64_to_hex(integer):
"""Returns the hex representation of an int treated as a 64-bit unsigned int.
The result is padded by zeros if necessary to fill a 16 character string.
Useful for converting keyspace ids integers.
Example:
uint64_to_hex(1) == "0000000000000001"
uint64_to_hex(0xDEADBEAF) == "00000000DEADBEAF"
uint64_to_hex(0xDEADBEAFDEADBEAFDEADBEAF) raises an out of range exception.
Args:
integer: the value to print.
Returns:
String with the hex representation.
Raises:
ValueError: if the integer is out of range.
"""
if integer > (1<<64)-1 or integer < 0:
raise ValueError('Integer out of range: %d' % integer)
return '%016X' % integer
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for print_selective_registration_header."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.tools import selective_registration_header_lib
# Note that this graph def is not valid to be loaded - its inputs are not
# assigned correctly in all cases.
GRAPH_DEF_TXT = """
node: {
name: "node_1"
op: "Reshape"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
}
node: {
name: "node_2"
op: "MatMul"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
attr: { key: "transpose_a" value: { b: false } }
attr: { key: "transpose_b" value: { b: false } }
}
node: {
name: "node_3"
op: "MatMul"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_DOUBLE } }
attr: { key: "transpose_a" value: { b: false } }
attr: { key: "transpose_b" value: { b: false } }
}
"""
GRAPH_DEF_TXT_2 = """
node: {
name: "node_4"
op: "BiasAdd"
input: [ "none", "none" ]
device: "/cpu:0"
attr: { key: "T" value: { type: DT_FLOAT } }
}
"""
class PrintOpFilegroupTest(test.TestCase):
def setUp(self):
_, self.script_name = os.path.split(sys.argv[0])
def WriteGraphFiles(self, graphs):
fnames = []
for i, graph in enumerate(graphs):
fname = os.path.join(self.get_temp_dir(), 'graph%s.pb' % i)
with gfile.GFile(fname, 'wb') as f:
f.write(graph.SerializeToString())
fnames.append(fname)
return fnames
def testGetOps(self):
default_ops = 'NoOp:NoOp,_Recv:RecvOp,_Send:SendOp'
graphs = [
text_format.Parse(d, graph_pb2.GraphDef())
for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2]
]
ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels(
'rawproto', self.WriteGraphFiles(graphs), default_ops)
self.assertListEqual(
[
('BiasAdd', 'BiasOp<CPUDevice, float>'), #
('MatMul', 'MatMulOp<CPUDevice, double, false >'), #
('MatMul', 'MatMulOp<CPUDevice, float, false >'), #
('NoOp', 'NoOp'), #
('Reshape', 'ReshapeOp'), #
('_Recv', 'RecvOp'), #
('_Send', 'SendOp'), #
],
ops_and_kernels)
graphs[0].node[0].ClearField('device')
graphs[0].node[2].ClearField('device')
ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels(
'rawproto', self.WriteGraphFiles(graphs), default_ops)
self.assertListEqual(
[
('BiasAdd', 'BiasOp<CPUDevice, float>'), #
('MatMul', 'MatMulOp<CPUDevice, double, false >'), #
('MatMul', 'MatMulOp<CPUDevice, float, false >'), #
('NoOp', 'NoOp'), #
('Reshape', 'ReshapeOp'), #
('_Recv', 'RecvOp'), #
('_Send', 'SendOp'), #
],
ops_and_kernels)
def testAll(self):
default_ops = 'all'
graphs = [
text_format.Parse(d, graph_pb2.GraphDef())
for d in [GRAPH_DEF_TXT, GRAPH_DEF_TXT_2]
]
ops_and_kernels = selective_registration_header_lib.get_ops_and_kernels(
'rawproto', self.WriteGraphFiles(graphs), default_ops)
header = selective_registration_header_lib.get_header_from_ops_and_kernels(
ops_and_kernels, include_all_ops_and_kernels=True)
self.assertListEqual(
[
'// This file was autogenerated by %s' % self.script_name,
'#ifndef OPS_TO_REGISTER', #
'#define OPS_TO_REGISTER', #
'#define SHOULD_REGISTER_OP(op) true', #
'#define SHOULD_REGISTER_OP_KERNEL(clz) true', #
'#define SHOULD_REGISTER_OP_GRADIENT true', #
'#endif'
],
header.split('\n'))
self.assertListEqual(
header.split('\n'),
selective_registration_header_lib.get_header(
self.WriteGraphFiles(graphs), 'rawproto', default_ops).split('\n'))
def testGetSelectiveHeader(self):
default_ops = ''
graphs = [text_format.Parse(GRAPH_DEF_TXT_2, graph_pb2.GraphDef())]
expected = '''// This file was autogenerated by %s
#ifndef OPS_TO_REGISTER
#define OPS_TO_REGISTER
namespace {
constexpr const char* skip(const char* x) {
return (*x) ? (*x == ' ' ? skip(x + 1) : x) : x;
}
constexpr bool isequal(const char* x, const char* y) {
return (*skip(x) && *skip(y))
? (*skip(x) == *skip(y) && isequal(skip(x) + 1, skip(y) + 1))
: (!*skip(x) && !*skip(y));
}
template<int N>
struct find_in {
static constexpr bool f(const char* x, const char* const y[N]) {
return isequal(x, y[0]) || find_in<N - 1>::f(x, y + 1);
}
};
template<>
struct find_in<0> {
static constexpr bool f(const char* x, const char* const y[]) {
return false;
}
};
} // end namespace
constexpr const char* kNecessaryOpKernelClasses[] = {
"BiasOp<CPUDevice, float>",
};
#define SHOULD_REGISTER_OP_KERNEL(clz) (find_in<sizeof(kNecessaryOpKernelClasses) / sizeof(*kNecessaryOpKernelClasses)>::f(clz, kNecessaryOpKernelClasses))
constexpr inline bool ShouldRegisterOp(const char op[]) {
return false
|| isequal(op, "BiasAdd")
;
}
#define SHOULD_REGISTER_OP(op) ShouldRegisterOp(op)
#define SHOULD_REGISTER_OP_GRADIENT false
#endif''' % self.script_name
header = selective_registration_header_lib.get_header(
self.WriteGraphFiles(graphs), 'rawproto', default_ops)
print(header)
self.assertListEqual(expected.split('\n'), header.split('\n'))
if __name__ == '__main__':
test.main()
|
|
import asyncio
import json
import logging
import time
import collections
import base64
import binascii
import pathlib
import inspect
import motor.motor_asyncio
import itsdangerous
from aiohttp import web
from itsdangerous import TimestampSigner
import litecord.api as api
import litecord.managers as managers
from .enums import OP
from .utils import random_digits, _err, get_random_salt, \
pwd_hash, get, delete, maybe_coroutine, random_sid
from .voice.server import VoiceManager
from .objects import User
from .err import ConfigError, RequestCheckError
from .ratelimits import WSBucket, GatewayRatelimitModes
from .gateway import MAX_TRIES
log = logging.getLogger(__name__)
BOILERPLATES = {
'user': 'boilerplate_data/users.json',
'guild': 'boilerplate_data/guilds.json',
'channel': 'boilerplate_data/channels.json',
'role': 'boilerplate_data/roles.json',
}
API_PREFIXES = [
'/api',
'/api/v6',
'/api/v7'
]
def check_configuration(flags):
required_fields = [
'server', 'ratelimits', 'images',
'boilerplate.update', 'mongo_name',
'ssl',
]
for field in required_fields:
if field not in flags:
raise ConfigError(f"Field {field!r} not found in configuration")
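# Minimal flags dict that passes check_configuration (sketch; the values are
# illustrative assumptions, not shipped defaults):
#   flags = {
#       'server': {'ws': ['0.0.0.0', 8000]},
#       'ratelimits': {'global_ws': [120, 60]},
#       'images': {},
#       'boilerplate.update': {'user': False, 'guild': False},
#       'mongo_name': 'litecord',
#       'ssl': {},
#   }
#   server = LitecordServer(flags)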
def empty_ev_cache():
"""Return an empty event cache."""
return {
'shard_id': 0,
'shard_count': 0,
'properties': None,
'sent_seq': 0,
'recv_seq': 0,
'events': {},
}
class LitecordServer:
"""Main class for the Litecord server.
Arguments
---------
flags : dict
Server configuration flags.
loop : event loop
Event loop used for ``asyncio``.
Attributes
----------
flags: dict
Server configuration.
loop: event loop
asyncio event loop.
accept_clients: bool
If the server accepts new clients through REST or the Gateway.
endpoints: int
Amount of declared endpoints on the server
(:meth:`Litecord.compliance` fills it)
good: `asyncio.Event`
Set when the server has a "good" cache, if it is filled
with all the information from the collections it needs.
ssl_cxt: `ssl.SSLContext`
SSL context instance to make https and wss work.
mongo_client: `AsyncIOMotorClient`_
MongoDB Client.
event_cache: dict
Relates user IDs to the last events they received. Used for resuming.
users: list[:class:`User`]
Cache of user objects.
raw_users: dict
Cache of raw user objects.
atomic_markers: dict
Relates session IDs to bools representing
if that session comes from Atomic Discord.
sessions: dict
Relates session IDs to their respective :class:`Connection` object.
connections: dict
Relates user IDs to a list of :class:`Connection` objects tied to them.
images: :class:`Images`
Image manager instance.
guild_man: :class:`GuildManager`
Guild manager instance.
presence: :class:`PresenceManager`
Presence manager instance.
embed: :class:`EmbedManager`
Embed manager instance.
voice: :class:`VoiceManager`
Voice manager instance.
settings: :class:`SettingsManager`
Settings manager instance.
relations: :class:`RelationsManager`
Relationship manager instance.
apps: :class:`ApplicationManager`
Application manager instance.
request_counter: `collections.defaultdict(dict)`
Manages request counts for all identified connections.
buckets: dict
Ratelimit bucket objects.
"""
def __init__(self, flags=None, loop=None):
if flags is None:
flags = {}
self.flags = flags
check_configuration(flags)
self.accept_clients = True
self.endpoints = 0
self.endpoint_objs = []
self.good = asyncio.Event()
self.ssl_cxt = None
self.rest_ratelimits = {}
self.ws_ratelimits = {}
# event loop (stored in case anybody needs it)
self.loop = loop
if loop is None:
self.loop = asyncio.get_event_loop()
# mongodb stuff
self.mongo_client = motor.motor_asyncio.AsyncIOMotorClient()
self.litecord_db = self.mongo_client[self.flags.get('mongo_name',
'litecord')]
# jesus christ the amount of collections
self.message_coll = self.litecord_db['messages']
self.user_coll = self.litecord_db['users']
self.guild_coll = self.litecord_db['guilds']
self.channel_coll = self.litecord_db['channels']
self.role_coll = self.litecord_db['roles']
self.invite_coll = self.litecord_db['invites']
self.member_coll = self.litecord_db['members']
self.presence_coll = self.litecord_db['presences']
self.settings_coll = self.litecord_db['settings']
self.relations_coll = self.litecord_db['relations']
self.app_coll = self.litecord_db['applications']
self.webhook_coll = self.litecord_db['webhooks']
self.users = []
self.raw_users = {}
self.atomic_markers = {}
self.states = []
self.request_counter = collections.defaultdict(dict)
self.connections = collections.defaultdict(list)
self.app = None
default = [120, 60]
rtl_config = flags['ratelimits']
global_req, global_sec = rtl_config.get('global_ws', default)
close = GatewayRatelimitModes.CLOSE
ignore = GatewayRatelimitModes.IGNORE_PACKET
self.buckets = {
'all': WSBucket('all', requests=global_req, seconds=global_sec,
mode=close),
'presence_updates': WSBucket('presence_updates', requests=5,
seconds=60, mode=ignore),
'identify': WSBucket('identify', requests=1, seconds=5, mode=close)
}
def get_state(self, session_id):
"""Get a :class:`ConnectionState` object."""
return get(self.states, session_id=session_id)
def gen_ssid(self) -> str:
"""Generate a new Session ID for a :class:`ConnectionState`.
Returns
-------
str
On success.
None
On failure.
"""
for i in range(MAX_TRIES):
possible = random_sid()
state = self.get_state(possible)
if state is not None:
continue
return possible
return None
def add_connection(self, user_id: int, conn):
"""Add a connection and tie it to a user.
Parameters
----------
user_id: int
The user that is going to have this connection referred to.
conn: :class:`Connection`
Connection object.
"""
user_id = int(user_id)
state = conn.state
log.debug('Linking %r to uid=%d', state, user_id)
if state.sharded:
log.debug('Linking a shard (%d).', state.shard_id)
self.connections[user_id].append(conn)
self.states.append(state)
def remove_state(self, state):
if state is None:
return
return delete(self.states, session_id=state.session_id)
def remove_connection(self, session_id: str):
"""Remove a connection from the connection table.
Parameters
----------
session_id: str
Session ID of the connection to be removed.
"""
session_id = str(session_id)
try:
self.request_counter.pop(session_id)
except KeyError:
pass
try:
state = delete(self.states, session_id=session_id)
except KeyError:
return
try:
user_id = state.user.id
except AttributeError:
return
log.debug('Unlinking %r from uid=%d', state, user_id)
ref = list(self.connections[user_id])
for i, conn in enumerate(ref):
if conn.state.session_id == session_id:
del ref[i]
break
def get_connections(self, user_id: int):
"""Yield all connections that are connected to a user."""
for conn in self.connections[user_id]:
yield conn
def count_connections(self, user_id: int):
"""Return the amount of connections connected to a user."""
return len(self.connections[user_id])
def get_shards(self, user_id: int) -> dict:
"""Get all shards for a user
Returns
-------
dict
Relating Shard IDs to :class:`Connection` objects.
"""
shards = {}
for conn in self.get_connections(user_id):
if conn.sharded:
shards[conn.shard_id] = conn
return shards
async def fill_boilerplate(self, key, data, b_flags):
coll = getattr(self, f'{key}_coll')
tot = 0
k_field = f'{key}_id'
for element in data:
query = {k_field: int(element[k_field])}
existing = await coll.find_one(query)
if (existing is not None) and not (b_flags.get(key)):
continue
for k in element:
# assume list of str -> list of int
if 'ids' in k:
element[k] = [int(v) for v in element[k]]
# assume str -> int
elif 'id' in k:
element[k] = int(element[k])
await coll.replace_one(query, element, True)
tot += 1
log.info(f"[boilerplate] Replaced {tot} elements in {key!r}")
async def boilerplate_init(self):
"""Load boilerplate data.
If the ``boilerplate.update`` config flag is set to ``True`` for each
field, this function overwrites the boilerplate data with the
current data, ignores if set to ``False``.
"""
b_flags = self.flags.get('boilerplate.update')
for key, path in BOILERPLATES.items():
try:
    with open(path, 'r') as f:
        data = json.load(f)
except (OSError, ValueError):
    log.warning('[boilerplate] No boilerplate data '
                f'found for field: {key!r}')
    continue
await self.fill_boilerplate(key, data, b_flags)
async def load_users(self):
"""Load the user collection into the server's cache.
While loading, it can generate a salt and hash for the password
if the data is not provided.
"""
self.users = []
self.raw_users = {}
cur = self.user_coll.find()
count = 0
async for raw_user in cur:
raw_user.pop('_id')
uid = raw_user['user_id']
password = raw_user['password']
if len(password['salt']) < 1:
password['salt'] = await get_random_salt()
# generate the password hash if one isn't stored yet
if len(password['hash']) < 1:
password['hash'] = pwd_hash(password['plain'],
password['salt'])
# we are trying to be secure here ok
password.pop('plain')
query = {'user_id': uid}
await self.user_coll.update_one(query, {'$set':
{'password': password}})
# add to cache
self.users.append(User(self, raw_user))
self.raw_users[uid] = raw_user
count += 1
log.info('Loaded %d users', count)
async def reload_user(self, user):
"""Update the user cache with an existing user object."""
raw_user = await self.user_coll.find_one({'user_id': user.id})
if raw_user is None:
# non-existing
try:
self.users.remove(user)
except ValueError:
pass
try:
self.raw_users.pop(user.id)
except KeyError:
pass
del user
return
raw_user.pop('_id')
user._raw.update(raw_user)
user._update(user._raw)
return user
async def insert_user(self, raw_user):
uid = raw_user['user_id']
old = await self.user_coll.find_one({'user_id': uid})
if old is not None:
log.warning('Inserting an existing user, ignoring')
return
await self.user_coll.insert_one(raw_user)
self.raw_users[uid] = raw_user
self.users.append(User(self, raw_user))
return True
def get_raw_user(self, user_id):
"""Get a raw user object using the user's ID."""
user_id = int(user_id)
u = self.raw_users.get(user_id)
if u is None:
return
# no one should use the _id field tbh
try:
u.pop('_id')
except KeyError:
pass
try:
keys = u.keys()
except AttributeError:
keys = None
log.debug('[get:raw_user] %d -> %r', user_id, keys)
return u
def get_user(self, user_id: int):
"""Get a :class:`User` object using the user's ID."""
if not user_id:
return None
u = get(self.users, id=user_id)
log.debug('[get:user] %r -> %r', user_id, u)
return u
async def get_raw_user_email(self, email):
"""Get a raw user object from a user's email."""
raw_user = await self.user_coll.find_one({'email': email})
try:
keys = raw_user.keys()
except AttributeError:
keys = None
log.debug('[get:raw_user:email] %r -> %r', email, keys)
return raw_user
async def _user(self, token):
"""Get a user object from its token.
This is a helper function to save lines of code in endpoint objects.
"""
# TODO: delet this
user_id = await self.token_find(token)
return self.get_user(user_id)
async def generate_token(self, user_id: str):
"""Generate a very random token tied to an user.
Parameters
----------
user_id: str
User ID tied to that token
"""
user_id = str(user_id)
userid_encoded = base64.urlsafe_b64encode(user_id.encode())
raw_user = self.get_raw_user(user_id)
if raw_user is None:
raise Exception('User not found to generate a token from')
try:
pwd_hash = raw_user['password']['hash']
except (KeyError, TypeError):
    log.debug(raw_user)
    raise Exception('Raw user is missing a password hash')
s = TimestampSigner(pwd_hash)
return s.sign(userid_encoded).decode()
async def token_find(self, token: str) -> int:
"""Return a user ID from a token.
Parses the token to get the user ID and then unsigns it
using the user's hashed password as a secret key
"""
userid_encoded = token.split('.')[0]
try:
userid = int(base64.urlsafe_b64decode(userid_encoded))
except (binascii.Error, ValueError):
return None
raw_user = self.get_raw_user(userid)
if raw_user is None:
return
s = TimestampSigner(raw_user['password']['hash'])
try:
s.unsign(token)
except itsdangerous.BadSignature:
return
return userid
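# Worked example of the token scheme above (values illustrative):
#   user id 42 -> base64url('42') == 'NDI=' -> token 'NDI=.<timestamp>.<sig>'
# The signature comes from itsdangerous.TimestampSigner keyed with the user's
# stored password hash; token_find() splits on '.' to recover the id, then
# verifies the full token with that same hash.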
async def check(self) -> dict:
"""Returns a dictionary with self-checking data.
Used to determine the state of the server with:
- Mongo ping
"""
report = {
'good': True
}
t1 = time.monotonic()
await self.mongo_client.admin.command({'ping': 1})
t2 = time.monotonic()
mongo_ping_msec = round((t2 - t1) * 1000, 4)
report['mongo_ping'] = mongo_ping_msec
# dude the mongodb is local 10ms would be alarming
if mongo_ping_msec > 10:
report['good'] = False
return report
def get_gateway_url(self):
ws = self.flags['server']['ws']
proto = 'ws'
if self.ssl_cxt is not None:
proto = 'wss'
url = f'{proto}://{ws[2] if len(ws) == 3 else ws[0]}:{ws[1]}'
log.debug('Giving gateway URL: %r', url)
return url
async def check_request(self, request) -> 'tuple':
"""Checks a request to the API.
This function checks if a request has the required headers
to do any authenticated request to Litecord's API.
More information at:
https://discordapp.com/developers/docs/reference#authentication
NOTE: This function doesn't support OAuth2 Bearer tokens.
Returns
-------
tuple
With the token value and the user ID that
the token references.
Raises
------
:class:`RequestCheckError`
On any error with the request data
"""
auth_header = request.headers.get('Authorization')
if auth_header is None:
raise RequestCheckError(_err('No header provided',
status_code=401))
if len(auth_header) < 1:
raise RequestCheckError(_err('Malformed header',
status_code=401))
try:
token_type, token_value = auth_header.split()
except ValueError:
token_type = 'Bot'
token_value = auth_header
if token_type != 'Bot':
raise RequestCheckError(_err('Invalid token type',
status_code=401))
try:
user_id = await self.token_find(token_value)
except itsdangerous.BadSignature:
raise RequestCheckError(_err('Invalid token',
status_code=401))
return token_value, user_id
async def get_discrim(self, username: str) -> str:
"""Generate a discriminator from a username."""
cursor = self.user_coll.find({
'username': username
})
raw_user_list = await cursor.to_list(length=None)
used_discrims = [raw_user['discriminator']
for raw_user in raw_user_list]
# only 9500 discrims per user
# because I want to.
if len(used_discrims) >= 9500:
return None
discrim = str(await random_digits(4))
while True:
try:
used_discrims.index(discrim)
discrim = str(await random_digits(4))
except ValueError:
log.info('[get:discrim] Generated discrim '
f'{discrim!r} for {username!r}')
return discrim
async def make_counts(self) -> dict:
"""Return a dictionary with some counts about the server."""
return {
'user_count': len(self.cache['id->raw_user']),
'guild_count': len(self.guild_man.guilds),
'channel_count': len(self.guild_man.channels),
'presence_count': await self.presence.count_all(),
}
def make_options_handler(self, method):
"""Returns a handler for `OPTIONS`."""
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'GET, POST, OPTIONS, HEAD, PATCH, DELETE, PUT, TRACE',
'Access-Control-Allow-Credentials': 'true',
'Access-Control-Allow-Headers': 'Authorization, Content-Type, X-Super-Properties',
}
async def options_handler(request):
headers['Access-Control-Allow-Origin'] = request.headers['Origin']
return web.Response(status=200, body='', headers=headers)
return options_handler
def add_empty(self, route, method):
self.app.router.add_route('OPTIONS', route,
self.make_options_handler(method))
def fix_fucking_cors(self, handler):
async def inner_handler(request):
response = await handler(request)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
return inner_handler
def add_get(self, route_path, handler):
_r = self.app.router
route_handler = self.fix_fucking_cors(handler)
self.endpoints += 1
routes = [f'{prefix}/{route_path}' for prefix in API_PREFIXES]
for route in routes:
_r.add_get(route, route_handler)
self.add_empty(route, 'GET')
def add_post(self, route_path, handler):
_r = self.app.router
route_handler = self.fix_fucking_cors(handler)
self.endpoints += 1
routes = [f'{prefix}/{route_path}' for prefix in API_PREFIXES]
for route in routes:
_r.add_post(route, route_handler)
self.add_empty(route, 'POST')
def add_put(self, route_path, handler):
_r = self.app.router
route_handler = self.fix_fucking_cors(handler)
self.endpoints += 1
routes = [f'{prefix}/{route_path}' for prefix in API_PREFIXES]
for route in routes:
_r.add_put(route, route_handler)
self.add_empty(route, 'PUT')
def add_patch(self, route_path, handler):
_r = self.app.router
route_handler = self.fix_fucking_cors(handler)
self.endpoints += 1
routes = [f'{prefix}/{route_path}' for prefix in API_PREFIXES]
for route in routes:
_r.add_patch(route, route_handler)
self.add_empty(route, 'PATCH')
def add_delete(self, route_path, handler):
_r = self.app.router
route_handler = self.fix_fucking_cors(handler)
self.endpoints += 1
routes = [f'{prefix}/{route_path}' for prefix in API_PREFIXES]
for route in routes:
_r.add_delete(route, route_handler)
self.add_empty(route, 'DELETE')
def compliance(self):
"""Measure compliance with the Server's routes"""
methods = ('DELETE', 'GET', 'PATCH', 'POST', 'PUT', 'PUT/PATCH')
endpoints = []
scopes = collections.Counter()
found_scopes = collections.Counter()
raw = (pathlib.Path(__file__).resolve().parents[0] /
'discord-endpoints.txt').read_text()
for line in raw.split('\n'):
for method_find in methods:
method = line.find(method_find)
if method == -1:
continue
name = line[:method].strip()
endpoint = line[method+len(method_find):].strip()
endpoints.append((name, method_find, endpoint))
scope = endpoint.split('/')[1]
scopes[scope] += 1
routes = self.app.router.routes()
routes = list(routes)
found = []
for _, epoint_method, epoint in endpoints:
_flag = False
for route in routes:
if _flag:
continue
if route.method != epoint_method:
continue
r = route.resource
ri = r.get_info()
if 'formatter' not in ri:
continue
epoint = epoint.replace('.', '_')
rf = ri['formatter']
rf = rf.replace('/api', '')
scope = rf.split('/')[1]
if epoint == rf:
found_scopes[scope] += 1
found.append(rf)
_flag = True
not_found = set([t[2] for t in endpoints]) ^ set(found)
names_notfound = [(ep[0], ep[2])
for ep in endpoints if ep[2] in not_found]
print('Endpoints not found:')
for ep_name, ep_route in names_notfound:
print(f'\t - {ep_name!r}, {ep_route!r}')
for scope, count in scopes.most_common():
found_count = found_scopes[scope]
log.info('scope %s: %d total, %d found',
scope, count, found_count)
total = len(endpoints)
found_count = len(found)
log.info('From %d listed endpoints, %d total, %d found, '
'%.2f%% compliant',
total, self.endpoints, found_count,
(found_count / total) * 100)
return
async def load_manager(self, manager):
classname, attribute = manager
manager_class = getattr(managers, f'{classname}Manager', None)
if manager_class is None:
try:
manager_class = getattr(managers, classname)
except AttributeError:
raise RuntimeError('Manager not found')
log.info('[init] %s', classname)
manager = manager_class(self)
load_hook = getattr(manager, '_load', None)
setattr(self, attribute, manager)
if load_hook is None:
log.warning('[load_manager] load hook not found')
else:
await maybe_coroutine(load_hook())
async def load_managers(self):
_managers = [
('Images', 'images'),
('Guild', 'guild_man'),
('Presence', 'presence'),
('Embed', 'embed'),
('Voice', 'voice'),
('Settings', 'settings'),
('Relations', 'relations'),
('Application', 'apps'),
]
loaded = 0
for manager in _managers:
if manager[0] == 'Voice':
# VoiceManager is in the voice scope, not in managers
self.voice = VoiceManager(self)
else:
await self.load_manager(manager)
loaded += 1
log.info('Loaded %d managers', loaded)
def load_endpoints(self):
"""Load all endpoint objects into
:attr:`LitecordServer.endpoint_objs`"""
loaded = 0
for attr in dir(api):
if 'ndpoint' not in attr:
continue
class_ = getattr(api, attr)
if inspect.isclass(class_):
inst = class_(self)
self.endpoint_objs.append(inst)
log.debug('[api] loaded %s', attr)
loaded += 1
log.info('[load_endpoints] Loaded %d endpoint objects', loaded)
async def init(self, app):
"""Initialize the server.
Loads databases, managers and endpoint objects.
"""
t_init = time.monotonic()
self.app = app
log.debug("[load] boilerplate data")
await self.boilerplate_init()
log.debug('[load] user cache')
await self.load_users()
await self.load_managers()
self.load_endpoints()
t_end = time.monotonic()
delta = (t_end - t_init) * 1000
log.info('[load:server] Loaded in %.2fms', delta)
self.good.set()
async def shutdown_conn(self, conn):
"""Shutdown a connection.
Sends an OP 7 Reconnect packet and waits 2 seconds so that the
connection is closed client-side; if the client doesn't close
in time, the server closes it.
"""
await conn.send_op(OP.RECONNECT)
await asyncio.sleep(2)
if conn.ws.open:
await conn.ws.close(4000, 'Shutdown procedure')
def shutdown(self):
"""Send a reconnect packet to all available connections,
and make the gateway stop receiving new ones.
Closes the event loop.
"""
self.accept_clients = False
loop = self.loop
reconnect_tasks = []
sent = 0
for state in self.states:
conn = state.conn
reconnect_tasks.append(loop.create_task(self.shutdown_conn(conn)))
sent += 1
rtasks_gathered = asyncio.gather(*reconnect_tasks, loop=loop)
log.info('[shutdown] Sending op 7 reconnect to %d connections', sent)
# finish sending RECONNECT to everyone.
loop.run_until_complete(rtasks_gathered)
pending = asyncio.Task.all_tasks(loop=loop)
gathered = asyncio.gather(*pending, loop=loop)
try:
gathered.cancel()
loop.run_until_complete(gathered)
gathered.exception()
except Exception:
pass
loop.close()
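# --- Round-trip sketch for the token scheme (illustrative, not part of the server) ---
# generate_token() signs base64url(user_id) with itsdangerous.TimestampSigner, keyed on
# the user's password hash, and token_find() reverses the process. This demo reuses the
# module's existing base64/TimestampSigner imports; the user ID and hash are made up.
def _demo_token_roundtrip(user_id=1234, pwd_hash='example-password-hash'):
    """Sign a base64url-encoded user ID and recover it again."""
    signer = TimestampSigner(pwd_hash)
    token = signer.sign(base64.urlsafe_b64encode(str(user_id).encode())).decode()
    # The user ID is the first dot-separated field; the rest is timestamp + signature.
    recovered = int(base64.urlsafe_b64decode(token.split('.')[0]))
    signer.unsign(token)  # raises itsdangerous.BadSignature if the token was tampered with
    return recovered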
|
|
import frappe, urllib
from frappe import _
from urlparse import parse_qs, urlparse
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from oauthlib.oauth2.rfc6749.grant_types import AuthorizationCodeGrant, ImplicitGrant, ResourceOwnerPasswordCredentialsGrant, ClientCredentialsGrant, RefreshTokenGrant, OpenIDConnectAuthCode
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2.rfc6749.endpoints.authorization import AuthorizationEndpoint
from oauthlib.oauth2.rfc6749.endpoints.token import TokenEndpoint
from oauthlib.oauth2.rfc6749.endpoints.resource import ResourceEndpoint
from oauthlib.oauth2.rfc6749.endpoints.revocation import RevocationEndpoint
from oauthlib.common import Request
def get_url_delimiter(separator_character=" "):
return separator_character
class WebApplicationServer(AuthorizationEndpoint, TokenEndpoint, ResourceEndpoint,
RevocationEndpoint):
"""An all-in-one endpoint featuring Authorization code grant and Bearer tokens."""
def __init__(self, request_validator, token_generator=None,
token_expires_in=None, refresh_token_generator=None, **kwargs):
"""Construct a new web application server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
(A wiring sketch follows the OAuthWebRequestValidator class defined below.)
"""
implicit_grant = ImplicitGrant(request_validator)
auth_grant = AuthorizationCodeGrant(request_validator)
refresh_grant = RefreshTokenGrant(request_validator)
openid_connect_auth = OpenIDConnectAuthCode(request_validator)
bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
AuthorizationEndpoint.__init__(self, default_response_type='code',
response_types={
'code': auth_grant,
'code+token': openid_connect_auth,
'code+id_token': openid_connect_auth,
'code+token+id_token': openid_connect_auth,
'code token': openid_connect_auth,
'code id_token': openid_connect_auth,
'code token id_token': openid_connect_auth,
'token': implicit_grant
},
default_token_type=bearer)
TokenEndpoint.__init__(self, default_grant_type='authorization_code',
grant_types={
'authorization_code': auth_grant,
'refresh_token': refresh_grant,
},
default_token_type=bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': bearer})
RevocationEndpoint.__init__(self, request_validator)
class OAuthWebRequestValidator(RequestValidator):
# Pre- and post-authorization.
def validate_client_id(self, client_id, request, *args, **kwargs):
# Simple validity check, does client exist? Not banned?
cli_id = frappe.db.get_value("OAuth Client",{ "name":client_id })
if cli_id:
request.client = frappe.get_doc("OAuth Client", client_id).as_dict()
return True
else:
return False
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
# Is the client allowed to use the supplied redirect_uri? i.e. has
# the client previously registered this EXACT redirect uri.
redirect_uris = frappe.db.get_value("OAuth Client", client_id, 'redirect_uris').split(get_url_delimiter())
if redirect_uri in redirect_uris:
return True
else:
return False
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
# The redirect used if none has been supplied.
# Prefer your clients to pre register a redirect uri rather than
# supplying one on each authorization request.
redirect_uri = frappe.db.get_value("OAuth Client", client_id, 'default_redirect_uri')
return redirect_uri
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
# Is the client allowed to access the requested scopes?
client_scopes = frappe.db.get_value("OAuth Client", client_id, 'scopes').split(get_url_delimiter())
are_scopes_valid = True
for scp in scopes:
are_scopes_valid = are_scopes_valid and (scp in client_scopes)
return are_scopes_valid
def get_default_scopes(self, client_id, request, *args, **kwargs):
# Scopes a client will authorize for if none are supplied in the
# authorization request.
scopes = frappe.db.get_value("OAuth Client", client_id, 'scopes').split(get_url_delimiter())
request.scopes = scopes #Apparently this is possible.
return scopes
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of response type, the
# one associated with their one allowed grant type.
# In this case it must be "code".
allowed_response_types = [client.response_type.lower(),
"code token", "code id_token", "code token id_token",
"code+token", "code+id_token", "code+token id_token"]
return (response_type in allowed_response_types)
# Post-authorization
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
cookie_dict = get_cookie_dict_from_headers(request)
oac = frappe.new_doc('OAuth Authorization Code')
oac.scopes = get_url_delimiter().join(request.scopes)
oac.redirect_uri_bound_to_authorization_code = request.redirect_uri
oac.client = client_id
oac.user = urllib.unquote(cookie_dict['user_id'])
oac.authorization_code = code['code']
oac.save(ignore_permissions=True)
frappe.db.commit()
def authenticate_client(self, request, *args, **kwargs):
cookie_dict = get_cookie_dict_from_headers(request)
#Get ClientID in URL
if request.client_id:
oc = frappe.get_doc("OAuth Client", request.client_id)
else:
#Extract token, instantiate OAuth Bearer Token and use clientid from there.
if frappe.form_dict.has_key("refresh_token"):
oc = frappe.get_doc("OAuth Client", frappe.db.get_value("OAuth Bearer Token", {"refresh_token": frappe.form_dict["refresh_token"]}, 'client'))
elif frappe.form_dict.has_key("token"):
oc = frappe.get_doc("OAuth Client", frappe.db.get_value("OAuth Bearer Token", frappe.form_dict["token"], 'client'))
else:
oc = frappe.get_doc("OAuth Client", frappe.db.get_value("OAuth Bearer Token", frappe.get_request_header("Authorization").split(" ")[1], 'client'))
try:
request.client = request.client or oc.as_dict()
except Exception, e:
print "Failed body authentication: Application %s does not exist".format(cid=request.client_id)
return frappe.session.user == urllib.unquote(cookie_dict.get('user_id', "Guest"))
def authenticate_client_id(self, client_id, request, *args, **kwargs):
cli_id = frappe.db.get_value('OAuth Client', client_id, 'name')
if not cli_id:
# Don't allow public (non-authenticated) clients
return False
else:
request["client"] = frappe.get_doc("OAuth Client", cli_id)
return True
def validate_code(self, client_id, code, client, request, *args, **kwargs):
# Validate the code belongs to the client. Add associated scopes,
# state and user to request.scopes and request.user.
validcodes = frappe.get_all("OAuth Authorization Code", filters={"client": client_id, "validity": "Valid"})
checkcodes = []
for vcode in validcodes:
checkcodes.append(vcode["name"])
if code in checkcodes:
request.scopes = frappe.db.get_value("OAuth Authorization Code", code, 'scopes').split(get_url_delimiter())
request.user = frappe.db.get_value("OAuth Authorization Code", code, 'user')
return True
else:
return False
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
saved_redirect_uri = frappe.db.get_value('OAuth Client', client_id, 'default_redirect_uri')
return saved_redirect_uri == redirect_uri
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of grant.
# In this case, it must be "authorization_code" or "refresh_token"
return (grant_type in ["authorization_code", "refresh_token"])
def save_bearer_token(self, token, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.user and
# request.client. The two former will be set when you validate
# the authorization code. Don't forget to save both the
# access_token and the refresh_token and set expiration for the
# access_token to now + expires_in seconds.
otoken = frappe.new_doc("OAuth Bearer Token")
otoken.client = request.client['name']
otoken.user = request.user if request.user else frappe.db.get_value("OAuth Bearer Token", {"refresh_token":request.body.get("refresh_token")}, "user")
otoken.scopes = get_url_delimiter().join(request.scopes)
otoken.access_token = token['access_token']
otoken.refresh_token = token.get('refresh_token')
otoken.expires_in = token['expires_in']
otoken.save(ignore_permissions=True)
frappe.db.commit()
default_redirect_uri = frappe.db.get_value("OAuth Client", request.client['name'], "default_redirect_uri")
return default_redirect_uri
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
# Authorization codes are used once; invalidate the code when a Bearer token
# has been acquired.
frappe.db.set_value("OAuth Authorization Code", code, "validity", "Invalid")
frappe.db.commit()
# Protected resource request
def validate_bearer_token(self, token, scopes, request):
# Remember to check expiration and scope membership
otoken = frappe.get_doc("OAuth Bearer Token", token) #{"access_token": str(token)})
is_token_valid = (frappe.utils.datetime.datetime.now() < otoken.expiration_time) \
and otoken.status != "Revoked"
client_scopes = frappe.db.get_value("OAuth Client", otoken.client, 'scopes').split(get_url_delimiter())
are_scopes_valid = True
for scp in scopes:
are_scopes_valid = are_scopes_valid and (scp in client_scopes)
return is_token_valid and are_scopes_valid
# Token refresh request
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
# Obtain the token associated with the given refresh_token and
# return its scopes, these will be passed on to the refreshed
# access token if the client did not specify a scope during the
# request.
obearer_token = frappe.get_doc("OAuth Bearer Token", {"refresh_token": refresh_token})
return obearer_token.scopes
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Revocation Endpoint
"""
otoken = None
if token_type_hint == "access_token":
otoken = frappe.db.set_value("OAuth Bearer Token", token, 'status', 'Revoked')
elif token_type_hint == "refresh_token":
otoken = frappe.db.set_value("OAuth Bearer Token", {"refresh_token": token}, 'status', 'Revoked')
else:
otoken = frappe.db.set_value("OAuth Bearer Token", token, 'status', 'Revoked')
frappe.db.commit()
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
# """Ensure the Bearer token is valid and authorized access to scopes.
# OBS! The request.user attribute should be set to the resource owner
# associated with this refresh token.
# :param refresh_token: Unicode refresh token
# :param client: Client object set by you, see authenticate_client.
# :param request: The HTTP Request (oauthlib.common.Request)
# :rtype: True or False
# Method is used by:
# - Authorization Code Grant (indirectly by issuing refresh tokens)
# - Resource Owner Password Credentials Grant (also indirectly)
# - Refresh Token Grant
# """
otoken = frappe.get_doc("OAuth Bearer Token", {"refresh_token": refresh_token, "status": "Active"})
if not otoken:
return False
else:
return True
# OpenID Connect
def get_id_token(self, token, token_handler, request):
"""
In the OpenID Connect workflows when an ID Token is requested this method is called.
Subclasses should implement the construction, signing and optional encryption of the
ID Token as described in the OpenID Connect spec.
In addition to the standard OAuth2 request properties, the request may also contain
these OIDC specific properties which are useful to this method:
- nonce, if workflow is implicit or hybrid and it was provided
- claims, if provided to the original Authorization Code request
The token parameter is a dict which may contain an ``access_token`` entry, in which
case the resulting ID Token *should* include a calculated ``at_hash`` claim.
Similarly, when the request parameter has a ``code`` property defined, the ID Token
*should* include a calculated ``c_hash`` claim.
http://openid.net/specs/openid-connect-core-1_0.html (sections `3.1.3.6`_, `3.2.2.10`_, `3.3.2.11`_)
.. _`3.1.3.6`: http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
.. _`3.2.2.10`: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken
.. _`3.3.2.11`: http://openid.net/specs/openid-connect-core-1_0.html#HybridIDToken
:param token: A Bearer token dict
:param token_handler: the token handler (BearerToken class)
:param request: the HTTP Request (oauthlib.common.Request)
:return: The ID Token (a JWS signed JWT)
"""
# the request.scope should be used by the get_id_token() method to determine which claims to include in the resulting id_token
def validate_silent_authorization(self, request):
"""Ensure the logged in user has authorized silent OpenID authorization.
Silent OpenID authorization allows access tokens and id tokens to be
granted to clients without any user prompt or interaction.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
if request.prompt == "login":
False
else:
True
def validate_silent_login(self, request):
"""Ensure session user has authorized silent OpenID login.
If no user is logged in or has not authorized silent login, this
method should return False.
If the user is logged in but associated with multiple accounts and
not selected which one to link to the token then this method should
raise an oauthlib.oauth2.AccountSelectionRequired error.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
if frappe.session.user == "Guest" or request.prompt.lower() == "login":
return False
else:
return True
def validate_user_match(self, id_token_hint, scopes, claims, request):
"""Ensure client supplied user id hint matches session user.
If the sub claim or id_token_hint is supplied then the session
user must match the given ID.
:param id_token_hint: User identifier string.
:param scopes: List of OAuth 2 scopes and OpenID claims (strings).
:param claims: OpenID Connect claims dict.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
if id_token_hint and id_token_hint == frappe.get_value("User", frappe.session.user, "frappe_userid"):
return True
else:
return False
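# A minimal wiring sketch (an assumption, not taken from this module): the validator
# defined above is handed to the all-in-one WebApplicationServer defined earlier; the
# resulting provider is what a framework request handler would call, e.g. via
# provider.validate_authorization_request(...) or provider.create_token_response(...).
def _build_oauth_provider(token_expires_in=3600):
    """Return a WebApplicationServer backed by OAuthWebRequestValidator."""
    validator = OAuthWebRequestValidator()
    return WebApplicationServer(validator, token_expires_in=token_expires_in)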
def get_cookie_dict_from_headers(r):
if r.headers.get('Cookie'):
cookie = r.headers.get('Cookie')
cookie = cookie.split("; ")
cookie_dict = {k: v for k, v in (x.split('=', 1) for x in cookie)}
return cookie_dict
else:
return {}
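# Tiny illustration (made-up cookie string): the helper above turns the raw Cookie
# header into a dict, which save_authorization_code() uses to read 'user_id'.
def _example_cookie_parse():
    class _FakeRequest(object):
        headers = {'Cookie': 'sid=abc123; user_id=jane%40example.com'}
    return get_cookie_dict_from_headers(_FakeRequest())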
def calculate_at_hash(access_token, hash_alg):
"""Helper method for calculating an access token
hash, as described in http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
Its value is the base64url encoding of the left-most half of the hash of the octets
of the ASCII representation of the access_token value, where the hash algorithm
used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE
Header. For instance, if the alg is RS256, hash the access_token value with SHA-256,
then take the left-most 128 bits and base64url encode them. The at_hash value is a
case sensitive string.
Args:
access_token (str): An access token string.
hash_alg (callable): A callable returning a hash object, e.g. hashlib.sha256
"""
hash_digest = hash_alg(access_token.encode('utf-8')).digest()
cut_at = int(len(hash_digest) / 2)
truncated = hash_digest[:cut_at]
from jwt.utils import base64url_encode
at_hash = base64url_encode(truncated)
return at_hash.decode('utf-8')
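# Usage sketch for calculate_at_hash (the token value is a made-up sample): an RS256
# ID Token hashes the access token with SHA-256 before taking the left-most half.
def _example_at_hash():
    import hashlib
    return calculate_at_hash("SlAV32hkKG", hashlib.sha256)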
def delete_oauth2_data():
# Delete Invalid Authorization Code and Revoked Token
commit_code, commit_token = False, False
code_list = frappe.get_all("OAuth Authorization Code", filters={"validity":"Invalid"})
token_list = frappe.get_all("OAuth Bearer Token", filters={"status":"Revoked"})
if len(code_list) > 0:
commit_code = True
if len(token_list) > 0:
commit_token = True
for code in code_list:
frappe.delete_doc("OAuth Authorization Code", code["name"])
for token in token_list:
frappe.delete_doc("OAuth Bearer Token", token["name"])
if commit_code or commit_token:
frappe.db.commit()
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to create, read, write tf.Examples."""
import functools
import random
import bigtable_input
import coords
import dual_net
import features as features_lib
import go
import sgf_wrapper
import symmetries
import numpy as np
import tensorflow as tf
TF_RECORD_CONFIG = tf.python_io.TFRecordOptions(
tf.python_io.TFRecordCompressionType.ZLIB)
def _one_hot(index):
onehot = np.zeros([go.N * go.N + 1], dtype=np.float32)
onehot[index] = 1
return onehot
def make_tf_example(features, pi, value):
"""
Args:
features: [N, N, FEATURE_DIM] nparray of uint8
pi: [N * N + 1] nparray of float32
value: float
"""
return tf.train.Example(features=tf.train.Features(feature={
'x': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[features.tostring()])),
'pi': tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[pi.tostring()])),
'outcome': tf.train.Feature(
float_list=tf.train.FloatList(
value=[value]))}))
def write_tf_examples(filename, tf_examples, serialize=True):
"""
Args:
filename: Where to write tf.records
tf_examples: An iterable of tf.Example
serialize: whether to serialize the examples.
"""
with tf.python_io.TFRecordWriter(
filename, options=TF_RECORD_CONFIG) as writer:
for ex in tf_examples:
if serialize:
writer.write(ex.SerializeToString())
else:
writer.write(ex)
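# A small usage sketch (illustrative shapes only): build one fake tf.Example with the
# layout documented above and write it with ZLIB compression. The 17-plane feature
# depth is an assumption for this demo, not a value taken from this module.
def _write_dummy_example(filename='/tmp/dummy.tfrecord.zz', feature_planes=17):
    fake_features = np.zeros([go.N, go.N, feature_planes], dtype=np.uint8)
    fake_pi = _one_hot(0)
    write_tf_examples(filename, [make_tf_example(fake_features, fake_pi, 1.0)])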
def batch_parse_tf_example(batch_size, layout, example_batch):
"""
Args:
batch_size: batch size
layout: 'nchw' or 'nhwc'
example_batch: a batch of tf.Example
Returns:
A tuple (feature_tensor, dict of output tensors)
"""
planes = dual_net.get_features_planes()
features = {
'x': tf.FixedLenFeature([], tf.string),
'pi': tf.FixedLenFeature([], tf.string),
'outcome': tf.FixedLenFeature([], tf.float32),
}
parsed = tf.parse_example(example_batch, features)
x = tf.decode_raw(parsed['x'], tf.uint8)
x = tf.cast(x, tf.float32)
if layout == 'nhwc':
shape = [batch_size, go.N, go.N, planes]
else:
shape = [batch_size, planes, go.N, go.N]
x = tf.reshape(x, shape)
pi = tf.decode_raw(parsed['pi'], tf.float32)
pi = tf.reshape(pi, [batch_size, go.N * go.N + 1])
outcome = parsed['outcome']
outcome.set_shape([batch_size])
return x, {'pi_tensor': pi, 'value_tensor': outcome}
def read_tf_records(batch_size, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None, interleave=True,
filter_amount=1.0):
"""
Args:
batch_size: batch size to return
tf_records: a list of tf_record filenames
num_repeats: how many times the data should be read (default: 1)
shuffle_records: whether to shuffle the order of files read
shuffle_examples: whether to shuffle the tf.Examples
shuffle_buffer_size: how big of a buffer to fill before shuffling.
interleave: whether to interleave examples from multiple tf_records
filter_amount: what fraction of records to keep
Returns:
a tf dataset of batched tensors
"""
if shuffle_examples and not shuffle_buffer_size:
raise ValueError("Must set shuffle buffer size if shuffling examples")
tf_records = list(tf_records)
if shuffle_records:
random.shuffle(tf_records)
record_list = tf.data.Dataset.from_tensor_slices(tf_records)
# compression_type here must agree with write_tf_examples
map_func = functools.partial(
tf.data.TFRecordDataset,
buffer_size=8 * 1024 * 1024,
compression_type='ZLIB')
if interleave:
# cycle_length = how many tfrecord files are read in parallel
# The idea is to shuffle both the order of the files being read,
# and the examples being read from the files.
dataset = record_list.apply(tf.data.experimental.parallel_interleave(
map_func, cycle_length=64, sloppy=True))
else:
dataset = record_list.flat_map(map_func)
if filter_amount < 1.0:
dataset = dataset.filter(
lambda _: tf.random_uniform([]) < filter_amount)
dataset = dataset.repeat(num_repeats)
if shuffle_examples:
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.batch(batch_size)
return dataset
def _random_rotation(feature_layout, x_tensor, outcome_tensor):
pi_tensor = outcome_tensor['pi_tensor']
if feature_layout == 'nhwc':
x_rot_tensor, pi_rot_tensor = symmetries.rotate_train_nhwc(
x_tensor, pi_tensor)
else:
x_rot_tensor, pi_rot_tensor = symmetries.rotate_train_nchw(
x_tensor, pi_tensor)
outcome_tensor['pi_tensor'] = pi_rot_tensor
return x_rot_tensor, outcome_tensor
def get_input_tensors(batch_size, feature_layout, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None,
filter_amount=0.05, random_rotation=True):
"""Read tf.Records and prepare them for ingestion by dual_net.
See `read_tf_records` for parameter documentation.
Returns a dict of tensors (see return value of batch_parse_tf_example)
"""
print("Reading tf_records from {} inputs".format(len(tf_records)))
dataset = read_tf_records(
batch_size,
tf_records,
num_repeats=num_repeats,
shuffle_records=shuffle_records,
shuffle_examples=shuffle_examples,
shuffle_buffer_size=shuffle_buffer_size,
filter_amount=filter_amount,
interleave=False)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout),
batch_size))
return dataset.make_one_shot_iterator().get_next()
def get_tpu_input_tensors(batch_size, feature_layout, tf_records, num_repeats=1,
shuffle_records=True, shuffle_examples=True,
shuffle_buffer_size=None,
filter_amount=0.05, random_rotation=True):
# TPUs train on sequential golden chunks to simplify preprocessing and
# reproducibility.
assert len(tf_records) < 101, "Use example_buffer to build a golden_chunk"
dataset = read_tf_records(
batch_size,
tf_records,
num_repeats=num_repeats,
shuffle_records=shuffle_records,
shuffle_examples=shuffle_examples,
shuffle_buffer_size=shuffle_buffer_size,
filter_amount=filter_amount,
interleave=False)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
# TODO(sethtroisi@): Unify
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout),
batch_size, drop_remainder=True))
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def get_tpu_bt_input_tensors(games, games_nr, batch_size, feature_layout,
num_repeats=1,
number_of_games=500e3,
fresh_fraction=0.05,
random_rotation=True):
dataset = bigtable_input.get_unparsed_moves_from_last_n_games(
games, games_nr, number_of_games)
dataset = dataset.repeat(num_repeats)
dataset = dataset.batch(batch_size)
dataset = dataset.filter(lambda t: tf.equal(tf.shape(t)[0], batch_size))
dataset = dataset.map(
functools.partial(batch_parse_tf_example, batch_size, feature_layout))
if random_rotation:
# Unbatch the dataset so we can rotate it
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.apply(tf.data.experimental.map_and_batch(
functools.partial(_random_rotation, feature_layout),
batch_size, drop_remainder=True))
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def make_dataset_from_selfplay(data_extracts):
"""
Returns an iterable of tf.Examples.
Args:
data_extracts: An iterable of (position, pi, result) tuples
"""
f = dual_net.get_features()
tf_examples = (make_tf_example(features_lib.extract_features(pos, f),
pi, result)
for pos, pi, result in data_extracts)
return tf_examples
def make_dataset_from_sgf(sgf_filename, tf_record):
pwcs = sgf_wrapper.replay_sgf_file(sgf_filename)
tf_examples = map(_make_tf_example_from_pwc, pwcs)
write_tf_examples(tf_record, tf_examples)
def _make_tf_example_from_pwc(position_w_context):
f = dual_net.get_features()
features = features_lib.extract_features(position_w_context.position, f)
pi = _one_hot(coords.to_flat(position_w_context.next_move))
value = position_w_context.result
return make_tf_example(features, pi, value)
|
|
#################################################################################
# Copyright (c) 2011-2013, Pacific Biosciences of California, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Pacific Biosciences nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY PACIFIC BIOSCIENCES AND ITS
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PACIFIC BIOSCIENCES OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#################################################################################
# Author: Jim Drake
# Rare variant caller algorithm. Based on plurality caller.
from __future__ import absolute_import
import math, logging, numpy as np
from collections import defaultdict, namedtuple, OrderedDict
from bisect import bisect_left, bisect_right
from itertools import izip
from ..utils import error_probability_to_qv
from ..options import options
from .. import (io,
reference)
from ..Worker import WorkerProcess, WorkerThread
from ..ResultCollector import ResultCollectorProcess, ResultCollectorThread
from ..alignment import AlignmentColumn
from ..plurality.plurality import PluralityResult, PluralityLocusSummary
try:
from scipy.stats import binom
availability = (True, "OK")
except:
availability = (False, "Cannot import SciPy (required for rare variant analysis)")
# NOTES:
# This needs some rethinking. We're returning multiple variants for a given
# column. Each variant will have a frequency and confidence value associated
# with it. The locus summary should reflect that somehow. There may also
# need to be some re-design work here to better reflect the data structure
# needs of the rare variant algorithm.
# We're only using CCS reads here for a first pass. To support a new downstream
# feature, correlated mutations, we may want to output read ids associated
# with each particular variant.
class RareAlignmentColumn(AlignmentColumn):
def __init__(self, *args):
super(RareAlignmentColumn,self).__init__(*args)
def confidence(self):
pass
# NOTE: This differs from plurality.consensus in that it returns a list of locus
# summaries instead of just one. I'm leaning in favor of renaming this method
# `summary` instead of `consensus`, which seems to make more sense. However,
# all the downstream processes look for a `consensus` field, so we'll leave
# it for now.
# This is really just a looser filter than plurality.
# Isolate the test from the data structures, that way we can swap algos.
def consensus(self):
"""
Generates coverage information only for positions that have a variant.
Binomial test runs using scipy.stats.binom.sf
x = # of variant observations
n = Total number of observations (coverage)
f = x / n
p = Probability of success, or that the variant observed is a sequencing error.
For CCS reads this is presumed to be 0.01
for each snippet:
if n > 500 and f > 0.01:
pval = binom.sf(x, n, p=0.01)
save to output, calculate confidence based on pval
(a standalone numeric sketch of this test follows the class)
"""
coverage = self.coverage()
loci = []
# Don't bother processing if they all match the reference.
saf = self._snippetsAndFrequencies
if self.referenceBase in saf and saf[self.referenceBase] == coverage:
return loci
# Rare variants are 1% < freq < 50% at a coverage > 500. Dominant
# alleles (i.e., 50% < freq) are also detected and reported.
for snippet, count in self.orderedSnippetsAndFrequencies:
freq = count / float(coverage)
if coverage > options.variantCoverageThreshold and freq > 0.01:
# TODO: modify to use QV values (defaults to nominal CCS error rate)
pval = binom.sf(count, coverage, 0.01)
loci.append(PluralityLocusSummary(
self.referenceId,
self.referencePos,
coverage,
snippet,
error_probability_to_qv(pval),
count))
# TODO: needed? only using in unit tests right now
# tie-breaking sort, same as alignment.AlignmentColumn
loci.sort(key=lambda pls: (pls.consensusFrequency, pls.consensus))
return loci
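# A standalone numeric sketch of the binomial test described in consensus(): with 600x
# coverage, 12 variant reads and the presumed 1% CCS error rate, scipy's survival
# function gives the p-value, which is then expressed as a QV. The numbers are invented
# purely for illustration; requires the optional SciPy import above.
def _example_rare_variant_pvalue(count=12, coverage=600, error_rate=0.01):
    pval = binom.sf(count, coverage, error_rate)
    return pval, error_probability_to_qv(pval)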
class RareCaller(object):
# The type of the result that will be passed on to the result
# collector is:
# (ReferenceWindow, list of (Locus, Tuple)]
# where each Tuple must be convertible to a numpy array with dtype
# as below:
def onChunk(self, referenceWindow, alnHits):
return (referenceWindow, self.rare(referenceWindow, alnHits))
@staticmethod
def rare(referenceWindow, alignments):
"""
Modeled after the PluralityCaller.plurality method
Input: referenceWindow, iterable of alignmentHits
Output: list of (Locus, PluralityLocusSummary)
Like plurality, we compute a tally of snippets and frequencies.
Then perform a binomial analysis of the table and return a list of
variants based on cutoffs on both the frequency and p-value.
"""
refId, refWindowStart, refWindowEnd = referenceWindow
alignmentColumns = {}
for hit in alignments:
alignedRefPositions = hit.referencePositions(orientation="genomic")
begin = bisect_left(alignedRefPositions, refWindowStart)
end = bisect_right(alignedRefPositions, refWindowEnd)
alignedRefPositions = alignedRefPositions[begin:end]
alignedRef = hit.reference(orientation="genomic")[begin:end]
alignedRead = hit.read(orientation="genomic")[begin:end]
readBases = []
for i, (refPos, refBase, readBase) in enumerate(izip(alignedRefPositions,
alignedRef,
alignedRead)):
if readBase != "-":
readBases.append(readBase)
if refBase != "-":
if (refId, refPos) not in alignmentColumns:
alignmentColumns[refId, refPos] = RareAlignmentColumn(refId, refPos, refBase)
alignmentColumns[refId, refPos].addReadSnippet("".join(readBases))
readBases = []
return [(locus, consensus)
for locus, algCol in alignmentColumns.iteritems()
for consensus in algCol.consensus() ]
class RareResult(PluralityResult):
"""
We override some data structures and methods here in order to support the
data coming from rare variants. Consumers downstream need to be aware of
this. Rare variants will only create a GFF file, so only the GFF consumer
needs to be aware of the changes.
"""
def initTable(self, refId, refEntry):
self.consensusByRefId[refId] = dict()
def installInTable(self, locusSummary, tbl, rowNumber):
# TODO: Test memory footprint
if rowNumber not in tbl:
tbl[rowNumber] = list()
# filter out indels here
if len(locusSummary.consensus) == 1:
tbl[rowNumber].append(locusSummary)
# define both process and thread-based rare variant callers
class RareWorkerProcess(RareCaller,WorkerProcess):
pass
class RareWorkerThread(RareCaller,WorkerThread):
pass
# define both process and thread-based plurality collectors
class RareResultCollectorProcess(RareResult, ResultCollectorProcess):
pass
class RareResultCollectorThread(RareResult, ResultCollectorThread):
pass
#
# Plugin API
#
__all__ = [ "name",
"availability",
"additionalDefaultOptions",
"compatibilityWithCmpH5",
"slaveFactories" ]
name = "Rare variant analysis"
additionalDefaultOptions = { "referenceChunkOverlap" : 0,
"variantCoverageThreshold" : 500,
"variantConfidenceThreshold" : 20 }
def compatibilityWithCmpH5(cmpH5):
# TODO: check whether the cmp.h5 is CCS
return (True, "OK")
def slaveFactories(threaded):
if threaded:
return (RareWorkerThread, RareResultCollectorThread)
else:
return (RareWorkerProcess, RareResultCollectorProcess)
|
|
import warnings
import unittest
import sys
import os
import atexit
import numpy as np
from scipy import sparse
import pytest
from sklearn.utils.deprecation import deprecated
from sklearn.utils.metaestimators import if_delegate_has_method
from sklearn.utils._testing import (
assert_raises,
assert_warns,
assert_no_warnings,
set_random_state,
assert_raise_message,
ignore_warnings,
check_docstring_parameters,
assert_allclose_dense_sparse,
assert_raises_regex,
TempMemmap,
create_memmap_backed_data,
_delete_folder,
_convert_container,
raises,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert tree.random_state == 3
def test_assert_allclose_dense_sparse():
x = np.arange(9).reshape(3, 3)
msg = "Not equal to tolerance "
y = sparse.csc_matrix(x)
for X in [x, y]:
# basic compare
with pytest.raises(AssertionError, match=msg):
assert_allclose_dense_sparse(X, X * 2)
assert_allclose_dense_sparse(X, X)
with pytest.raises(ValueError, match="Can only compare two sparse"):
assert_allclose_dense_sparse(x, y)
A = sparse.diags(np.ones(5), offsets=0).tocsr()
B = sparse.csr_matrix(np.ones((1, 5)))
with pytest.raises(AssertionError, match="Arrays are not equal"):
assert_allclose_dense_sparse(B, A)
def test_assert_raises_msg():
with assert_raises_regex(AssertionError, "Hello world"):
with assert_raises(ValueError, msg="Hello world"):
pass
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test", _raise_ValueError, "test")
assert_raises(
AssertionError,
assert_raise_message,
ValueError,
"something else",
_raise_ValueError,
"test",
)
assert_raises(
ValueError,
assert_raise_message,
TypeError,
"something else",
_raise_ValueError,
"test",
)
assert_raises(AssertionError, assert_raise_message, ValueError, "test", _no_raise)
# multiple exceptions in a tuple
assert_raises(
AssertionError,
assert_raise_message,
(ValueError, AttributeError),
"test",
_no_raise,
)
def test_ignore_warning():
# This check that ignore_warning decorator and context manager are working
# as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function, category=DeprecationWarning))
with pytest.warns(DeprecationWarning):
ignore_warnings(_warning_function, category=UserWarning)()
with pytest.warns(UserWarning):
ignore_warnings(_multiple_warning_function, category=FutureWarning)()
with pytest.warns(DeprecationWarning):
ignore_warnings(_multiple_warning_function, category=UserWarning)()
assert_no_warnings(
ignore_warnings(_warning_function, category=(DeprecationWarning, UserWarning))
)
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
with pytest.warns(DeprecationWarning):
decorator_no_user_warning()
with pytest.warns(UserWarning):
decorator_no_deprecation_multiple_warning()
with pytest.warns(DeprecationWarning):
decorator_no_user_multiple_warning()
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
with pytest.warns(DeprecationWarning):
context_manager_no_user_warning()
with pytest.warns(UserWarning):
context_manager_no_deprecation_multiple_warning()
with pytest.warns(DeprecationWarning):
context_manager_no_user_multiple_warning()
# Check that passing warning class as first positional argument
warning_class = UserWarning
match = "'obj' should be a callable.+you should use 'category=UserWarning'"
with pytest.raises(ValueError, match=match):
silence_warnings_func = ignore_warnings(warning_class)(_warning_function)
silence_warnings_func()
with pytest.raises(ValueError, match=match):
@ignore_warnings(warning_class)
def test():
pass
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
filters_orig = warnings.filters[:]
# TODO: remove in 1.2
with pytest.warns(FutureWarning):
assert assert_warns(UserWarning, f) == 3
# test that assert_warns doesn't have side effects on warnings
# filters
assert warnings.filters == filters_orig
with pytest.raises(AssertionError):
assert_no_warnings(f)
assert assert_no_warnings(lambda x: x, 1) == 1
# TODO: remove in 1.2
@ignore_warnings(category=FutureWarning)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", FutureWarning)
failed = False
filters = sys.modules["warnings"].filters[:]
try:
try:
# Should raise an AssertionError
# assert_warns has a special handling of "FutureWarning" that
# pytest.warns does not have
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules["warnings"].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
# Tests for docstrings:
def f_ok(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
b : float
Parameter b
Returns
-------
c : list
Parameter c
"""
c = a + b
return c
def f_bad_sections(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
b : float
Parameter b
Results
-------
c : list
Parameter c
"""
c = a + b
return c
def f_bad_order(b, a):
"""Function f
Parameters
----------
a : int
Parameter a
b : float
Parameter b
Returns
-------
c : list
Parameter c
"""
c = a + b
return c
def f_too_many_param_docstring(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
b : int
Parameter b
c : int
Parameter c
Returns
-------
d : list
Parameter c
"""
d = a + b
return d
def f_missing(a, b):
"""Function f
Parameters
----------
a : int
Parameter a
Returns
-------
c : list
Parameter c
"""
c = a + b
return c
def f_check_param_definition(a, b, c, d, e):
"""Function f
Parameters
----------
a: int
Parameter a
b:
Parameter b
c :
Parameter c
d:int
Parameter d
e
No typespec is allowed without colon
"""
return a + b + c + d
class Klass:
def f_missing(self, X, y):
pass
def f_bad_sections(self, X, y):
"""Function f
Parameter
----------
a : int
Parameter a
b : float
Parameter b
Results
-------
c : list
Parameter c
"""
pass
class MockEst:
def __init__(self):
"""MockEstimator"""
def fit(self, X, y):
return X
def predict(self, X):
return X
def predict_proba(self, X):
return X
def score(self, X):
return 1.0
class MockMetaEstimator:
def __init__(self, delegate):
"""MetaEstimator to check if doctest on delegated methods work.
Parameters
---------
delegate : estimator
Delegated estimator.
"""
self.delegate = delegate
@if_delegate_has_method(delegate="delegate")
def predict(self, X):
"""This is available only if delegate has predict.
Parameters
----------
y : ndarray
Parameter y
"""
return self.delegate.predict(X)
@if_delegate_has_method(delegate="delegate")
@deprecated("Testing a deprecated delegated method")
def score(self, X):
"""This is available only if delegate has score.
Parameters
---------
y : ndarray
Parameter y
"""
@if_delegate_has_method(delegate="delegate")
def predict_proba(self, X):
"""This is available only if delegate has predict_proba.
Parameters
---------
X : ndarray
Parameter X
"""
return X
@deprecated("Testing deprecated function with wrong params")
def fit(self, X, y):
"""Incorrect docstring but should not be tested"""
def test_check_docstring_parameters():
pytest.importorskip(
"numpydoc", reason="numpydoc is required to test the docstrings"
)
incorrect = check_docstring_parameters(f_ok)
assert incorrect == []
incorrect = check_docstring_parameters(f_ok, ignore=["b"])
assert incorrect == []
incorrect = check_docstring_parameters(f_missing, ignore=["b"])
assert incorrect == []
with pytest.raises(RuntimeError, match="Unknown section Results"):
check_docstring_parameters(f_bad_sections)
with pytest.raises(RuntimeError, match="Unknown section Parameter"):
check_docstring_parameters(Klass.f_bad_sections)
incorrect = check_docstring_parameters(f_check_param_definition)
assert incorrect == [
"sklearn.utils.tests.test_testing.f_check_param_definition There "
"was no space between the param name and colon ('a: int')",
"sklearn.utils.tests.test_testing.f_check_param_definition There "
"was no space between the param name and colon ('b:')",
"sklearn.utils.tests.test_testing.f_check_param_definition "
"Parameter 'c :' has an empty type spec. Remove the colon",
"sklearn.utils.tests.test_testing.f_check_param_definition There "
"was no space between the param name and colon ('d:int')",
]
messages = [
[
"In function: sklearn.utils.tests.test_testing.f_bad_order",
"There's a parameter name mismatch in function docstring w.r.t."
" function signature, at index 0 diff: 'b' != 'a'",
"Full diff:",
"- ['b', 'a']",
"+ ['a', 'b']",
],
[
"In function: "
+ "sklearn.utils.tests.test_testing.f_too_many_param_docstring",
"Parameters in function docstring have more items w.r.t. function"
" signature, first extra item: c",
"Full diff:",
"- ['a', 'b']",
"+ ['a', 'b', 'c']",
"? +++++",
],
[
"In function: sklearn.utils.tests.test_testing.f_missing",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: b",
"Full diff:",
"- ['a', 'b']",
"+ ['a']",
],
[
"In function: sklearn.utils.tests.test_testing.Klass.f_missing",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X', 'y']",
"+ []",
],
[
"In function: "
+ "sklearn.utils.tests.test_testing.MockMetaEstimator.predict",
"There's a parameter name mismatch in function docstring w.r.t."
" function signature, at index 0 diff: 'X' != 'y'",
"Full diff:",
"- ['X']",
"? ^",
"+ ['y']",
"? ^",
],
[
"In function: "
+ "sklearn.utils.tests.test_testing.MockMetaEstimator."
+ "predict_proba",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X']",
"+ []",
],
[
"In function: "
+ "sklearn.utils.tests.test_testing.MockMetaEstimator.score",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X']",
"+ []",
],
[
"In function: " + "sklearn.utils.tests.test_testing.MockMetaEstimator.fit",
"Parameters in function docstring have less items w.r.t. function"
" signature, first missing item: X",
"Full diff:",
"- ['X', 'y']",
"+ []",
],
]
mock_meta = MockMetaEstimator(delegate=MockEst())
for msg, f in zip(
messages,
[
f_bad_order,
f_too_many_param_docstring,
f_missing,
Klass.f_missing,
mock_meta.predict,
mock_meta.predict_proba,
mock_meta.score,
mock_meta.fit,
],
):
incorrect = check_docstring_parameters(f)
assert msg == incorrect, '\n"%s"\n not in \n"%s"' % (msg, incorrect)
class RegistrationCounter:
def __init__(self):
self.nb_calls = 0
def __call__(self, to_register_func):
self.nb_calls += 1
assert to_register_func.func is _delete_folder
def check_memmap(input_array, mmap_data, mmap_mode="r"):
assert isinstance(mmap_data, np.memmap)
writeable = mmap_mode != "r"
assert mmap_data.flags.writeable is writeable
np.testing.assert_array_equal(input_array, mmap_data)
def test_tempmemmap(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, "register", registration_counter)
input_array = np.ones(3)
with TempMemmap(input_array) as data:
check_memmap(input_array, data)
temp_folder = os.path.dirname(data.filename)
if os.name != "nt":
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 1
mmap_mode = "r+"
with TempMemmap(input_array, mmap_mode=mmap_mode) as data:
check_memmap(input_array, data, mmap_mode=mmap_mode)
temp_folder = os.path.dirname(data.filename)
if os.name != "nt":
assert not os.path.exists(temp_folder)
assert registration_counter.nb_calls == 2
def test_create_memmap_backed_data(monkeypatch):
registration_counter = RegistrationCounter()
monkeypatch.setattr(atexit, "register", registration_counter)
input_array = np.ones(3)
data = create_memmap_backed_data(input_array)
check_memmap(input_array, data)
assert registration_counter.nb_calls == 1
data, folder = create_memmap_backed_data(input_array, return_folder=True)
check_memmap(input_array, data)
assert folder == os.path.dirname(data.filename)
assert registration_counter.nb_calls == 2
mmap_mode = "r+"
data = create_memmap_backed_data(input_array, mmap_mode=mmap_mode)
check_memmap(input_array, data, mmap_mode)
assert registration_counter.nb_calls == 3
input_list = [input_array, input_array + 1, input_array + 2]
mmap_data_list = create_memmap_backed_data(input_list)
for input_array, data in zip(input_list, mmap_data_list):
check_memmap(input_array, data)
assert registration_counter.nb_calls == 4
@pytest.mark.parametrize(
"constructor_name, container_type",
[
("list", list),
("tuple", tuple),
("array", np.ndarray),
("sparse", sparse.csr_matrix),
("sparse_csr", sparse.csr_matrix),
("sparse_csc", sparse.csc_matrix),
("dataframe", lambda: pytest.importorskip("pandas").DataFrame),
("series", lambda: pytest.importorskip("pandas").Series),
("index", lambda: pytest.importorskip("pandas").Index),
("slice", slice),
],
)
@pytest.mark.parametrize(
"dtype, superdtype",
[
(np.int32, np.integer),
(np.int64, np.integer),
(np.float32, np.floating),
(np.float64, np.floating),
],
)
def test_convert_container(
constructor_name,
container_type,
dtype,
superdtype,
):
"""Check that we convert the container to the right type of array with the
right data type."""
if constructor_name in ("dataframe", "series", "index"):
# delay the import of pandas within the function to only skip this test
# instead of the whole file
container_type = container_type()
container = [0, 1]
container_converted = _convert_container(
container,
constructor_name,
dtype=dtype,
)
assert isinstance(container_converted, container_type)
if constructor_name in ("list", "tuple", "index"):
# list and tuple will use Python class dtype: int, float
# pandas index will always use high precision: np.int64 and np.float64
assert np.issubdtype(type(container_converted[0]), superdtype)
elif hasattr(container_converted, "dtype"):
assert container_converted.dtype == dtype
elif hasattr(container_converted, "dtypes"):
assert container_converted.dtypes[0] == dtype
def test_raises():
# Tests for the raises context manager
# Proper type, no match
with raises(TypeError):
raise TypeError()
# Proper type, proper match
with raises(TypeError, match="how are you") as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# Proper type, proper match with multiple patterns
with raises(TypeError, match=["not this one", "how are you"]) as cm:
raise TypeError("hello how are you")
assert cm.raised_and_matched
# bad type, no match
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError) as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# Bad type, no match, with a err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, err_msg="the failure message") as cm:
raise ValueError()
assert not cm.raised_and_matched
# bad type, with match (is ignored anyway)
with pytest.raises(ValueError, match="this will be raised"):
with raises(TypeError, match="this is ignored") as cm:
raise ValueError("this will be raised")
assert not cm.raised_and_matched
# proper type but bad match
with pytest.raises(
AssertionError, match="should contain one of the following patterns"
):
with raises(TypeError, match="hello") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# proper type but bad match, with err_msg
with pytest.raises(AssertionError, match="the failure message"):
with raises(TypeError, match="hello", err_msg="the failure message") as cm:
raise TypeError("Bad message")
assert not cm.raised_and_matched
# no raise with default may_pass=False
with pytest.raises(AssertionError, match="Did not raise"):
with raises(TypeError) as cm:
pass
assert not cm.raised_and_matched
# no raise with may_pass=True
with raises(TypeError, match="hello", may_pass=True) as cm:
pass # still OK
assert not cm.raised_and_matched
# Multiple exception types:
with raises((TypeError, ValueError)):
raise TypeError()
with raises((TypeError, ValueError)):
raise ValueError()
with pytest.raises(AssertionError):
with raises((TypeError, ValueError)):
pass
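# --- Illustrative sketch (not part of the original test module) ---
# check_docstring_parameters is exercised above only on deliberately broken
# docstrings; this uncalled helper shows the happy path, assuming numpydoc is
# available (the surrounding tests are skipped without it anyway).
def _docstring_check_happy_path_example():
    def well_documented(x, y=2):
        """Add two numbers.

        Parameters
        ----------
        x : int
            First operand.

        y : int, default=2
            Second operand.

        Returns
        -------
        int
            The sum ``x + y``.
        """
        return x + y

    errors = check_docstring_parameters(well_documented)
    # For a docstring that matches the signature, the returned list of
    # problems is expected to be empty.
    return errors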
|
|
import datetime
import json
import math
import random
import re
from django.conf import settings
from django.contrib.humanize.templatetags import humanize
from django.core.cache import cache
from django.db.models import Q
from django.http import Http404, HttpResponse
from django.shortcuts import redirect, get_object_or_404
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.generic import ListView, TemplateView, View
from django.views.decorators.cache import patch_cache_control
import pytz
from botbot.apps.bots.utils import reverse_channel
from botbot.apps.bots.views import ChannelMixin
from . import forms
from botbot.apps.logs.models import Log
from botbot.apps.kudos.models import KudosTotal
class Help(ChannelMixin, TemplateView):
"""
Help page for a channel.
"""
template_name = 'logs/help.html'
class PaginatorPageLinksMixin(object):
def paginate_queryset(self, queryset, page_size):
paginator, page, object_list, has_other_pages = super(
PaginatorPageLinksMixin, self).paginate_queryset(
queryset, page_size)
self.next_page = self.get_next_page_link(page)
self.prev_page = self.get_previous_page_link(page)
self.current_page = self.get_current_page_link(page)
return paginator, page, object_list, has_other_pages
def get_next_page_link(self, page):
url = self.request.path
params = self.request.GET.copy()
if not page.has_next():
return ""
else:
params['page'] = page.next_page_number()
return '{0}?{1}'.format(url, params.urlencode())
def get_previous_page_link(self, page):
url = self.request.path
params = self.request.GET.copy()
if not page.has_previous():
return ""
else:
params['page'] = page.previous_page_number()
return '{0}?{1}'.format(url, params.urlencode())
def get_current_page_link(self, page):
url = self.request.path
params = self.request.GET.copy()
params['page'] = page.number
return '{0}?{1}'.format(url, params.urlencode())
class LogDateMixin(object):
def _get_base_queryset(self):
return self.channel.filtered_logs()
def channel_date_url(self, date=None):
if not date:
date = self.date
        viewname = 'log_day_text' if self.format == 'text' else 'log_day'
return reverse_channel(
self.channel, viewname, kwargs=self._kwargs_with_date(date))
def _kwargs_with_date(self, date):
kwargs = {
'year': date.year,
'month': "%02d" % date.month,
'day': "%02d" % date.day
}
return kwargs
def _local_date_at_midnight(self, timestamp):
# cast timestamp into local timezone
localized = timestamp.astimezone(self.request_timezone)
# create a new date object starting at midnight in that timezone
return datetime.datetime(localized.year,
localized.month,
localized.day,
tzinfo=localized.tzinfo)
def _get_previous_date(self):
"""
Find the previous day, that has content.
"""
date = None
try:
ts = (self._get_base_queryset()
.filter(timestamp__lt=self.date)[0].timestamp)
date = self._local_date_at_midnight(ts)
except IndexError:
pass
return date
def _get_next_date(self):
"""
Find the next day, that has content.
"""
date = None
try:
ts = (self._get_base_queryset()
.filter(timestamp__gte=datetime.timedelta(days=1) + self.date)
.order_by('timestamp')[0].timestamp)
date = self._local_date_at_midnight(ts)
except IndexError:
pass
return date
def _date_query_set(self, date):
qs = self._get_base_queryset()
return qs.filter(timestamp__gte=date,
timestamp__lt=date + datetime.timedelta(days=1))
class LogStream(ChannelMixin, View):
def get(self, request, channel_slug, bot_slug):
response = HttpResponse()
response['X-Accel-Redirect'] = '/internal-channel-stream/{}'.format(
self.channel.pk)
if 'HTTP_LAST_EVENT_ID' in request.META:
response['Last-Event-ID'] = request.META['HTTP_LAST_EVENT_ID']
return response
def _utc_now():
return datetime.datetime.now(tz=pytz.timezone('UTC'))
def _find_pk(pk, queryset):
"""Find a PK in a queryset in memory"""
found = None
try:
pk = int(pk)
found = next(obj for obj in queryset if obj.pk == pk)
except (ValueError, StopIteration):
pass
return found
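# Illustrative sketch (not part of the original module): ``_find_pk`` only
# relies on objects exposing a ``pk`` attribute, so a namedtuple stand-in is
# enough to show the in-memory lookup and its failure modes.
def _find_pk_example():
    from collections import namedtuple
    FakeLog = namedtuple('FakeLog', 'pk')
    queryset = [FakeLog(pk=1), FakeLog(pk=2), FakeLog(pk=3)]
    assert _find_pk('2', queryset) == FakeLog(pk=2)  # string pks are coerced
    assert _find_pk('nope', queryset) is None        # bad input is swallowed
    assert _find_pk(99, queryset) is None            # unknown pk -> None
    return queryset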
def _timeline_context(timeline):
"""
Context (template) vars needed for timeline display.
"""
if not timeline:
return {}
today = _utc_now().date()
last_monday = today - datetime.timedelta(days=today.weekday())
last_week = last_monday - datetime.timedelta(days=7)
# the last month in the timeline needs special treatment so it
# doesn't get ordered ahead of the last/current weeks
last_month = timeline[timeline.keyOrder[-1]].pop()
if last_month >= last_week:
last_month_adjusted = (last_week -
datetime.timedelta(days=1))
elif last_month >= last_monday:
last_month_adjusted = (last_monday -
datetime.timedelta(days=1))
else:
last_month_adjusted = last_month
result = {
'timeline': timeline,
'this_week': last_monday,
'last_week': last_week,
'last_month': {'real': last_month,
'adjusted': last_month_adjusted},
}
return result
class LogViewer(ChannelMixin, object):
context_object_name = "message_list"
newest_first = False
show_first_header = False # Display date header above first line
paginate_by = 150
format = ''
def __init__(self, *args, **kwargs):
super(LogViewer, self).__init__(*args, **kwargs)
self.next_page = ""
self.prev_page = ""
self.current_page = ""
def dispatch(self, request, *args, **kwargs):
self._setup_response_format()
return super(LogViewer, self).dispatch(request, *args, **kwargs)
def _setup_response_format(self):
if self.format == 'text':
self.include_timeline = False
self.template_name = 'logs/logs.txt'
self.content_type = 'text/plain; charset=utf-8'
elif self.request.is_ajax():
self.format = 'ajax'
self.include_timeline = False
self.template_name = 'logs/log_display.html'
# Default to HTML view
else:
self.format = 'html'
self.include_timeline = True
self.template_name = "logs/logs.html"
def get_ordered_queryset(self, queryset):
order = 'timestamp'
if self.newest_first:
order = '-timestamp'
return queryset.order_by(order)
def get_context_data(self, **kwargs):
context = super(LogViewer, self).get_context_data(**kwargs)
if self.include_timeline:
context.update(
_timeline_context(self.channel.get_months_active()))
if self.format == 'html':
context.update({
'is_current': getattr(self, 'is_current', False),
'search_form': forms.SearchForm(),
'show_first_header': self.show_first_header,
'newest_first': self.newest_first,
'show_kudos': self.channel.user_can_access_kudos(
self.request.user),
})
size = self.channel.current_size()
context.update({
'size': size,
'big': (size >= settings.BIG_CHANNEL),
'prev_page': self.prev_page,
'next_page': self.next_page,
'current_page': self.current_page,
})
return context
def render_to_response(self, context, **response_kwargs):
response = super(LogViewer, self).render_to_response(
context, **response_kwargs)
has_next_page = False
if self.format == 'html':
# Official SEO header
links = []
if self.next_page:
links.append('{0}; rel="next"'.format(self.next_page))
has_next_page = True
if self.prev_page:
links.append('{0}; rel="prev"'.format(self.prev_page))
response['Link'] = ','.join(links)
else:
# No HTML, pass page info in easily parseable headers
if self.next_page:
response['X-NextPage'] = self.next_page
has_next_page = True
if self.prev_page:
response['X-PrevPage'] = self.prev_page
if has_next_page and self.request.user.is_anonymous():
patch_cache_control(
response, public=True,
max_age=settings.CACHE_MIDDLEWARE_SECONDS)
else:
patch_cache_control(response, private=True)
return response
def _pages_for_queryset(self, queryset):
return int(math.ceil(queryset.count() / float(self.paginate_by)))
class DayLogViewer(PaginatorPageLinksMixin, LogDateMixin, LogViewer, ListView):
show_first_header = False
allow_empty = True
def get(self, request, *args, **kwargs):
self.date = self.set_view_date()
self.object_list = self.get_queryset()
# Redirect to nearby logs if this queryset is empty to avoid a 404
if not self.get_allow_empty() and not self.object_list.exists():
url = self._nearby_log_url()
if url:
return redirect(url)
raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data()
return self.render_to_response(context)
def _nearby_log_url(self):
"""Find a date-based log URL that will not be empty"""
# First check if there is anything in the past
        closest_qs = self.channel.filtered_logs().order_by(
            "-timestamp").filter(timestamp__lte=self.date)
        # If not, go to the future
        if not closest_qs.exists():
            closest_qs = self.channel.filtered_logs().order_by(
                "timestamp").filter(
                timestamp__gte=self.date)
        # Return the URL where the first log line found will be
        try:
            return self.channel_date_url(closest_qs[0].timestamp)
except IndexError:
pass
return None
def get_context_data(self):
context = super(DayLogViewer, self).get_context_data()
try:
context.update({
'highlight': int(self.request.GET.get('msg')),
})
except (TypeError, ValueError):
pass
return context
def get_queryset(self):
qs = self.channel.filtered_logs()
qs = self.get_ordered_queryset(qs)
start = self.date
end = start + datetime.timedelta(days=1)
return qs.filter(timestamp__gte=start, timestamp__lt=end)
def _date_paginator(self, date):
qs = self._date_query_set(date)
return self.get_paginator(qs, self.get_paginate_by(qs))
def paginate_queryset(self, queryset, page_size):
paginator, page, object_list, has_other_pages = super(
DayLogViewer, self).paginate_queryset(queryset, page_size)
if not self.next_page:
self.is_current = True
return paginator, page, object_list, has_other_pages
def get_previous_page_link(self, page):
"""
        Generate a link to the previous page, from the current one.
"""
url = self.channel_date_url()
# copy, to maintain any params that came in to original request.
params = self.request.GET.copy()
if not page.has_previous():
date = self._get_previous_date()
if not date:
# We have no more logs!
return None
# Use new paginator to get dates max page number.
paginator = self._date_paginator(date)
params['page'] = paginator.num_pages
url = self.channel_date_url(date)
else:
params['page'] = page.previous_page_number()
return '{0}?{1}'.format(url, params.urlencode())
def get_next_page_link(self, page):
"""
Generate a link to the next page, from the current one.
"""
url = self.channel_date_url()
# copy, to maintain any params that came in to original request.
params = self.request.GET.copy()
if not page.has_next():
date = self._get_next_date()
if date:
url = self.channel_date_url(date)
params['page'] = 1 # If new date, always start at page 1.
else:
return ""
else:
params['page'] = page.next_page_number()
return '{0}?{1}'.format(url, params.urlencode())
def get_current_page_link(self, page):
# copy, to maintain any params that came in to original request.
params = self.request.GET.copy()
date = _utc_now()
url = self.channel_date_url(date)
params['page'] = page.number
return '{0}?{1}'.format(url, params.urlencode())
@cached_property
def request_timezone(self):
"""
        Read the timezone from a GET param, falling back to UTC.
"""
try:
tz = pytz.timezone(self.request.GET.get('tz', ''))
except pytz.UnknownTimeZoneError:
tz = pytz.timezone('UTC')
return tz
def set_view_date(self):
"""Determine start date for queryset"""
if all([field in self.kwargs for field in ['year', 'month', 'day']]):
# localize date so logs start at local time
try:
return datetime.datetime(year=int(self.kwargs['year']),
month=int(self.kwargs['month']),
day=int(self.kwargs['day']),
tzinfo=self.request_timezone)
except ValueError:
raise Http404("Invalid date.")
# Use the last page.
self.kwargs['page'] = 'last'
return _utc_now().date()
class SearchLogViewer(PaginatorPageLinksMixin, LogViewer, ListView):
show_first_header = True
newest_first = True
allow_empty = True
include_timeline = False
def get(self, request, *args, **kwargs):
self.form = forms.SearchForm(request.GET)
return super(SearchLogViewer, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Add the search term to the context data.
"""
context = super(SearchLogViewer, self).get_context_data(**kwargs)
context.update({
'q': self.search_term,
'search_form': self.form,
})
return context
def get_queryset(self):
"""
Use search results rather than the standard queryset.
"""
self.form = forms.SearchForm(self.request.GET)
if self.form.is_valid():
self.search_term = self.form.cleaned_data.get("q", "")
else:
self.search_term = ""
self.search_term = self.search_term.replace('%', '%%')
filter_args = self.channel.visible_commands_filter
# If a user is mentioned, filter those users first
matches = re.search(r'(\bnick:([\w\-]+)\b)', self.search_term)
if matches:
self.search_term = self.search_term.replace(matches.groups()[0], '')
filter_args = filter_args & Q(nick__icontains=matches.groups()[1])
return self.channel.log_set.search(self.search_term).filter(filter_args)
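# Illustrative sketch (not part of the original module): how the
# ``nick:<name>`` prefix is peeled off a search query in
# SearchLogViewer.get_queryset before the full-text search runs; only the
# regular expression is exercised here, on a made-up query.
def _nick_filter_example():
    term = "nick:jane-doe deploy failed"
    match = re.search(r'(\bnick:([\w\-]+)\b)', term)
    assert match.groups() == ("nick:jane-doe", "jane-doe")
    remaining = term.replace(match.groups()[0], '')
    assert remaining == " deploy failed"
    return remaining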
class SingleLogViewer(DayLogViewer):
"""
Find a single log line and redirect to a permalink to it.
    This inherits from DayLogViewer because it needs to use the same queryset
and pagination methods to ensure the page is found in the same place.
"""
def get(self, request, *args, **kwargs):
try:
log = get_object_or_404(Log.objects, pk=self.kwargs['msg_pk'])
except ValueError:
raise Http404
# set date to midnight so get_queryset starts pages correctly
self.date = log.timestamp.date()
self.object_list = self.get_queryset()
# Find the page in the queryset the message is located on.
page_url = self._permalink_to_log(log)
return redirect(page_url, permanent=True)
def _permalink_to_log(self, log):
"""Scan pages for a single log. Return to permalink to page"""
cache_key = "line:{}:permalink".format(log.pk)
url, params = cache.get(cache_key, [None, {}])
if not url:
paginator = self.get_paginator(
self.object_list, self.get_paginate_by(self.object_list))
for n in paginator.page_range:
page = paginator.page(n)
if log in page.object_list:
params = {"msg": log.pk, "page": n}
url = self.channel_date_url()
cache.set(cache_key, [url, params], None)
break # Found the page.
# page wasn't found
if not url:
raise Http404
oparams = self.request.GET.copy()
oparams.update(params)
return '{0}?{1}'.format(url, oparams.urlencode())
class MissedLogViewer(PaginatorPageLinksMixin, LogViewer, ListView):
include_timeline = False
show_first_header = True
newest_first = False
def get_context_data(self, **kwargs):
data = super(MissedLogViewer, self).get_context_data(**kwargs)
data['use_absolute_url'] = True
return data
def get_queryset(self):
queryset = self.get_ordered_queryset(self.channel.log_set.all())
nick = self.kwargs['nick']
try:
# cover nicks in the form: nick OR nick_ OR nick|<something>
last_exit = (queryset
.filter(
Q(nick__iexact=nick) |
Q(nick__istartswith="{0}|".format(nick)) |
Q(nick__iexact="{0}_".format(nick)),
Q(command='QUIT') | Q(command='PART'))
.order_by('-timestamp')[0])
except IndexError:
raise Http404("User hasn't left room")
try:
last_join = queryset.filter(
Q(nick__iexact=nick) |
Q(nick__istartswith="{0}|".format(nick)) |
Q(nick__iexact="{0}_".format(nick)), Q(command='JOIN'),
Q(timestamp__gt=last_exit.timestamp)).order_by('timestamp')[0]
date_filter = {'timestamp__range': (last_exit.timestamp,
last_join.timestamp)}
except IndexError:
date_filter = {'timestamp__gte': last_exit.timestamp}
# Only fetch results from when the user logged out.
self.fetch_after = (
last_exit.timestamp - datetime.timedelta(milliseconds=1))
return queryset.filter(**date_filter)
class KudosMixin(object):
"""
View mixin to check that kudos access is allowed.
If the channel's ``public_kudos`` is False then only accessible to channel
admins.
Must go after ChannelMixin.
"""
def dispatch(self, *args, **kwargs):
"""
Check kudos authorization.
"""
if not self.channel.user_can_access_kudos(self.request.user):
raise Http404("Only accessible to channel admins")
return super(KudosMixin, self).dispatch(*args, **kwargs)
class Kudos(ChannelMixin, KudosMixin, View):
"""
View that returns a ranked JSON list of users with the most kudos.
Not accessible to anonymous users.
"""
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated():
raise Http404('Only accessible to authenticated users')
return super(Kudos, self).dispatch(*args, **kwargs)
def get(self, *args, **kwargs):
return HttpResponse(
json.dumps(
self.channel.kudos_set.ranks(debug=settings.DEBUG),
indent=2 if settings.DEBUG else None),
content_type='text/json')
class ChannelKudos(ChannelMixin, KudosMixin, TemplateView):
"""
Display a shuffled subset of the people with the most kudos.
"""
template_name = 'logs/kudos.html'
def rounded_percentage(self, score, total):
percentage = score / float(total) * 100
for i in (1, 10, 25, 50):
if i >= percentage:
return i
def get_context_data(self, **kwargs):
nick = self.request.GET.get('nick')
ranks = self.channel.kudos_set.ranks(debug=nick)
top_tier = ranks[:100]
if len(top_tier) > 20:
scoreboard = [r[0] for r in random.sample(top_tier, 20)]
        elif len(top_tier) > 4:
            # random.shuffle() works in place and returns None, so shuffle a
            # copy of the ranked nicks instead of assigning its return value.
            scoreboard = [r[0] for r in ranks]
            random.shuffle(scoreboard)
else:
scoreboard = None
kwargs.update({
'random_scoreboard': scoreboard,
})
try:
channel_kudos = self.channel.kudostotal
except KudosTotal.DoesNotExist:
channel_kudos = None
if channel_kudos and channel_kudos.message_count:
if channel_kudos.message_count > 1000000:
kwargs['channel_messages'] = humanize.intword(
channel_kudos.message_count)
else:
kwargs['channel_messages'] = humanize.intcomma(
channel_kudos.message_count)
kwargs['channel_kudos_perc'] = '{:.2%}'.format(
channel_kudos.appreciation)
if nick:
nick_lower = nick.lower()
details = None
for rank_nick, alltime, info in ranks:
if rank_nick == nick_lower:
details = {
'alltime': alltime,
'alltime_perc': self.rounded_percentage(
alltime, len(ranks)),
'current': info['current_rank'],
'current_perc': self.rounded_percentage(
info['current_rank'], len(ranks)),
}
break
kwargs['search'] = {'nick': nick, 'details': details}
return super(ChannelKudos, self).get_context_data(**kwargs)
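# Illustrative sketch (not part of the original module): the percentile
# buckets used by ChannelKudos.rounded_percentage. A Django view can be
# instantiated without arguments, so the helper can be called directly;
# the ranks below are made-up numbers.
def _rounded_percentage_example():
    view = ChannelKudos()
    assert view.rounded_percentage(1, 200) == 1    # 0.5%  -> top 1%
    assert view.rounded_percentage(3, 200) == 10   # 1.5%  -> top 10%
    assert view.rounded_percentage(60, 200) == 50  # 30.0% -> top 50%
    return view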
|
|
# Copyright 2015 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: HDF5
:platform: Unix
:synopsis: Transport for saving and loading files using hdf5
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import logging
import numpy as np
import socket
import os
import copy
from mpi4py import MPI
from itertools import chain
from savu.core.utils import logfunction
from savu.data.transport_mechanism import TransportMechanism
from savu.core.utils import logmethod
class Hdf5Transport(TransportMechanism):
def transport_control_setup(self, options):
processes = options["process_names"].split(',')
        if len(processes) == 1:
options["mpi"] = False
options["process"] = 0
options["processes"] = processes
self.set_logger_single(options)
else:
options["mpi"] = True
print("Options for mpi are")
print(options)
self.mpi_setup(options)
def mpi_setup(self, options):
print("Running mpi_setup")
RANK_NAMES = options["process_names"].split(',')
RANK = MPI.COMM_WORLD.rank
SIZE = MPI.COMM_WORLD.size
RANK_NAMES_SIZE = len(RANK_NAMES)
if RANK_NAMES_SIZE > SIZE:
RANK_NAMES_SIZE = SIZE
MACHINES = SIZE/RANK_NAMES_SIZE
MACHINE_RANK = RANK/MACHINES
MACHINE_RANK_NAME = RANK_NAMES[MACHINE_RANK]
MACHINE_NUMBER = RANK % MACHINES
MACHINE_NUMBER_STRING = "%03i" % (MACHINE_NUMBER)
ALL_PROCESSES = [[i]*MACHINES for i in RANK_NAMES]
options["processes"] = list(chain.from_iterable(ALL_PROCESSES))
options["process"] = RANK
self.set_logger_parallel(MACHINE_NUMBER_STRING, MACHINE_RANK_NAME)
MPI.COMM_WORLD.barrier()
logging.info("Starting the reconstruction pipeline process")
logging.debug("Rank : %i - Size : %i - host : %s", RANK, SIZE, socket.gethostname())
IP = socket.gethostbyname(socket.gethostname())
logging.debug("ip address is : %s", IP)
self.call_mpi_barrier()
logging.debug("LD_LIBRARY_PATH is %s", os.getenv('LD_LIBRARY_PATH'))
self.call_mpi_barrier()
@logfunction
def call_mpi_barrier(self):
logging.debug("Waiting at the barrier")
MPI.COMM_WORLD.barrier()
def set_logger_single(self, options):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(os.path.join(options["out_path"],'log.txt'), mode='w')
fh.setFormatter(logging.Formatter('L %(relativeCreated)12d M CPU0 0' +
' %(levelname)-6s %(message)s'))
logger.addHandler(fh)
logging.info("Starting the reconstruction pipeline process")
def set_logger_parallel(self, number, rank):
logging.basicConfig(level=0, format='L %(relativeCreated)12d M' +
number + ' ' + rank +
' %(levelname)-6s %(message)s', datefmt='%H:%M:%S')
logging.info("Starting the reconstruction pipeline process")
def transport_run_plugin_list(self, exp):
"""
Runs a chain of plugins
"""
plugin_list = exp.meta_data.plugin_list.plugin_list
# run the loader plugin
self.plugin_loader(exp, plugin_list[0])
# create all output data_objects and backing files
in_data = exp.index["in_data"][exp.index["in_data"].keys()[0]]
out_data_objects = in_data.load_data(self, exp)
# clear all out_data objects in experiment dictionary
exp.clear_data_objects()
print "running the plugins"
self.plugin_loader(exp, plugin_list[0])
exp.barrier()
for i in range(1, len(plugin_list)-1):
            print(plugin_list[i]["id"])
exp.barrier()
for key in out_data_objects[i-1]:
exp.index["out_data"][key] = out_data_objects[i-1][key]
exp.barrier()
plugin = self.plugin_loader(exp, plugin_list[i], pos=i)
plugin.run_plugin(exp, self)
exp.barrier()
# delete fixed directions, as this is related only to the finished
# plugin and not to the dataset
for in_objs in plugin.parameters["in_datasets"]:
exp.index["in_data"][in_objs].delete_fixed_directions()
# close any files that are no longer required
for out_objs in plugin.parameters["out_datasets"]:
if out_objs in exp.index["in_data"].keys():
exp.index["in_data"][out_objs].save_data()
for key in exp.index["out_data"]:
exp.index["in_data"][key] = \
copy.deepcopy(exp.index["out_data"][key])
## if plugin == 0:
## cite_info = plugin.get_citation_information()
## if cite_info is not None:
## plugin_list.add_plugin_citation(output_filename, count,
## cite_info)
## group_name = "%i-%s" % (count, plugin.name)
## plugin_list.add_intermediate_data_link(output_filename,
## output, group_name)
exp.clear_out_data_objects()
# close all remaining files
for key in exp.index["in_data"].keys():
exp.index["in_data"][key].save_data()
return
@logmethod
def timeseries_field_correction(self, plugin, in_data, out_data, expInfo, params):
in_data = in_data[0]
out_data = out_data[0]
dark = in_data.meta_data.get_meta_data("dark")
flat = in_data.meta_data.get_meta_data("flat")
image_keys = in_data.meta_data.get_meta_data("image_key")
[in_slice_list, frame_list] = in_data.get_slice_list_per_process(expInfo)
[out_slice_list, frame_list] = out_data.get_slice_list_per_process(expInfo)
for count in range(len(in_slice_list)):
idx = frame_list[count]
out_data.data[out_slice_list[count]] = \
plugin.correction(in_data.data[in_slice_list[count]],
image_keys, params)
in_slice_list = in_data.get_grouped_slice_list()
@logmethod
def reconstruction_setup(self, plugin, in_data, out_data, expInfo, params):
in_data = in_data[0]
out_data = out_data[0]
[slice_list, frame_list] = in_data.get_slice_list_per_process(expInfo)
cor = in_data.meta_data.get_meta_data("centre_of_rotation")[frame_list]
count = 0
for sl in slice_list:
frame = plugin.reconstruct(np.squeeze(in_data.data[sl]),
cor[count],
out_data.get_pattern_shape(),
params)
out_data.data[sl] = frame
count += 1
plugin.count += 1
            print(plugin.count)
def filter_chunk(self, plugin, in_data, out_data, expInfo, params):
logging.debug("Running filter._filter_chunk")
in_slice_list = []
for ind in range(len(in_data)):
[slice_list, frame_list] = in_data[ind].get_slice_list_per_process(expInfo)
in_slice_list.append(slice_list)
out_data = out_data[0]
[out_slice_list, frame_list] = out_data.get_slice_list_per_process(expInfo)
padding = plugin.get_filter_padding()
for count in range(len(in_slice_list[0])):
section = []
for ind in range(len(in_data)):
section.append(in_data[ind].get_padded_slice_data(
in_slice_list[ind][count], padding, in_data[ind]))
result = plugin.filter_frame(section, params)
if type(result) == dict:
for key in result.keys():
if key == 'center_of_rotation':
frame = in_data[0].get_orthogonal_slice(in_slice_list[count],
in_data[0].core_directions[plugin.get_filter_frame_type()])
out_data.center_of_rotation[frame] = result[key]
elif key == 'data':
out_data.data[out_slice_list[count]] = \
in_data[0].get_unpadded_slice_data(in_slice_list[count],
padding, in_data[0], result)
else:
out_data.data[out_slice_list[count]] = \
in_data[0].get_unpadded_slice_data(in_slice_list[0][count], padding,
in_data[0], result)
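# Illustrative sketch (not part of the original module): the rank -> process
# name bookkeeping performed in Hdf5Transport.mpi_setup, shown with made-up
# numbers (8 MPI ranks, two process names) and without touching mpi4py.
def _example_rank_layout(size=8, rank_names=("CPU0", "GPU0")):
    machines = size // len(rank_names)          # ranks available per name
    all_processes = [[name] * machines for name in rank_names]
    processes = list(chain.from_iterable(all_processes))
    # processes == ['CPU0', 'CPU0', 'CPU0', 'CPU0',
    #               'GPU0', 'GPU0', 'GPU0', 'GPU0']
    # and rank 5 would be labelled 'GPU0', as machine number 5 % 4 == 1
    return processes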
|
|
#
# Kivy - Cross-platform UI framework
# https://kivy.org/
#
import sys
build_examples = False
if "--build_examples" in sys.argv:
build_examples = True
sys.argv.remove("--build_examples")
from kivy.utils import pi_version
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, basename, isdir
from os import walk, environ, makedirs
from distutils.command.build_ext import build_ext
from distutils.version import LooseVersion
from distutils.sysconfig import get_python_inc
from collections import OrderedDict
from time import sleep
from sysconfig import get_paths
from pathlib import Path
import logging
from setuptools import setup, Extension, find_packages
if sys.version_info[0] == 2:
logging.critical(
'Unsupported Python version detected!: Kivy 2.0.0 and higher does not '
'support Python 2. Please upgrade to Python 3, or downgrade Kivy to '
'1.11.1 - the last Kivy release that still supports Python 2.')
def ver_equal(self, other):
return self.version == other
# fix error with py3's LooseVersion comparisons
LooseVersion.__eq__ = ver_equal
def get_description():
with open(join(dirname(__file__), 'README.md'), 'rb') as fileh:
return fileh.read().decode("utf8").replace('\r\n', '\n')
def getoutput(cmd, env=None):
import subprocess
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
p.wait()
if p.returncode: # if not returncode == 0
print('WARNING: A problem occurred while running {0} (code {1})\n'
.format(cmd, p.returncode))
stderr_content = p.stderr.read()
if stderr_content:
print('{0}\n'.format(stderr_content))
return ""
return p.stdout.read()
def pkgconfig(*packages, **kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
lenviron = None
pconfig = join(sys.prefix, 'libs', 'pkgconfig')
if isdir(pconfig):
lenviron = environ.copy()
lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(
environ.get('PKG_CONFIG_PATH', ''), pconfig)
cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
results = getoutput(cmd, lenviron).split()
for token in results:
ext = token[:2].decode('utf-8')
flag = flag_map.get(ext)
if not flag:
continue
kw.setdefault(flag, []).append(token[2:].decode('utf-8'))
return kw
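# Illustrative sketch (not part of the original script): the shape of the
# dictionary that ``pkgconfig`` builds from pkg-config output. The values
# shown are hypothetical; real ones depend on the local SDL2 installation.
def _pkgconfig_example():
    flags = pkgconfig('sdl2')   # runs `pkg-config --libs --cflags sdl2`
    # A typical result looks like (illustrative only):
    # {'include_dirs': ['/usr/include/SDL2'], 'libraries': ['SDL2']}
    return flags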
def get_isolated_env_paths():
try:
# sdl2_dev is installed before setup.py is run, when installing from
# source due to pyproject.toml. However, it is installed to a
        # pip isolated env, whose paths we need to add to the compiler flags.
import kivy_deps.sdl2_dev as sdl2_dev
except ImportError:
return [], []
root = os.path.abspath(join(sdl2_dev.__path__[0], '../../../..'))
includes = [join(root, 'Include')] if isdir(join(root, 'Include')) else []
libs = [join(root, 'libs')] if isdir(join(root, 'libs')) else []
return includes, libs
# -----------------------------------------------------------------------------
# Determine on which platform we are
build_examples = build_examples or \
os.environ.get('KIVY_BUILD_EXAMPLES', '0') == '1'
platform = sys.platform
# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
platform = 'ios'
# proprietary broadcom video core drivers
if exists('/opt/vc/include/bcm_host.h'):
# The proprietary broadcom video core drivers are not available on the
# Raspberry Pi 4
if (pi_version or 4) < 4:
platform = 'rpi'
# use mesa video core drivers
if environ.get('VIDEOCOREMESA', None) == '1':
platform = 'vc'
mali_paths = (
'/usr/lib/arm-linux-gnueabihf/libMali.so',
'/usr/lib/arm-linux-gnueabihf/mali-egl/libmali.so',
'/usr/local/mali-egl/libmali.so')
if any((exists(path) for path in mali_paths)):
platform = 'mali'
# Needed when cross-compiling
if environ.get('KIVY_CROSS_PLATFORM'):
platform = environ.get('KIVY_CROSS_PLATFORM')
# -----------------------------------------------------------------------------
# Detect options
#
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_egl'] = False
c_options['use_opengl_es2'] = None
c_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'
c_options['use_sdl2'] = None
c_options['use_pangoft2'] = None
c_options['use_ios'] = False
c_options['use_android'] = False
c_options['use_mesagl'] = False
c_options['use_x11'] = False
c_options['use_wayland'] = False
c_options['use_gstreamer'] = None
c_options['use_avfoundation'] = platform in ['darwin', 'ios']
c_options['use_osx_frameworks'] = platform == 'darwin'
c_options['debug_gl'] = False
# Set the alpha size, this will be 0 on the Raspberry Pi and 8 on all other
# platforms, so SDL2 works without X11
c_options['kivy_sdl_gl_alpha_size'] = 8 if pi_version is None else 0
# now check if environ is changing the default values
for key in list(c_options.keys()):
ukey = key.upper()
if ukey in environ:
# kivy_sdl_gl_alpha_size should be an integer, the rest are booleans
value = int(environ[ukey])
if key != 'kivy_sdl_gl_alpha_size':
value = bool(value)
print('Environ change {0} -> {1}'.format(key, value))
c_options[key] = value
use_embed_signature = environ.get('USE_EMBEDSIGNATURE', '0') == '1'
use_embed_signature = use_embed_signature or bool(
platform not in ('ios', 'android'))
# -----------------------------------------------------------------------------
# We want to be able to install kivy as a wheel without a dependency
# on cython, but we also want to use cython where possible as a setup
# time dependency through `pyproject.toml` if building from source.
# There are issues with using cython at all on some platforms;
# exclude them from using or declaring cython.
# This determines whether Cython specific functionality may be used.
can_use_cython = True
if platform in ('ios', 'android'):
# NEVER use or declare cython on these platforms
print('Not using cython on %s' % platform)
can_use_cython = False
# -----------------------------------------------------------------------------
# Setup classes
# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
print("Current directory is: {}".format(os.getcwd()))
print("Source and initial build directory is: {}".format(src_path))
# __version__ is set by the exec() call below; predeclare it so linters don't complain
__version__ = None
with open(join(src_path, 'kivy', '_version.py'), encoding="utf-8") as f:
exec(f.read())
class KivyBuildExt(build_ext, object):
def __new__(cls, *a, **kw):
# Note how this class is declared as a subclass of distutils
# build_ext as the Cython version may not be available in the
# environment it is initially started in. However, if Cython
# can be used, setuptools will bring Cython into the environment
# thus its version of build_ext will become available.
# The reason why this is done as a __new__ rather than through a
# factory function is because there are distutils functions that check
        # the values provided by cmdclass with issubclass, and so it would
# result in an exception.
# The following essentially supply a dynamically generated subclass
# that mix in the cython version of build_ext so that the
# functionality provided will also be executed.
if can_use_cython:
from Cython.Distutils import build_ext as cython_build_ext
build_ext_cls = type(
'KivyBuildExt', (KivyBuildExt, cython_build_ext), {})
return super(KivyBuildExt, cls).__new__(build_ext_cls)
else:
return super(KivyBuildExt, cls).__new__(cls)
def finalize_options(self):
retval = super(KivyBuildExt, self).finalize_options()
        # Build the extensions in parallel if the option has not been set
if hasattr(self, 'parallel') and self.parallel is None:
# Use a maximum of 4 cores. If cpu_count returns None, then parallel
# build will be disabled
self.parallel = min(4, os.cpu_count() or 0)
if self.parallel:
print('Building extensions in parallel using {} cores'.format(
self.parallel))
global build_path
if (self.build_lib is not None and exists(self.build_lib) and
not self.inplace):
build_path = self.build_lib
print("Updated build directory to: {}".format(build_path))
return retval
def build_extensions(self):
# build files
config_h_fn = ('include', 'config.h')
config_pxi_fn = ('include', 'config.pxi')
config_py_fn = ('setupconfig.py', )
# generate headers
config_h = '// Autogenerated file for Kivy C configuration\n'
config_h += '#define __PY3 1\n'
config_pxi = '# Autogenerated file for Kivy Cython configuration\n'
config_pxi += 'DEF PY3 = 1\n'
config_py = '# Autogenerated file for Kivy configuration\n'
config_py += 'PY3 = 1\n'
config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format(
repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map(
str, CYTHON_UNSUPPORTED))))
# generate content
print('Build configuration is:')
for opt, value in c_options.items():
# kivy_sdl_gl_alpha_size is already an integer
if opt != 'kivy_sdl_gl_alpha_size':
value = int(bool(value))
print(' * {0} = {1}'.format(opt, value))
opt = opt.upper()
config_h += '#define __{0} {1}\n'.format(opt, value)
config_pxi += 'DEF {0} = {1}\n'.format(opt, value)
config_py += '{0} = {1}\n'.format(opt, value)
debug = bool(self.debug)
print(' * debug = {0}'.format(debug))
config_pxi += 'DEF DEBUG = {0}\n'.format(debug)
config_py += 'DEBUG = {0}\n'.format(debug)
config_pxi += 'DEF PLATFORM = "{0}"\n'.format(platform)
config_py += 'PLATFORM = "{0}"\n'.format(platform)
for fn, content in (
(config_h_fn, config_h), (config_pxi_fn, config_pxi),
(config_py_fn, config_py)):
build_fn = expand(build_path, *fn)
if self.update_if_changed(build_fn, content):
print('Updated {}'.format(build_fn))
src_fn = expand(src_path, *fn)
if src_fn != build_fn and self.update_if_changed(src_fn, content):
print('Updated {}'.format(src_fn))
c = self.compiler.compiler_type
print('Detected compiler is {}'.format(c))
if c != 'msvc':
for e in self.extensions:
e.extra_link_args += ['-lm']
super(KivyBuildExt, self).build_extensions()
def update_if_changed(self, fn, content):
need_update = True
if exists(fn):
with open(fn) as fd:
need_update = fd.read() != content
if need_update:
directory_name = dirname(fn)
if not exists(directory_name):
makedirs(directory_name)
with open(fn, 'w') as fd:
fd.write(content)
return need_update
def _check_and_fix_sdl2_mixer(f_path):
    # Between SDL_mixer 2.0.1 and 2.0.4 the included frameworks changed:
    # smpeg2 has been replaced with mpg123, which needs no fixing here.
smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
"/Versions/A/smpeg2").format(f_path)
if not exists(smpeg2_path):
return
print("Check if SDL2_mixer smpeg2 have an @executable_path")
rpath_from = ("@executable_path/../Frameworks/SDL2.framework"
"/Versions/A/SDL2")
rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
if "@executable_path" not in output:
return
print("WARNING: Your SDL2_mixer version is invalid")
print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
print("WARNING: reference to @executable_path that will fail the")
print("WARNING: execution of your application.")
print("WARNING: We are going to change:")
print("WARNING: from: {}".format(rpath_from))
print("WARNING: to: {}".format(rpath_to))
getoutput("install_name_tool -change {} {} {}".format(
rpath_from, rpath_to, smpeg2_path))
output = getoutput(("otool -L '{}'").format(smpeg2_path))
if b"@executable_path" not in output:
print("WARNING: Change successfully applied!")
print("WARNING: You'll never see this message again.")
else:
print("WARNING: Unable to apply the changes, sorry.")
# -----------------------------------------------------------------------------
print("Python path is:\n{}\n".format('\n'.join(sys.path)))
# extract version (simulate doc generation, kivy will not be fully imported)
environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
# Cython check
# on python-for-android and kivy-ios, cython usage is external
from kivy.tools.packaging.cython_cfg import get_cython_versions, get_cython_msg
CYTHON_REQUIRES_STRING, MIN_CYTHON_STRING, MAX_CYTHON_STRING, \
CYTHON_UNSUPPORTED = get_cython_versions()
cython_min_msg, cython_max_msg, cython_unsupported_msg = get_cython_msg()
if can_use_cython:
import Cython
print('\nFound Cython at', Cython.__file__)
cy_version_str = Cython.__version__
cy_ver = LooseVersion(cy_version_str)
print('Detected supported Cython version {}'.format(cy_version_str))
if cy_ver < LooseVersion(MIN_CYTHON_STRING):
print(cython_min_msg)
elif cy_ver in CYTHON_UNSUPPORTED:
print(cython_unsupported_msg)
elif cy_ver > LooseVersion(MAX_CYTHON_STRING):
print(cython_max_msg)
sleep(1)
# extra build commands go in the cmdclass dict {'command-name': CommandClass}
# see tools.packaging.{platform}.build.py for custom build commands for
# portable packages. Also e.g. we use the build_ext command from cython if it's
# installed for c extensions.
from kivy.tools.packaging.factory import FactoryBuild
cmdclass = {
'build_factory': FactoryBuild,
'build_ext': KivyBuildExt}
try:
# add build rules for portable packages to cmdclass
if platform == 'win32':
from kivy.tools.packaging.win32.build import WindowsPortableBuild
cmdclass['build_portable'] = WindowsPortableBuild
elif platform == 'darwin':
from kivy.tools.packaging.osx.build import OSXPortableBuild
cmdclass['build_portable'] = OSXPortableBuild
except ImportError:
print('User distribution detected, avoid portable command.')
# Detect which opengl version headers to use
if platform in ('android', 'darwin', 'ios', 'rpi', 'mali', 'vc'):
c_options['use_opengl_es2'] = True
elif c_options['use_opengl_es2'] is None:
c_options['use_opengl_es2'] = \
environ.get('KIVY_GRAPHICS', '').lower() == 'gles'
print('Using this graphics system: {}'.format(
['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))
# check if we are in a kivy-ios build
if platform == 'ios':
    print('Kivy-IOS project environment detected, using it.')
print('Kivy-IOS project located at {0}'.format(kivy_ios_root))
c_options['use_ios'] = True
c_options['use_sdl2'] = True
elif platform == 'android':
c_options['use_android'] = True
# detect gstreamer, only on desktop
# works if we forced the options or in autodetection
if platform not in ('ios', 'android') and (c_options['use_gstreamer']
in (None, True)):
gstreamer_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
f_path = '/Library/Frameworks/GStreamer.framework'
if not exists(f_path):
c_options['use_gstreamer'] = False
print('GStreamer framework not found, fallback on pkg-config')
else:
print('GStreamer framework found')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190',
'-framework', 'GStreamer'],
'include_dirs': [join(f_path, 'Headers')]}
elif platform == 'win32':
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
gstreamer_valid = True
c_options['use_gstreamer'] = True
else:
_includes = get_isolated_env_paths()[0] + [get_paths()['include']]
for include_dir in _includes:
if exists(join(include_dir, 'gst', 'gst.h')):
print('GStreamer found via gst.h')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'libraries':
['gstreamer-1.0', 'glib-2.0', 'gobject-2.0']}
break
if not gstreamer_valid:
# use pkg-config approach instead
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
c_options['use_gstreamer'] = True
# detect SDL2, only on desktop and iOS, or android if explicitly enabled
# works if we forced the options or in autodetection
sdl2_flags = {}
if platform == 'win32' and c_options['use_sdl2'] is None:
c_options['use_sdl2'] = True
if c_options['use_sdl2'] or (
platform not in ('android',) and c_options['use_sdl2'] is None):
sdl2_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
sdl2_frameworks_search_path = environ.get(
"KIVY_SDL2_FRAMEWORKS_SEARCH_PATH", "/Library/Frameworks"
)
sdl2_valid = True
sdl2_flags = {
'extra_link_args': [
'-F{}'.format(sdl2_frameworks_search_path),
'-Xlinker', '-rpath',
'-Xlinker', sdl2_frameworks_search_path,
'-Xlinker', '-headerpad',
'-Xlinker', '190'],
'include_dirs': [],
'extra_compile_args': ['-F{}'.format(sdl2_frameworks_search_path)]
}
for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):
f_path = '{}/{}.framework'.format(sdl2_frameworks_search_path, name)
if not exists(f_path):
print('Missing framework {}'.format(f_path))
sdl2_valid = False
continue
sdl2_flags['extra_link_args'] += ['-framework', name]
sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
print('Found sdl2 frameworks: {}'.format(f_path))
if name == 'SDL2_mixer':
_check_and_fix_sdl2_mixer(f_path)
if not sdl2_valid:
c_options['use_sdl2'] = False
print('SDL2 frameworks not found, fallback on pkg-config')
else:
c_options['use_sdl2'] = True
print('Activate SDL2 compilation')
if not sdl2_valid and platform != "ios":
# use pkg-config approach instead
sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')
if 'libraries' in sdl2_flags:
print('SDL2 found via pkg-config')
c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def expand(root, *args):
return join(root, 'kivy', *args)
class CythonExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.cython_directives = {
'c_string_encoding': 'utf-8',
'profile': 'USE_PROFILE' in environ,
'embedsignature': use_embed_signature,
'language_level': 3,
'unraisable_tracebacks': True,
}
        # XXX with pip, setuptools is imported before distutils and changes
        # our .pyx sources to .c, so cythonize never runs. Force our original
        # sources back in.
self.sources = args[1]
def merge(d1, *args):
d1 = deepcopy(d1)
for d2 in args:
for key, value in d2.items():
value = deepcopy(value)
if key in d1:
d1[key].extend(value)
else:
d1[key] = value
return d1
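# Illustrative sketch (not part of the original script): ``merge`` extends
# list values present in both dictionaries and copies the rest, which is how
# the base, GL and SDL2 flag sets are combined below. The sample flags are
# made up.
def _merge_flags_example():
    base = {'libraries': ['m'], 'include_dirs': ['kivy/include']}
    extra = {'libraries': ['SDL2'], 'extra_link_args': ['-lSDL2_ttf']}
    combined = merge(base, extra)
    # combined == {'libraries': ['m', 'SDL2'],
    #              'include_dirs': ['kivy/include'],
    #              'extra_link_args': ['-lSDL2_ttf']}
    return combined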
def determine_base_flags():
includes, libs = get_isolated_env_paths()
flags = {
'libraries': [],
'include_dirs': [join(src_path, 'kivy', 'include')] + includes,
'library_dirs': [] + libs,
'extra_link_args': [],
'extra_compile_args': []}
if c_options['use_ios']:
sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))
if not sysroot:
raise Exception('IOSSDKROOT is not set')
flags['include_dirs'] += [sysroot]
flags['extra_compile_args'] += ['-isysroot', sysroot]
flags['extra_link_args'] += ['-isysroot', sysroot]
elif platform.startswith('freebsd'):
flags['include_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'include')]
flags['library_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'lib')]
elif platform == 'darwin' and c_options['use_osx_frameworks']:
v = os.uname()
if v[2] >= '13.0.0':
if 'SDKROOT' in environ:
sysroot = join(environ['SDKROOT'], 'System/Library/Frameworks')
else:
# use xcode-select to search on the right Xcode path
# XXX use the best SDK available instead of a specific one
import platform as _platform
xcode_dev = getoutput('xcode-select -p').splitlines()[0]
sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
print('Xcode detected at {}, and using OS X{} sdk'.format(
xcode_dev, sdk_mac_ver))
sysroot = join(
xcode_dev.decode('utf-8'),
'Platforms/MacOSX.platform/Developer/SDKs',
'MacOSX{}.sdk'.format(sdk_mac_ver),
'System/Library/Frameworks')
else:
sysroot = ('/System/Library/Frameworks/'
'ApplicationServices.framework/Frameworks')
flags['extra_compile_args'] += ['-F%s' % sysroot]
flags['extra_link_args'] += ['-F%s' % sysroot]
elif platform == 'win32':
flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]
flags['library_dirs'] += [join(sys.prefix, "libs")]
return flags
def determine_gl_flags():
kivy_graphics_include = join(src_path, 'kivy', 'include')
flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
cross_sysroot = environ.get('KIVY_CROSS_SYSROOT')
if c_options['use_opengl_mock']:
return flags, base_flags
if platform == 'win32':
flags['libraries'] = ['opengl32', 'glew32']
elif platform == 'ios':
flags['libraries'] = ['GLESv2']
flags['extra_link_args'] = ['-framework', 'OpenGLES']
elif platform == 'darwin':
flags['extra_link_args'] = ['-framework', 'OpenGL']
elif platform.startswith('freebsd'):
flags['libraries'] = ['GL']
elif platform.startswith('openbsd'):
flags['include_dirs'] = ['/usr/X11R6/include']
flags['library_dirs'] = ['/usr/X11R6/lib']
flags['libraries'] = ['GL']
elif platform == 'android':
flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]
flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]
flags['libraries'] = ['GLESv2']
elif platform == 'rpi':
if not cross_sysroot:
flags['include_dirs'] = [
'/opt/vc/include',
'/opt/vc/include/interface/vcos/pthreads',
'/opt/vc/include/interface/vmcs_host/linux']
flags['library_dirs'] = ['/opt/vc/lib']
brcm_lib_files = (
'/opt/vc/lib/libbrcmEGL.so',
'/opt/vc/lib/libbrcmGLESv2.so')
else:
print("KIVY_CROSS_SYSROOT: " + cross_sysroot)
flags['include_dirs'] = [
cross_sysroot + '/usr/include',
cross_sysroot + '/usr/include/interface/vcos/pthreads',
cross_sysroot + '/usr/include/interface/vmcs_host/linux']
flags['library_dirs'] = [cross_sysroot + '/usr/lib']
brcm_lib_files = (
cross_sysroot + '/usr/lib/libbrcmEGL.so',
cross_sysroot + '/usr/lib/libbrcmGLESv2.so')
if all((exists(lib) for lib in brcm_lib_files)):
print('Found brcmEGL and brcmGLES library files '
'for rpi platform at ' + dirname(brcm_lib_files[0]))
gl_libs = ['brcmEGL', 'brcmGLESv2']
else:
print(
'Failed to find brcmEGL and brcmGLESv2 library files '
'for rpi platform, falling back to EGL and GLESv2.')
gl_libs = ['EGL', 'GLESv2']
flags['libraries'] = ['bcm_host'] + gl_libs
elif platform in ['mali', 'vc']:
flags['include_dirs'] = ['/usr/include/']
flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']
flags['libraries'] = ['GLESv2']
c_options['use_x11'] = True
c_options['use_egl'] = True
else:
flags['libraries'] = ['GL']
return flags, base_flags
def determine_sdl2():
flags = {}
if not c_options['use_sdl2']:
return flags
sdl2_path = environ.get('KIVY_SDL2_PATH', None)
if sdl2_flags and not sdl2_path and platform == 'darwin':
return sdl2_flags
includes, _ = get_isolated_env_paths()
# no pkgconfig info, or we want to use a specific sdl2 path, so perform
# manual configuration
flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']
split_chr = ';' if platform == 'win32' else ':'
sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []
if not sdl2_paths:
sdl2_paths = []
for include in includes + [join(sys.prefix, 'include')]:
sdl_inc = join(include, 'SDL2')
if isdir(sdl_inc):
sdl2_paths.append(sdl_inc)
sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])
flags['include_dirs'] = sdl2_paths
flags['extra_link_args'] = []
flags['extra_compile_args'] = []
flags['library_dirs'] = (
sdl2_paths if sdl2_paths else
['/usr/local/lib/'])
if sdl2_flags:
flags = merge(flags, sdl2_flags)
# ensure headers for all the SDL2 and sub libraries are available
libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']
can_compile = True
for lib in libs_to_check:
found = False
for d in flags['include_dirs']:
fn = join(d, '{}.h'.format(lib))
if exists(fn):
found = True
print('SDL2: found {} header at {}'.format(lib, fn))
break
if not found:
print('SDL2: missing sub library {}'.format(lib))
can_compile = False
if not can_compile:
c_options['use_sdl2'] = False
return {}
return flags
base_flags = determine_base_flags()
gl_flags, gl_flags_base = determine_gl_flags()
# -----------------------------------------------------------------------------
# sources to compile
# all the dependencies have been found manually with:
# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}
graphics_dependencies = {
'buffer.pyx': ['common.pxi'],
'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],
'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],
'compiler.pxd': ['instructions.pxd'],
'compiler.pyx': ['context_instructions.pxd'],
'cgl.pyx': ['cgl.pxd'],
'cgl_mock.pyx': ['cgl.pxd'],
'cgl_sdl2.pyx': ['cgl.pxd'],
'cgl_gl.pyx': ['cgl.pxd'],
'cgl_glew.pyx': ['cgl.pxd'],
'context_instructions.pxd': [
'transformation.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pyx': [
'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],
'gl_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],
'instructions.pxd': [
'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',
'texture.pxd', '../_event.pxd'],
'instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],
'opengl.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],
'opengl_utils.pyx': [
'opengl_utils_def.pxi', 'cgl.pxd', ],
'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],
'shader.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd',
'vertex.pxd', 'transformation.pxd', 'context.pxd',
'gl_debug_logger.pxi'],
'stencil_instructions.pxd': ['instructions.pxd'],
'stencil_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'gl_debug_logger.pxi'],
'scissor_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd'],
'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',
'vertex_instructions.pxd', 'tesselator.pxd'],
'texture.pxd': ['cgl.pxd'],
'texture.pyx': [
'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',
'cgl.pxd', 'opengl_utils.pxd',
'img_tools.pxi', 'gl_debug_logger.pxi'],
'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],
'vbo.pyx': [
'config.pxi', 'common.pxi', 'context.pxd',
'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],
'vertex.pxd': ['cgl.pxd'],
'vertex.pyx': ['config.pxi', 'common.pxi'],
'vertex_instructions.pyx': [
'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',
'instructions.pxd', 'vertex_instructions.pxd',
'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],
'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}
sources = {
'_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),
'_clock.pyx': {},
'weakproxy.pyx': {},
'properties.pyx': merge(
base_flags, {'depends': ['_event.pxd', '_metrics.pxd']}),
'_metrics.pyx': merge(base_flags, {'depends': ['_event.pxd']}),
'graphics/buffer.pyx': merge(base_flags, gl_flags_base),
'graphics/context.pyx': merge(base_flags, gl_flags_base),
'graphics/compiler.pyx': merge(base_flags, gl_flags_base),
'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/fbo.pyx': merge(base_flags, gl_flags_base),
'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),
'graphics/shader.pyx': merge(base_flags, gl_flags_base),
'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/texture.pyx': merge(base_flags, gl_flags_base),
'graphics/transformation.pyx': merge(base_flags, gl_flags_base),
'graphics/vbo.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),
'core/text/text_layout.pyx': base_flags,
'core/window/window_info.pyx': base_flags,
'graphics/tesselator.pyx': merge(base_flags, {
'include_dirs': ['kivy/lib/libtess2/Include'],
'c_depends': [
'lib/libtess2/Source/bucketalloc.c',
'lib/libtess2/Source/dict.c',
'lib/libtess2/Source/geom.c',
'lib/libtess2/Source/mesh.c',
'lib/libtess2/Source/priorityq.c',
'lib/libtess2/Source/sweep.c',
'lib/libtess2/Source/tess.c'
]
}),
'graphics/svg.pyx': merge(base_flags, gl_flags_base)
}
if c_options["use_sdl2"]:
sdl2_flags = determine_sdl2()
if c_options['use_sdl2'] and sdl2_flags:
sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(
sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)
sdl2_depends = {'depends': ['lib/sdl2.pxi']}
for source_file in ('core/window/_window_sdl2.pyx',
'core/image/_img_sdl2.pyx',
'core/text/_text_sdl2.pyx',
'core/audio/audio_sdl2.pyx',
'core/clipboard/_clipboard_sdl2.pyx'):
sources[source_file] = merge(
base_flags, sdl2_flags, sdl2_depends)
if c_options['use_pangoft2'] in (None, True) and platform not in (
'android', 'ios', 'win32'):
pango_flags = pkgconfig('pangoft2')
if pango_flags and 'libraries' in pango_flags:
print('Pango: pangoft2 found via pkg-config')
c_options['use_pangoft2'] = True
pango_depends = {'depends': [
'lib/pango/pangoft2.pxi',
'lib/pango/pangoft2.h']}
sources['core/text/_text_pango.pyx'] = merge(
base_flags, pango_flags, pango_depends)
print(sources['core/text/_text_pango.pyx'])
if platform in ('darwin', 'ios'):
# activate ImageIO provider for our core image
if platform == 'ios':
osx_flags = {'extra_link_args': [
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'AudioToolbox',
'-framework', 'CoreGraphics',
'-framework', 'QuartzCore',
'-framework', 'ImageIO',
'-framework', 'Accelerate']}
else:
osx_flags = {'extra_link_args': [
'-framework', 'ApplicationServices']}
sources['core/image/img_imageio.pyx'] = merge(
base_flags, osx_flags)
if c_options['use_avfoundation']:
import platform as _platform
mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]
if mac_ver >= [10, 7] or platform == 'ios':
osx_flags = {
'extra_link_args': ['-framework', 'AVFoundation'],
'extra_compile_args': ['-ObjC++'],
'depends': ['core/camera/camera_avfoundation_implem.m']}
sources['core/camera/camera_avfoundation.pyx'] = merge(
base_flags, osx_flags)
else:
print('AVFoundation cannot be used, OSX >= 10.7 is required')
if c_options['use_rpi']:
sources['lib/vidcore_lite/egl.pyx'] = merge(
base_flags, gl_flags)
sources['lib/vidcore_lite/bcm.pyx'] = merge(
base_flags, gl_flags)
if c_options['use_x11']:
libs = ['Xrender', 'X11']
if c_options['use_egl']:
libs += ['EGL']
else:
libs += ['GL']
sources['core/window/window_x11.pyx'] = merge(
base_flags, gl_flags, {
# FIXME add an option to depend on them but not compile them
# cause keytab is included in core, and core is included in
# window_x11
#
# 'depends': [
# 'core/window/window_x11_keytab.c',
# 'core/window/window_x11_core.c'],
'libraries': libs})
if c_options['use_gstreamer']:
sources['lib/gstplayer/_gstplayer.pyx'] = merge(
base_flags, gst_flags, {
'depends': ['lib/gstplayer/_gstplayer.h']})
# -----------------------------------------------------------------------------
# extension modules
def get_dependencies(name, deps=None):
if deps is None:
deps = []
for dep in graphics_dependencies.get(name, []):
if dep not in deps:
deps.append(dep)
get_dependencies(dep, deps)
return deps
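# A quick illustrative sketch (comments only): with the graphics_dependencies
# table above, get_dependencies('vbo.pxd') first collects its direct entries
# ('buffer.pxd', 'cgl.pxd', 'vertex.pxd') and then recurses into each of them,
# e.g. following 'vertex.pxd' -> 'cgl.pxd', while skipping anything already
# collected, so the result is the transitive closure of the table for that file.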
def resolve_dependencies(fn, depends):
fn = basename(fn)
deps = []
get_dependencies(fn, deps)
get_dependencies(fn.replace('.pyx', '.pxd'), deps)
deps_final = []
paths_to_test = ['graphics', 'include']
for dep in deps:
found = False
for path in paths_to_test:
filename = expand(src_path, path, dep)
if exists(filename):
deps_final.append(filename)
found = True
break
if not found:
print('ERROR: Dependency for {} not resolved: {}'.format(
fn, dep
))
return deps_final
def get_extensions_from_sources(sources):
ext_modules = []
if environ.get('KIVY_FAKE_BUILDEXT'):
print('Fake build_ext asked, will generate only .h/.c')
return ext_modules
for pyx, flags in sources.items():
is_graphics = pyx.startswith('graphics')
pyx_path = expand(src_path, pyx)
depends = [expand(src_path, x) for x in flags.pop('depends', [])]
c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]
if not can_use_cython:
# can't use cython, so use the .c files instead.
pyx_path = '%s.c' % pyx_path[:-4]
if is_graphics:
depends = resolve_dependencies(pyx_path, depends)
f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (
'c', 'cpp', 'm')]
module_name = '.'.join(['kivy'] + pyx[:-4].split('/'))
flags_clean = {'depends': depends}
for key, value in flags.items():
if len(value):
flags_clean[key] = value
ext_modules.append(CythonExtension(
module_name, [pyx_path] + f_depends + c_depends, **flags_clean))
return ext_modules
ext_modules = get_extensions_from_sources(sources)
# -----------------------------------------------------------------------------
# automatically detect data files
split_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))
data_file_prefix = 'share/kivy-'
examples = {}
examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',
'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',
'glsl', 'zip')
for root, subFolders, files in walk('examples'):
for fn in files:
ext = fn.split('.')[-1].lower()
if ext not in examples_allowed_ext:
continue
filename = join(root, fn)
directory = '%s%s' % (data_file_prefix, dirname(filename))
if directory not in examples:
examples[directory] = []
examples[directory].append(filename)
binary_deps = []
binary_deps_path = join(src_path, 'kivy', 'binary_deps')
if isdir(binary_deps_path):
for root, dirnames, filenames in walk(binary_deps_path):
for fname in filenames:
binary_deps.append(
join(root.replace(binary_deps_path, 'binary_deps'), fname))
def glob_paths(*patterns, excludes=('.pyc', )):
files = []
base = Path(join(src_path, 'kivy'))
for pat in patterns:
for f in base.glob(pat):
if f.suffix in excludes:
continue
files.append(str(f.relative_to(base)))
return files
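# For orientation (a sketch, not exhaustive): glob_paths resolves its patterns
# relative to the kivy/ package directory and returns package-relative paths,
# so a call like glob_paths('*.pxd') would yield entries such as
# 'properties.pxd' (assuming that file is present, as the depends above
# suggest), which is the form package_data expects below.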
# -----------------------------------------------------------------------------
# setup !
if not build_examples:
setup(
name='Kivy',
version=__version__,
author='Kivy Team and other contributors',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org',
license='MIT',
description=(
'A software library for rapid development of '
'hardware-accelerated multitouch applications.'),
long_description=get_description(),
long_description_content_type='text/markdown',
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=find_packages(include=['kivy*']),
package_dir={'kivy': 'kivy'},
package_data={
'kivy':
glob_paths('*.pxd', '*.pxi') +
glob_paths('**/*.pxd', '**/*.pxi') +
glob_paths('data/**/*.*') +
glob_paths('include/**/*.*') +
glob_paths('tools/**/*.*', excludes=('.pyc', '.enc')) +
glob_paths('graphics/**/*.h') +
glob_paths('tests/**/*.*') +
[
'setupconfig.py',
] + binary_deps
},
data_files=[] if split_examples else list(examples.items()),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Graphics :: Viewers',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Visualization',
('Topic :: Software Development :: Libraries :: '
'Application Frameworks'),
'Topic :: Software Development :: User Interfaces'])
else:
setup(
name='Kivy-examples',
version=__version__,
author='Kivy Team and other contributors',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org',
license='MIT',
description=('Kivy examples.'),
long_description_content_type='text/markdown',
long_description=get_description(),
data_files=list(examples.items()))
|
|
import math
from datetime import datetime, timedelta
from xmlrpclib import ServerProxy
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseForbidden
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.db import connection
from django.db.models import Q, F, Sum
from django.utils import translation
from django.utils.encoding import smart_str
from django.views.decorators.http import require_POST
from django.db import transaction
from django.views.decorators.csrf import csrf_exempt
from djangobb_forum.util import render_to, paged, build_form, paginate, set_language
from djangobb_forum.models import Category, Forum, Topic, Post, Profile, Reputation,\
Report, Attachment, PostTracking
from djangobb_forum.forms import AddPostForm, EditPostForm, UserSearchForm,\
PostSearchForm, ReputationForm, MailToForm, EssentialsProfileForm,\
PersonalProfileForm, MessagingProfileForm, PersonalityProfileForm,\
DisplayProfileForm, PrivacyProfileForm, ReportForm, UploadAvatarForm
from djangobb_forum.templatetags import forum_extras
from djangobb_forum import settings as forum_settings
from djangobb_forum.util import smiles, convert_text_to_html
from djangobb_forum.templatetags.forum_extras import forum_moderated_by
from haystack.query import SearchQuerySet, SQ
@render_to('forum/index.html')
def index(request, full=True):
users_cached = cache.get('users_online', {})
users_online = users_cached and User.objects.filter(id__in = users_cached.keys()) or []
guests_cached = cache.get('guests_online', {})
guest_count = len(guests_cached)
users_count = len(users_online)
cats = {}
forums = {}
user_groups = request.user.groups.all()
if request.user.is_anonymous(): # in django 1.1 EmptyQuerySet raise exception
user_groups = []
_forums = Forum.objects.filter(
Q(category__groups__in=user_groups) | \
Q(category__groups__isnull=True)).select_related('last_post__topic',
'last_post__user',
'category')
for forum in _forums:
cat = cats.setdefault(forum.category.id,
{'id': forum.category.id, 'cat': forum.category, 'forums': []})
cat['forums'].append(forum)
forums[forum.id] = forum
cmpdef = lambda a, b: cmp(a['cat'].position, b['cat'].position)
cats = sorted(cats.values(), cmpdef)
to_return = {'cats': cats,
'posts': Post.objects.count(),
'topics': Topic.objects.count(),
'users': User.objects.count(),
'users_online': users_online,
'online_count': users_count,
'guest_count': guest_count,
'last_user': User.objects.latest('date_joined'),
}
if full:
return to_return
else:
to_return['TEMPLATE'] = 'forum/lofi/index.html'
return to_return
@transaction.commit_on_success
@render_to('forum/moderate.html')
@paged('topics', forum_settings.FORUM_PAGE_SIZE)
def moderate(request, forum_id):
forum = get_object_or_404(Forum, pk=forum_id)
topics = forum.topics.order_by('-sticky', '-updated').select_related()
if request.user.is_superuser or request.user in forum.moderators.all():
topic_ids = request.POST.getlist('topic_id')
if 'move_topics' in request.POST:
return {
'categories': Category.objects.all(),
'topic_ids': topic_ids,
'exclude_forum': forum,
'TEMPLATE': 'forum/move_topic.html'
}
elif 'delete_topics' in request.POST:
for topic_id in topic_ids:
topic = get_object_or_404(Topic, pk=topic_id)
topic.delete()
return HttpResponseRedirect(reverse('djangobb:index'))
elif 'open_topics' in request.POST:
for topic_id in topic_ids:
open_close_topic(request, topic_id)
return HttpResponseRedirect(reverse('djangobb:index'))
elif 'close_topics' in request.POST:
for topic_id in topic_ids:
open_close_topic(request, topic_id)
return HttpResponseRedirect(reverse('djangobb:index'))
return {'forum': forum,
'topics': topics,
#'sticky_topics': forum.topics.filter(sticky=True),
'paged_qs': topics,
'posts': forum.posts.count(),
}
else:
raise Http404
@render_to('forum/search_topics.html')
@paged('results', forum_settings.SEARCH_PAGE_SIZE)
def search(request):
# TODO: move to form
if 'action' in request.GET:
action = request.GET['action']
        # FIXME: show_user for anonymous users raises an exception,
        # Django bug http://code.djangoproject.com/changeset/14087 :|
        groups = request.user.groups.all() or []  # can be dropped after Django > 1.2.3
topics = Topic.objects.filter(
Q(forum__category__groups__in=groups) | \
Q(forum__category__groups__isnull=True))
if action == 'show_24h':
date = datetime.today() - timedelta(1)
topics = topics.filter(created__gte=date)
elif action == 'show_new':
last_read = PostTracking.objects.get(user=request.user).last_read
if last_read:
topics = topics.filter(last_post__updated__gte=last_read).all()
else:
                # searching more than forum_settings.SEARCH_PAGE_SIZE topics this way is not a good idea
topics = [topic for topic in topics[:forum_settings.SEARCH_PAGE_SIZE] if forum_extras.has_unreads(topic, request.user)]
elif action == 'show_unanswered':
topics = topics.filter(post_count=1)
elif action == 'show_subscriptions':
topics = topics.filter(subscribers__id=request.user.id)
elif action == 'show_user':
user_id = request.GET['user_id']
posts = Post.objects.filter(user__id=user_id)
topics = [post.topic for post in posts if post.topic in topics]
elif action == 'search':
keywords = request.GET.get('keywords')
author = request.GET.get('author')
forum = request.GET.get('forum')
search_in = request.GET.get('search_in')
sort_by = request.GET.get('sort_by')
sort_dir = request.GET.get('sort_dir')
if not (keywords or author):
return HttpResponseRedirect(reverse('djangobb:search'))
query = SearchQuerySet().models(Post)
if author:
query = query.filter(author__username=author)
if forum != u'0':
query = query.filter(forum__id=forum)
if keywords:
if search_in == 'all':
query = query.filter(SQ(topic=keywords) | SQ(text=keywords))
                elif search_in == 'message':
query = query.filter(text=keywords)
elif search_in == 'topic':
query = query.filter(topic=keywords)
order = {'0': 'created',
'1': 'user',
'2': 'topic',
'3': 'forum'}.get(sort_by, 'created')
if sort_dir == 'DESC':
order = '-' + order
posts = query.order_by(order)
if 'topics' in request.GET['show_as']:
topics = []
                topics_to_exclude = SQ()
for post in posts:
if post.object.topic not in topics:
if post.object.topic.forum.category.has_access(request.user):
topics.append(post.object.topic)
else:
topics_to_exclude |= SQ(topic=post.object.topic)
if topics_to_exclude:
posts = posts.exclude(topics_to_exclude)
return {'paged_qs': topics}
elif 'posts' in request.GET['show_as']:
return {'paged_qs': posts,
'TEMPLATE': 'forum/search_posts.html'
}
return {'paged_qs': topics}
else:
form = PostSearchForm()
return {'categories': Category.objects.all(),
'form': form,
'TEMPLATE': 'forum/search_form.html'
}
@login_required
@render_to('forum/report.html')
def misc(request):
if 'action' in request.GET:
action = request.GET['action']
if action =='markread':
user = request.user
PostTracking.objects.filter(user__id=user.id).update(last_read=datetime.now(), topics=None)
return HttpResponseRedirect(reverse('djangobb:index'))
elif action == 'report':
if request.GET.get('post_id', ''):
post_id = request.GET['post_id']
post = get_object_or_404(Post, id=post_id)
form = build_form(ReportForm, request, reported_by=request.user, post=post_id)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(post.get_absolute_url())
return {'form':form}
elif 'submit' in request.POST and 'mail_to' in request.GET:
form = MailToForm(request.POST)
if form.is_valid():
user = get_object_or_404(User, username=request.GET['mail_to'])
subject = form.cleaned_data['subject']
body = form.cleaned_data['body'] + '\n %s %s [%s]' % (Site.objects.get_current().domain,
request.user.username,
request.user.email)
user.email_user(subject, body, request.user.email)
return HttpResponseRedirect(reverse('djangobb:index'))
elif 'mail_to' in request.GET:
user = get_object_or_404(User, username=request.GET['mail_to'])
form = MailToForm()
return {'form':form,
'user': user,
'TEMPLATE': 'forum/mail_to.html'
}
@render_to('forum/forum.html')
@paged('topics', forum_settings.FORUM_PAGE_SIZE)
def show_forum(request, forum_id, full=True):
forum = get_object_or_404(Forum, pk=forum_id)
if not forum.category.has_access(request.user):
return HttpResponseForbidden()
topics = forum.topics.order_by('-sticky', '-updated').select_related()
moderator = request.user.is_superuser or\
request.user in forum.moderators.all()
to_return = {'categories': Category.objects.all(),
'forum': forum,
'paged_qs': topics,
'posts': forum.post_count,
'topics': forum.topic_count,
'moderator': moderator,
}
if full:
return to_return
else:
pages, paginator, paged_list_name = paginate(topics, request, forum_settings.FORUM_PAGE_SIZE)
to_return.update({'pages': pages,
'paginator': paginator,
'topics': paged_list_name,
'TEMPLATE': 'forum/lofi/forum.html'
})
del to_return['paged_qs']
return to_return
@transaction.commit_on_success
@render_to('forum/topic.html')
@paged('posts', forum_settings.TOPIC_PAGE_SIZE)
def show_topic(request, topic_id, full=True):
topic = get_object_or_404(Topic.objects.select_related(), pk=topic_id)
if not topic.forum.category.has_access(request.user):
return HttpResponseForbidden()
Topic.objects.filter(pk=topic.id).update(views=F('views') + 1)
last_post = topic.last_post
if request.user.is_authenticated():
topic.update_read(request.user)
posts = topic.posts.all().select_related()
users = set(post.user.id for post in posts)
profiles = Profile.objects.filter(user__pk__in=users)
profiles = dict((profile.user_id, profile) for profile in profiles)
for post in posts:
post.user.forum_profile = profiles[post.user.id]
if forum_settings.REPUTATION_SUPPORT:
replies_list = Reputation.objects.filter(to_user__pk__in=users).values('to_user_id').annotate(Sum('sign'))
replies = {}
for r in replies_list:
replies[r['to_user_id']] = r['sign__sum']
for post in posts:
post.user.forum_profile.reply_total = replies.get(post.user.id, 0)
initial = {}
if request.user.is_authenticated():
initial = {'markup': request.user.forum_profile.markup}
form = AddPostForm(topic=topic, initial=initial)
moderator = request.user.is_superuser or\
request.user in topic.forum.moderators.all()
if request.user.is_authenticated() and request.user in topic.subscribers.all():
subscribed = True
else:
subscribed = False
highlight_word = request.GET.get('hl', '')
if full:
return {'categories': Category.objects.all(),
'topic': topic,
'last_post': last_post,
'form': form,
'moderator': moderator,
'subscribed': subscribed,
'paged_qs': posts,
'highlight_word': highlight_word,
}
else:
pages, paginator, paged_list_name = paginate(posts, request, forum_settings.TOPIC_PAGE_SIZE)
return {'categories': Category.objects.all(),
'topic': topic,
'pages': pages,
'paginator': paginator,
'posts': paged_list_name,
'TEMPLATE': 'forum/lofi/topic.html'
}
@login_required
@transaction.commit_on_success
@render_to('forum/add_post.html')
def add_post(request, forum_id, topic_id):
forum = None
topic = None
posts = None
if forum_id:
forum = get_object_or_404(Forum, pk=forum_id)
if not forum.category.has_access(request.user):
return HttpResponseForbidden()
elif topic_id:
topic = get_object_or_404(Topic, pk=topic_id)
posts = topic.posts.all().select_related()
if not topic.forum.category.has_access(request.user):
return HttpResponseForbidden()
if topic and topic.closed:
return HttpResponseRedirect(topic.get_absolute_url())
ip = request.META.get('REMOTE_ADDR', None)
form = build_form(AddPostForm, request, topic=topic, forum=forum,
user=request.user, ip=ip,
initial={'markup': request.user.forum_profile.markup})
if 'post_id' in request.GET:
post_id = request.GET['post_id']
post = get_object_or_404(Post, pk=post_id)
form.fields['body'].initial = "[quote=%s]%s[/quote]" % (post.user, post.body)
if form.is_valid():
        post = form.save()
icq_notify(post)
return HttpResponseRedirect(post.get_absolute_url())
return {'form': form,
'posts': posts,
'topic': topic,
'forum': forum,
}
@transaction.commit_on_success
@render_to('forum/user.html')
def user(request, username):
user = get_object_or_404(User, username=username)
if request.user.is_authenticated() and user == request.user or request.user.is_superuser:
if 'section' in request.GET:
section = request.GET['section']
profile_url = reverse('djangobb:forum_profile', args=[user.username]) + '?section=' + section
if section == 'privacy':
form = build_form(PrivacyProfileForm, request, instance=user.forum_profile)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(profile_url)
return {'active_menu':'privacy',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_privacy.html'
}
elif section == 'display':
form = build_form(DisplayProfileForm, request, instance=user.forum_profile)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(profile_url)
return {'active_menu':'display',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_display.html'
}
elif section == 'personality':
form = build_form(PersonalityProfileForm, request, markup=user.forum_profile.markup, instance=user.forum_profile)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(profile_url)
return {'active_menu':'personality',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_personality.html'
}
elif section == 'messaging':
form = build_form(MessagingProfileForm, request, instance=user.forum_profile)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(profile_url)
return {'active_menu':'messaging',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_messaging.html'
}
elif section == 'personal':
form = build_form(PersonalProfileForm, request, instance=user.forum_profile, user=user)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(profile_url)
return {'active_menu':'personal',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_personal.html'
}
elif section == 'essentials':
form = build_form(EssentialsProfileForm, request, instance=user.forum_profile,
user_view=user, user_request=request.user)
if request.method == 'POST' and form.is_valid():
profile = form.save()
set_language(request, profile.language)
return HttpResponseRedirect(profile_url)
return {'active_menu':'essentials',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_essentials.html'
}
elif 'action' in request.GET:
action = request.GET['action']
if action == 'upload_avatar':
form = build_form(UploadAvatarForm, request, instance=user.forum_profile)
if request.method == 'POST' and form.is_valid():
form.save()
return HttpResponseRedirect(reverse('djangobb:forum_profile', args=[user.username]))
return {'form': form,
'avatar_width': forum_settings.AVATAR_WIDTH,
'avatar_height': forum_settings.AVATAR_HEIGHT,
'TEMPLATE': 'forum/upload_avatar.html'
}
elif action == 'delete_avatar':
profile = get_object_or_404(Profile, user=request.user)
profile.avatar = None
profile.save()
return HttpResponseRedirect(reverse('djangobb:forum_profile', args=[user.username]))
else:
form = build_form(EssentialsProfileForm, request, instance=user.forum_profile,
user_view=user, user_request=request.user)
if request.method == 'POST' and form.is_valid():
profile = form.save()
set_language(request, profile.language)
return HttpResponseRedirect(reverse('djangobb:forum_profile', args=[user.username]))
return {'active_menu':'essentials',
'profile': user,
'form': form,
'TEMPLATE': 'forum/profile/profile_essentials.html'
}
else:
topic_count = Topic.objects.filter(user__id=user.id).count()
if user.forum_profile.post_count < forum_settings.POST_USER_SEARCH and not request.user.is_authenticated():
return HttpResponseRedirect(reverse('user_signin') + '?next=%s' % request.path)
return {'profile': user,
'topic_count': topic_count,
}
@login_required
@transaction.commit_on_success
@render_to('forum/reputation.html')
def reputation(request, username):
user = get_object_or_404(User, username=username)
form = build_form(ReputationForm, request, from_user=request.user, to_user=user)
if 'action' in request.GET:
if request.user == user:
            return HttpResponseForbidden(u'You cannot change your own reputation')
if 'post_id' in request.GET:
sign = 0
post_id = request.GET['post_id']
form.fields['post'].initial = post_id
if request.GET['action'] == 'plus':
form.fields['sign'].initial = 1
elif request.GET['action'] == 'minus':
form.fields['sign'].initial = -1
return {'form': form,
'TEMPLATE': 'forum/reputation_form.html'
}
else:
raise Http404
elif request.method == 'POST':
if 'del_reputation' in request.POST and request.user.is_superuser:
reputation_list = request.POST.getlist('reputation_id')
for reputation_id in reputation_list:
reputation = get_object_or_404(Reputation, pk=reputation_id)
reputation.delete()
return HttpResponseRedirect(reverse('djangobb:index'))
elif form.is_valid():
form.save()
post_id = request.POST['post']
post = get_object_or_404(Post, id=post_id)
return HttpResponseRedirect(post.get_absolute_url())
else:
return {'form': form,
'TEMPLATE': 'forum/reputation_form.html'
}
else:
reputations = Reputation.objects.filter(to_user__id=user.id).order_by('-time').select_related()
return {'reputations': reputations,
'profile': user.forum_profile,
}
def show_post(request, post_id):
post = get_object_or_404(Post, pk=post_id)
count = post.topic.posts.filter(created__lt=post.created).count() + 1
page = math.ceil(count / float(forum_settings.TOPIC_PAGE_SIZE))
url = '%s?page=%d#post-%d' % (reverse('djangobb:topic', args=[post.topic.id]), page, post.id)
return HttpResponseRedirect(url)
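# Rough arithmetic behind the redirect above (assuming, for illustration, a
# TOPIC_PAGE_SIZE of 25): the 25th post in a topic gives ceil(25 / 25.0) == 1,
# while the 26th gives ceil(26 / 25.0) == 2, so the link always points at the
# page that actually contains the post.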
@login_required
@transaction.commit_on_success
@render_to('forum/edit_post.html')
def edit_post(request, post_id):
from djangobb_forum.templatetags.forum_extras import forum_editable_by
post = get_object_or_404(Post, pk=post_id)
topic = post.topic
if not forum_editable_by(post, request.user):
return HttpResponseRedirect(post.get_absolute_url())
form = build_form(EditPostForm, request, topic=topic, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.updated_by = request.user
post.save()
return HttpResponseRedirect(post.get_absolute_url())
return {'form': form,
'post': post,
}
@login_required
@transaction.commit_on_success
@render_to('forum/delete_posts.html')
@paged('posts', forum_settings.TOPIC_PAGE_SIZE)
def delete_posts(request, topic_id):
topic = Topic.objects.select_related().get(pk=topic_id)
if forum_moderated_by(topic, request.user):
deleted = False
post_list = request.POST.getlist('post')
for post_id in post_list:
if not deleted:
deleted = True
delete_post(request, post_id)
if deleted:
return HttpResponseRedirect(topic.get_absolute_url())
last_post = topic.posts.latest()
if request.user.is_authenticated():
topic.update_read(request.user)
posts = topic.posts.all().select_related()
profiles = Profile.objects.filter(user__pk__in=set(x.user.id for x in posts))
profiles = dict((x.user_id, x) for x in profiles)
for post in posts:
post.user.forum_profile = profiles[post.user.id]
initial = {}
if request.user.is_authenticated():
initial = {'markup': request.user.forum_profile.markup}
form = AddPostForm(topic=topic, initial=initial)
moderator = request.user.is_superuser or\
request.user in topic.forum.moderators.all()
if request.user.is_authenticated() and request.user in topic.subscribers.all():
subscribed = True
else:
subscribed = False
return {
'topic': topic,
'last_post': last_post,
'form': form,
'moderator': moderator,
'subscribed': subscribed,
'paged_qs': posts,
}
@login_required
@transaction.commit_on_success
@render_to('forum/move_topic.html')
def move_topic(request):
if 'topic_id' in request.GET:
        # if moving only one topic
topic_ids = [request.GET['topic_id']]
else:
topic_ids = request.POST.getlist('topic_id')
first_topic = topic_ids[0]
topic = get_object_or_404(Topic, pk=first_topic)
from_forum = topic.forum
if 'to_forum' in request.POST:
to_forum_id = int(request.POST['to_forum'])
to_forum = get_object_or_404(Forum, pk=to_forum_id)
for topic_id in topic_ids:
topic = get_object_or_404(Topic, pk=topic_id)
if topic.forum != to_forum:
if forum_moderated_by(topic, request.user):
topic.forum = to_forum
topic.save()
#TODO: not DRY
try:
last_post = Post.objects.filter(topic__forum__id=from_forum.id).latest()
except Post.DoesNotExist:
last_post = None
from_forum.last_post = last_post
from_forum.topic_count = from_forum.topics.count()
from_forum.post_count = from_forum.posts.count()
from_forum.save()
return HttpResponseRedirect(to_forum.get_absolute_url())
return {'categories': Category.objects.all(),
'topic_ids': topic_ids,
'exclude_forum': from_forum,
}
@login_required
@transaction.commit_on_success
def stick_unstick_topic(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
if forum_moderated_by(topic, request.user):
topic.sticky = not topic.sticky
topic.save()
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
@transaction.commit_on_success
@render_to('forum/delete_post.html')
def delete_post(request, post_id):
post = get_object_or_404(Post, pk=post_id)
last_post = post.topic.last_post
topic = post.topic
forum = post.topic.forum
allowed = False
if request.user.is_superuser or\
request.user in post.topic.forum.moderators.all() or \
(post.user == request.user and post == last_post):
allowed = True
if not allowed:
return HttpResponseRedirect(post.get_absolute_url())
post.delete()
try:
Topic.objects.get(pk=topic.id)
except Topic.DoesNotExist:
#removed latest post in topic
return HttpResponseRedirect(forum.get_absolute_url())
else:
return HttpResponseRedirect(topic.get_absolute_url())
@login_required
@transaction.commit_on_success
def open_close_topic(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
if forum_moderated_by(topic, request.user):
topic.closed = not topic.closed
topic.save()
return HttpResponseRedirect(topic.get_absolute_url())
@render_to('forum/users.html')
@paged('users', forum_settings.USERS_PAGE_SIZE)
def users(request):
users = User.objects.filter(forum_profile__post_count__gte=forum_settings.POST_USER_SEARCH).order_by('username')
form = UserSearchForm(request.GET)
users = form.filter(users)
return {'paged_qs': users,
'form': form,
}
@login_required
@transaction.commit_on_success
def delete_subscription(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
topic.subscribers.remove(request.user)
if 'from_topic' in request.GET:
return HttpResponseRedirect(reverse('djangobb:topic', args=[topic.id]))
else:
return HttpResponseRedirect(reverse('djangobb:forum_profile', args=[request.user.username]))
@login_required
@transaction.commit_on_success
def add_subscription(request, topic_id):
topic = get_object_or_404(Topic, pk=topic_id)
topic.subscribers.add(request.user)
return HttpResponseRedirect(reverse('djangobb:topic', args=[topic.id]))
@login_required
def show_attachment(request, hash):
attachment = get_object_or_404(Attachment, hash=hash)
    with open(attachment.get_absolute_path(), 'rb') as fp:
        file_data = fp.read()
    response = HttpResponse(file_data, mimetype=attachment.content_type)
response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(attachment.name)
return response
@login_required
@csrf_exempt
@render_to('forum/post_preview.html')
def post_preview(request):
'''Preview for markitup'''
markup = request.user.forum_profile.markup
data = request.POST.get('data', '')
data = convert_text_to_html(data, markup)
if forum_settings.SMILES_SUPPORT:
data = smiles(data)
return {'data': data}
def icq_notify(post):
token = "V9PQJ9Dzn9egfq237hqr3RdFnnyXJ7F1gBFg2BfoYoIMije4LHqRYNCu88jSd73s"
url = "http://127.0.0.1:7123"
notified_people = ("332381612", "452440557", "574359334", "422534350")
topic = post.topic
text = u"%s hat in Thema '%s' was neues geschrieben.\n\nhttp://www.shockg.de%s" % (post.user.username, topic.name, post.get_absolute_url())
proxy = ServerProxy(url)
for person in notified_people:
# Silence all errors, this is not fatal at all
try:
proxy.send_message(token, person, text.strip())
        except Exception:
pass
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Convert from VPP API trace to JSON.
import argparse
import struct
import sys
import logging
import json
from ipaddress import *
from collections import namedtuple
from vpp_papi import MACAddress, VPPApiJSONFiles
import base64
import os
import textwrap
def serialize_likely_small_unsigned_integer(x):
r = x
# Low bit set means it fits into 1 byte.
if r < (1 << 7):
return struct.pack("B", 1 + 2 * r)
# Low 2 bits 1 0 means it fits into 2 bytes.
r -= (1 << 7)
if r < (1 << 14):
return struct.pack("<H", 4 * r + 2)
r -= (1 << 14)
if r < (1 << 29):
return struct.pack("<I", 8 * r + 4)
return struct.pack("<BQ", 0, x)
def unserialize_likely_small_unsigned_integer(data, offset):
y = struct.unpack_from("B", data, offset)[0]
if y & 1:
return y // 2, 1
r = 1 << 7
if y & 2:
p = struct.unpack_from("B", data, offset + 1)[0]
r += (y // 4) + (p << 6)
return r, 2
r += 1 << 14
if y & 4:
(p1, p2, p3) = struct.unpack_from("BBB", data, offset+1)
r += ((y // 8) + (p1 << (5 + 8 * 0))
+ (p2 << (5 + 8 * 1)) + (p3 << (5 + 8 * 2)))
return r, 3
return struct.unpack_from(">Q", data, offset+1)[0], 8
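# A minimal, never-called sketch of the round trip between the two helpers
# above; the sample values are deliberately kept inside the 1- and 2-byte
# encoding ranges.
def _roundtrip_small_unsigned_integer_example():
    for v in (0, 5, 127, 300):
        blob = serialize_likely_small_unsigned_integer(v)
        out, consumed = unserialize_likely_small_unsigned_integer(blob, 0)
        # the value survives the round trip and the reported size matches
        assert out == v and consumed == len(blob)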
def serialize_cstring(s):
bstring = s.encode('utf8')
l = len(bstring)
b = serialize_likely_small_unsigned_integer(l)
b += struct.pack('{}s'.format(l), bstring)
return b
def unserialize_cstring(data, offset):
l, size = unserialize_likely_small_unsigned_integer(data, offset)
name = struct.unpack_from('{}s'.format(l), data, offset+size)[0]
return name.decode('utf8'), size + len(name)
def unserialize_msgtbl(data, offset):
msgtable_by_id = {}
msgtable_by_name = {}
i = 0
nmsg = struct.unpack_from(">I", data, offset)[0]
o = 4
while i < nmsg:
(msgid, size) = unserialize_likely_small_unsigned_integer(
data, offset + o)
o += size
(name, size) = unserialize_cstring(data, offset + o)
o += size
msgtable_by_id[msgid] = name
msgtable_by_name[name] = msgid
i += 1
return msgtable_by_id, msgtable_by_name, o
def serialize_msgtbl(messages):
offset = 0
# XXX 100K?
data = bytearray(100000)
nmsg = len(messages)
data = struct.pack(">I", nmsg)
for k, v in messages.items():
name = k + '_' + v.crc[2:]
data += serialize_likely_small_unsigned_integer(v._vl_msg_id)
data += serialize_cstring(name)
return data
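# Wire layout of the message table handled by the two functions above: a
# big-endian u32 message count, then one record per message consisting of a
# variable-length message id (serialize_likely_small_unsigned_integer)
# followed by a counted string of the form "<name>_<crc>".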
def apitrace2json(messages, filename):
result = []
with open(filename, 'rb') as file:
bytes_read = file.read()
# Read header
(nitems, msgtbl_size, wrapped) = struct.unpack_from(">IIB",
bytes_read, 0)
logging.debug('nitems: {} message table size: {} wrapped: {}'
.format(nitems, msgtbl_size, wrapped))
if wrapped:
sys.stdout.write('Wrapped/incomplete trace, results may vary')
offset = 9
msgtbl_by_id, msgtbl_by_name, size = unserialize_msgtbl(bytes_read,
offset)
offset += size
i = 0
while i < nitems:
size = struct.unpack_from(">I", bytes_read, offset)[0]
offset += 4
if size == 0:
break
msgid = struct.unpack_from(">H", bytes_read, offset)[0]
name = msgtbl_by_id[msgid]
n = name[:name.rfind("_")]
msgobj = messages[n]
if n + '_' + msgobj.crc[2:] != name:
sys.exit("CRC Mismatch between JSON API definition "
"and trace. {}".format(name))
x, s = msgobj.unpack(bytes_read[offset:offset+size])
msgname = type(x).__name__
offset += size
# Replace named tuple illegal _0
y = x._asdict()
y.pop('_0')
result.append({'name': msgname, 'args': y})
i += 1
file.close()
return result
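# For reference, the trace file parsed above is: a 9-byte header (">IIB":
# record count, message-table size, wrapped flag), the message table, and then
# one record per message consisting of a big-endian u32 length followed by the
# raw message bytes, whose leading big-endian u16 is the message id.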
def json2apitrace(messages, filename):
"""Input JSON file and API message definition. Output API trace
bytestring."""
msgs = []
with open(filename, 'r') as file:
msgs = json.load(file, object_hook=vpp_decode)
result = b''
for m in msgs:
name = m['name']
msgobj = messages[name]
m['args']['_vl_msg_id'] = messages[name]._vl_msg_id
b = msgobj.pack(m['args'])
result += struct.pack('>I', len(b))
result += b
return len(msgs), result
class VPPEncoder(json.JSONEncoder):
def default(self, o):
if type(o) is bytes:
return "base64:" + base64.b64encode(o).decode('utf-8')
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, o)
def encode(self, obj):
def hint_tuples(item):
if isinstance(item, tuple):
return hint_tuples(item._asdict())
if isinstance(item, list):
return [hint_tuples(e) for e in item]
if isinstance(item, dict):
return {key: hint_tuples(value) for key, value in item.items()}
else:
return item
return super(VPPEncoder, self).encode(hint_tuples(obj))
def vpp_decode(obj):
for k, v in obj.items():
if type(v) is str and v.startswith('base64:'):
            obj[k] = base64.b64decode(v[7:])
return obj
def vpp_encoder(obj):
if isinstance(obj, IPv6Network):
return str(obj)
if isinstance(obj, IPv4Network):
return str(obj)
if isinstance(obj, IPv6Address):
return str(obj)
if isinstance(obj, IPv4Address):
return str(obj)
if isinstance(obj, MACAddress):
return str(obj)
if type(obj) is bytes:
return "base64:" + base64.b64encode(obj).decode('ascii')
raise TypeError('Unknown object {} {}\n'.format(type(obj), obj))
message_filter = {
'control_ping',
'memclnt_create',
'memclnt_delete',
'get_first_msg_id',
}
argument_filter = {
'client_index',
'context',
}
def topython(messages, services):
import pprint
pp = pprint.PrettyPrinter()
s = '''\
#!/usr/bin/env python3
from vpp_papi import VPP, VppEnum
vpp = VPP(use_socket=True)
vpp.connect(name='vppapitrace')
'''
for m in messages:
if m['name'] not in services:
s += '# ignoring reply message: {}\n'.format(m['name'])
continue
if m['name'] in message_filter:
s += '# ignoring message {}\n'.format(m['name'])
continue
for k in argument_filter:
try:
m['args'].pop(k)
except KeyError:
pass
a = pp.pformat(m['args'])
s += 'rv = vpp.api.{}(**{})\n'.format(m['name'], a)
s += 'print("RV:", rv)\n'
s += 'vpp.disconnect()\n'
return s
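# As a hypothetical example of the generated script: a trace entry like
# {'name': 'show_version', 'args': {}} would be emitted as
#
#   rv = vpp.api.show_version(**{})
#   print("RV:", rv)
#
# i.e. one vpp_papi call plus a print per request message.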
def todump_items(k, v, level):
klen = len(k) if k else 0
spaces = ' ' * level + ' ' * (klen + 3)
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=spaces, width=60)
s = ''
if type(v) is dict:
if k:
s += ' ' * level + '{}:\n'.format(k)
for k2, v2 in v.items():
s += todump_items(k2, v2, level + 1)
return s
if type(v) is list:
for v2 in v:
s += '{}'.format(todump_items(k, v2, level))
return s
if type(v) is bytes:
w = wrapper.fill(bytes.hex(v))
s += ' ' * level + '{}: {}\n'.format(k, w)
else:
if type(v) is str:
v = wrapper.fill(v)
s += ' ' * level + '{}: {}\n'.format(k, v)
return s
def todump(messages, services):
import pprint
pp = pprint.PrettyPrinter()
s = ''
for m in messages:
if m['name'] not in services:
s += '# ignoring reply message: {}\n'.format(m['name'])
continue
#if m['name'] in message_filter:
# s += '# ignoring message {}\n'.format(m['name'])
# continue
for k in argument_filter:
try:
m['args'].pop(k)
except KeyError:
pass
a = pp.pformat(m['args'])
s += '{}:\n'.format(m['name'])
s += todump_items(None, m['args'], 0)
return s
def init_api(apidir):
# Read API definitions
apifiles = VPPApiJSONFiles.find_api_files(api_dir=apidir)
messages = {}
services = {}
for file in apifiles:
with open(file) as apidef_file:
m, s = VPPApiJSONFiles.process_json_file(apidef_file)
messages.update(m)
services.update(s)
return messages, services
def replaymsgs(vpp, msgs):
for m in msgs:
name = m['name']
if name not in vpp.services:
continue
if name == 'control_ping':
continue
try:
m['args'].pop('client_index')
except KeyError:
pass
if m['args']['context'] == 0:
m['args']['context'] = 1
f = vpp.get_function(name)
rv = f(**m['args'])
print('RV {}'.format(rv))
def replay(args):
"""Replay into running VPP instance"""
from vpp_papi import VPP
JSON = 1
APITRACE = 2
filename, file_extension = os.path.splitext(args.input)
input_type = JSON if file_extension == '.json' else APITRACE
vpp = VPP(use_socket=args.socket)
rv = vpp.connect(name='vppapireplay', chroot_prefix=args.shmprefix)
if rv != 0:
sys.exit('Cannot connect to VPP')
if input_type == JSON:
with open(args.input, 'r') as file:
msgs = json.load(file, object_hook=vpp_decode)
else:
        messages, _ = init_api(args.apidir)
        msgs = apitrace2json(messages, args.input)
replaymsgs(vpp, msgs)
vpp.disconnect()
def generate(args):
"""Generate JSON"""
JSON = 1
APITRACE = 2
PYTHON = 3
DUMP = 4
filename, file_extension = os.path.splitext(args.input)
input_type = JSON if file_extension == '.json' else APITRACE
filename, file_extension = os.path.splitext(args.output)
if args.todump:
output_type = DUMP
else:
if file_extension == '.json' or filename == '-':
output_type = JSON
elif file_extension == '.py':
output_type = PYTHON
else:
output_type = APITRACE
if input_type == output_type:
sys.exit("error: Nothing to convert between")
if input_type != JSON and output_type == APITRACE:
sys.exit("error: Input file must be JSON file: {}".format(args.input))
messages, services = init_api(args.apidir)
if input_type == JSON and output_type == APITRACE:
i = 0
for k, v in messages.items():
v._vl_msg_id = i
i += 1
n, result = json2apitrace(messages, args.input)
msgtbl = serialize_msgtbl(messages)
print('API messages: {}'.format(n))
header = struct.pack(">IIB", n, len(msgtbl), 0)
with open(args.output, 'wb') as outfile:
outfile.write(header)
outfile.write(msgtbl)
outfile.write(result)
return
if input_type == APITRACE:
result = apitrace2json(messages, args.input)
if output_type == PYTHON:
s = json.dumps(result, cls=VPPEncoder, default=vpp_encoder)
x = json.loads(s, object_hook=vpp_decode)
s = topython(x, services)
elif output_type == DUMP:
s = json.dumps(result, cls=VPPEncoder, default=vpp_encoder)
x = json.loads(s, object_hook=vpp_decode)
s = todump(x, services)
else:
s = json.dumps(result, cls=VPPEncoder,
default=vpp_encoder, indent=4 * ' ')
elif output_type == PYTHON:
with open(args.input, 'r') as file:
x = json.load(file, object_hook=vpp_decode)
s = topython(x, services)
else:
sys.exit('Input file must be API trace file: {}'.format(args.input))
if args.output == '-':
sys.stdout.write(s + '\n')
else:
print('Generating {} from API trace: {}'
.format(args.output, args.input))
with open(args.output, 'w') as outfile:
outfile.write(s)
def general(args):
return
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true',
help='enable debug mode')
parser.add_argument('--apidir',
help='Location of JSON API definitions')
parser.set_defaults(func=general)
subparsers = parser.add_subparsers(title='subcommands',
description='valid subcommands',
help='additional help')
parser_convert = subparsers.add_parser('convert',
help='Convert API trace to JSON or Python and back')
parser_convert.add_argument('input',
help='Input file (API trace | JSON)')
parser_convert.add_argument('--todump', action='store_true', help='Output text format')
parser_convert.add_argument('output',
help='Output file (Python | JSON | API trace)')
parser_convert.set_defaults(func=generate)
parser_replay = subparsers.add_parser('replay',
help='Replay messages to running VPP instance')
parser_replay.add_argument('input', help='Input file (API trace | JSON)')
parser_replay.add_argument('--socket', action='store_true',
help='use default socket to connect to VPP')
parser_replay.add_argument('--shmprefix',
help='connect to VPP on shared memory prefix')
parser_replay.set_defaults(func=replay)
args = parser.parse_args()
if args.debug:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
args.func(args)
if __name__ == '__main__':
    main()
|
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc as wexc
from neutron.api.v2 import base
from neutron.common import constants as n_const
from neutron import context
from neutron.extensions import portbindings
from neutron.manager import NeutronManager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc
from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver
from neutron.plugins.ml2.drivers import type_vlan as vlan_config
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)
ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
PHYS_NET = 'physnet1'
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
VLAN_START = 1000
VLAN_END = 1100
NEXUS_IP_ADDR = '1.1.1.1'
NETWORK_NAME = 'test_network'
NETWORK_NAME_2 = 'test_network_2'
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'
BOUND_SEGMENT1 = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PHYS_NET,
api.SEGMENTATION_ID: VLAN_START}
BOUND_SEGMENT2 = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: PHYS_NET,
api.SEGMENTATION_ID: VLAN_START + 1}
class CiscoML2MechanismTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
"""Configure for end-to-end neutron testing using a mock ncclient.
This setup includes:
- Configure the ML2 plugin to use VLANs in the range of 1000-1100.
- Configure the Cisco mechanism driver to use an imaginary switch
at NEXUS_IP_ADDR.
- Create a mock NETCONF client (ncclient) for the Cisco mechanism
driver
"""
# Configure the ML2 mechanism drivers and network types
ml2_opts = {
'mechanism_drivers': ['cisco_nexus'],
'tenant_network_types': ['vlan'],
}
for opt, val in ml2_opts.items():
ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
# Configure the ML2 VLAN parameters
phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
vlan_config.cfg.CONF.set_override('network_vlan_ranges',
[phys_vrange],
'ml2_type_vlan')
# Configure the Cisco Nexus mechanism driver
nexus_config = {
(NEXUS_IP_ADDR, 'username'): 'admin',
(NEXUS_IP_ADDR, 'password'): 'mySecretPassword',
(NEXUS_IP_ADDR, 'ssh_port'): 22,
(NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
(NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2}
nexus_patch = mock.patch.dict(
cisco_config.ML2MechCiscoConfig.nexus_dict,
nexus_config)
nexus_patch.start()
self.addCleanup(nexus_patch.stop)
# The NETCONF client module is not included in the DevStack
# distribution, so mock this module for unit testing.
self.mock_ncclient = mock.Mock()
mock.patch.object(nexus_network_driver.CiscoNexusDriver,
'_import_ncclient',
return_value=self.mock_ncclient).start()
# Mock port context values for bound_segments and 'status'.
self.mock_bound_segment = mock.patch.object(
driver_context.PortContext,
'bound_segment',
new_callable=mock.PropertyMock).start()
self.mock_bound_segment.return_value = BOUND_SEGMENT1
self.mock_original_bound_segment = mock.patch.object(
driver_context.PortContext,
'original_bound_segment',
new_callable=mock.PropertyMock).start()
self.mock_original_bound_segment.return_value = None
mock_status = mock.patch.object(
mech_cisco_nexus.CiscoNexusMechanismDriver,
'_is_status_active').start()
mock_status.return_value = n_const.PORT_STATUS_ACTIVE
super(CiscoML2MechanismTestCase, self).setUp(ML2_PLUGIN)
self.port_create_status = 'DOWN'
@contextlib.contextmanager
def _patch_ncclient(self, attr, value):
"""Configure an attribute on the mock ncclient module.
This method can be used to inject errors by setting a side effect
or a return value for an ncclient method.
:param attr: ncclient attribute (typically method) to be configured.
:param value: Value to be configured on the attribute.
"""
# Configure attribute.
config = {attr: value}
self.mock_ncclient.configure_mock(**config)
# Continue testing
yield
# Unconfigure attribute
config = {attr: None}
self.mock_ncclient.configure_mock(**config)
def _is_in_nexus_cfg(self, words):
"""Check if any config sent to Nexus contains all words in a list."""
for call in (self.mock_ncclient.connect.return_value.
edit_config.mock_calls):
configlet = call[2]['config']
if all(word in configlet for word in words):
return True
return False
def _is_in_last_nexus_cfg(self, words):
"""Confirm last config sent to Nexus contains specified keywords."""
last_cfg = (self.mock_ncclient.connect.return_value.
edit_config.mock_calls[-1][2]['config'])
return all(word in last_cfg for word in words)
def _is_vlan_configured(self, vlan_creation_expected=True,
add_keyword_expected=False):
vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
add_appears = self._is_in_last_nexus_cfg(['add'])
return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
vlan_created == vlan_creation_expected and
add_appears == add_keyword_expected)
def _is_vlan_unconfigured(self, vlan_deletion_expected=True):
vlan_deleted = self._is_in_last_nexus_cfg(
['no', 'vlan', 'vlan-id-create-delete'])
return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and
vlan_deleted == vlan_deletion_expected)
class TestCiscoBasicGet(CiscoML2MechanismTestCase,
test_db_plugin.TestBasicGet):
pass
class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase,
test_db_plugin.TestV2HTTPResponse):
pass
class TestCiscoPortsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestPortsV2):
@contextlib.contextmanager
def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1,
device_id=DEVICE_ID_1,
host_id=COMP_HOST_NAME):
"""Create network, subnet, and port resources for test cases.
Create a network, subnet, port and then update the port, yield the
result, then delete the port, subnet and network.
:param name: Name of network to be created.
:param cidr: cidr address of subnetwork to be created.
:param device_id: Device ID to use for port to be created/updated.
:param host_id: Host ID to use for port create/update.
"""
with self.network(name=name) as network:
with self.subnet(network=network, cidr=cidr) as subnet:
with self.port(subnet=subnet, cidr=cidr) as port:
data = {'port': {portbindings.HOST_ID: host_id,
'device_id': device_id,
'device_owner': 'compute:none',
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
yield req.get_response(self.api)
def _assertExpectedHTTP(self, status, exc):
"""Confirm that an HTTP status corresponds to an expected exception.
Confirm that an HTTP status which has been returned for an
neutron API request matches the HTTP status corresponding
to an expected exception.
:param status: HTTP status
:param exc: Expected exception
"""
if exc in base.FAULT_MAP:
expected_http = base.FAULT_MAP[exc].code
else:
expected_http = wexc.HTTPInternalServerError.code
self.assertEqual(status, expected_http)
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# Expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_nexus_enable_vlan_cmd(self):
"""Verify the syntax of the command to enable a vlan on an intf.
Confirm that for the first VLAN configured on a Nexus interface,
the command string sent to the switch does not contain the
keyword 'add'.
Confirm that for the second VLAN configured on a Nexus interface,
the command string sent to the switch contains the keyword 'add'.
"""
# First vlan should be configured without 'add' keyword
with self._create_resources():
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
self.mock_bound_segment.return_value = BOUND_SEGMENT2
# Second vlan should be configured with 'add' keyword
with self._create_resources(name=NETWORK_NAME_2,
device_id=DEVICE_ID_2,
cidr=CIDR_2):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=True))
# Return to first segment for delete port calls.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
def test_nexus_connect_fail(self):
"""Test failure to connect to a Nexus switch.
While creating a network, subnet, and port, simulate a connection
failure to a nexus switch. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient('connect.side_effect',
AttributeError):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConnectFailed)
def test_nexus_vlan_config_two_hosts(self):
"""Verify config/unconfig of vlan on two compute hosts."""
@contextlib.contextmanager
def _create_port_check_vlan(comp_host_name, device_id,
vlan_creation_expected=True):
with self.port(subnet=subnet, fmt=self.fmt) as port:
data = {'port': {portbindings.HOST_ID: comp_host_name,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
req.get_response(self.api)
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=vlan_creation_expected,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
yield
# Create network and subnet
with self.network(name=NETWORK_NAME) as network:
with self.subnet(network=network, cidr=CIDR_1) as subnet:
# Create an instance on first compute host
with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1,
vlan_creation_expected=True):
# Create an instance on second compute host
with _create_port_check_vlan(COMP_HOST_NAME_2, DEVICE_ID_2,
vlan_creation_expected=False):
pass
# Instance on second host is now terminated.
# Vlan should be untrunked from port, but vlan should
# still exist on the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=False))
self.mock_ncclient.reset_mock()
# Instance on first host is now terminated.
# Vlan should be untrunked from port and vlan should have
# been deleted from the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=True))
def test_nexus_vm_migration(self):
"""Verify VM (live) migration.
Simulate the following:
Nova informs neutron of live-migration with port-update(new host).
This should trigger two update_port_pre/postcommit() calls.
The first one should only change the current host_id and remove the
binding resulting in the mechanism drivers receiving:
PortContext.original['binding:host_id']: previous value
PortContext.original_bound_segment: previous value
PortContext.current['binding:host_id']: current (new) value
PortContext.bound_segment: None
The second one binds the new host resulting in the mechanism
drivers receiving:
PortContext.original['binding:host_id']: previous value
PortContext.original_bound_segment: None
PortContext.current['binding:host_id']: previous value
PortContext.bound_segment: new value
"""
# Create network, subnet and port.
with self._create_resources() as result:
# Verify initial database entry.
# Use port_id to verify that 1st host name was used.
binding = nexus_db_v2.get_nexusvm_binding(VLAN_START, DEVICE_ID_1)
self.assertEqual(binding.port_id, NEXUS_INTERFACE)
port = self.deserialize(self.fmt, result)
port_id = port['port']['id']
# Trigger update event to unbind segment.
# Results in port being deleted from nexus DB and switch.
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
self.mock_bound_segment.return_value = None
self.mock_original_bound_segment.return_value = BOUND_SEGMENT1
self.new_update_request('ports', data,
port_id).get_response(self.api)
# Verify that port entry has been deleted.
self.assertRaises(c_exc.NexusPortBindingNotFound,
nexus_db_v2.get_nexusvm_binding,
VLAN_START, DEVICE_ID_1)
# Trigger update event to bind segment with new host.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
self.mock_original_bound_segment.return_value = None
self.new_update_request('ports', data,
port_id).get_response(self.api)
# Verify that port entry has been added using new host name.
# Use port_id to verify that 2nd host name was used.
binding = nexus_db_v2.get_nexusvm_binding(VLAN_START, DEVICE_ID_1)
self.assertEqual(binding.port_id, NEXUS_INTERFACE_2)
def test_nexus_config_fail(self):
"""Test a Nexus switch configuration failure.
While creating a network, subnet, and port, simulate a nexus
switch configuration error. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
AttributeError):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConfigFailed)
def test_nexus_extended_vlan_range_failure(self):
"""Test that extended VLAN range config errors are ignored.
Some versions of Nexus switch do not allow state changes for
the extended VLAN range (1006-4094), but these errors can be
ignored (default values are appropriate). Test that such errors
are ignored by the Nexus plugin.
"""
def mock_edit_config_a(target, config):
if all(word in config for word in ['state', 'active']):
raise Exception("Can't modify state for extended")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_a):
with self._create_resources() as result:
self.assertEqual(result.status_int, wexc.HTTPOk.code)
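        # Simulate a switch that rejects 'no shutdown' commands; this error
        # should likewise be ignored and the create should succeed.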
def mock_edit_config_b(target, config):
if all(word in config for word in ['no', 'shutdown']):
raise Exception("Command is only allowed on VLAN")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_b):
with self._create_resources() as result:
self.assertEqual(result.status_int, wexc.HTTPOk.code)
def test_nexus_vlan_config_rollback(self):
"""Test rollback following Nexus VLAN state config failure.
        Test that the Cisco Nexus plugin correctly deletes the VLAN
        on the Nexus switch when the 'state active' command fails for
        a reason other than a rejected state change on the extended
        VLAN range.
"""
def mock_edit_config(target, config):
if all(word in config for word in ['state', 'active']):
raise ValueError
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config):
with self._create_resources() as result:
# Confirm that the last configuration sent to the Nexus
# switch was deletion of the VLAN.
self.assertTrue(self._is_in_last_nexus_cfg(['<no>', '<vlan>']))
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConfigFailed)
def test_nexus_host_not_configured(self):
"""Test handling of a NexusComputeHostNotConfigured exception.
Test the Cisco NexusComputeHostNotConfigured exception by using
a fictitious host name during port creation.
"""
with self._create_resources(host_id='fake_host') as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusComputeHostNotConfigured)
def test_nexus_missing_fields(self):
"""Test handling of a NexusMissingRequiredFields exception.
Test the Cisco NexusMissingRequiredFields exception by using
empty host_id and device_id values during port creation.
"""
with self._create_resources(device_id='', host_id='') as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusMissingRequiredFields)
class TestCiscoNetworksV2(CiscoML2MechanismTestCase,
test_db_plugin.TestNetworksV2):
def test_create_networks_bulk_emulated_plugin_failure(self):
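        # Force the emulated bulk code path, then inject a fault into
        # create_network via _do_side_effect and verify the bulk request
        # fails with an internal server error.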
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_network
        # Ensure the API chooses the emulation code path.
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with mock.patch.object(plugin_obj,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
LOG.debug("response is %s" % res)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
def test_create_networks_bulk_native_plugin_failure(self):
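        # Same fault injection as above, but exercising the plugin's native
        # bulk support (skipped when the plugin does not provide it).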
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_network
with mock.patch.object(plugin_obj,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
class TestCiscoSubnetsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestSubnetsV2):
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        # Ensure the API chooses the emulation code path.
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
plugin_obj = NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
class TestCiscoPortsV2XML(TestCiscoPortsV2):
fmt = 'xml'
class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
fmt = 'xml'
class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
fmt = 'xml'
|
|
import json
import unittest
import webtest
from datamodel import Library, Version, Content, Status
from api import app
import util
from test_base import TestBase
class ApiTestBase(TestBase):
def setUp(self):
TestBase.setUp(self)
self.app = webtest.TestApp(app)
class PublishTest(ApiTestBase):
def test_add(self):
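        # Stub the recaptcha verification, publish owner/repo, and expect a
        # single ingest task to be enqueued for that library.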
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
response = self.app.post('/api/publish/owner/repo')
self.assertEqual(response.status_int, 200)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].url, util.ingest_library_task('owner', 'repo'))
def test_add_scope(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
response = self.app.post('/api/publish/@scope/package')
self.assertEqual(response.status_int, 200)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].url, util.ingest_library_task('@scope', 'package'))
def test_add_no_scope(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
response = self.app.post('/api/publish/package')
self.assertEqual(response.status_int, 200)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0].url, util.ingest_library_task('@@npm', 'package'))
class PreviewCommitTest(ApiTestBase):
def setUp(self):
ApiTestBase.setUp(self)
util.SECRETS['recaptcha'] = 'secret'
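    # Each test below posts a GitHub URL to /api/preview-commit and expects
    # it to be resolved to an 'owner/repo/sha' string, stubbing the GitHub
    # refs API where a lookup is required.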
def test_resolve_pull(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
self.respond_to('https://api.github.com/repos/org/repo/git/refs/pull/1/head', '{"ref": "refs/pull/1/head", "object": {"sha": "pullsha"}}')
response = self.app.post('/api/preview-commit', params={'url': 'https://github.com/org/repo/pull/1'})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'org/repo/pullsha')
def test_resolve_commitsha(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
response = self.app.post('/api/preview-commit', params={'url': 'https://github.com/org/repo/commit/commitsha'})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'org/repo/commitsha')
def test_resolve_treebranch(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
self.respond_to('https://api.github.com/repos/org/repo/git/refs/heads/branch', '{"ref": "refs/heads/branch", "object": {"sha": "branchsha"}}')
response = self.app.post('/api/preview-commit', params={'url': 'https://github.com/org/repo/tree/branch'})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'org/repo/branchsha')
def test_resolve_repo(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
self.respond_to('https://api.github.com/repos/org/repo/git/refs/heads/master', '{"ref": "refs/heads/master", "object": {"sha": "mastersha"}}')
response = self.app.post('/api/preview-commit', params={'url': 'https://github.com/org/repo'})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'org/repo/mastersha')
def test_resolve_pullsha(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
response = self.app.post('/api/preview-commit', params={'url': 'https://github.com/org/repo/pull/1/commits/pullcommitsha'})
self.assertEqual(response.status_int, 200)
self.assertEqual(response.normal_body, 'org/repo/pullcommitsha')
def test_invalid_branch(self):
self.respond_to('https://www.google.com/recaptcha/api/siteverify', '{"success": true}')
self.respond_to('https://api.github.com/repos/org/repo#invalid/git/refs//branch', '{}')
self.app.post('/api/preview-commit', params={'url': 'https://github.com/org/repo#invalid/branch'}, status=400)
class PreviewTest(ApiTestBase):
def setUp(self):
ApiTestBase.setUp(self)
def test_normal(self):
self.respond_to('https://github.com/login/oauth/access_token', '{"access_token": "access_token"}')
self.respond_to('https://api.github.com/repos/owner/repo', '{"permissions": {"admin": true}}')
self.respond_to('https://api.github.com/repos/owner/repo/hooks', '[]')
self.respond_to('https://api.github.com/repos/owner/repo/hooks', {'status': 201})
self.app.post('/api/preview', params={'code': 'code', 'repo': 'owner/repo'}, status=200)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 1)
def test_bad_code(self):
self.respond_to('https://github.com/login/oauth/access_token', '{"error": "error"}')
self.app.post('/api/preview', params={'code': 'code', 'repo': 'owner/repo'}, status=401)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 0)
def test_no_repo_access(self):
self.respond_to('https://github.com/login/oauth/access_token', '{"access_token": "access_token"}')
self.respond_to('https://api.github.com/repos/owner/repo', '{"permissions": {"admin": false}}')
self.app.post('/api/preview', params={'code': 'code', 'repo': 'owner/repo'}, status=401)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 0)
def test_existing_webhook(self):
self.respond_to('https://github.com/login/oauth/access_token', '{"access_token": "access_token"}')
self.respond_to('https://api.github.com/repos/owner/repo', '{"permissions": {"admin": true}}')
hooks = [{'active': True, 'config': {'url': 'http://localhost/api/preview-event', 'content_type': 'json'}}]
self.respond_to('https://api.github.com/repos/owner/repo/hooks', json.dumps(hooks))
self.app.post('/api/preview', params={'code': 'code', 'repo': 'owner/repo'}, status=200)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 0)
class PreviewEventHandler(ApiTestBase):
def setUp(self):
ApiTestBase.setUp(self)
util.SECRETS['github_client_id'] = 'github_client_id'
util.SECRETS['github_client_secret'] = 'github_client_secret'
def test_normal(self):
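        # Simulate a GitHub 'pull_request' webhook for a known library and
        # expect a commit status to be posted and a preview task enqueued.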
headers = {'X-Github-Event': 'pull_request'}
payload = {
'action': 'opened',
'repository': {
'owner': {'login': 'owner'},
'name': 'repo',
'full_name': 'owner/repo'
},
'pull_request': {
'head': {
'sha': 'sha',
'repo': {
'owner': {'login': 'pull_owner'},
'name': 'pull_repo',
'full_name': 'pull_owner/pull_repo'
}
},
'url': 'github_pr_url'
}
}
library = Library(id='owner/repo')
library.put()
self.respond_to('https://api.github.com/repos/owner/repo/statuses', {'status': 201})
self.app.post('/api/preview-event', params=json.dumps(payload), headers=headers, status=200)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 1)
def test_no_header(self):
self.app.post('/api/preview-event', status=202)
tasks = self.tasks.get_filtered_tasks()
self.assertEqual(len(tasks), 0)
class StarTest(ApiTestBase):
def setUp(self):
ApiTestBase.setUp(self)
def test_normal(self):
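        # GitHub reports the repo as not yet starred (404), then the star
        # request succeeds (204), so the endpoint should return 204.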
self.respond_to('https://github.com/login/oauth/access_token', '{"access_token": "access_token"}')
self.respond_to('https://api.github.com/user/starred/owner/repo', {'status': 404})
self.respond_to('https://api.github.com/user/starred/owner/repo', {'status': 204})
self.app.post('/api/star/owner/repo', params={'code': 'code'}, status=204)
def test_already_starred(self):
self.respond_to('https://github.com/login/oauth/access_token', '{"access_token": "access_token"}')
self.respond_to('https://api.github.com/user/starred/owner/repo', {'status': 204})
self.app.post('/api/star/owner/repo', params={'code': 'code'}, status=202)
def test_bad_code(self):
self.respond_to('https://github.com/login/oauth/access_token', '{"error": "error"}')
self.app.post('/api/star/owner/repo', params={'code': 'code'}, status=401)
class DocsTest(ApiTestBase):
def setUp(self):
ApiTestBase.setUp(self)
def test_compressed(self):
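        # Store analyzer output on the version's 'analysis' Content entity
        # and verify the docs endpoint returns it when use_analyzer_data is
        # requested.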
library_key = Library(id='owner/repo').put()
version_key = Version(id='v1.1.1', parent=library_key, sha='sha', status='ready').put()
content = Content(id='analysis', parent=version_key, status=Status.pending)
        content.json = {"analyzerData": "some data"}
content.status = Status.ready
content.put()
response = self.app.get('/api/docs/owner/repo/v1.1.1?use_analyzer_data')
self.assertEqual(response.status_int, 200)
self.assertEqual(json.loads(response.normal_body).get('analysis'), "some data")
class GetMetaTest(ApiTestBase):
def test_npm_scoped(self):
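        # Meta for an npm-scoped library should expose the scope and package
        # name split out from the '@scope/package' key.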
library_key = Library(id='@scope/package', status='ready').put()
Version(id='v1.1.1', parent=library_key, sha='sha', status='ready').put()
response = self.app.get('/api/meta/@scope/package/v1.1.1')
self.assertEqual(response.status_int, 200)
body = json.loads(response.normal_body)
self.assertEqual(body.get('apiKey'), '@scope/package')
self.assertEqual(body.get('npmScope'), '@scope')
self.assertEqual(body.get('npmPackage'), 'package')
def test_npm_unscoped(self):
library_key = Library(id='@@npm/package', status='ready').put()
Version(id='v1.1.1', parent=library_key, sha='sha', status='ready').put()
response = self.app.get('/api/meta/@@npm/package/v1.1.1')
self.assertEqual(response.status_int, 200)
body = json.loads(response.normal_body)
self.assertEqual(body.get('apiKey'), '@@npm/package')
        self.assertIsNone(body.get('npmScope'))
self.assertEqual(body.get('npmPackage'), 'package')
def test_invalid_homepage(self):
library_key = Library(id='owner/repo', status='ready', metadata='{"owner":{"login":"owner"},"name":"repo", "license": {"spdx_id": "MIT"}, "homepage": "javascript:alert()"}').put()
Version(id='v1.0.0', parent=library_key, sha='sha', status='ready').put()
response = self.app.get('/api/meta/owner/repo/v1.0.0')
self.assertEqual(response.status_int, 200)
body = json.loads(response.normal_body)
self.assertEqual(body.get('owner'), 'owner')
self.assertEqual(body.get('repo'), 'repo')
self.assertIsNone(body.get('homepage'))
def test_null_homepage(self):
library_key = Library(id='owner/repo', status='ready', metadata='{"owner":{"login":"owner"},"name":"repo", "license": {"spdx_id": "MIT"}, "homepage": null}').put()
Version(id='v1.0.0', parent=library_key, sha='sha', status='ready').put()
response = self.app.get('/api/meta/owner/repo/v1.0.0')
self.assertEqual(response.status_int, 200)
body = json.loads(response.normal_body)
self.assertEqual(body.get('owner'), 'owner')
self.assertEqual(body.get('repo'), 'repo')
self.assertIsNone(body.get('homepage'))
if __name__ == '__main__':
unittest.main()
|